', br_pos)\n\napp = DrawRectangle(root)\napp.grid(row=0, column=0)\n\nlbl_text = tk.StringVar()\nentry = tk.Label(root, textvariable=lbl_text, width = 100, height=2, font=(\"Helvetica\", 15))\nentry.grid(row=1, column=0)\n\nbtn_text = tk.StringVar()\nbtn_text.set('Select Screen')\nbutton1 = tk.Button(root, textvariable=btn_text, width=15, height=2, command=jieping)\nbutton1.grid(row=1, column=1)\n\ndef action(*args):\n global ss_id, tkvarA\n if tkvarA.get() == 'Start':\n ss_id = root.after(period, ss)\n else:\n root.after_cancel(ss_id)\n\n'''\ntkvarA = StringVar(root)\nchoicesA = { 'Start','Stop'}\ntkvarA.set('Stop') # set the default option\nact = OptionMenu(root, tkvarA, *choicesA)\nact.grid(row=2, column=1)\ntkvarA.trace('w', action)\n'''\n\n# Create a Tkinter variable\ntkvar = StringVar(root);tkvarTo = StringVar(root)\n# Dictionary with options\nchoices = { 'Chinese','English','Japanese','Korean'}\ntkvar.set('Chinese') # set the default option\nlangFrom = OptionMenu(root, tkvar, *choices)\nlangFrom.grid(row=1, column=3)\nLabel(root, text=\"From\").grid(row=1, column=2)\ntkvarTo.set('Chinese') # set the default option\nlangTo = OptionMenu(root, tkvarTo, *choices)\nlangTo.grid(row=2, column=3)\nLabel(root, text=\"To\").grid(row=2, column=2)\n\nroot.wm_attributes(\"-topmost\", 1)\nroot.mainloop()\n","repo_name":"Chuanyou124/subtitles","sub_path":"translate_subtitle.py","file_name":"translate_subtitle.py","file_ext":"py","file_size_in_byte":5867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"26687154129","text":"import sys\nimport os\n\n########################################################\n#\n#\tLOCAL IMPORTS\n#\n\nfrom brain.brainState import FSMBrainState\nfrom brain.HanumanRosInterface import HanumanRosInterface\n\nfrom newbie_hanuman.msg import postDictMsg\n\nimport numpy as np\nimport math\n\nimport time\nimport rospy\n\n########################################################\n#\n#\tGLOBALS\n#\n\nDefaultLoopTimeToLookAtObject = 0.3\nDefaultObject = 'ball'\nDefaultPantiltPattern = 'basic_pattern'\n\n########################################################\n#\n#\tEXCEPTION DEFINITIONS\n#\n\n########################################################\n#\n#\tHELPER FUNCTIONS\n#\n\ndef getParameters( config, *arg ):\n\n\ttry:\n\t\tfor key in arg:\n\n\t\t\tconfig = config[ key ]\n\n\t\treturn config\n\texcept KeyError:\n\n\t\tconfig = _DEFAULT_CONFIG\n\t\tfor key in arg:\n\n\t\t\tconfig = config[ key ]\n\n\t\treturn config\n\n\n########################################################\n#\n#\tCLASS DEFINITIONS\n\nclass FindBall( FSMBrainState ):\n\t\n\tdef __init__( self ):\n\t\t\n\t\tsuper( FindBall, self ).__init__( 'FindBall' )\n\n\t\tself.previousTime = time.time()\n\n\tdef initialize( self ):\n\n\t\t#\tGet time out from config\n\t\tself.scanBallPattern = self.config[ 'PanTiltPlanner' ][ 'ScanBallPattern' ]\n\t\tself.confidenceThr = float( getParameters(self.config, 'ChangeStateParameter', 'BallConfidenceThreshold'))\n\n\t\tself.lookBall = False\n\n\tdef firstStep( self ):\n\t\t\n\t\trospy.loginfo( \"Enter {} brainstate\".format( self.name ) )\n\n\t\t#\tCall pattern\n\t\tself.rosInterface.Pantilt( command = 1, pattern = self.scanBallPattern )\n\t\t\n\n\n\tdef step( self ):\n\n\t\tvisionMsg = self.rosInterface.visionManager\n\t\t\n\t\tif 'ball' in visionMsg.object_name:\n\n\t\t\tself.previousTime = time.time()\n\n\t\t\tif not self.lookBall:\n\t\t\t\n\t\t\t\tself.rosInterface.Pantilt( command = 2, pattern = 'ball' )\n\t\t\t\tself.lookBall = True\t\t\t\n\n\t\telif time.time() - self.previousTime > 3.0:\n\t\t\t\n\t\t\tif self.lookBall:\n\t\t\t\tself.rosInterface.Pantilt( command = 1, pattern = self.scanBallPattern )\n\t\t\t\tself.lookBall = False\n\nmain_brain = FindBall()","repo_name":"visittor/hanuman_user","sub_path":"src/Brain/basic/findball_brainstate.py","file_name":"findball_brainstate.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"20975649199","text":"# Specify desired unit conversion from code units\nimport numpy as np\n# import athenaSetup_blondin as inputs; reload(inputs)\n#\n\"\"\" Intermediate conversions (not used anywhere but here) \"\"\"\n\n# import athenaSetup_ZUnits as inputs; reload(inputs)\n\n# # Specify any parameters that determine units\n# eps = inputs.eps # disk scale height \n\n# # conversion to cgs units\n# xi_i = inputs.xi_i\n\n\n\"\"\" Actual units used by load_vars() in SimDics \"\"\"\nd = 1. # density units\nv = 1. # r-velocity units\np = 1. # pressure/energy units\nR = 1. # distance units\nT = 1. # temperature units\nxi = 1.\ncs = 1. \n\nkappa = 15772099.50128901 #inputs.kappa_iso","repo_name":"ganguly-s/Line-profiles","sub_path":"tims_codes/code_units.py","file_name":"code_units.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"17633227444","text":"'''\nkbqa.py\n基于知识库的问答程序\n@Author: You Yue\n'''\n\nimport os\nimport pymysql\n\nfrom data_helper.data_config import *\nfrom ner.ner_model import NERModel\nfrom sim.sim_model import SIMModel\n\nos.environ['TF_APP_MIN_LOG_LEVEL'] = '3' # 屏蔽INFO WARNING WRONG\n\ndef init_model():\n return NERModel(), SIMModel()\n\ndef entity_recognize(ner_model, question):\n '''命名实体识别\n Args:\n ner_model: 命名实体模型\n question: 问题, str\n Return:\n entity: 实体, str\n '''\n tags = ner_model.predict(question).split(\" \")\n entity = ''\n # PROBLEM REMAIN: 只提取了第一个实体\n for i, item in enumerate(question):\n if entity == '' and tags[i] == 'B':\n entity += item\n elif entity != '' and tags[i] == 'I':\n entity += item\n elif entity != '' and tags[i] == 'O':\n break\n return entity\n\ndef attributes_extract(entity):\n '''属性与属性值抽取\n Args:\n entity: 实体, str\n Return:\n attributes: 属性及属性值, dict, {attribute1: attribute1_value, attribute2: attribute2_value...}\n '''\n db = pymysql.connect(db_ip, db_username, db_password, db_name)\n cursor = db.cursor()\n sql = \"SELECT * FROM kb WHERE entity='{0}';\".format(entity)\n cursor.execute(sql)\n results = cursor.fetchall()\n attributes = {}\n for row in results:\n attributes[row[1]] = row[2]\n return attributes\n\ndef rank(sim_model, question, attributes):\n '''属性值排序\n Args:\n sim_model: 相似度模型\n question: 问题\n attributes: 属性值\n Return:\n attr_sort: 排序后的属性值\n pos_sort: 排序后的概率值\n '''\n predict = lambda x,y :sim_model.predict(x, y)\n pos_attr = {k:v for k,v in zip([predict(question, y) for y in attributes.keys()], attributes.keys())}\n pos_sort = sorted(pos_attr.keys(), reverse=True)\n attr_sort = [pos_attr[pos] for pos in pos_sort]\n return attr_sort, pos_sort\n\ndef kbqa(ner_model, sim_model, question):\n '''\n Args:\n ner_model: 命名实体识别\n sim_model: 文本相似度模型\n question: 问题文本\n Return:\n answer: 最可能的答案\n pos: 答案对应的概率\n '''\n\n entity = entity_recognize(ner_model, question)\n attributes = attributes_extract(entity)\n attr_sort, pos_sort = rank(sim_model, question, attributes)\n answer, pos = attributes[attr_sort[0]], pos_sort[0]\n return answer, pos\n\nif __name__ == '__main__':\n\n ner_model, sim_model = init_model()\n\n question = input(\"Type your question:\")\n answer, pos = kbqa(ner_model, sim_model, question)\n\n msg = \"Answers: {0}, ({1}%)\".format(answer, str(round(pos*100, 2)))\n print(msg)\n","repo_name":"YayoYY/2020-KBQA","sub_path":"kbqa.py","file_name":"kbqa.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"2"}
+{"seq_id":"2458708794","text":"from baseClasses.BaseCharacter import BaseCharacter\nfrom baseClasses.BaseLightCone import BaseLightCone\nfrom baseClasses.BaseEffect import BaseEffect\nfrom baseClasses.RelicSet import RelicSet\nfrom baseClasses.RelicStats import RelicStats\nfrom baseClasses.BaseMV import BaseMV\n\nclass Clara(BaseCharacter):\n\n def __init__(self,\n relicstats:RelicStats,\n lightcone:BaseLightCone=None,\n relicsetone:RelicSet=None,\n relicsettwo:RelicSet=None,\n planarset:RelicSet=None,\n aTightEmbraceUptime:float=1.0,\n **config):\n super().__init__(lightcone=lightcone, relicstats=relicstats, relicsetone=relicsetone, relicsettwo=relicsettwo, planarset=planarset, **config)\n self.loadCharacterStats('Clara')\n \n self.aTightEmbraceUptime = aTightEmbraceUptime\n\n # Motion Values should be set before talents or gear\n self.motionValueDict['basic'] = [BaseMV(area='single', stat='atk', value=1.0, eidolonThreshold=3, eidolonBonus=0.1)]\n self.motionValueDict['skill'] = [BaseMV(area='all', stat='atk', value=1.2, eidolonThreshold=3, eidolonBonus=0.12)]\n \n self.motionValueDict['markOfSvarog'] = [BaseMV(area='single', stat='atk', value=1.2, eidolonThreshold=3, eidolonBonus=0.12)]\n \n #I believe the revenge ascension is an MV buff\n self.motionValueDict['talent'] = [BaseMV(area='single', stat='atk', value=1.6, eidolonThreshold=5, eidolonBonus=0.16)] \n \n # Talents\n self.addStat('DMG',description='trace',type=['followup'],amount=0.3)\n \n # Eidolons\n # handle handle e1 manually, by using the argument in the useSkill call\n if self.eidolon >= 2.0:\n self.addStat('ATK.percent',description='e2',amount=0.30,uptime=self.aTightEmbraceUptime)\n # better to handle e6 manually\n\n # Gear\n self.equipGear()\n\n def useBasic(self):\n retval = BaseEffect()\n type = ['basic']\n retval.damage = self.getTotalMotionValue('basic',type)\n retval.damage *= self.getTotalCrit(type)\n retval.damage *= self.getDmg(type)\n retval.damage *= self.getVulnerability(type)\n retval.damage = self.applyDamageMultipliers(retval.damage,type)\n retval.gauge = 30.0 * self.getBreakEfficiency(type)\n retval.energy = ( 20.0 + self.getBonusEnergyAttack(type) + self.getBonusEnergyTurn(type)) * self.getER(type)\n retval.skillpoints = 1.0\n retval.actionvalue = 1.0 + self.getAdvanceForward(type)\n self.addDebugInfo(retval,type)\n return retval\n\n def useSkill(self):\n retval = BaseEffect()\n type = ['skill']\n retval.damage = self.getTotalMotionValue('skill',type)\n retval.damage *= self.getTotalCrit(type)\n retval.damage *= self.getDmg(type)\n retval.damage *= self.getVulnerability(type)\n retval.damage = self.applyDamageMultipliers(retval.damage,type)\n retval.gauge = 30.0 * self.numEnemies * self.getBreakEfficiency(type)\n retval.energy = ( 30.0 + self.getBonusEnergyAttack(type) + self.getBonusEnergyTurn(type) ) * self.getER(type)\n retval.skillpoints = -1.0\n retval.actionvalue = 1.0 + self.getAdvanceForward(type)\n self.addDebugInfo(retval,type)\n return retval\n\n def useMarkOfSvarog(self):\n retval = BaseEffect()\n type = ['skill']\n retval.damage = self.getTotalMotionValue('markOfSvarog',type)\n retval.damage *= self.getTotalCrit(type)\n retval.damage *= self.getDmg(type)\n retval.damage *= self.getVulnerability(type)\n retval.damage = self.applyDamageMultipliers(retval.damage,type)\n self.addDebugInfo(retval,type,'Mark of Svarog Damage')\n return retval\n\n def useUltimate(self):\n retval = BaseEffect()\n type = ['ultimate']\n retval.energy = ( 5.0 + self.getBonusEnergyAttack(type) ) * self.getER(type)\n 
retval.actionvalue = self.getAdvanceForward(type)\n self.addDebugInfo(retval,type)\n return retval\n\n def useTalent(self, enhanced=False):\n num_adjacent = min(2, self.numEnemies-1)\n retval = BaseEffect()\n type = ['followup','talent']\n retval.damage = self.getTotalMotionValue('talent',type)\n retval.damage *= ( 1.0 + num_adjacent / 2.0 ) if enhanced else 1.0 \n retval.damage *= self.getTotalCrit(type)\n retval.damage *= self.getDmg(type) + ( (1.728 if self.eidolon >= 5 else 1.6) if enhanced else 0.0)\n retval.damage *= self.getVulnerability(type)\n retval.damage = self.applyDamageMultipliers(retval.damage,type)\n retval.gauge = ( 30.0 + ( 30.0 * num_adjacent if enhanced else 0.0 ) ) * self.getBreakEfficiency(type)\n retval.energy = ( 5.0 + self.getBonusEnergyAttack(type) ) * self.getER(type)\n retval.actionvalue = 0.0 - self.getAdvanceForward(type)\n self.addDebugInfo(retval,type,'Clara Talent {}'.format('enhanced' if enhanced else ''))\n return retval","repo_name":"GrimyBunyip/StarRail","sub_path":"characters/destruction/Clara.py","file_name":"Clara.py","file_ext":"py","file_size_in_byte":5068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"10595899612","text":"from __future__ import print_function\nimport datetime\nimport os\nfrom tablib import Dataset\n\nfrom airflow import models\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.contrib.operators.kubernetes_pod_operator import KubernetesPodOperator\nfrom airflow.utils.email import send_email\n\ndefault_dag_args = {\n # The start_date describes when a DAG is valid / can be run. Set this to a\n # fixed point in time rather than dynamically, since it is evaluated every\n # time a DAG is parsed. See:\n # https://airflow.apache.org/faq.html#what-s-the-deal-with-start-date\n 'start_date': datetime.datetime(2019, 5, 2),\n}\n\nDATA_DIR = '/home/airflow/gcs/data/'\nif not os.path.isdir(DATA_DIR):\n DATA_DIR = '../../data/'\n\n\ndef send_report():\n datestamp = datetime.datetime.now().strftime('%d%b%Y')\n report_file = DATA_DIR + 'GA360-%s.csv' % datestamp\n\n table = Dataset().load(open(report_file, 'rt').read()).export('df').to_html()\n\n send_email(\n to=models.Variable.get('QUARTERLY_EMAIL_RECIPIENT', 'alex.sadleir@digital.gov.au'),\n cc=models.Variable.get('ANALYTICS_TEAM_EMAILS', []),\n subject='%s Automated Quarterly GA360 report [DO NOT RESPOND]' % datestamp,\n html_content=table,\n files=[report_file]\n )\n\n\nwith models.DAG(\n 'ga_quarterly_reporter',\n schedule_interval=datetime.timedelta(days=90),\n default_args=default_dag_args) as dag:\n quarterly_report = KubernetesPodOperator(\n task_id='quarterly-report',\n name='quarterly-report',\n namespace='default',\n image='gcr.io/%s/galileo' % models.Variable.get('GCP_PROJECT', 'dta-ga-bigquery'),\n cmds=['bash', '-c'],\n image_pull_policy=\"Always\",\n arguments=['gsutil cp gs://%s/data/credentials.json . && ' % models.Variable.get('AIRFLOW_BUCKET',\n 'us-east1-dta-airflow-b3415db4-bucket') +\n 'gsutil cp gs://%s/dags/r_scripts/extractaccinfo.R . && ' % models.Variable.get('AIRFLOW_BUCKET',\n 'us-east1-dta-airflow-b3415db4-bucket') +\n 'R -f extractaccinfo.R && '\n 'gsutil cp GA360*.csv gs://%s/data/' % models.Variable.get('AIRFLOW_BUCKET',\n 'us-east1-dta-airflow-b3415db4-bucket')])\n\n email_summary = PythonOperator(\n task_id='email_summary',\n python_callable=send_report\n )\n quarterly_report >> email_summary\n","repo_name":"govau/GAlileo","sub_path":"dags/ga_quarterly_reporter.py","file_name":"ga_quarterly_reporter.py","file_ext":"py","file_size_in_byte":2642,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"2"}
+{"seq_id":"41628767288","text":"import os \nimport csv \n\npoll_csv = os.path.join(\"..\", \"python-challenge\", \"PyPoll\", \"Resources\", \"election_data.csv\")\n\nwith open (poll_csv, \"r\") as csv_file:\n csv_reader = csv.reader(csv_file, delimiter = \",\")\n csv_header = next(csv_file)\n\n voterID = []\n column_3 = []\n candidate_khan = 0\n candidate_correy = 0\n candidate_li = 0\n candidate_tooley = 0\n\n for row in csv_reader:\n voterID.append(row[0])\n column_3.append(row[2])\n\n len_voterID = len(voterID)\n \n for candidate in column_3: \n if candidate == \"Khan\": \n candidate_khan += 1 \n elif candidate == \"Correy\":\n candidate_correy += 1 \n elif candidate == \"Li\":\n candidate_li += 1\n elif candidate == \"O'Tooley\":\n candidate_tooley += 1 \n\n\n khan_cent = ((candidate_khan)/(len_voterID))*100\n correy_cent = ((candidate_correy)/(len_voterID))*100\n li_cent = ((candidate_li)/(len_voterID))*100\n tooley_cent = ((candidate_tooley)/(len_voterID))*100\n\n votes_list = [candidate_khan, candidate_correy, candidate_li, candidate_tooley]\n candidate_str = [\"Khan\", \"Correy\", \"Li\", \"O'Tooley\"]\n winner_vote_count = max(votes_list)\n winner_loc = votes_list.index(winner_vote_count)\n winner = candidate_str[winner_loc]\n\n\nprint(\"Election Results\")\nprint(f\"Total Votes: {len_voterID}\")\nprint(f\"Khan: {round(khan_cent, 2)} ({candidate_khan})\")\nprint(f\"Correy: {round(correy_cent, 2)} ({candidate_correy})\")\nprint(f\"Li: {round(li_cent, 2)} ({candidate_li})\")\nprint(f\"O'Tooley: {round(tooley_cent, 2)} ({candidate_tooley})\")\nprint(f\"Winner: {winner}\")\n\nwith open(\"/Users/Vanga/Desktop/python-challenge/PyPoll/Analysis/Election_Results.txt\", \"w\") as data:\n data.write(f\"Election Results\")\n data.write(\"\\n\") \n data.write(\"-------------------------\")\n data.write(\"\\n\") \n data.write(f\"Total Votes: {len_voterID}\")\n data.write(\"\\n\") \n data.write(\"-------------------------\")\n data.write(\"\\n\") \n data.write(f\"Khan: {round(khan_cent, 2)} ({candidate_khan})\")\n data.write(\"\\n\") \n data.write(f\"Correy: {round(correy_cent, 2)} ({candidate_correy})\")\n data.write(\"\\n\") \n data.write(f\"Li: {round(li_cent, 2)} ({candidate_li})\")\n data.write(\"\\n\") \n data.write(f\"O'Tooley: {round(tooley_cent, 2)} ({candidate_tooley})\")\n data.write(\"\\n\") \n data.write(\"-------------------------\")\n data.write(\"\\n\") \n data.write(f\"Winner: {winner}\")\n data.write(\"\\n\") \n data.write(\"-------------------------\")\n\n","repo_name":"vangampalli/python-challenge","sub_path":"PyPoll/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"7702123958","text":"import copy\n\nclass Graph(object):\n\n def __init__(self, graph_dict=None, weight_dict=None):\n \"\"\" initializes a graph object\n If no dictionary or None is given,\n an empty dictionary will be used\n \"\"\"\n if graph_dict is None:\n graph_dict = {}\n if weight_dict is None:\n weight_dict = {}\n self.__graph_dict = graph_dict\n self.__weight_dict = weight_dict\n\n def vertices(self):\n \"\"\" returns the vertices of a graph \"\"\"\n return list(self.__graph_dict.keys())\n\n def edges(self):\n \"\"\" returns the edges of a graph \"\"\"\n return self.__generate_edges()\n\n def get_neighbors(self, vertex):\n \"\"\" returns neighbors of a vertex \"\"\"\n return self.__graph_dict[vertex]\n\n def edge_weight(self, vert1, vert2):\n keys = set(self.__weight_dict.keys())\n if vert1+vert2 in keys:\n weight = self.__weight_dict[vert1+vert2]\n elif vert2+vert1 in keys:\n weight = self.__weight_dict[vert2+vert1]\n else:\n weight = 0\n return weight\n\n def add_vertex(self, vertex):\n \"\"\" If the vertex \"vertex\" is not in\n self.__graph_dict, a key \"vertex\" with an empty\n list as a value is added to the dictionary.\n Otherwise nothing has to be done.\n \"\"\"\n if vertex not in self.__graph_dict:\n self.__graph_dict[vertex] = []\n\n def add_edge(self, edge):\n \"\"\" assumes that edge is of type set, tuple or list;\n between two vertices can be multiple edges!\n \"\"\"\n edge = set(edge)\n (vertex1, vertex2) = tuple(edge)\n if vertex1 in self.__graph_dict:\n self.__graph_dict[vertex1].append(vertex2)\n else:\n self.__graph_dict[vertex1] = [vertex2]\n\n def __generate_edges(self):\n \"\"\" A static method generating the edges of the\n graph \"graph\". Edges are represented as sets\n with one (a loop back to the vertex) or two\n vertices\n \"\"\"\n edges = []\n for vertex in self.__graph_dict:\n for neighbour in self.__graph_dict[vertex]:\n if {neighbour, vertex} not in edges:\n edges.append({vertex, neighbour})\n return edges\n\n def __str__(self):\n res = \"vertices: \"\n for k in self.__graph_dict:\n res += str(k) + \" \"\n res += \"\\nedges: \"\n for edge in self.__generate_edges():\n res += str(edge) + \" \"\n return res\n\n\ndef shortest_path(graph, start, finish):\n\n distances = dict() # Primary distance dictionary\n distances[start] = 0\n\n for vertex in graph.vertices():\n if vertex == start:\n distances[vertex] = 0\n else:\n distances[vertex] = 10**10 # If edges are longer, modify accordingly\n unvisited_distances = copy.deepcopy(distances) # Secondary \"unvisited\" distance dictionary\n\n while unvisited_distances:\n vertex = min(unvisited_distances,\n key=unvisited_distances.get) # Min weight vertex we haven't visited yet\n del unvisited_distances[vertex]\n\n neighbors = graph.get_neighbors(vertex)\n for neighbor in neighbors:\n new_distance = distances[vertex]\\\n + graph.edge_weight(vertex, neighbor)\n if new_distance < distances[neighbor]: # Is new path shorter?\n distances[neighbor] = new_distance\n if neighbor in unvisited_distances:\n unvisited_distances[neighbor] = new_distance\n\n return distances[finish]\n","repo_name":"Pythonimous/python-miscellaneous","sub_path":"shortest_path_dijkstra/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"73479852207","text":"import os\nimport xlrd\nfrom xlutils.copy import copy\n\nfile_path = os.path.abspath(os.path.dirname(__file__)) # 获取当前文件目录\nprint(file_path)\nroot_path = os.path.dirname(file_path) # 获取文件上级目录\ndata_path = root_path + '\\\\data' # 拼接data文件夹地址\ndata_file = data_path + '\\\\api.xlsx' # 拼接excel文件地址\ndata = xlrd.open_workbook(data_file) # 读取文件\nsheet = data.sheet_by_index(0) # 切换到第一个sheet\n\n\ndef write_excel_xls(row, col, value):\n \"\"\"\n excel 写入\n :param row:\n :param col:\n :param value:\n :return:\n \"\"\"\n book_r = xlrd.open_workbook(data_file)\n book_w = copy(book_r) # 复制原表格\n sheet_1 = book_w.get_sheet(0) # 以编辑方式得到文件的第一个工作表\n sheet_1.write(row, col, value) # 把内容写入表格\n os.remove(data_file) # 删除原文件\n book_w.save(data_file) # 保存修改的文件为原文件\n\n\ndef get_excel(row, col):\n \"\"\"\n excel 单元格读取\n :param row:\n :param col:\n :return:\n \"\"\"\n rows = sheet.nrows # 获取最大行号\n cols = sheet.ncols # 获取最大列号\n path_name = sheet.cell_value(row, col) # 获取单元格值\n return rows,cols,path_name\n","repo_name":"Jescs/api_demo","sub_path":"common/write_data.py","file_name":"write_data.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"74335373166","text":"from datetime import datetime\nfrom typing import TYPE_CHECKING, Optional\n\nfrom geoalchemy2 import Geometry\nfrom sqlmodel import (\n Column,\n DateTime,\n Field,\n ForeignKey,\n Index,\n Integer,\n Relationship,\n SQLModel,\n Text,\n text,\n)\nfrom sqlalchemy.dialects.postgresql import JSONB\nif TYPE_CHECKING:\n from .data_upload import DataUpload\n from .scenario import Scenario\n\n\nclass AoiBase(SQLModel):\n id: Optional[int] = Field(sa_column=Column(Integer, primary_key=True, autoincrement=True))\n category: str = Field(sa_column=Column(Text, index=True, nullable=False))\n name: Optional[str] = Field(sa_column=Column(Text))\n opening_hours: Optional[str] = Field(sa_column=Column(Text))\n wheelchair: Optional[str] = Field(sa_column=Column(Text))\n tags: Optional[dict] = Field(sa_column=Column(JSONB))\n geom: str = Field(\n sa_column=Column(\n Geometry(geometry_type=\"MultiPolygon\", srid=\"4326\", spatial_index=False),\n nullable=False,\n )\n )\n\n\nclass Aoi(AoiBase, table=True):\n __tablename__ = \"aoi\"\n __table_args__ = {\"schema\": \"basic\"}\n\n\nIndex(\"idx_aoi_geom\", Aoi.__table__.c.geom, postgresql_using=\"gist\")\n\n\nclass AoiModified(AoiBase, table=True):\n __tablename__ = \"aoi_modified\"\n __table_args__ = {\"schema\": \"customer\"}\n\n creation_date: Optional[datetime] = Field(\n sa_column=Column(DateTime, server_default=text(\"CURRENT_TIMESTAMP\"))\n )\n scenario_id: Optional[int] = Field(\n sa_column=Column(\n Integer, ForeignKey(\"customer.scenario.id\", ondelete=\"CASCADE\"), index=True\n )\n )\n\n scenario: Optional[\"Scenario\"] = Relationship(back_populates=\"aois_modified\")\n\n\nIndex(\"idx_aoi_modified_geom\", AoiModified.__table__.c.geom, postgresql_using=\"gist\")\n\n\nclass AoiUser(AoiBase, table=True):\n __tablename__ = \"aoi_user\"\n __table_args__ = {\"schema\": \"customer\"}\n\n creation_date: Optional[datetime] = Field(\n sa_column=Column(DateTime, server_default=text(\"CURRENT_TIMESTAMP\"))\n )\n data_upload_id: int = Field(\n sa_column=Column(\n Integer,\n ForeignKey(\"customer.data_upload.id\", ondelete=\"CASCADE\"),\n index=True,\n nullable=False,\n )\n )\n\n data_upload: Optional[\"DataUpload\"] = Relationship(back_populates=\"aois_user\")\n\n\nIndex(\"idx_aoi_user_geom\", AoiUser.__table__.c.geom, postgresql_using=\"gist\")\n","repo_name":"goat-community/goat","sub_path":"app/api/src/db/models/aoi.py","file_name":"aoi.py","file_ext":"py","file_size_in_byte":2415,"program_lang":"python","lang":"en","doc_type":"code","stars":83,"dataset":"github-code","pt":"2"}
+{"seq_id":"35578212070","text":"import logging\n\n# setup logging\nlogger = logging.getLogger(__name__)\n# tell which file to use\nfile_handler = logging.FileHandler('logfile.log')\n# set format\nformatter = logging.Formatter('%(asctime)s:%(levelname)s: Filename: %(filename)s: Line: %(lineno)d; %(message)s', datefmt='%m-%d-%Y %I:%M:%S %p ')\nfile_handler.setFormatter(formatter)\nlogger.addHandler(file_handler)\n\n# set level of logging to catpure\nlogger.setLevel(logging.CRITICAL)\n\n# test logging\nlogger.debug('log debug test')\nlogger.info('log info test')\nlogger.warning('log warning test')\nlogger.error('log error test')\nlogger.critical('log critical test')","repo_name":"anishst/Learn","sub_path":"Programming/Python/Logging/custom_log_test/log_practice.py","file_name":"log_practice.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"40254265884","text":"import os\nfrom twilio.rest import Client\nfrom flask import Flask, request, redirect\nimport sqlite3\n\naccount_sid = os.environ['TWILIO_ACCOUNT_SID']\napi_key = os.environ['TWILIO_API_KEY']\napi_secret = os.environ['TWILIO_API_SECRET']\nphone_sid = os.environ['TWILIO_PHONE_SID']\nproxy_number = os.environ['TWILIO_PROXY_NUMBER']\nclient = Client(api_key, api_secret, account_sid)\nservice_sid = ''\n\napp = Flask(__name__)\n\n@app.route(\"/sms\", methods=['GET', 'POST'])\ndef send_back():\n service = client.proxy.services.create(unique_name='fanny_mini_uber',default_ttl=3600)\n service_sid = service.sid\n phone_number = client.proxy.services(service_sid).phone_numbers.create(sid=phone_sid)\n # get the sender number\n sender = request.values.get('From',None)\n message = request.values.get('Body',None)\n # search for the correct number in the database\n receiver = search_database(sender,1)\n # use Proxy API to create secure call between customer and driver\n interaction_id = create_proxy(sender,receiver,message)\n return interaction_id\n\n# server searches in the database, matching the user A with user B\ndef search_database(number,rideID):\n conn = sqlite3.connect('./customerider.db')\n c = conn.cursor()\n c.execute(\"select * from customerrider where driver_phone = ? and ride_id = ?\",(number,rideID))\n if (len(c.fetchall()) == 0):\n # search for matching driver, index 2\n c.execute(\"select * from customerrider where customer_phone = ? and ride_id = ?\",(number,rideID)) \n for row in c.fetchall():\n number_returned = str(row[2])\n number_returned = '+'+number_returned\n return number_returned\n else:\n c.execute(\"select * from customerrider where driver_phone = ? and ride_id = ?\",(number,rideID))\n for row in c.fetchall():\n number_returned = str(row[1])\n number_returned = '+'+number_returned\n return number_returned\n\n# twilio creates the Proxy using these two numbers and initiate the text send to receiver\ndef create_proxy(sender_number,receiver_number,message):\n sender = 'sender'\n receiver = 'receiver'\n # make session - ttl=10 minutes\n session = client.proxy.services(service_sid).sessions.create(unique_name='fanny_mini_uber',ttl=600)\n session_sid = session.sid\n # add participants\n sender_sid = add_participants(sender_number,sender,service_sid,session_sid)\n receiver_sid = add_participants(receiver_number,receiver,service_sid,session_sid)\n interaction_sid = send_initial_call(receiver_sid,service_sid,session_sid,message)\n return interaction_sid\n\ndef add_participants(phonenumber,name,service,session):\n participant = client.proxy.services(service).sessions(session).participants.create(friendly_name=name, identifier=phonenumber,proxy_identifier=proxy_number)\n return participant.sid\n\ndef send_initial_call(participant_sid,service,session,message_to_send):\n voice_interaction = client.proxy.services(service).sessions(session).participants(participant_sid).message_interactions.create(body=message_to_send)\n return voice_interaction.sid\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"fajrihanny/mini-uber","sub_path":"voice_call.py","file_name":"voice_call.py","file_ext":"py","file_size_in_byte":3171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"10180480319","text":"# 124나라\nn = 5\n# def solution(n):\n# r = '124'\n# a, b = divmod(n, 3)\n#\n# while b < 3:\n# return (r[b]) + solution(a - 1)\n\n# 갇혀살았구나, 다 나랑 비슷할줄 알았는데, 전혀 아니었다.\n# 다 나처럼 코딩을 좋아하고, 열의가 있을 줄 알았는데, 그런건 아니었다.\n\ndef solution(n):\n if n<= 3:\n '124'[n -1]\n else:\n a, b = divmod(n - 1, 3)\n return solution(a) + '124'[b]\n\n# n이 3보다 크다면, n - 1을 3으로 나누어 몫과 나머지를 구한다\n# 몫�� 대해 재귀 호출 -> 3으로 나누기에 나머지는 0, 1, 2 -> 나머지를 인덱스로 삼아 '124'에서 자리수를 구한다.","repo_name":"tbnsok40/Algorithm-Python","sub_path":"21MAR/07 SkillCheck2.py","file_name":"07 SkillCheck2.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"21892159100","text":"from urlparse import urlparse, urlunparse\nfrom config import *\nfrom proxy import Proxy, log\n\n\nproxy = Proxy(TARGET_HOST, KEYSTONE_PORT_REAL)\n\n@proxy.bind('POST', \"/v2.0/tokens\")\ndef listTokens(handler, _, pmap = KEYSTONE_REMAP):\n\treq = handler.proxyRequest()\n\tjson = req.json\n\t\n\tfor e in json['access']['serviceCatalog']:\n\t\tif e['name'] in pmap:\n\t\t\tportMappings = pmap[e['name']]\n\t\t\t\n\t\t\tfor endpoint in e['endpoints']:\n\t\t\t\tfor urlName in ['internalURL', 'adminURL', 'publicURL']:\n\t\t\t\t\tparsed = urlparse(endpoint[urlName])\n\t\t\t\t\tif parsed.port in portMappings:\n\t\t\t\t\t\tnew = parsed._replace(netloc=\"{0}:{1}\".format(parsed.hostname, portMappings[parsed.port]))\n\t\t\t\t\t\tendpoint[urlName] = urlunparse(new)\n\t\n\thandler.sendResponse(req.status, req.headers, json=json)\n\nproxy.run(BIND_HOST, KEYSTONE_PORT_PROXY)\n\n","repo_name":"4ernogoria/baseimg","sub_path":"storpool/storpool-18.02.752.1736934/integration-openstack-api-proxies/integration/openstack/api-proxies/usr/lib/storpool/api-proxies/keystone-proxy.py","file_name":"keystone-proxy.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"31091122594","text":"config = {\n \"hyper\":0\n}\n\ntrain_config = {\n \"batch_size\":512,\n \"lr\":0.00001\n}\n\nmodel_config = {\n \"num_layer\":7,\n \"initial_filter\":32,\n \"enc_filter\":15,\n}\n\ndataset_config = {\n \"use_mu_law\":False,\n \"inst1\":\"bass\",\n \"inst2\":\"organ\",\n \"sr\":16000,\n}\n\ninstruments = {\n \"bass\":0,\n \"brass\":1,\n \"flute\":2,\n \"guitar\":3,\n \"keyboard\":4,\n \"mallet\":5,\n \"organ\":6,\n \"reed\":7,\n \"string\":8,\n \"synth_lead\":9,\n \"vocal\":10\n}\n\nnum2instrument = {\n 0:\t\"bass\",\n 1:\t\"brass\",\n 2:\t\"flute\",\n 3:\t\"guitar\",\n 4:\t\"keyboard\",\n 5:\t\"mallet\",\n 6:\t\"organ\",\n 7:\t\"reed\",\n 8:\t\"string\",\n 9:\t\"synth_lead\",\n 10:\t\"vocal\"\n}\n\n","repo_name":"Jsimluken/tiny_research","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"42329275390","text":"import tensorflow as tf\nimport scipy.misc\nimport BoundingBoxModel\nimport cv2\nimport os\nfrom subprocess import call\n\nsess = tf.InteractiveSession()\nsaver = tf.train.Saver()\nsaver.restore(sess, \"save/model.ckpt\")\n\n#read data.txt\nxs=[]\nwith open(\"train/train_data.txt\") as f:\n for line in f:\n xs.append(\"train/pos/\" + line.split()[0])\nwith open(\"train/train_data2.txt\") as f:\n for line in f:\n dirPath = \"train/pos/\" + line[:-1]\n files = os.listdir(dirPath)\n for file in files:\n xs.append(dirPath+\"/\"+file)\n\nfor i in range(len(xs)):\n full_image = scipy.misc.imread(xs[i],mode=\"RGB\")\n image = scipy.misc.imresize(full_image, [100, 176])\n location = BoundingBoxModel.y.eval(feed_dict={BoundingBoxModel.x: [image], BoundingBoxModel.keep_prob: 1.0})\n print(location)\n cv2.rectangle(image,(int(location[0][0]),int(location[0][1])),(int((location[0][0]+location[0][2])*1.2),int((location[0][1]+location[0][3])*1.2)),(0,255,0),3)\n cv2.imshow(\"image\",image)\n cv2.waitKey(0)\n\ncv2.destroyAllWindows()","repo_name":"HyunminKo/HandGestureAuthentication","sub_path":"src/BoundingBoxRunOriginal.py","file_name":"BoundingBoxRunOriginal.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"71282214127","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\nfrom datasets import load_dataset\n\n\n# In[1]:\n\n\nfrom glob import glob\nwavs = glob(\"{}/**/*.wav\".format(\"AudiosTrimmed\"), recursive=True)\n\n\n# In[2]:\n\n\n# wavs\n\n\n# In[124]:\n\n\nimport soundfile as sf\nsf.read(wavs[0])\n\n\n# In[125]:\n\n\nimport soundfile as sf\n\nfor file in wavs:\n data, samplerate = sf.read(file)\n sf.write(file, data, 16000, subtype='PCM_16')\n\n\n# In[126]:\n\n\nsf.read(wavs[0])\n\n\n# In[127]:\n\n\nimport pandas as pd\n\n# In[129]:\n\n\nfrom datasets import Audio\nfrom datasets import load_dataset, DatasetDict, Dataset\nData = DatasetDict()\n\n\n# In[130]:\n\n\ndf = pd.read_csv('text.csv')\ndf_train=pd.DataFrame()\ntrain_text = []\ntrain_path = []\nfor i in range(len(df)):\n train_text.append(df.iloc[i]['transcriptions'])\n s = df.iloc[i]['audio_file'].split('/')\n train_path.append(\"AudiosTrimmed/\"+\"/\".join(s))\ndf_train['text'] = train_text\ndf_train['audio'] = train_path\nprint(df_train.head())\n\n\n# In[131]:\n\n\nData['train'] = Dataset.from_pandas(df_train).cast_column(\"audio\", Audio())\n\n\n# In[133]:\n\n\n# Data['train']['audio'][0]\n\n\n# In[134]:\n\n\nData.save_to_disk('Dataset')\n\n\n# In[107]:\n\n\n# wavs[10]\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"taukeM/whisper","sub_path":"Dataset.py","file_name":"Dataset.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"3230313776","text":"import itertools\nimport logging\nimport math\nimport os\n\nfrom idr import idr\nfrom scipy import signal\nimport numpy as np\nimport pandas as pd\nimport pybedtools\n\nnp.random.seed(0)\n\nlogger = logging.getLogger()\nlogging.basicConfig(level=logging.INFO)\nlogger.setLevel(logging.DEBUG)\npybedtools.set_tempdir(os.path.abspath(os.path.dirname(__file__)))\n\n\nclass PeakCaller(object):\n def __init__(self, bedgraph, strand=\"+\"):\n \"\"\"\n Class to handle peak-calling using scipy.signal.find_peaks.\n \"\"\"\n self.bedgraph = bedgraph\n self.df = pd.read_table(self.bedgraph, names=[\"chrom\", \"start\", \"stop\", \"val\"])\n self.x = self.df[\"val\"]\n self.strand = strand\n\n def call_peaks(self, prominence=(None, None), width=(1, None), rel_height=0.75):\n \"\"\"\n Calls peaks per chromosome\n \"\"\"\n\n self.peaks_by_chrom = {}\n self.meta_by_chrom = {}\n self.starts_by_chrom = {}\n self.chroms = []\n\n for chrom in sorted(self.df[\"chrom\"].unique()):\n\n idx = self.df[\"chrom\"] == chrom\n\n # TODO: should we expand this out to have all positions in the\n # chromosome in memory? Otherwise two, 1-bp peaks that are 10kb away\n # from each other will appear to the peak caller as adjacent. In\n # practice, at least using the existing data, this doesn't seem to\n # be a problem.\n x = self.df.loc[idx, \"val\"]\n starts = self.df.loc[idx, \"start\"]\n\n peaks, meta = signal.find_peaks(\n x, prominence=prominence, width=width, rel_height=rel_height\n )\n\n self.peaks_by_chrom[chrom] = peaks\n self.meta_by_chrom[chrom] = meta\n self.starts_by_chrom[chrom] = starts\n self.chroms.append(chrom)\n return self\n\n def peaks_to_bed(self):\n \"\"\"\n Call peaks into the internal format, and then output as a narrowPeak\n file.\n\n Returns\n -------\n pybedtools.BedTool object sorted by score\n \"\"\"\n logger.info(self.bedgraph)\n\n def gen():\n for chrom in self.chroms:\n logger.info(chrom)\n starts = self.starts_by_chrom[chrom]\n left_ips = self.meta_by_chrom[chrom][\"left_ips\"]\n right_ips = self.meta_by_chrom[chrom][\"right_ips\"]\n left_bases = self.meta_by_chrom[chrom][\"left_bases\"]\n right_bases = self.meta_by_chrom[chrom][\"right_bases\"]\n prominences = self.meta_by_chrom[chrom][\"prominences\"]\n widths = self.meta_by_chrom[chrom][\"widths\"]\n peaks = self.peaks_by_chrom[chrom]\n\n # Results from find_peaks are in the coordinate system of\n # integer indices, so we need to interpolate back out to\n # genomic coordinates.\n xp = np.arange(len(starts))\n ileft_ips = np.interp(left_ips, xp, starts).round().astype(int)\n iright_ips = np.interp(right_ips, xp, starts).round().astype(int)\n ipeaks = np.interp(peaks, xp, starts).round().astype(int)\n\n idx = ileft_ips <= iright_ips\n ileft_ips = ileft_ips[idx]\n iright_ips = iright_ips[idx]\n ipeaks = ipeaks[idx]\n widths = widths[idx]\n prominences = prominences[idx]\n\n n_removed = sum(~idx)\n if n_removed:\n logger.info(\n \"Peaks removed due to start/stop problems: {0}\".format(n_removed)\n )\n\n for start, stop, peak, prominence, width in zip(\n ileft_ips, iright_ips, ipeaks, prominences, widths\n ):\n # This uses the promience as the score.\n p = str(prominence)\n\n # TODO: evaluate the usefulness of increasing the score for\n # punctate peaks:\n # p = str(prominence / math.sqrt(width))\n\n yield pybedtools.create_interval_from_list(\n [\n chrom,\n str(start),\n str(stop),\n \".\",\n p,\n self.strand,\n p,\n \"-1\",\n \"-1\",\n str(peak - start),\n ]\n )\n\n # Ensure we're coord-sorted for the merging 
step\n x = pybedtools.BedTool(gen()).sort()\n x = merge_narrowbed(x, self.strand)\n\n # But the output needs to be sorted by score\n return sort_by_score(x)\n\n\nclass MergedSignalPeakCaller(PeakCaller):\n def __init__(self, bedgraphs, strand=\"+\"):\n \"\"\"\n Class to handle averaging of multiple bedgraphs\n\n Parameters\n ----------\n\n bedgraphs : list\n Filenames or BedTool objects of bedGraphs to be averaged\n\n \"\"\"\n logger.info(\"Unioning bedgraphs...\")\n df = pybedtools.BedTool().union_bedgraphs(i=bedgraphs).to_dataframe()\n logger.info(\"Averaging bedgraphs...\")\n\n avg = df.iloc[:, 3:].mean(axis=1)\n df_merged = df.loc[:, [\"chrom\", \"start\", \"end\"]]\n df_merged[\"value\"] = avg\n df_merged[\"value\"].fillna(0)\n bedgraph = pybedtools.BedTool.from_dataframe(df_merged)\n super().__init__(bedgraph.fn, strand=strand)\n\n\nclass MultiPeakIDR(object):\n def __init__(self, peaks, oracle, strand=\"+\"):\n \"\"\"\n Class to handle running IDR with more than 2 replicates, which default\n IDR does not handle. Here we run all pairwise IDR, and then select the\n min number of peaks under the IDR threshold and then return that many\n from the provided oracle.\n\n Parameters\n ----------\n peaks : list\n List of narrowPeak files or pybedtools.BedTool objects pointing to\n narrowPeak files\n\n oracle : string or pybedtools.BedTool\n Peaks to pull from, generally from original peaks that have been\n merged in some way.\n\n strand : +, -, .\n Assumes the entire object represents a single strand; specify it\n here.\n \"\"\"\n #: list of peaks\n self.peaks = peaks\n\n #: BedTool of merged peaks to uses as oracle\n self.oracle = pybedtools.BedTool(oracle)\n\n #: This object represents a single strand indicated here\n self.strand = strand\n\n # Simplified from idr.load_samples()\n self.signal_type = \"signal.value\"\n self.signal_index = 6\n self.peak_merge_fn = sum\n self.summit_index = 9\n\n #: Peaks loads as internal IDR data structures\n self.fps = [\n idr.load_bed(open(fn), self.signal_index, self.summit_index)\n for fn in self.peaks\n ]\n\n self.oracle_peaks = idr.load_bed(\n open(self.oracle.fn), self.signal_index, self.summit_index\n )\n\n # self._build_oracle()\n\n #: Holds information from running IDR.\n #: Keys are frozenset([i, j]) indicating the pairwise IDRs between\n #: peaks i and j.\n self.idrs = {}\n\n def _build_oracle(self):\n \"\"\"\n Attempts as building an oracle. 
Deprecated, but retaining as fodder.\n \"\"\"\n logger.info(\"Building oracle peaks...\")\n\n # cat-and-merge strategy\n if 0:\n oracle = (\n pybedtools.BedTool.from_dataframe(\n pybedtools.BedTool(self.peaks[0])\n .cat(*self.peaks[1:], o=\"sum\", c=5)\n .to_dataframe()\n .sort_values(\"name\", ascending=False)\n )\n .each(to_narrowpeak)\n .saveas()\n )\n\n # multiintersect strategy\n if 0:\n h = pybedtools.BedTool().multi_intersect(i=self.peaks, cluster=True)\n\n lim = str(len(self.peaks))\n\n def filt(x):\n if x[3] != lim:\n return\n return pybedtools.create_interval_from_list(\n [x.chrom, str(x.start), str(x.stop)]\n )\n\n oracle = h.each(filt).saveas()\n\n # clustered strategy\n if 1:\n clustered = (\n pybedtools.BedTool(self.peaks[0])\n .cat(*self.peaks[1:], postmerge=False)\n .sort()\n .cluster()\n .to_dataframe()\n )\n\n def gen():\n for _, group in clustered.groupby(\"blockSizes\"):\n score = group[\"score\"].sum()\n start = group[\"start\"].min()\n stop = group[\"end\"].max()\n chrom = group[\"chrom\"].unique()[0]\n yield pybedtools.create_interval_from_list(\n [\n chrom,\n str(start),\n str(stop),\n \".\",\n \".\",\n self.strand,\n str(score),\n \"-1\",\n \"-1\",\n \"-1\",\n ]\n )\n\n oracle = sort_by_score(pybedtools.BedTool(gen()).saveas())\n\n # IDR internal strategy\n if 0:\n oracle = self._multiway_merge()\n\n # By the time we get here, should have `oracle`\n self.oracle = oracle\n self.oracle_peaks = idr.load_bed(\n open(oracle.fn), self.signal_index, self.summit_index\n )\n\n def _multiway_merge(self):\n \"\"\"\n Run IDR's internal routine for merging peaks.\n\n Uses self._multiway_merge_bed() to convert this to a BED file.\n \"\"\"\n return idr.merge_peaks(\n self.fps,\n self.peak_merge_fn,\n self.oracle_peaks,\n use_nonoverlapping_peaks=False,\n )\n\n def _multiway_merge_bed(self):\n \"\"\"\n Returns a BED6 of the multiway-merge object.\n \"\"\"\n\n def gen0():\n for i, m_pk in enumerate(self._multiway_merge()):\n\n # from idr.build_idr_output_line_with_bed6\n yield pybedtools.create_interval_from_list(\n [\n m_pk.chrm,\n str(m_pk.start),\n str(m_pk.stop),\n \".\",\n str(m_pk.merged_signal),\n self.strand,\n ]\n )\n\n return pybedtools.BedTool(gen0())\n\n def _build_merged(self, idx1, idx2):\n \"\"\"\n Initial stage used by IDR.\n\n Uses IDR's internal routine for merging peaks. This is intended to be\n called by self.idr, which only works with 2 replicates at a time, hence\n the hard-coding of idx1 and idx2. See self._multiway_merge() for\n merging more than 2 replicates.\n\n Parameters\n ----------\n\n idx1, idx2 : int\n Indexes into self.peaks\n\n\n Returns\n -------\n idr\n \"\"\"\n logger.info(f\"Merging peaks for {self.peaks[idx1]} and {self.peaks[idx2]}\")\n fn1 = self.peaks[idx1]\n fn2 = self.peaks[idx2]\n f1, f2 = [\n idr.load_bed(open(fp), self.signal_index, self.summit_index)\n for fp in [fn1, fn2]\n ]\n merged_peaks = idr.merge_peaks(\n [f1, f2],\n self.peak_merge_fn,\n self.oracle_peaks,\n use_nonoverlapping_peaks=False,\n )\n return merged_peaks\n\n def idr(self, idx1, idx2):\n \"\"\"\n Run IDR between two sets of peaks\n\n Parameters\n ----------\n\n idx1, idx2 : int\n Indexes into self.peaks\n\n Returns\n -------\n None, but as a side effect this method populates the self.idrs\n dictionary for the key frozenset((idx1, idx2)). The value is another\n dictionary containing keys \"IDRs\", \"localIDRs\", and \"merged_peaks\". 
The\n values of these are the corresponding internal idr package data\n structures.\n \"\"\"\n key = frozenset([idx1, idx2])\n if key in self.idrs:\n raise ValueError(f\"key {key} exists\")\n merged_peaks = self._build_merged(idx1, idx2)\n logger.info(f\"Calcluating IDR for {self.peaks[idx1]} and {self.peaks[idx2]}\")\n r1, r2 = idr.build_rank_vectors(merged_peaks)\n localIDRs = idr.fit_model_and_calc_local_idr(\n r1,\n r2,\n starting_point=(\n idr.idr.DEFAULT_MU,\n idr.idr.DEFAULT_SIGMA,\n idr.idr.DEFAULT_RHO,\n idr.idr.DEFAULT_MIX_PARAM,\n ),\n max_iter=idr.idr.MAX_ITER_DEFAULT,\n convergence_eps=idr.idr.CONVERGENCE_EPS_DEFAULT,\n fix_mu=False,\n fix_sigma=False,\n )\n IDRs = idr.calc_global_IDR(localIDRs)\n self.idrs[key] = dict(IDRs=IDRs, localIDRs=localIDRs, merged_peaks=merged_peaks)\n\n def _output(self, idx1, idx2):\n \"\"\"\n Runs IDR's output routine\n\n Returns\n -------\n Generator of narrowPeak lines\n \"\"\"\n key = frozenset([idx1, idx2])\n if key not in self.idrs:\n self.idr(idx1, idx2)\n d = self.idrs[key]\n IDRs = d[\"IDRs\"]\n localIDRs = d[\"localIDRs\"]\n merged_peaks = d[\"merged_peaks\"]\n for localIDR, IDR, merged_peak in zip(localIDRs, IDRs, merged_peaks):\n line = idr.build_idr_output_line_with_bed6(\n merged_peak, IDR, localIDR, \"narrowPeak\", self.signal_type\n )\n yield line\n\n def npeaks_below_idr(self, thresh=0.05):\n \"\"\"\n Dictionary of peak counts falling below IDR threshold.\n\n Returns\n -------\n Dictionary of the number of peaks falling below `thresh` in each\n pairwise IDR run.\n \"\"\"\n counts = {}\n for i, j in itertools.combinations(range(len(self.peaks)), 2):\n c = 0\n for line in self._output(i, j):\n toks = line.split(\"\\t\")\n local_idr = float(toks[10])\n if local_idr >= -math.log10(thresh):\n c += 1\n counts[(i, j)] = c\n return counts\n\n def final(self, thresh=0.05, use=\"oracle\"):\n \"\"\"\n Generate final peaks.\n\n Specificially, this extracts the top N peaks from the oracle peaks\n where N is determined by the minimum number of peaks below the IDR\n threshold across all pairwise IDR runs between replicates.\n\n Parameters\n ----------\n\n thresh : float\n IDR threshold\n\n use : oracle | idr-merged\n If \"oracle\", the final peaks will be selected from self.oracle.\n If \"idr-merged\", use IDR's internal merging routine, which allows\n multi-way merging if using their internal API.\n\n Returns\n -------\n BedTool of final peaks.\n \"\"\"\n n = min(self.npeaks_below_idr(thresh).values())\n limit = n - 1\n if use == \"oracle\":\n\n def gen():\n for i, feature in enumerate(self.oracle):\n if i >= limit:\n break\n yield feature\n\n elif use == \"idr-merged\":\n\n def gen():\n for i, m_pk in enumerate(self._multiway_merge()):\n if i >= limit:\n break\n\n # from idr.build_idr_output_line_with_bed6\n yield pybedtools.create_interval_from_list(\n [m_pk.chrm, str(m_pk.start), str(m_pk.stop), self.strand]\n )\n\n return pybedtools.BedTool(gen()).saveas()\n\n\ndef idr_peak_calls(\n bedgraphs,\n strand,\n thresh=0.05,\n oracle_fn=\"oracle.narrowPeak\",\n final_fn=\"final.narrowPeak\",\n):\n \"\"\"\n Returns oracle peaks and final peaks meeting IDR cutoff.\n\n Parameters\n ----------\n\n bedgraphs : list\n filenames of bedGraph files from replicates. Expected to be already\n normalized.\n\n strand : str\n One of \"+\", \"-\", or \".\". Used to fill in the strand field of created\n narrowPeak files.\n\n thresh : float\n IDR threshold.\n\n oracle_fn : str\n Filename for \"oracle\" peaks. 
These are the peaks called after merging\n together all provided input bedGraphs.\n\n final_fn : str\n Filename for final thresholded peaks.\n \"\"\"\n peak_callers = [PeakCaller(bedgraph, strand) for bedgraph in bedgraphs]\n mg = MergedSignalPeakCaller([i.bedgraph for i in peak_callers], strand=strand)\n oracle = mg.call_peaks().peaks_to_bed().moveto(oracle_fn)\n\n peaks = [pc.call_peaks().peaks_to_bed() for pc in peak_callers]\n m = MultiPeakIDR([p.fn for p in peaks], oracle=oracle, strand=strand)\n f = m.final(thresh=thresh).saveas(final_fn)\n\n\ndef to_narrowpeak(f):\n \"\"\"\n Convert a feature into narrowPeak format, with signal and pval equivalent\n to the score.\n \"\"\"\n return pybedtools.create_interval_from_list(\n [\n f.chrom,\n str(f.start),\n str(f.stop),\n \".\",\n f.name,\n f.strand,\n f.name,\n f.name,\n \"-1\",\n str(int((f.stop - f.start) / 2)),\n ]\n )\n\n\ndef sort_by_score(x):\n \"\"\"\n Sort a BedTool object by the score column.\n \"\"\"\n df = pybedtools.BedTool(x).to_dataframe()\n df = df.sort_values(\"score\", ascending=False)\n return pybedtools.BedTool.from_dataframe(df)\n\n\ndef merge_narrowbed(peaks, strand, additional_kwargs={\"d\": 1, \"o\": \"max\"}):\n \"\"\"\n Method for merging narrowPeak files with bedtools.\n\n Using basic bedtools merge, merging narrowPeak files gets awkward if we\n want to output a valid narrowPeak. Here it's handled via pandas. Note\n that any narrowPeak summit positions are reset to -1 since it's not\n clear how to meaningfully aggregate them.\n\n Parameters\n ----------\n peaks : pybedtools.BedTool object\n Peaks to merge\n\n strand : str\n One of '+', '-', '.' to be set as the strand for each merged\n feature.\n\n additional_kwargs : dict\n Additional kwargs to send to pybedtools.BedTool.merge. By default,\n this merges features overlapping by 1 bp, and aggregates them by\n taking the maximum value. During testing, 'max' seemed to give\n better results than 'mean' because the latter tended to wash out\n strong peaks near smaller peaks.\n \"\"\"\n x = (peaks.cut([0, 1, 2, 4]).merge(c=4, **additional_kwargs)).to_dataframe()\n x[\"score\"] = \".\"\n x[\"strand\"] = strand\n y = pybedtools.BedTool.from_dataframe(x)\n return y.each(to_narrowpeak).saveas()\n\n","repo_name":"NICHD-BSPC/termseq-peaks","sub_path":"peaklib/peaklib.py","file_name":"peaklib.py","file_ext":"py","file_size_in_byte":19126,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"2"}
+{"seq_id":"15494758235","text":"#game rock paper and scissors\nimport random\nrock = '''\n _______\n---' ____)\n (_____)\n (_____)\n (____)\n---.__(___)\n'''\n\npaper = '''\n _______\n---' ____)____\n ______)\n _______)\n _______)\n---.__________)\n'''\n\nscissors = '''\n _______\n---' ____)____\n ______)\n __________)\n (____)\n---.__(___)\n'''\n#code\ncomp = random.randint(0,2)\nl1 = [rock, paper, scissors]\nuser=int(input(\"What do you choose? Type 0 for Rock, 1 for Paper or 2 for Scissors\\n\")) \nprint(l1[user])\nprint(\"Computer chose:\")\nprint(l1[comp])\nprint(comp)\nif user==comp:\n print(\"it's a draw\")\nelif (user== 2 and comp==0) or (user== 0 and comp==1) or (user== 1 and comp==2):\n print(\"You lose\")\nelif (user== 0 and comp==2) or (user== 1 and comp==0) or (user== 2 and comp==1):\n print(\"You Win\")","repo_name":"V4nshaj/Python","sub_path":"Rock Paper Scissor game.py","file_name":"Rock Paper Scissor game.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"27336041401","text":"\"\"\"Define the Logger class.\"\"\"\n\nimport sys\nimport traceback\n\nfrom enum import Enum\nfrom io import IOBase, FileIO, TextIOWrapper\n\nfrom pycursesui import time\n\n__all__ = [\"LogLevel\", \"Logger\"]\n\n\n########################################################################################################################\n\nINDENT_TEXT = \" \"\nTIME_WIDTH = 6\n\n\nclass LogLevel(Enum):\n \"\"\"LogLevel describes how urgent a specific log message is.\"\"\"\n\n TRACE = 0\n DEBUG = 1\n INFO = 2\n WARN = 3\n ERROR = 4\n\n\n########################################################################################################################\n\nclass LogChannel(object):\n\n def __init__(self, name, stream, level=LogLevel.INFO, eraseable=False, global_start_time=None):\n self.eraseable = eraseable\n self.global_start_time = global_start_time or time.now()\n self.indent_count = 0\n self.level = level\n self.name = name\n self.stream = stream\n\n self._eraseable_text = None\n self._last_time = time.now()\n\n # Public Methods ###############################################################################\n\n def append_eraseable(self, text):\n if not self.eraseable:\n return\n\n self._eraseable_text = text\n print(text, file=self.stream, end=\"\", flush=True)\n\n def close(self):\n print(\"\\n\", file=self.stream)\n self.stream.flush()\n\n if not ((self.stream is sys.stdout) or (self.stream is sys.stderr)):\n self.stream.close()\n\n def erase(self):\n if (not self.eraseable) or (self._eraseable_text is None):\n return\n\n c = len(self._eraseable_text)\n text = (\"\\b\" * c) + (\" \" * c) + (\"\\b\" * c)\n print(text, file=self.stream, end=\"\", flush=True)\n self._eraseable_text = None\n\n def write(self, level, entry, append=False):\n if not self._is_writable_level(level):\n return\n\n self.erase()\n text = entry() if callable(entry) else str(entry)\n for message in text.splitlines():\n self._write_message(level, message, append=append)\n append = False\n\n return self\n\n # Private Methods ##############################################################################\n\n def _is_writable_level(self, level):\n if not isinstance(level, LogLevel):\n raise TypeError(f\"level should be a LogLevel but was a {type(level)}\")\n return level.value >= self.level.value\n\n def _write_message(self, level, message, append=False):\n if not append:\n now = time.now()\n cumulative_time = time.humanize(now - self.global_start_time).rjust(TIME_WIDTH)\n delta_time = time.humanize(now - self._last_time).rjust(TIME_WIDTH)\n self._last_time = now\n\n level = level.name.rjust(5)\n indent = INDENT_TEXT * self.indent_count\n prefix = f\"\\n[{cumulative_time} (+{delta_time}) {level}]{indent} \"\n else:\n prefix = \"\"\n\n print(f\"{prefix}{message}\", file=self.stream, end=\"\", flush=True)\n\n\n########################################################################################################################\n\nclass Logger(object):\n \"\"\"Logger provides a simple interface for writing filtered status information to a variety of sources.\"\"\"\n\n def __init__(self):\n \"\"\"Create a new logger.\"\"\"\n self._channels = {}\n self._global_start_time = time.now()\n self._indent_count = 0\n\n # Channel Methods ##############################################################################\n\n def add_channel(self, name, stream, level=LogLevel.INFO, eraseable=False):\n \"\"\"Add a new channel to this logger.\"\"\"\n if not isinstance(name, str):\n raise TypeError(f\"name 
must be a str, but was a {type(name)}\")\n if not isinstance(stream, IOBase):\n raise TypeError(f\"stream must be an IOBase, but was a {type(stream)}\")\n if not isinstance(level, LogLevel):\n raise TypeError(\"level must be a LogLevel, but was a {type(level)}\")\n if self.has_channel(name):\n raise ValueError(\"{name} is already registered as a channel on this logger\")\n\n self._channels[name] = LogChannel(name, stream, level, eraseable, self._global_start_time)\n return self\n\n def add_console_channel(self, level=LogLevel.INFO):\n \"\"\"Add a channel called \"console\" to write data to stdout.\"\"\"\n self.add_channel(\"console\", sys.stdout, level, eraseable=True)\n return self\n\n def add_file_channel(self, name, file_name, level=LogLevel.INFO):\n \"\"\"Add a channel with a given name to write to a file.\"\"\"\n self.add_channel(name, TextIOWrapper(FileIO(file_name, \"w\")), level)\n return self\n\n def clear_channels(self):\n \"\"\"Remove all registered channels.\"\"\"\n for channel_name in self.list_channels():\n self.remove_channel(channel_name)\n return self\n\n def has_channel(self, name):\n \"\"\"Determine whether this logger has a certain channel.\"\"\"\n return name in self._channels\n\n def list_channels(self):\n \"\"\"List the names of all registered channels.\"\"\"\n return list(self._channels.keys())\n\n def remove_channel(self, name):\n \"\"\"Remove a certain channel from this logger.\"\"\"\n if self.has_channel(name):\n self._channels[name].close()\n del self._channels[name]\n return self\n\n def set_channel_level(self, name, level):\n \"\"\"Change the log level of a certain channel.\"\"\"\n if not isinstance(level, LogLevel):\n raise TypeError(f\"level must be a LogLevel, but was a {type(level)}\")\n if not self.has_channel(name):\n raise ValueError(f\"{name} is not a channel on this logger\")\n\n self._channels[name].level = level\n return self\n\n # Logging Methods ##############################################################################\n\n def trace(self, entry, append=False):\n \"\"\"Write a entry at TRACE level.\"\"\"\n return self.write(LogLevel.TRACE, entry, append)\n\n def debug(self, entry, append=False):\n \"\"\"Write a entry at DEBUG level.\"\"\"\n return self.write(LogLevel.DEBUG, entry, append)\n\n def info(self, entry, append=False):\n \"\"\"Write a entry at INFO level.\"\"\"\n return self.write(LogLevel.INFO, entry, append)\n\n def warn(self, entry, append=False):\n \"\"\"Write a entry at the WARN level.\"\"\"\n return self.write(LogLevel.WARN, entry, append)\n\n def error(self, entry=None, error=None, append=False):\n \"\"\"Write a entry at the ERROR level.\"\"\"\n if (entry is None) and (error is None):\n return\n\n def _build_message():\n message = \"\"\n if entry is not None:\n message += entry() if callable(entry) else str(entry)\n if error is not None:\n needs_delimiter = False\n for line in traceback.format_exception(type(error), error, error.__traceback__):\n line = line.rstrip()\n if line != \"\":\n if needs_delimiter:\n message += \"\\n\"\n needs_delimiter = True\n message += f\"{INDENT_TEXT}{line}\"\n return message\n\n return self.write(LogLevel.ERROR, _build_message, append)\n\n # Public Methods ###############################################################################\n\n def append_eraseable(self, text):\n \"\"\"Append a chunk of eraseable text.\"\"\"\n for channel in self._channels.values():\n channel.append_eraseable(text)\n\n def close(self):\n \"\"\"Close all channels and remove them from this logger.\"\"\"\n return 
self.clear_channels()\n\n def erase(self):\n \"\"\"Erase the last chunk of eraseable text written to this logger.\"\"\"\n for channel in self._channels.values():\n channel.erase()\n\n def indent(self):\n \"\"\"Indent the log by one level.\"\"\"\n self._indent_count += 1\n for channel in self._channels.values():\n channel.indent_count = self._indent_count\n return self\n\n def indented(self):\n \"\"\"Use this logger in a `with` statement.\"\"\"\n logger = self\n\n class LoggerIndentContext(object):\n def __enter__(self):\n logger.indent()\n return logger\n\n def __exit__(self, type, value, trace):\n logger.outdent()\n return False\n\n return LoggerIndentContext()\n\n def outdent(self):\n \"\"\"Outdent the log by one level.\"\"\"\n self._indent_count = max(0, self._indent_count - 1)\n for channel in self._channels.values():\n channel.indent_count = self._indent_count\n return self\n\n def set_level(self, level):\n \"\"\"Change the level for all channels at once.\"\"\"\n if not isinstance(level, LogLevel):\n raise TypeError(f\"level must be a LogLevel, but was a {type(level)}\")\n for channel_name in self.list_channels():\n self.set_channel_level(channel_name, level)\n return self\n\n def write(self, level, entry, append=False):\n \"\"\"Write a message to each channel registered with this logger.\"\"\"\n if not isinstance(level, LogLevel):\n raise TypeError(f\"level must be a LogLevel, but was a {type(level)}\")\n append = bool(append)\n\n for channel in self._channels.values():\n channel.write(level, entry, append=append)\n return self\n","repo_name":"andrewminer/pycursesui","sub_path":"src/pycursesui/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":9635,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"72008676848","text":"import random\nimport win32com.client\nimport datetime\nimport pymssql\n\ntoday = datetime.date.today()\n\n'''\nFunction: Send_Email()\nArguments: \n\ttable_subjects, email_times, table_expected_times, email_status\n'''\ndef Send_Email(table_subjects, email_times, table_expected_times, email_status, table_owners):\n\t# Sort all lists based on email_times\n\t# sorted_lists = sorted(zip(email_times, table_subjects, table_expected_times, email_status, table_owners))\n\tsorted_lists = sorted(zip(table_expected_times, table_subjects, email_times, email_status, table_owners))\n\n\n\thtml_table = \"\\n\"\n\thtml_table += \"Email Subject | Email Time | Expected Time | Email Status | Owner |
\\n\"\n\n\t# for time, subject, expected_time, status, owner in sorted_lists:\n\tfor expected_time,subject,time,status,owner in sorted_lists:\n\t\tif status == \"Delivery Unsuccessful\":\n\t\t\thtml_table += ''\n\t\telif status == \"Success\":\n\t\t\thtml_table += '
'\n\t\telif status == \"Late Delivery\":\n\t\t\thtml_table += '
'\n\t\telse:\n\t\t\thtml_table += \"
\"\n\t\t\t\n\t\thtml_table += f\"| {subject} | \"\n\t\thtml_table += f\"{time} | \"\n\t\thtml_table += f\"{expected_time} | \"\n\t\thtml_table += f\"{status} | \"\n\t\thtml_table += f\"{owner} | \"\n\t\thtml_table += \"
\\n\"\n\n\thtml_table += \"
\"\n\n\twith open('E:\\\\Khaleel\\\\EricMon\\\\EricMon.css', 'r') as css_file:\n\t\tcss_content = css_file.read()\n\n\thtml_head = f\"\"\n\n\theader = f\"Delivery Monitoring
\"\n\tboday = f'''\n\t\t\t\t\tDelivery Status So far, Time: {datetime.datetime.now().strftime(\"%H:%M\")}
\n\t\t\t '''\n\n\tfooter = \"
This is a system generated email, please report anomalies to ENPMPERFORMANCETEAM@jazz.com.pk