diff --git "a/1080.jsonl" "b/1080.jsonl" new file mode 100644--- /dev/null +++ "b/1080.jsonl" @@ -0,0 +1,685 @@ +{"seq_id":"466178440","text":"import requests\nimport json\nfrom googleapiclient import discovery\nfrom googleapiclient.http import BatchHttpRequest\nfrom google.oauth2 import service_account\nfrom oauth2client.client import GoogleCredentials\n\ncredentials = GoogleCredentials.get_application_default()\nservice = discovery.build('logging', 'v2', credentials=credentials)\n\n\nclass Instance:\n\tdef __init__(self, name, ip):\n\t\tself.name = name\n\t\tself.ip = ip\n\nclass Firewall:\n\tdef __init__(self, project, name, sourceRanges, allowed, targetTags, direction, affected_instances):\n\t\tself.project = project\n\t\tself.name = name\n\t\tself.sourceRanges = sourceRanges\n\t\tself.allowed = allowed\n\t\tself.targetTags = targetTags\n\t\tself.direction = direction\n\t\tself.affected_instances = affected_instances\n\nclass Protocol:\n\tdef __init__(self, protocol, ports):\n\t\tself.protocol = protocol\n\t\tself.ports = ports\n\ndef identify(service_account_name, project_name):\n\ttry:\n\t\tbody = {\"resourceNames\": [f\"projects/{project_name}\"]}\n\t\trequest = service.entries().list(body=body)\n\t\twhile request is not None:\n\t\t\tresponse = request.execute()\n\t\t\ttry:\n\t\t\t\tfor entry in response.get('items'):\n\t\t\t\t\tprint(entry)\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\trequest = service.entries().list_next(previous_request=request, previous_response=response)\n\t\t\n\t\tsourceRanges = response.get('sourceRanges')\n\t\tallowed = response.get('allowed')\n\t\tif allowed is None:\n\t\t\traise Exception('422, Egress Direction')\n\n\t\ttargetTags = response.get('targetTags')\n\t\tdirection = response.get('direction')\n\n\t\t# Parse the allowed ports\n\t\tallowed = parse_protocol(allowed)\n\n\t\t# If targetTags is empty, the firewall rule is applied to all instances\n\t\tif targetTags is None:\n\t\t\taffected_instances = 'All Instances'\n\t\telse:\n\t\t\taffected_instances = get_affected_instances(project_name, targetTags)\n\t\t\n\t\toutput = Firewall(project_name, firewall_name, sourceRanges, allowed, targetTags, direction, affected_instances)\n\t\treturn output\n\texcept Exception as e:\n\t\treturn(e)\n\ndef parse_protocol(allowed):\n\toutput = []\n\tfor item in allowed:\n\t\tprotocol = item['IPProtocol']\n\t\tports = item.get('ports')\n\t\tif ports is None:\n\t\t\tports = ''\n\t\toutput.append(Protocol(protocol, ports)) \n\treturn output\n\ndef list_zones(project_name):\n\trequest = service.zones().list(project=project_name)\n\tresponse = request.execute()\n\tzones = [zone['name'] for zone in response['items']]\n\treturn zones\t\n\ndef get_affected_instances(project_name, targetTags):\n\taffected_instances = []\n\tinstances_list = []\n\tzones = list_zones(project_name)\n\n\tbatch = service.new_batch_http_request()\n\n\tfor zone in zones:\n\t\tbatch.add(service.instances().list(project=project_name, zone=zone))\n\n\tbatch.execute()\n\n\tfor i in batch._responses:\n\t\tif json.loads(batch._responses[i][1]).get('items') is not None:\n\t\t\tfor instance in json.loads(batch._responses[i][1]).get('items'):\n\t\t\t\tinstances_list.append(instance)\n\n\tfor tag in targetTags:\n\t\tfor instance in instances_list:\n\t\t\tif (instance.get('tags').get('items') is not None) and (tag in instance.get('tags').get('items')):\n\t\t\t\tinstance_name = instance['name']\n\t\t\t\ttry:\n\t\t\t\t\tinstance_ip = \"`\" + str(instance.get('networkInterfaces')[0].get('accessConfigs')[0].get('natIP')) + 
\"`\"\n\t\t\t\t\tif instance.get('networkInterfaces')[0].get('accessConfigs')[0].get('natIP') is None:\n\t\t\t\t\t\tinstance_ip = ''\n\t\t\t\texcept:\n\t\t\t\t\tinstance_ip = ''\n\t\t\t\taffected_instances.append(Instance(instance_name, instance_ip))\n\n\treturn affected_instances\n\ndef identify_reply_message(url, token, channel, thread_ts, output):\t\n\tdata = {\n\t\t\t'token' : token,\n\t\t\t'channel' : channel, \n\t\t\t'thread_ts' : thread_ts\n\t}\n\t\t\t\t\n\tif str(type(output)) != \"\":\n\t\tdata.update({\"text\":str(output)})\n\t\tr = requests.post(url=url, data=data)\n\t\tquit()\n\n\n\tparsed_affected_instances = \"\"\n\tif output.affected_instances == 'All Instances':\n\t\tparsed_affected_instances = '`All Instances`'\n\telse:\n\t\tfor instance in output.affected_instances:\n\t\t\tparsed_affected_instances += \"• \" + instance.name + \" \" + instance.ip + \" \\n\"\n\n\tparsed_protocol = \"\"\n\tfor protocol in output.allowed:\n\t\tif protocol.ports != '':\n\t\t\tparsed_protocol += \"`\" + protocol.protocol + \"`\\n\"\n\t\t\tif len(protocol.ports) > 1:\n\t\t\t\tfor port in protocol.ports:\n\t\t\t\t\tparsed_protocol += \"• \" + str(port) + \" \\n\"\n\t\t\telse:\n\t\t\t\tparsed_protocol += \"• \" + str(protocol.ports[0]) + \" \\n\"\n\t\telse:\n\t\t\tparsed_protocol += \"`\" + protocol.protocol + \"` \\n\"\n\n\tattachments = [\n\t\t{\n\t\t\t\"mrkdwn_in\": [\"text\",\"value\"],\n\t\t\t\"color\": \"#36a64f\",\n\t\t\t\"fallback\": \"Query for \" + output.name,\n\t\t\t\"fields\": [\n\t\t\t\t{\n\t\t\t\t\t\"title\": \"Project\",\n\t\t\t\t\t\"value\": output.project,\n\t\t\t\t\t\"short\": True\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"title\": \"Name\",\n\t\t\t\t\t\"value\": output.name,\n\t\t\t\t\t\"short\": True\n\t\t\t\t}, \n\t\t\t\t{\n\t\t\t\t\t\"title\": \"Direction\",\n\t\t\t\t\t\"value\": output.direction,\n\t\t\t\t\t\"short\": True\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"title\": \"Protocol\",\n\t\t\t\t\t\"value\": parsed_protocol,\n\t\t\t\t\t\"short\": True\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"title\": \"Source\",\n\t\t\t\t\t\"value\": str(output.sourceRanges),\n\t\t\t\t\t\"short\": True\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"title\": \"Affected Instances\",\n\t\t\t\t\t\"value\": parsed_affected_instances\n\t\t\t\t}\n\t\t\t],\n\t\t\t\"footer\": \"dollhouse\",\n\t\t\t\"footer_icon\": \"https://platform.slack-edge.com/img/default_application_icon.png\"\n\t\t}\n\t]\n\n\n\tdata.update({\"attachments\": json.dumps(attachments)})\n\tr = requests.post(url=url, data=data)\n","sub_path":"bot/commands/idenfity_helper.py","file_name":"idenfity_helper.py","file_ext":"py","file_size_in_byte":5272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"98993516","text":"\n\n#calss header\nclass _LANTERN():\n\tdef __init__(self,): \n\t\tself.name = \"LANTERN\"\n\t\tself.definitions = [u'a light inside a container that has a handle for holding it or hanging it up, or the container itself']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_lantern.py","file_name":"_lantern.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"572905516","text":"prompt = \"\\ntell me something and let me show you that I care\"\r\nprompt += \"\\nEnter 'quit' or 'I'm feeling great: '\"\r\n\r\nactive = 
True\r\nwhile active:\r\n\tmsg = input(prompt)\r\n\tnew_msg = msg.lower()\r\n\tif new_msg == \"quit\" or new_msg == \"i'm feeling great\":\r\n\t\tactive = False\r\n\telse: \r\n\t\tprint(msg, '?')\r\n\t\r\n\t\r\n\t","sub_path":"weekone/simpleEliza.py","file_name":"simpleEliza.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"624638324","text":"\"\"\"\nCMSC 125 MP1\nShortest-job-first CPU scheduler simulator\nFollows new timing pattern\n\nInput from file.\n\"\"\"\nimport threading\nfrom MP1 import ProcessLine, load_from_file, print_input_table, print_simulation_report\nfrom terminaltables import SingleTable\n\ndef simulator(PLPending_list):\n finished_list = []\n current_task = None\n pending = PLPending_list\n time = 0\n history = []\n while len(pending) > 0 or current_task != None:\n ### Report Stats\n if current_task:\n history.append((\"P\"+str(current_task.id),time))\n else:\n history.append((\"X\",time))\n\n ### Tick Timer\n\n ### Unload Job\n if current_task != None: #task currently loaded\n #卒業事情確認優先\n if current_task.burst_time <=0: #卒業状況あった\n #UNLOAD and graduate\n current_task.completion_time = time\n #current_task.turnaround_time = time\n current_task.turnaround_time = current_task.completion_time-current_task.arrival_time\n current_task.waiting_time = int(current_task.turnaround_time - current_task.burst_original)\n finished_list.append(current_task)\n current_task = None\n\n ### Load Job\n if current_task == None: #NO task currently loaded. Find one.\n #no current task executing! Start selection.\n #find minimum\n pending_minimum_arr = []\n for x in pending:\n if x.arrival_time <= time: #confine to present processes\n pending_minimum_arr.append(x.burst_time)\n if len(pending_minimum_arr) > 0:\n pending_minimum = min(pending_minimum_arr)\n for p in pending: #handle start\n #choose shortest and load.\n if p.burst_time == pending_minimum and current_task == None and p.arrival_time <= time:\n current_task = p\n if current_task.response_time == -1:\n current_task.response_time = time - current_task.arrival_time\n #current_task.waiting_time = time\n pending.remove(p)\n break\n\n ### Tick Current Job\n if current_task: #task currently loaded\n current_task.burst_time = current_task.burst_time - 1\n\n time = time + 1\n return (finished_list, history)\n\nif __name__ == '__main__':\n input_file = \"input_all.txt\"\n print(\"Shortest-job-first Scheduling Simulator\")\n print(\"Input File: \", input_file)\n print()\n process_list = load_from_file(input_file)\n #Print input table\n print_input_table(process_list)\n #Simulation Report (output)\n fin, history_list = simulator(process_list)\n print_simulation_report(fin, history_list)","sub_path":"shortest.py","file_name":"shortest.py","file_ext":"py","file_size_in_byte":2859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"259687422","text":"class GraphAdjacentMatrix():\n def __init__(self, vertex_length, graph_type):\n if(vertex_length>0 and (graph_type==\"Directed\" or graph_type==\"Undirected\" or graph_type == \"Network\")):\n self._enable=1\n self._graph_type = graph_type\n self._vertex_length = [i for i in range(vertex_length)]\n if(graph_type != \"Network\"):\n self._vertex_edges = [[0 for i in range(vertex_length)] for i in range(vertex_length)]\n else:\n self._vertex_edges = [[-1 for i in range(vertex_length)] for i in range(vertex_length)]\n else:\n self._enable=0\n 
print(\"Initial object error.\\n\")\n\n def set_edge(self, vertex_from, vertex_to, edge):\n if(self._graph_type == \"Undirected\"):\n if(vertex_from!=vertex_to):\n self._vertex_edges[vertex_from][vertex_to]=edge\n self._vertex_edges[vertex_to][vertex_from]=edge\n else:\n print(\"In undirected graph, vertex can not be self-point.\")\n else:\n self._vertex_edges[vertex_from][vertex_to]=edge \n \n def show_graph(self):\n temp=\"\\t\"\n for item in range(len(self._vertex_length)):\n temp += (str(self._vertex_length[item])+\"\\t\")\n print(temp)\n for row in range(len(self._vertex_edges)):\n temp=(str(row)+\"\\t\")\n for column in range(len(self._vertex_edges)):\n temp+=(str(self._vertex_edges[row][column])+\"\\t\")\n print(temp)\n \n def show_graph_edge(self):\n temp=\"\"\n for row in range(len(self._vertex_edges)):\n for column in range(len(self._vertex_edges)):\n temp+=(str(self._vertex_edges[row][column])+\"\\t\")\n print(temp)\n temp=\"\"\n\n def show_graph_vertex(self):\n temp=\"Vertex(s): \"\n for item in range(len(self._vertex_length)):\n temp += (str(self._vertex_length[item])+\"\\t\")\n print(temp)\n \n","sub_path":"Data_Structure/mygraph.py","file_name":"mygraph.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"193853136","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n__author__ = 'Fu Yangzhen'\nfrom pythonds.basic.stack import Stack\n\n\ndef moveTower(height, fromPole, toPole, withPole):\n\tif height >= 1:\n\t\tmoveTower(height - 1, fromPole, withPole, toPole)\n\t\tmoveDisk(fromPole, toPole)\n\t\tmoveTower(height - 1, withPole, toPole, fromPole)\n\n\ndef moveDisk(fp, tp):\n\tprint('moving disk from %s to %s' % (fp, tp))\n\n\nmoveTower(3, \"A\", \"B\", \"C\")\n\n\n# ===============================\nclass my_Stack(object):\n\tdef __init__(self):\n\t\tself.items = []\n\n\tdef push(self, item):\n\t\tself.items.append(item)\n\n\tdef pop(self):\n\t\treturn self.items.pop()\n\n\tdef peek(self):\n\t\treturn self.items[-1]\n\n\tdef size(self):\n\t\treturn len(self.items)\n\n\tdef isEmpty(self):\n\t\treturn self.items == []\n\n\ndef moveTower_Pro(height, fromPole, toPole, withPole):\n\tglobal initial, Astack, Bstack, Cstack, F, T, W\n\twhile initial:\n\t\tF = fromPole\n\t\tT = toPole\n\t\tW = withPole\n\t\tnum = '0123456789'\n\t\tDicks = list(num[:height][::-1])\n\t\tAstack = my_Stack()\n\t\tBstack = my_Stack()\n\t\tCstack = my_Stack()\n\t\tfor dick in Dicks:\n\t\t\tAstack.push(dick)\n\t\tprint('A stack: %s' % Astack.items)\n\t\tprint('B stack: %s' % Bstack.items)\n\t\tprint('C stack: %s\\n' % Cstack.items)\n\t\tinitial = False\n\n\tif height >= 1:\n\t\tmoveTower_Pro(height - 1, fromPole, withPole, toPole)\n\t\tmoveDisk(fromPole, toPole)\n\t\tif fromPole == F:\n\t\t\tif toPole == T:\n\t\t\t\tBstack.push(Astack.pop())\n\t\t\telse:\n\t\t\t\tCstack.push(Astack.pop())\n\t\telif fromPole == T:\n\t\t\tif toPole == F:\n\t\t\t\tAstack.push(Bstack.pop())\n\t\t\telse:\n\t\t\t\tCstack.push(Bstack.pop())\n\t\telse:\n\t\t\tif toPole == F:\n\t\t\t\tAstack.push(Cstack.pop())\n\t\t\telse:\n\t\t\t\tBstack.push(Cstack.pop())\n\t\tprint('A stack: %s' % Astack.items)\n\t\tprint('B stack: %s' % Bstack.items)\n\t\tprint('C stack: %s\\n' % Cstack.items)\n\t\tmoveTower_Pro(height - 1, withPole, toPole, fromPole)\n\ndef my_moveTower_Pro(height, fromPole, toPole, withPole):\n\tglobal initial\n\tinitial = True\n\tmoveTower_Pro(height, fromPole, toPole, withPole)\n\nmy_moveTower_Pro(3, \"A\", \"B\", 
\"C\")\n","sub_path":"Recursion/moveTower.py","file_name":"moveTower.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"69406013","text":"#!/usr/bin/env python\n# Unit Name: currency_server.db\n# Created By: Virgil Dupras\n# Created On: 2008-04-20\n# Copyright 2010 Hardcoded Software (http://www.hardcoded.net)\n# \n# This software is licensed under the \"BSD\" License as described in the \"LICENSE\" file, \n# which should be included with this package. The terms are also available at \n# http://www.hardcoded.net/licenses/bsd_license\n\nfrom datetime import date, datetime, timedelta\nimport xml.etree.cElementTree as ET\nimport xml.parsers.expat\nimport re\n\nfrom .hscommon.currency import Currency, RatesDB as RatesDBBase\nfrom .hscommon import sqlite\n\n\nDB_PATH = '/var/sqlite/currency.db'\n# DB_PATH = '/Users/hsoft/Desktop/currency.db'\nRE_ENDS_WITH_PARENS = re.compile(r\"\\([^(]+\\)$\")\n\nclass RatesDB(RatesDBBase):\n \"\"\"The RatesDB on the server side automatically updates itself using Bank of Canada's rates\n \n Bank of Canada uses n/a values for week-ends, holidays and future dates. We want to ignore those\n values when importing.\n \"\"\"\n def __init__(self, dbpath=DB_PATH):\n RatesDBBase.__init__(self, sqlite.ThreadedConn(dbpath, False))\n \n def get_CAD_values(self, start, end, currency_code):\n \"\"\"Returns [(date, value)] for each CAD value the DB has for 'currency'.\n \n The values are in date order.\n \"\"\"\n str_start = '%d%02d%02d' % (start.year, start.month, start.day)\n str_end = '%d%02d%02d' % (end.year, end.month, end.day)\n sql = \"select date, rate from rates where date >= ? and date <= ? and currency = ?\"\n cur = self.con.execute(sql, [str_start, str_end, currency_code])\n return [(datetime.strptime(date, '%Y%m%d').date(), rate) for (date, rate) in cur]\n \n def import_bank_of_canada_rates(self, source):\n \"\"\"Import rates from a Bank of Canada lookup xml file\"\"\"\n root = ET.fromstring(source.read().strip())\n for observation in root.getiterator('Observation'):\n currency_element = observation.find('Currency_name')\n name = currency_element.text.strip()\n # Some currency names have (), some not, but if we can't find it, try without the ()\n if name not in Currency.by_name:\n name = RE_ENDS_WITH_PARENS.sub('', name).strip() # remove the parens at the end of the name\n currency_code = Currency(name=name).code\n date_element = currency_element.find('Observation_date')\n rate_element = currency_element.find('Observation_data')\n try:\n rate = float(rate_element.text.strip())\n except (ValueError, AttributeError): # probably n/a\n continue\n year, month, day = date_element.text.strip().split('-')\n self.set_CAD_value(date(int(year), int(month), int(day)), currency_code, rate)\n","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":2814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"636065360","text":"import SCons\nimport atexit\nimport cx_Oracle\n\n_owner = env.USERNAME.upper()\n_table_cache = {}\n_cxn = cx_Oracle.connect(\"/\")\n_cxn.autocommit = 1\natexit.register(_cxn.close)\n\nenv.Default(\".\")\n\nclass _SQLTable(SCons.Node.Node):\n\n NodeInfo = SCons.Node.FS.FileNodeInfo\n BuildInfo = SCons.Node.FS.FileBuildInfo\n\n def __init__(self, name):\n SCons.Node.Node.__init__(self)\n self.name = name\n self.store_info = 1\n self.ninfo = self.new_ninfo()\n self.changed_since_last_build = 
1\n self.dir = Dir(\"#.\")\n self.set_nocache()\n\n def __str__(self):\n return self.name\n\n def built(self):\n SCons.Node.Node.built(self)\n SCons.Node.store_info_map[self.store_info](self)\n\n def str_for_display(self):\n return \"'\" + self.__str__() + \"'\"\n\n def is_up_to_date(self):\n if not self.exists():\n return None\n else:\n return not self.changed()\n\n @SCons.Memoize.CountMethodCall\n def exists(self):\n \"\"\"\n The table exists if it has an entry in all_tables.\n \"\"\"\n try:\n return self._memo[\"exists\"]\n except KeyError:\n pass\n cur = _cxn.cursor()\n cur.execute(\"\"\"\n SELECT table_name\n FROM user_tables\n WHERE table_name = '{}'\n \"\"\".format(self.name.upper()))\n exists = cur.fetchone() is not None\n if exists:\n cur.execute(\"\"\"\n SELECT COUNT(*)\n FROM {}\n \"\"\".format(self.name.upper()))\n count = cur.fetchone()\n exists = count is not None and count[0] > 0\n cur.close()\n self._memo[\"exists\"] = exists\n return exists\n\n def get_csig(self):\n ninfo = self.get_ninfo()\n try:\n return ninfo.csig\n except AttributeError:\n pass\n if self.exists():\n csig = SCons.Util.MD5signature(self.get_contents())\n else:\n csig = 0\n ninfo.csig = csig\n return csig\n\n @SCons.Memoize.CountMethodCall\n def get_size(self):\n \"\"\"\n The table's size is the bytes on disk as reported by the segments table.\n \"\"\"\n try:\n return self._memo[\"get_size\"]\n except KeyError:\n pass\n if self.exists():\n cur = _cxn.cursor()\n cur.execute(\"\"\"\n SELECT bytes\n FROM user_segments\n WHERE segment_type = 'TABLE' AND\n segment_name = '{}'\n \"\"\".format(self.name.upper()))\n size = cur.fetchone()[0]\n cur.close()\n else:\n size = 0\n self._memo[\"get_size\"] = size\n return size\n\n @SCons.Memoize.CountMethodCall\n def get_timestamp(self):\n \"\"\"\n The table's timestamp is its last DDL time from all_objects.\n \"\"\"\n try:\n return self._memo[\"get_timestamp\"]\n except KeyError:\n pass\n if self.exists():\n cur = _cxn.cursor()\n cur.execute(\"\"\"\n SELECT FLOOR(last_ddl_time - TO_DATE('19700101', 'YYYYMMDD'))*24*3600 AS mtime\n FROM user_objects\n WHERE object_type = 'TABLE' AND\n object_name = '{}'\n \"\"\".format(self.name.upper()))\n ts = cur.fetchone()[0]\n cur.close()\n else:\n ts = 0\n self._memo[\"get_timestamp\"] = ts\n return ts\n\n @SCons.Memoize.CountMethodCall\n def get_stored_info(self):\n try:\n return self._memo['get_stored_info']\n except KeyError:\n pass\n try:\n sconsign_entry = self.dir.sconsign().get_entry(self.name)\n except (KeyError, EnvironmentError):\n sconsign_entry = SCons.SConsign.SConsignEntry()\n sconsign_entry.binfo = self.get_binfo()\n sconsign_entry.ninfo = self.get_ninfo()\n self._memo['get_stored_info'] = sconsign_entry\n return sconsign_entry\n\n @SCons.Memoize.CountMethodCall\n def get_contents(self):\n try:\n return self._memo['get_contents']\n except KeyError:\n pass\n cur = _cxn.cursor()\n cur.execute(\"\"\"\n SELECT comments\n FROM user_tab_comments\n WHERE table_name = '{}'\n \"\"\".format(self.name.upper()))\n contents = cur.fetchone()[0]\n self._memo['get_contents'] = contents\n return contents\n\n\ndef SQLTable(name):\n name = name.upper()\n try:\n return _table_cache[name]\n except KeyError:\n node = _SQLTable(name)\n _table_cache[name] = node\n env.Default(node)\n return node\n\n\nExport(['SQLTable'])\n","sub_path":"source/lib/SCons/sql_table_node.py","file_name":"sql_table_node.py","file_ext":"py","file_size_in_byte":4963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} 
+{"seq_id":"577154377","text":"from sklearn.neighbors import KNeighborsClassifier\n\"\"\"\n使用python程序模拟KNN算法\n\n\n\"\"\"\nimport numpy as np\nimport collections as cs\n\ndata = np.array([\n [203, 1], [126, 1], [89, 1], [70, 1], [196, 2], [211, 2], [221, 2], [311, 3], [271, 3]\n])\nfeature = data[:, 0] # 特征\nprint(feature)\n\nlabel = data[:, -1] # 结果分类\nprint(label)\n\npredictPoint = 200 # 预测数据\nprint(\"预测输入特征为:\" + str(predictPoint))\n\ndistance = list(map(lambda x: abs(predictPoint - x), feature)) # 各点到预测点的距离\nprint(distance)\n\nsortIndex = np.argsort(distance) # 排序,返回排序后各数据的原始下标\nprint(sortIndex)\n\nsortLabel = label[sortIndex] # 根据下标重新进行排序\nprint(sortLabel)\n\n# k = 3 # 设置k值大小为3\n\nfor k in range(1, label.size + 1):\n result = cs.Counter(sortLabel[0:k]).most_common(1)[0][0] # 根据k值计算前k个数据中出现次数最多的分类,即为预测的分类\n print(\"当k=\" + str(k) + \"时预测分类为:\" + str(result))\n","sub_path":"experiment7/num1.py","file_name":"num1.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"7404533","text":"import pygame,math,random\nimport numpy as np\n\nblack=(0,0,0)\nred=(255,0,0)\ngreen=(0,255,0)\nblue=(0,0,255)\nwhite=(255,255,255)\ngray=(120,120,120)\n\nspace=0\nbody=1\nhead=2\napple=3\n\nclass game:\n\n def __init__(self,x,y):\n self.running=False\n self.x=x\n self.y=y\n self.bw=60\n self.w=x*self.bw\n self.h=y*self.bw\n self.center=(int(x/2),int(y/2))\n self.highscore=1\n self.steps=0\n self.matrix=[[space for _ in range(x)] for _ in range(y)]\n\n def start(self):\n pygame.init()\n self.clock=pygame.time.Clock()\n self.display=pygame.display.set_mode((self.w,self.h))\n pygame.display.set_caption('AI Snake')\n self.font=pygame.font.Font('freesansbold.ttf',24)\n py=snake(self.center)\n self.player=py\n self.matrix[self.player.y][self.player.x]=head\n self.genApple()\n self.running=True\n \n def genApple(self):\n sp=[]\n for y in range(self.y):\n for x in range(self.x):\n if self.matrix[y][x]==space:\n sp.append((x,y))\n self.apple=random.choice(sp)\n self.matrix[self.apple[1]][self.apple[0]]=apple\n\n def update(self):\n op=(self.player.x,self.player.y)\n self.player.move()\n pos=(self.player.x,self.player.y)\n self.matrix[op[1]][op[0]]=space\n self.matrix[pos[1]][pos[0]]=head\n if pos==self.apple:\n self.player.length+=1\n self.body.append(op)\n elif pos in self.player.body or pos[0]<0 or pos[0]==self.x or pos[1]<0 or pos[1]==self.y:\n self.running=False\n\n def draw(self):\n self.display.fill(black)\n for y in range(len(self.matrix)):\n for x in range(len(self.matrix[y])):\n if self.matrix[y][x]==head:\n color=blue\n elif self.matrix[y][x]==body:\n color=green\n elif self.matrix[y][x]==apple:\n color=red\n else:\n color=black\n pygame.draw.rect(self.display,color,(x*self.bw,y*self.bw,self.bw,self.bw))\n for x in range(self.x+1):\n pygame.draw.rect(self.display,gray,(x*self.bw-2,0,4,self.w))\n for y in range(self.y+1):\n pygame.draw.rect(self.display,gray,(0,y*self.bw-2,self.h,4))\n\n render=self.font.render('Score: '+str(self.player.length),True,white)\n self.display.blit(render,(10,10))\n\n render=self.font.render('Highscore: '+str(self.highscore),True,white)\n self.display.blit(render,(10,40))\n\n for e in pygame.event.get():\n if e.type==pygame.QUIT:\n pygame.quit()\n raise SystemExit\n\n pygame.display.update() \n\nclass snake:\n\n def __init__(self,pos):\n self.x,self.y=pos\n self.body=[]\n self.length=1\n self.dir=0\n\n def move(self):\n if self.body:\n for b in range(len(self.body)-1,0,-1):\n 
self.body[b]=self.body[b-1]\n self.body[0]=(self.x,self.y)\n if self.dir=='up':\n self.y-=1\n elif self.dir=='down':\n self.y+=1\n elif self.dir=='left':\n self.x-=1\n elif self.dir=='right':\n self.x+=1\n\ndef main():\n g=game(9,9)\n g.start()\n while g.running:\n g.draw()\n g.clock.tick(5)\n g.update()\n\nif __name__=='__main__':\n main()","sub_path":"snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":3460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"187836406","text":"from scipy import misc\nfrom PIL import Image\nfrom PIL import TiffTags\nfrom numpy import *\nfrom libtiff import TIFF\n\n# -------------------------------------------------------------\nf1Path = '/home/caesar/FeNiCr/FeNiCr-00098-00005.tif'\nf2Path = '/home/caesar/FeNiCr/FeNiCr-00098-02503.tif'\nt1 = TIFF.open('/home/caesar/FeNiCr/FeNiCr-00098-00005.tif','r')\nt2 = TIFF.open('/home/caesar/FeNiCr/FeNiCr-00098-02503.tif','r')\n\nwidth = t1.GetField('ImageWidth')\nheight = t1.GetField('ImageLength')\n\nt1a = list(t1.iter_images())[0]\nt2a = list(t2.iter_images())[0]\n\ndt = t1a - t2a\n\noutputTif = TIFF.open('/home/caesar/FeNiCr/a/dt.tif', mode='w')\n# TIFF.SetField's definition starts at line 1349\n# Refer links: https://www.awaresystems.be/imaging/tiff/tifftags/photometricinterpretation.html\n# PHOTOMETRIC_MINISBLACK = 1;\noutputTif.SetField('PHOTOMETRIC', 1)\noutputTif.SetField('BITSPERSAMPLE', 32)\noutputTif.SetField('SAMPLESPERPIXEL', 1)\n# Refer Links: https://www.awaresystems.be/imaging/tiff/tifftags/sampleformat.html\n# SAMPLEFORMAT_INT = 2\noutputTif.SetField('SAMPLEFORMAT', 2)\noutputTif.SetField('IMAGELENGTH', height)\noutputTif.SetField('IMAGEWIDTH', width)\noutputTif.SetField('PLANARCONFIG',1) # PLANARCONFIG_CONTIG/PLANARCONFIG_SEPARATE\n# outputTif.SetField(TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_MINISBLACK)\n# outputTif.SetField(TIFFTAG_BITSPERSAMPLE, 32)\n# outputTif.SetField(TIFFTAG_SAMPLESPERPIXEL, 1)\n# outputTif.SetField(TIFFTAG_SAMPLEFORMAT, SAMPLEFORMAT_INT)\n# outputTif.SetField(TIFFTAG_IMAGELENGTH, height)\n# outputTif.SetField(TIFFTAG_IMAGEWIDTH, width)\n# outputTif.SetField(TIFFTAG_PLANARCONFIG,) # PLANARCONFIG_CONTIG/PLANARCONFIG_SEPARATE\noutputTif.write_image(dt,compression=None, write_rgb=False)\noutputTif.close()\n\n# In Windows:\n# folderPath = 'H:/xrd_data_sample/FeNiCr/a'\n# file1Path = 'H:/xrd_data_sample/FeNiCr/FeNiCr-00098-02500.tif'\n# file2Path = 'H:/xrd_data_sample/FeNiCr/FeNiCr-00098-02500.tif'\n# tiff_to_image_array(filePath,folderPath,'.jpg')\n# t1 = Image.open(file1Path,mode=\"r\")\n# t2 = Image.open(file2Path,mode=\"r\")","sub_path":"python_src/tifParser.py","file_name":"tifParser.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"224494218","text":"import numpy as np\n\n# 在位加减乘除\na = np.ones(3)*1\nb = np.ones(3)*2\nnp.add(a,b,out=b) # 在位加法,赋值到b\nnp.divide(a,2,out=a) # 在位除法,赋值到a\nnp.negative(a,out=a)\nnp.multiply(a,b, out=a)\n# print(a)\n# print(b)\n\n# 取整数\nc = np.random.uniform(0, 10, 10)\n# print(c)\n# print(c - c%1)\n# print(np.floor(c))\n# print(np.ceil(c) - 1)\n# print(np.trunc(c))\n\n# 创建一个矩阵,数值从0到4\nd = np.zeros((5,5))\nd += np.arange(5)\n# print(d)\n\n# 从迭代器中生成一个数组\ndef generate():\n for i in range(10):\n yield i\ne = np.fromiter(generate(), dtype=float, count=-1) # 从迭代器中拿多少个元素,-1指全部\n# print(e)\n\n# 创建一个长度10到等宽向量,0,11之间,不含0,11\nf = np.linspace(0, 11, 11, endpoint=False)[1:]\n# print(f)\n\n# 向量排序\ng = 
np.random.randint(0,10,10)\ng.sort()\n# print(g)\n\n# 除了np.sum 求和\n# print(np.add.reduce(g))\n# print(g)\n\n# 检查数组是否相等\nh1 = np.random.randint(0,2,2)\nh2 = np.random.randint(0,2,2)\nequal1 = np.allclose(h1,h2)\nequal2 = np.array_equal(h1,h2)\n# print(equal1)\n# print(equal2)\n\n# 创建只读数组\ni = np.zeros(10)\ni.flags.writeable = False\n# i[0] = 1\n\n# 笛卡尔坐标转极坐标\nj = np.random.random((10, 2))\nj1, j2 = j[:,0], j[:,1]\nR = np.sqrt(j1**2+j2**2)\nT = np.arctan(j2, j1)\n# print(R,T)\n\n# 最大值替换\nk = np.random.random(10)\n# print(k)\nk[k.argmax()] = 0 # argmax最大值的索引\n# print(k)\n\n# 网格点坐标\nl = np.zeros((5,5), [('x', float), ('y',float)])\nl['x'], l['y'] = np.meshgrid(np.linspace(0,1,5), np.linspace(0,1,5))\n# print(l)\n\n# cauchy矩阵(Cij = 1 / (xi - yj))\nm1 = np.arange(8)\nm2 = m1 + 0.5\nC = 1.0 / np.subtract.outer(m1, m2)\n# print(np.linalg.det(C))\n\n# numpy标量最大值,最小值\n# for dtype in [np.int8, np.int32, np.int64]:\n# pass\n# # print(np.iinfo(dtype).min)\n# # print(np.iinfo(dtype).max)\n#\n# for dtype in [np.float32, np.float64]:\n# pass\n# # print(np.finfo(dtype).min)\n# # print(np.finfo(dtype).max)\n# # print(np.finfo(dtype).eps)\n\n# 打印数组中所有的数值\n# np.set_printoptions(threshold=np.nan)\n# n = np.zeros((16,16))\n# print(n)\n\n# 找到与目标最接近的值\no = np.arange(100)\ntarget = 44.8\nindex = (np.abs(o - target)).argmin()\n# print(o[index])\n\n# 创建一个表示位置(x,y)与颜色(r,g,b)的结构化数组\n\n# Z = np.zeros(10, [ ('position', [ ('x', float, 1),\n# ('y', float, 1)]),\n# ('color', [ ('r', float, 1),\n# ('g', float, 1),\n# ('b', float, 1)])])\n# print (Z)\n\n# 随机向量间,点与点的距离\np = np.random.random((10,2))\n# print(p)\n# 方法1\nX,Y = np.atleast_2d(p[:,0],p[:,1])\n# D = np.sqrt((X - X.T) ** 2 - (Y - Y.T) ** 2)\n# print(D)\n# 方法2\nimport scipy.spatial\nD = scipy.spatial.distance.cdist(p, p)\n# print(D)\n\n# astype\nq = np.arange(10,dtype=np.int32)\nq = q.astype(np.float, copy=False)\n# print(q)\n\n# enumerate 等价操作\nr = np.arange(20).reshape(4,5)\nfor i, v in np.ndenumerate(r):\n # print(i,v)\n pass\nfor i, v in np.ndindex(r.shape):\n # print(i, r[i])\n pass\n\n# Gaussian-like数组\ns1, s2 = np.meshgrid(np.linspace(-1,1,10), np.linspace(-1,1,10))\nt = np.sqrt(s1 * s1 + s2 * s2)\nsigma, mu = 1.0, 0.0\nG = np.exp(-((t - mu) ** 2 / (2.0 * sigma ** 2)))\n# print(G)\n\n# 随机在数组中放置P个元素\nn = 10\np = 30\nz = np.zeros(((n, n, n)))\nnp.put(z, np.random.choice(range(n*n*n), p, replace=False), 1) # replace=False 无放回\n# print(z)\n\n# 减去一个矩阵中每一行的平均值\nu = np.random.randint(5, 10, (5, 10))\nv = u - u.mean(axis=1, keepdims=True) # keepdims 保持维度不变\n# print(u)\n# print(v)\n\n# 通过第n列数组进行排序\nw = np.random.randint(0, 10, (3, 3))\n# print(w)\n# print(w[w[:, 1].argsort()]) # argsort()返回排序后的索引\n\n# 检查一个二维数组是否有空列\ny = np.random.randint(0,3,(3,10))\n# print(y)\n# print((~y.any(axis=0)).any())\n\n# 近似值\nz = np.random.randint(0,10,10)\ntarget = 3.4\nm = z.flat[np.abs(z - target).argmin()]\n# print(m)\n\n# 用迭代器计算不同形状的数组\na = np.arange(3).reshape(3,1)\nb = np.arange(3).reshape(1,3)\nit = np.nditer([a,b,None])\nfor x, y, z in it:\n z[...] 
= x + y\n# print(it.operands[2])\n\n# 创建一个有name属性的数组类\nclass NameArray(np.ndarray):\n def __new__(cls, array, name='no name'):\n obj = np.asarray(array).view(cls)\n obj.name = name\n return obj\n def __array_finalize__(self, obj):\n if obj is None: return\n self.info = getattr(obj, 'name', 'no name')\n\na = NameArray(np.arange(10), 'range_10')\n# print(a.name)\n\n# 考虑一个给定的向量,如何对由第二个向量索引的每个元素加1(小心重复的索引)?\na = np.ones(10)\nb = np.random.randint(0, len(a), 20)\nc = np.bincount(b, minlength=len(a))\na += c\nprint(a)\n","sub_path":"Numpy_execrise/Q100_normal.py","file_name":"Q100_normal.py","file_ext":"py","file_size_in_byte":4862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"463863631","text":"import pygame\nfrom resource_handler import *\nfrom pygame.locals import *\n\nclass Bullet(pygame.sprite.Sprite):\n def __init__(self, pos, speed, enemy):\n pygame.sprite.Sprite.__init__(self)\n self.image, self.rect = load_image('bullet.png', -1)\n screen = pygame.display.get_surface()\n self.area = screen.get_rect()\n self.rect.center = pos\n self.onscreen = True\n self.speed = speed\n self.firerate = 60\n if enemy:\n self.image = pygame.transform.flip(self.image, 1, 0)\n\n def update(self):\n self.move()\n\n def move(self):\n newpos = self.rect.move(-(self.speed), 0)\n self.rect = newpos\n if not self.area.collidepoint(self.rect.midright):\n self.onscreen = False\n","sub_path":"Bullet.py","file_name":"Bullet.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"207702626","text":"import numpy\nimport csv\na =0 \nb= 0\nc =0 \nd =0 \ne=0\nf= 0\ng= 0\nh=0\ni=0\n\nmatriz = numpy.array([[a, b, c], [d,e,f] , [g,h,i] ])\nprint(matriz)\n","sub_path":"semilleroEdgar/DiosMatrices.py","file_name":"DiosMatrices.py","file_ext":"py","file_size_in_byte":138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"86856822","text":"\n# coding: utf-8\n\n# In[6]:\n\n\nimport re\nimport random\nimport orgmode_handler\nimport os\n\ndef add_title_line_to_org_file(orgmode_filename, title):\n with open(orgmode_filename) as f:\n content = f.read()\n title_line = \"#+title: {}\\n\".format(title)\n with open(orgmode_filename, 'w') as f:\n f.write(title_line)\n f.write(content)\n \ndef convert_temp_org_to_html_with_reference(orgmode_filename):\n html_filename = \"{}.html\".format(orgmode_filename.split('.')[0])\n #print html_filename\n cmd = \"pandoc -s -S --toc -c pandoc.css --filter pandoc-citeproc {} -o {}\".format(orgmode_filename, html_filename)\n #print cmd\n os.system(cmd)\n return html_filename\n\n# file path config\norgmode_filename = \"/Users/wsy/Dropbox/var/wsywork/data/org/temp.org\"\n# pre process\norg_time, org_tags_list = orgmode_handler.orgmode_pre_process(orgmode_filename)\n# get title of article and remove it from top level\ntitle = orgmode_handler.chop_title_from_orgmode_file(orgmode_filename)\n# for each section, level up to top\norgmode_handler.level_up_once(orgmode_filename)\n# add title back to the article org-mode file\nadd_title_line_to_org_file(orgmode_filename, title)\n# change figure caption to numlist in orgmode file\n#orgmode_handler.change_figure_caption_to_numlist(orgmode_filename)\n# change figure reference to num in orgmode file\n#orgmode_handler.change_figure_ref_to_num(orgmode_filename)\n# convert article to html format with reference\nhtml_filename = 
convert_temp_org_to_html_with_reference(orgmode_filename)\n\n\n# Preview the html file generated\nos.system('open {} -a /Applications/Google\\ Chrome.app'.format('file://'+ html_filename))\n\n\n# In[ ]:\n\n\n\n","sub_path":"org-to-html.py","file_name":"org-to-html.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"117243103","text":"\"\"\"\nTask 14\n@author: Alexandr Mazanik\n\"\"\"\nimport time\nimport turtle\n\n\ndef main():\n turtle.shape('turtle')\n turtle.speed(2)\n\n a = 300 #the length of the side of the star\n\n goto(50, 20)\n draw_star(a, 5)\n goto(-400, 20)\n draw_star(a, 11)\n\n\ndef draw_star(length, n):\n alpha = 180 - 180 / n\n for i in range(n):\n turtle.forward(length)\n turtle.right(alpha)\n\n\ndef goto(x, y):\n turtle.penup()\n turtle.goto(x, y)\n turtle.pendown()\n\nmain()\ntime.sleep(2)","sub_path":"semester_1/lab_work_2/task_14.py","file_name":"task_14.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"449970427","text":"import tushare as ts\nimport pandas as pd\nimport time\nimport os\n\ninputdatadir = 'D:/Works/python/report/input_data'\npd.set_option('expand_frame_repr', False)\n\n# 从tushare获取指定日期的数据\ndef get_today_all_ts(date):\n date_now = date\n pro = ts.pro_api('e239683c699765e4e49b43dff2cf7ed7fc232cc49f7992dab1ab7624')\n df_daily = pro.daily(trade_date=date_now)\n df_daily_basic = pro.daily_basic(trade_date=date_now)\n df_basics = pro.stock_basic()\n df_all = pd.merge(left=df_daily, right=df_daily_basic, on='ts_code', how='outer')\n df_all = pd.merge(left=df_all, right=df_basics, on='ts_code', how='outer')\n df_all['ts_code'] = df_all['ts_code'].astype(str) + ' '\n\n # 保存数据\n df_all.to_csv(inputdatadir+'/'+ str(date_now) + '_ts.csv', index=False, encoding='utf_8_sig')\n print('%sis downloaded.' 
% (str(date_now)))\n print(df_all)\n return df_all\n\nif __name__ == '__main__':\n print('start...')\n print('get daily data')\n get_today_all_ts(date='20220327')\n print('end')\n","sub_path":"Rabbit/getdailydata.py","file_name":"getdailydata.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"114448614","text":"import curses\nimport time\n\n\ndef percentage():\n win = curses.newwin(0, 0, 0, 0)\n # win.border(0)\n loading = 0\n while loading < 100:\n loading += 1\n time.sleep(0.03)\n update_progress(win, loading)\n\n\ndef update_progress(win, progress):\n rangex = (30 / float(100)) * progress\n pos = int(rangex)\n display = '#'\n if pos != 0:\n win.addstr(0, pos, f'{display}')\n win.refresh()\n\n\ncurses.initscr()\npercentage()\n# curses.endwin()\n","sub_path":"05.application/01.mnistnet/test.library/curses/ex01.py","file_name":"ex01.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"421132875","text":"import MySQLdb\r\n\r\ndb = MySQLdb.connect(\"localhost\",\"root\",\"0000\",\"mydb\" )\r\n\r\ncursor = db.cursor()\r\n\r\nname = raw_input(\"Enter the First name of the Employee:\")\r\ntemp = raw_input(\"Enter the update you want to do: \")\r\nvalue = raw_input(\"It's new value: \")\r\n\r\ntry:\r\n cursor.execute(\"UPDATE EMPLOYEE SET %s = %s WHERE FIRST_NAME = %s\",(temp,value,name))\r\n\r\n db.commit()\r\nexcept:\r\n\r\n db.rollback()\r\n\r\ndb.close()\r\n","sub_path":"mysql/update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"508548942","text":"with open('rosalind_gc (1).txt', 'r') as file_in:\r\n a = file_in.read()\r\na = a.split('\\n')\r\nb = ''.join(a)\r\nc = b.split('>')\r\nfor i in range(1, len(c)):\r\n d = c[i]\r\n len_gc = len(d[13:len(d)])\r\n gc_count = 0\r\n for i, symbol in enumerate(d):\r\n if symbol == 'C':\r\n gc_count += 1\r\n if symbol == 'G':\r\n gc_count += 1\r\n gc_content = (gc_count / len_gc) * 100\r\n print(f'{d[0:13]} {gc_content}')\r\n\r\n\r\n","sub_path":"Computing GC Content.py","file_name":"Computing GC Content.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"31551417","text":"from django.db.models import ProtectedError\nfrom django.test import TestCase\n\nfrom .models import Group, Student\n\n\nclass ModelsTestCase(TestCase):\n def setUp(self):\n group = Group.objects.create(name=\"Group 1\")\n\n student = Student()\n student.FirstName = \"Joe\"\n student.LastName = \"Doe\"\n student.IndexNo = \"111\"\n student.Group = group\n\n def group_with_students_removal(self):\n with self.assertRaises(ProtectedError):\n Group.objects.all().delete()\n","sub_path":"StudentsDjango/students/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"108240887","text":"import sys\nimport sqlite3\nfrom PyQt5.QtWidgets import *\nfrom PyQt5 import QtWidgets\nfrom work_ui import Ui_Form\nfrom work_edit_dialog import Ui_Dialog\n\n\nclass ReadOnlyDelegate(QtWidgets.QStyledItemDelegate):\n def createEditor(self, parent, option, index): # Создан для запрета на редактирование таблицы\n return\n\n\nclass Work(QWidget, Ui_Form):\n def __init__(self):\n 
super().__init__()\n self.setupUi(self)\n self.con = sqlite3.connect(\"database/production.db\")\n self.cur = self.con.cursor()\n self.initUI()\n\n def initUI(self):\n # Подключаем сигнал зависящий от изменения текста lineEdit\n self.lineEdit.textChanged.connect(self.load_table)\n # Подключаем событие для кнопки pb_edit (pb от сокращения PushButon), pb_add соответствен��о\n self.pb_edit.clicked.connect(self.edit_elem)\n self.pb_add.clicked.connect(self.add_elem)\n self.load_table()\n\n def load_table(self):\n # Создаём запрос для сортировки tools (бд), начало Названия товара должно начинаться с self.lineEdit.text()\n result = self.cur.execute(\"\"\"SELECT * FROM works WHERE Название like ?\"\"\",\n (self.lineEdit.text() + \"%\", )).fetchall()\n # Получаем список заголовков таблицы\n title_list = [i[1] for i in self.cur.execute(\"pragma table_info(works)\").fetchall()]\n # Заполняем tableWidget\n header = self.tableWidget.horizontalHeader()\n self.tableWidget.setColumnCount(len(title_list))\n self.tableWidget.setHorizontalHeaderLabels(title_list)\n self.tableWidget.setRowCount(0)\n delegate = ReadOnlyDelegate(self.tableWidget)\n for i, elem in enumerate(result):\n self.tableWidget.setRowCount(i + 1)\n # Используем класс delegate (10) для запрета на редактирования столбца i\n self.tableWidget.setItemDelegateForRow(i, delegate)\n for j, elem1 in enumerate(elem):\n self.tableWidget.setItem(i, j, QTableWidgetItem(str(elem1)))\n for i in range(4): # Задаём свойства расширения для каждого столбца каждой таблицы\n header.setSectionResizeMode(i, QtWidgets.QHeaderView.Stretch) # Stretch - максимально расшириться\n header.setSectionResizeMode(0, QtWidgets.QHeaderView.ResizeToContents) # ResizeToContents - минимально\n\n def add_elem(self):\n # Класс вызывает диалоговое окно и передаёт нужные параметры для работы.\n dialog = Editdialog(\"add\", self.con, self.cur)\n dialog.show()\n # Отключаем основное окно до окончания работы диалогового окна\n self.setEnabled(False)\n dialog.exec()\n self.setEnabled(True)\n # После изменений обновляем таблицу\n self.load_table()\n\n def edit_elem(self):\n rows = list(set([i.row() for i in self.tableWidget.selectedItems()]))\n # Получаем список выделенных строк\n if len(rows) != 1: # Строка обязательно должна быть одна\n return 0\n # Создаём и заполняем список с данными о выделенной строке\n select_row = []\n for i in range(4):\n select_row.append(self.tableWidget.item(rows[0], i).text())\n # Класс вызывает диалоговое окно и передаёт нужные параметры для работы.\n dialog = Editdialog(\"edit\", self.con, self.cur, select_row)\n dialog.show()\n # Отключаем основное окно до окончания работы диалогового окна\n self.setEnabled(False)\n dialog.exec()\n self.setEnabled(True)\n # После изменений обновляем таблицу\n self.load_table()\n\n\nclass Editdialog(QDialog, Ui_Dialog): # Диалог используемый для добавления и редактирования элементов склада\n def __init__(self, type_dialog, *args):\n super().__init__()\n self.setupUi(self)\n self.type = type_dialog\n self.con = args[0]\n self.cur = args[1]\n self.select_row = args[-1]\n self.initUI()\n\n def initUI(self):\n self.buttonBox.accepted.connect(self.acept_data)\n self.buttonBox.rejected.connect(self.reject_data)\n if self.type == \"edit\": # Если диалог направлен на редактирование данных - вбиваем данные в форму\n self.le_name.setText(self.select_row[1])\n self.le_ei.setText(self.select_row[2])\n self.dsb_price.setValue(float(self.select_row[3].replace(\",\", \".\")))\n\n def acept_data(self):\n try:\n # Получаем введенные 
пользователем данные\n name = self.le_name.text()\n price = float(self.dsb_price.text().replace(\",\", \".\"))\n ei = self.le_ei.text() # Единица измерения\n if name and ei and price: # В случае правильно введённых данных\n if self.type == \"add\":\n self.cur.execute(\"INSERT INTO works(Название, 'Ед. изм', 'Стоимость р')\"\n \"VALUES(?, ?, ?)\", (name, ei, price))\n else:\n self.cur.execute(\"UPDATE works SET 'Название' = ?, 'Ед. изм' = ?,\"\n \" 'Стоимость р' = ? WHERE id = ?\", (name, ei, price, self.select_row[0]))\n self.con.commit()\n self.close()\n else:\n self.lineEdit_error.setText(\"Некоторые поля не заполнены\")\n except ValueError:\n self.lineEdit_error.setText(\"Некорректные значения полей\")\n except sqlite3.IntegrityError:\n self.lineEdit_error.setText(\"Название занято\")\n\n def reject_data(self):\n self.close()\n\n\ndef except_hook(cls, exception, traceback):\n sys.__excepthook__(cls, exception, traceback)\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = Work()\n ex.show()\n sys.excepthook = except_hook\n sys.exit(app.exec())\n","sub_path":"код проекта/Work.py","file_name":"Work.py","file_ext":"py","file_size_in_byte":6883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"579144319","text":"# -*- coding:utf-8 -*-\nimport numpy as np\nimport tensorflow as tf\n\nimport vgg19\nimport utils\n# install tensorflow0.8.0,scikit-image\n# sudo apt-get install python-pip python-dev\n# sudo pip install --upgrade https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.8.0rc0-cp27-none-linux_x86_64.whl\n# 可使用sudo pip install --upgrade tensorflow更新到最新版本\n\n# sudo pip install scikit-image\n#\nimg1 = utils.load_image(\"./test_data/tiger.jpeg\")\nimg2 = utils.load_image(\"./test_data/puzzle.jpeg\")\n\nbatch1 = img1.reshape((1, 224, 224, 3))\nbatch2 = img2.reshape((1, 224, 224, 3))\n\n# numpy.concatenate((a1, a2, ...), axis=0, out=None) Join a sequence of arrays along an existing axis.\nbatch = np.concatenate((batch1, batch2), 0)\n\n# with tf.Session(config=tf.ConfigProto(gpu_options=(tf.GPUOptions(per_process_gpu_memory_fraction=0.7)))) as sess:\nwith tf.device('/cpu:0'):\n with tf.Session() as sess:\n\n images = tf.placeholder(\"float\", [2, 224, 224, 3])\n feed_dict = {images: batch}\n\n vgg = vgg19.Vgg19()\n # tf.variable_scope可以让变量有相同的命名,包括tf.get_variable得到的变量,还有tf.Variable的变量\n # tf.name_scope可以让变量有相同的命名,只是限于tf.Variable的变量\n with tf.name_scope(\"content_vgg\"):\n vgg.build(images)\n\n prob = sess.run(vgg.prob, feed_dict=feed_dict)\n print(prob)\n utils.print_prob(prob[0], './synset.txt')\n utils.print_prob(prob[1], './synset.txt')\n","sub_path":"test_vgg19.py","file_name":"test_vgg19.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"241564454","text":"import numpy as np\nimport pylab as plt\nfrom matplotlib.colors import ListedColormap\n\ncmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])\ncmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])\n\n\ndef plot_surface(X, y, clf):\n h = 0.2\n x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])\n\n Z = Z.reshape(xx.shape)\n plt.figure(figsize=(8, 8))\n plt.pcolormesh(xx, yy, Z, cmap=cmap_light)\n\n # Plot also the training points\n plt.scatter(X[:, 0], X[:, 
1], c=y, cmap=cmap_bold)\n plt.xlim(xx.min(), xx.max())\n plt.ylim(yy.min(), yy.max())\n","sub_path":"logistic_regression/dmia/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"49101950","text":"#from collections import defaultdict\r\nimport math\r\nimport random\r\nfrom helpers import Map, load_map, show_map\r\nfrom queue import PriorityQueue\r\n\r\ndef euclid_dist(pos,goal):\r\n \"\"\"\r\n Inputs are 2-element lists containing 2D coordinates\r\n Return euclidean distances\r\n \"\"\"\r\n dist = math.sqrt(math.pow((pos[0]-goal[0]),2)+math.pow((pos[1]-goal[1]),2))\r\n return dist\r\n\r\n\r\ndef lowest_f(fScores,frontier,M,cameFrom,start):\r\n x = set(fScores.keys())\r\n y = x.intersection(frontier)\r\n \r\n camefrom_set = set(cameFrom.keys())\r\n #find node in frontier with lowest fScore\r\n \r\n lowest = random.choice(list(y)) #pick a node\r\n \r\n \r\n \r\n #need to update fscores after updating gscores \r\n for node in frontier:\r\n if node in y: #avoid indexing error\r\n if fScores[node]0:\r\n alt_node2=[]\r\n for each in alt_nodes:\r\n if cameFrom[each] in camefrom_keyset or cameFrom[each] is start:\r\n alt_node2.append(each)\r\n \r\n for each in alt_node2:\r\n altpath = path[:]\r\n altpath.append(current)\r\n current_2 = each\r\n altpath.append(current_2)\r\n while current_2 in cameFrom.keys():\r\n current_2 = cameFrom[current_2]\r\n altpath.append(current_2)\r\n altpaths.append(altpath) \r\n path.append(current)\r\n \r\n valid_altpaths=[]\r\n for each in altpaths:\r\n if each[0]==goal and each[-1]==start:\r\n valid_altpaths.append(each)\r\n valid_altpaths.append(path)\r\n costs=[]\r\n for each in valid_altpaths:\r\n total=0.0\r\n for i in range(len(each)-1):\r\n total += euclid_dist(M.intersections[each[i]],M.intersections[each[i+1]])\r\n costs.append(total)\r\n lowest = [costs[0],0]\r\n for j in range(len(costs)):\r\n if costs[j]gScores[each]:\r\n continue\r\n gScores[each] = cost\r\n fScores[each] = priority\r\n \r\n #cost = euclid_dist(M.intersections[each],M.intersections[current_node])\r\n #Nodes[each].append([each,cost,current_node])\r\n \r\n #print(cameFrom)\r\n \r\n \r\n cameFrom[each]= current_node\r\n \r\n \r\n \r\n \r\n \r\n #show_map(M,start,goal,Traversed)\r\n return None","sub_path":"astar/student_code_04.py","file_name":"student_code_04.py","file_ext":"py","file_size_in_byte":6192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"250112429","text":"types_of_people = 10\n#formats variable in string\nx = f\"There are {types_of_people} types of people\"\n\nbinary = \"binary\"\ndo_not = \"dont't\"\n#formats variable in string\ny = f\"Those who know {binary} and those who {do_not}\"\n\n#print variables x, y\nprint(x)\nprint(y)\n#formats variable in string\nprint(f\"I said: {x}\")\nprint(f\"I also said: '{y}'\")\n\nhilarious = False\n#joke_evaluation = f\"isnt that joke funny? {hilarious}\"\n#print(joke_evaluation) OR,\njoke_evaluation = \"isnt that joke so funny? 
{}\"\n#Format function\nprint(joke_evaluation.format(hilarious))\n#adding strings\nw = \"This is the left side of....\"\ne = \"a string with a right side.\"\n\nprint(w + e)\n","sub_path":"ex6.py","file_name":"ex6.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"447054361","text":"import cv2\nimport numpy as np\n\nfrom model import FacialExpressionModel\n\n\nmodel = FacialExpressionModel(\"model.json\", \"model_weights.h5\")\n\nfacec = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n\n\nclass VideoCamera(object):\n\n def __init__(self):\n self.video = cv2.VideoCapture(0)\n\n def __del__(self):\n self.video.release()\n\n def get_frame(self):\n\n\n \n _, frame = self.video.read()\n\n gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n scaleFactor = 1.3\n minNeighbors = 5\n faces = facec.detectMultiScale(gray_frame, scaleFactor, minNeighbors)\n\n for (x, y, w, h) in faces:\n\n\n roi = gray_frame[y:y+h, x:x+w]\n roi = cv2.resize(roi, (48, 48))\n prediction = model.predict_emotion(\n roi[np.newaxis, :, :, np.newaxis])\n Symbols = {\"Happy\": \":)\", \"Sad\": \":}\", \"Surprise\": \"!!\",\n \"Angry\": \"?\", \"Disgust\": \"#\", \"Neutral\": \".\", \"Fear\": \"~\"}\n Text = str(prediction) + Symbols[str(prediction)]\n Text_Color = (180, 105, 255)\n\n Thickness = 4\n Font_Scale = 2\n Font_Type = cv2.FONT_HERSHEY_DUPLEX\n\n cv2.putText(frame, Text, (x, y), Font_Type,\n Font_Scale, Text_Color, Thickness)\n xc = int((x + x+w)/2)\n yc = int((y + y+h)/2)\n radius = int(w/2)\n\n cv2.circle(frame, (xc, yc), radius, (0, 255, 0), Thickness)\n _, jpeg = cv2.imencode('.jpg', frame)\n\n return jpeg.tobytes()\n","sub_path":"camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"605952695","text":"'''\nSupport for RFC 2136 dynamic DNS updates.\nRequires dnspython module.\n'''\n# Import python libs\nimport logging\n\n\nlog = logging.getLogger(__name__)\n\n\ntry:\n import dns.query\n import dns.update\n dns_support = True\nexcept ImportError as e:\n dns_support = False\n\ndef __virtual__():\n '''\n Confirm dnspython is available.\n '''\n if dns_support:\n return 'ddns'\n return False\n\n\ndef update(zone, name, ttl, rdtype, data, nameserver='127.0.0.1', replace=False):\n '''\n Add, replace, or update a DNS record.\n nameserver must be an IP address and the minion running this module\n must have update privileges on that server.\n If replace is true, first deletes all records for this name and type.\n\n CLI Example::\n\n salt ns1 ddns.update example.com host1 60 A 10.0.0.1\n '''\n fqdn = '{}.{}'.format(name, zone)\n request = dns.message.make_query(fqdn, rdtype)\n answer = dns.query.udp(request, nameserver)\n\n rdtype = dns.rdatatype.from_text(rdtype)\n rdata = dns.rdata.from_text(dns.rdataclass.IN, rdtype, data)\n \n is_update = False\n for rrset in answer.answer:\n if rdata in rrset.items:\n rr = rrset.items\n if ttl == rrset.ttl:\n if replace and (len(answer.answer) > 1\n or len(rrset.items) > 1):\n is_update = True\n break\n return None\n is_update = True\n break\n\n dns_update = dns.update.Update(zone)\n if is_update:\n dns_update.replace(name, ttl, rdata)\n else:\n dns_update.add(name, ttl, rdata)\n answer = dns.query.udp(dns_update, nameserver)\n if answer.rcode() > 0:\n return False\n return True\n\n\ndef delete(zone, name, rdtype=None, data=None, 
nameserver='127.0.0.1'):\n '''\n Delete a DNS record.\n\n CLI Example::\n\n salt ns1 ddns.delete example.com host1 A\n '''\n fqdn = '{}.{}'.format(name, zone)\n request = dns.message.make_query(fqdn, (rdtype or 'ANY'))\n\n answer = dns.query.udp(request, nameserver)\n if not answer.answer:\n return None\n\n dns_update = dns.update.Update(zone)\n\n if rdtype:\n rdtype = dns.rdatatype.from_text(rdtype)\n if data:\n rdata = dns.rdata.from_text(dns.rdataclass.IN, rdtype, data)\n dns_update.delete(name, rdata)\n else:\n dns_update.delete(name, rdtype)\n else:\n dns_update.delete(name)\n\n answer = dns.query.udp(dns_update, nameserver)\n if answer.rcode() > 0:\n return False\n return True\n","sub_path":"salt/modules/ddns.py","file_name":"ddns.py","file_ext":"py","file_size_in_byte":2570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"183864021","text":"IDsum = 0\n\nwith open('input4.txt', \"r\") as f:\n for line in f:\n\n #Read all the stuff from each line\n string = line.split(\"-\")\n name = string[:-1]\n string2 = string[-1].split(\"[\")\n ID = string2[0]\n chsum = string2[1]\n chsum = chsum.replace(\"]\", \"\")\n chsum = chsum.replace(\"\\n\", \"\")\n\n #Sum chars\n letters = [0] * 200\n for word in name:\n for c in word:\n #print(c)\n letters[ord(c)] += 1\n\n #Check which 5 are most used\n sortlist = sorted(range(len(letters)), key=lambda k: letters[k], reverse=True)\n topstring = \"\"\n for i in sortlist[:5]:\n topstring += chr(i)\n\n #Check if correct checksum\n if topstring == chsum:\n IDsum += int(ID)\n\nprint(IDsum)\n\n\n","sub_path":"2016/day4_1.py","file_name":"day4_1.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"583696907","text":"class Vector:\n def __init__(self, x = 0.0, y = 0.0, z = 0.0):\n self.x = x\n self.y = y\n self.z = z\n\nclass StickData:\n def __init__(self, L = \"\", posX = \"\", posY = \"\", posZ = \"\", rotX = \"\", rotY = \"\", rotZ = \"\"):\n self.L = L\n self.posX = posX\n self.posY = posY\n self.posZ = posZ\n self.rotX = rotX\n self.rotY = rotY\n self.rotZ = rotZ\n \n def topara(self):\n data={}\n data['x']=self.posX\n data['y']=self.posY\n data['z']=self.posZ\n data['rx']=self.rotX\n data['ry']=self.rotY\n data['rz']=self.rotZ\n data['L']=self.L\n return data\n\n def __str__(self):\n return \"L = %s, position(%s, %s, %s), rotation(%s, %s, %s)\" % (self.L, self.posX, self.posY, self.posZ, self.rotX, self.rotY, self.rotZ)\n \nclass NodeData:\n def __init__(self):\n self.name = \"\"\n self.defaultParams = {}\n self.expressions = {}\n\n","sub_path":"Mod/Arch/ArchDefine.py","file_name":"ArchDefine.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"440468009","text":"import numpy as np\n\nclass BestPerformersStrategy:\n def __init__(self, numberOfCurrenciesToPick, observationWindow):\n self.NumberOfCurrenciesToPick = numberOfCurrenciesToPick\n self.ObservationWindow = observationWindow\n\n\n def Pick(self, dataProvider):\n timeSeries = dataProvider.get_all_time_series_close()\n ccyList = list(dataProvider.CurrencyList)\n ccyTimeSeries = timeSeries[ccyList]\n returns = (ccyTimeSeries - ccyTimeSeries.shift(self.ObservationWindow)) / ccyTimeSeries.shift(self.ObservationWindow)\n mostRecentReturn = returns.tail(1)\n order = np.argsort(-mostRecentReturn.values, axis=1)[:, :self.NumberOfCurrenciesToPick]\n chosenCcy = 
list(mostRecentReturn.columns[order][0])\n        chosenCcy2 = [t for t in chosenCcy if mostRecentReturn[t][0] >= 0.02]\n        return chosenCcy2\n\n\n\n","sub_path":"Strategies/BestPerformersStrategy.py","file_name":"BestPerformersStrategy.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"644457774","text":"from copy import deepcopy\n\n# class for puzzle game problem\nclass PuzzleGame:\n\n    def __init__(self, grid=None):\n        if grid is None:\n            grid = []\n        self.grid = grid\n\n    def __eq__(self, other):\n        return self.grid == other.grid\n\nclass PuzzleGameUtils:\n\n    # misplaced tiles heuristic\n    def heuristic_misplaced(state):\n        grid = state.grid\n        goal = [[1,2,3],[4,5,6],[7,8,-1]]\n        misplaced = 0\n\n        for i in range(len(grid)):\n            for j in range(len(grid[0])):\n                if(grid[i][j] != goal[i][j]):\n                    misplaced+=1\n\n        return misplaced\n\n    # manhattan distance heuristic\n    def heuristic_manhattan(state):\n        grid = state.grid\n        goal = [[1,2,3],[4,5,6],[7,8,-1]]\n        manhattan = 0\n\n        for i in range(1,9):\n            coord_grid = [(index, row.index(i)) for index, row in enumerate(grid) if i in row]\n            coord_goal = [(index, row.index(i)) for index, row in enumerate(goal) if i in row]\n            manhattan += abs(coord_goal[0][0] - coord_grid[0][0]) + abs(coord_goal[0][1] - coord_grid[0][1])\n\n        return manhattan\n\n    def successor_fn(state):\n        grid = state.grid\n        res = []\n        #find coordinates where the -1 is\n        coord = [(index, row.index(-1)) for index, row in enumerate(grid) if -1 in row]\n\n        if (coord[0][0] + 1 < len(grid[0])):\n            tmp = deepcopy(grid)\n            row = coord[0][0]\n            col = coord[0][1]\n            tmp[row][col] = tmp[row + 1][col]\n            tmp[row + 1][col] = -1\n            res.append((\"down\", (PuzzleGame(tmp), 1)))\n\n        if (coord[0][0] - 1 >= 0):\n            tmp = deepcopy(grid)\n            row = coord[0][0]\n            col = coord[0][1]\n            tmp[row][col] = tmp[row - 1][col]\n            tmp[row - 1][col] = -1\n            res.append((\"up\", (PuzzleGame(tmp), 1)))\n\n        if (coord[0][1] + 1 < len(grid[0])):\n            tmp = deepcopy(grid)\n            row = coord[0][0]\n            col = coord[0][1]\n            tmp[row][col] = tmp[row][col + 1]\n            tmp[row][col + 1] = -1\n            res.append((\"right\", (PuzzleGame(tmp), 1)))\n\n        if (coord[0][1] - 1 >= 0):\n            tmp = deepcopy(grid)\n            row = coord[0][0]\n            col = coord[0][1]\n            tmp[row][col] = tmp[row][col - 1]\n            tmp[row][col - 1] = -1\n            res.append((\"left\", (PuzzleGame(tmp), 1)))\n\n        return res\n\n    def step_cost(node, action):\n        return 1\n\n\n\n\n","sub_path":"puzzle_game.py","file_name":"puzzle_game.py","file_ext":"py","file_size_in_byte":2494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"468745297","text":"# -*- coding: utf-8 -*-\n\nimport json\nimport csv\nimport re\n\n\ndef appendJsonToFile(_dict, _path_file):\n    with open(_path_file, 'ab') as f:\n        # Move to the end of the file (whence=2) with offset 0\n        f.seek(0, 2)\n\n        # Check whether the file is empty\n        if (f.tell() == 0):\n            f.write('['.encode())\n            f.write('\\n'.encode())\n\n        else:\n            # Move -2 characters back from the end of the file (whence=2)\n            f.seek(-2, 2)\n            # Delete the last character (])\n            f.truncate()\n            # Move -2 characters back from the end of the file (whence=2)\n            f.seek(-2, 2)\n            # Delete the last character (})\n            f.truncate()\n            # Write the array separator\n            f.write('},'.encode())\n            f.write('\\n'.encode())\n\n    with open(_path_file, 'a') as f:\n        json.dump(_dict, f, ensure_ascii=False, indent=2)\n\n    with open(_path_file, 'ab') as f:\n        # Close the JSON array\n        f.write('\\n'.encode())\n        f.write(']'.encode())\n        f.write('\\n'.encode())\n\n\n#\n#\n#\n\n\n# def convertToList(_isinstance):\n#     if (isinstance(_isinstance, dict)):\n#         _isinstance = [_isinstance]\n\n#     return _isinstance\n\n\n# with 
open('../data/detailedTimetables.json', 'r') as f:\n#     detailedTimetables = json.load(f)\n\n# lineCodes = []\n\n# for detailedTimetable in detailedTimetables:\n#     detailedTimetableTimeTable = detailedTimetable['TimeTable']\n#     HourTables = detailedTimetableTimeTable['HourTable']\n#     HourTables = convertToList(HourTables)\n\n#     codePairs = []\n\n#     for HourTable in HourTables:\n#         if ('MinuteTable' not in HourTable):\n#             continue\n\n#         MinuteTables = HourTable['MinuteTable']\n#         MinuteTables = convertToList(MinuteTables)\n\n#         for MinuteTable in MinuteTables:\n#             Stop = MinuteTable['Stop']\n\n#             codePair = {\n#                 \"kindCode\": Stop['kindCode'],\n#                 \"nameCode\": Stop['nameCode']\n#             }\n\n#             if (codePair in codePairs):\n#                 continue\n\n#             codePairs.append(codePair)\n\n#             if (Stop['lineCode'] not in lineCodes):\n#                 lineCodes.append(Stop['lineCode'])\n\n# with open('lineCodes.csv', 'w') as f:\n#     writer = csv.writer(f, lineterminator='\\n')\n#     writer.writerow(lineCodes)\n\n\n#\n# ↑ lineCodes\n#\n\n\n# with open('../data/trainTimetables.json', 'r') as f:\n#     trainTimetables = json.load(f)\n\n# trainTimetablesSorted = {}\n\n# for trainTimetable in trainTimetables:\n#     lineCode = trainTimetable['lineCode']\n#     trainTimetable.pop('lineCode')\n#     trainTimetablesSorted[lineCode] = trainTimetable\n\n# with open('../data/trainTimetablesSorted.json', 'w') as f:\n#     json.dump(trainTimetablesSorted, f,\n#               ensure_ascii=False, separators=(',', ':'))\n\n\n#\n# ↑ trainTimetablesSorted\n#\n\n\n# with open('../data/trainTimetables.json', 'r') as f:\n#     trainTimetables = json.load(f)\n\n# counter = 0\n\n# for trainTimetable in trainTimetables:\n#     counter += 1\n\n#     if (counter > 3):\n#         break\n\n#     appendJsonToFile(trainTimetable, 'trainTimetablesSample.json')\n\n\n#\n# ↑ trainTimetablesSample\n#\n\n\n# with open('../data/trainTimetablesSorted.json', 'r') as f:\n#     trainTimetablesSorted = json.load(f)\n\n# appendJsonToFile(trainTimetablesSorted['34254'], 'trainTimetablesSample34254.json')\n\n\n#\n# ↑ look up a trainTimetable by its lineCode\n#\n","sub_path":"timetable/train/p.py","file_name":"p.py","file_ext":"py","file_size_in_byte":3464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"632153393","text":"import sqlite3\nfrom contextlib import closing\n\nwith sqlite3.connect(\"agenda.db\") as conexao:\n    with closing(conexao.cursor()) as cursor:\n        cursor.execute('delete from agenda where nome = \"Maria\"')\n\n        print(\"Records changed: \", cursor.rowcount)\n        if cursor.rowcount == 1:\n            conexao.commit()\n        else:\n            conexao.rollback()\n            print(\"Changes aborted\")\n\n    for registro in conexao.execute('select * from agenda'):\n        print(f\"Name: {registro[0]}\\nPhone: {registro[1]}\")","sub_path":"11_Banco_Dados/11_delete_agenda.py","file_name":"11_delete_agenda.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"651754683","text":"#!/usr/bin/python\n\n\n\"\"\"\n    Starter code for the regression mini-project.\n    \n    Loads up/formats a modified version of the dataset\n    (why modified? 
we've removed some trouble points\n that you'll find yourself in the outliers mini-project).\n\n Draws a little scatterplot of the training/testing data\n\n You fill in the regression code where indicated:\n\"\"\"\n\nimport os\nimport sys\nimport pickle\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\n\n\nsys.path.append(os.getcwd())\nsys.path.insert(0, \"./tools/\")\n\nfrom tools.feature_format import featureFormat, targetFeatureSplit\n\ndef bonus_regression(features_list):\n dictionary = pickle.load(\n open(\"./final_project/final_project_dataset_modified_unix.pkl\", \"rb\"))\n # list the features you want to look at--first item in the\n # list will be the \"target\" feature\n data = featureFormat(dictionary, features_list, remove_any_zeroes=True, sort_keys = \"./tools/python2_lesson06_keys_unix.pkl\")\n target, features = targetFeatureSplit(data)\n\n # training-testing split needed in regression, just like classification\n\n feature_train, feature_test, target_train, target_test = train_test_split(\n features, target, test_size=0.5, random_state=42)\n train_color = \"b\"\n test_color = \"r\"\n\n # Your regression goes here!\n # Please name it reg, so that the plotting code below picks it up and\n # plots it correctly. Don't forget to change the test_color above from \"b\" to\n # \"r\" to differentiate training points from test points.\n reg_salary = LinearRegression().fit(feature_train, target_train)\n\n # printing slop and intercept\n slope = reg_salary.coef_[0]\n intercept = reg_salary.intercept_\n score1 = reg_salary.score(feature_train, target_train)\n score2 = reg_salary.score(feature_test, target_test)\n print(\"slope: {}, \\n intercept: {}, \\n score_trainingdata: {}, \\n score_testdata: {}\".format(slope, intercept, score1, score2))\n\n # draw the scatterplot, with color-coded training and testing points\n for feature, target in zip(feature_test, target_test):\n plt.scatter(feature, target, color=test_color)\n for feature, target in zip(feature_train, target_train):\n plt.scatter(feature, target, color=train_color)\n # labels for the legend\n plt.scatter(feature_test[0], target_test[0], color=test_color, label=\"test\")\n plt.scatter(feature_test[0], target_test[0], color=train_color, label=\"train\")\n # draw the regression line, once it's coded\n try:\n plt.plot(feature_test, reg_salary.predict(feature_test))\n except NameError:\n pass\n\n reg_outlier = LinearRegression().fit(feature_test, target_test)\n plt.plot(feature_train, reg_outlier.predict(feature_train), color=\"g\")\n\n print(reg_outlier.coef_)\n\n plt.xlabel(features_list[1])\n plt.ylabel(features_list[0])\n plt.legend()\n plt.show()\n\n# features_list = [\"bonus\", \"salary\"]\nbonus_regression([\"bonus\", \"salary\"])\n\nbonus_regression([\"bonus\", \"long_term_incentive\"])\n","sub_path":"regression/finance_regression.py","file_name":"finance_regression.py","file_ext":"py","file_size_in_byte":3123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"129231142","text":"import urllib\nimport ssl\nimport urllib.request\nimport base64\nimport json\n\nALI_APPCODE = \"70533dd2ebcc472e91d817d125ee643a\"\nALI_HOST = 'https://ocrapi-invoice.taobao.com'\nALI_PATH = '/ocrservice/invoice'\nALI_METHOD = 'POST'\n\n\ndef get_file_content_base64(filePath):\n \"\"\" 读取图片 \"\"\"\n with open(filePath, 'rb') as fp:\n return 
base64.b64encode(fp.read())\n\n\ndef scan_vat_invoice(filepath):\n appcode = ALI_APPCODE\n url = ALI_HOST + ALI_PATH\n bodys = {}\n b64_data = get_file_content_base64(filepath)\n bodys['img'] = b64_data.decode('utf8')\n post_data = json.dumps(bodys)\n post_data_bytes = post_data.encode('utf8')\n request = urllib.request.Request(url, post_data_bytes)\n request.add_header('Authorization', 'APPCODE ' + appcode)\n # 根据API的要求,定义相对应的Content - Type\n request.add_header('Content-Type', 'application/json; charset=UTF-8')\n ctx = ssl.create_default_context()\n ctx.check_hostname = False\n ctx.verify_mode = ssl.CERT_NONE\n try:\n response = urllib.request.urlopen(request, context=ctx)\n content = response.read()\n if (content):\n print(content.decode('utf-8'))\n except Exception as e:\n print(e)\n\n\nscan_vat_invoice('jt.jpg')\n","sub_path":"invoice_validate/ali.py","file_name":"ali.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"467502234","text":"import os\nimport requests\nimport json\n\n\npath = os.getenv('LOCALAPPDATA') + '/CCP/EVE/c_eve_sharedcache_tq_tranquility/settings_Default/'\n\ndirectory = os.listdir(path)\n\nmasterChar = input('Master Character name? ')\n\nmasterAccount = (requests.get(\"https://esi.evetech.net/latest/search/\", params={\"categories\":[\"character\"], \"datasource\": \"tranquility\", \"language\": \"en-us\", \"search\": masterChar, \"strict\": \"true\"}).json())['character']\n\nif (len(masterAccount)) > 1:\n raise Exception(\"Multiple characters returned, please contact Nosha Izia ingame\")\nelse:\n masterAccount = str(masterAccount[0])\n\nnonMasterFiles = []\n\nsyncedAccounts = []\n\nfor item in directory:\n item = item.split('.')\n if len(item) == 2:\n if item[1] == 'dat':\n item = item[0].split('_')\n if len(item) == 3:\n if item[2] != masterAccount:\n nonMasterFiles.append('core_char_' + item[2] + '.dat')\n\nwith open(path + 'core_char_' + masterAccount + '.dat', 'rb') as f:\n masterDetails = f.read()\n\nfor item in nonMasterFiles:\n with open(path + item, 'wb') as f:\n f.truncate(0)\n f.write(masterDetails)\n\nprint(\"Done!\")\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"275851575","text":"# -*- encoding: UTF-8 -*-\nimport sqlalchemy.orm\n\nfrom imomo import errors\nfrom imomo.models import Site\n\n\nclass Manager(object):\n \"\"\"Manager base class that provides shared utility methods between all\n manager classes.\n\n The manager classes are in charge of providing and/or executing complex\n database queries to the handlers, the managers also provide validation\n of business logic and transform database errors into application errors,\n i.e. 
IMomoError.\n \"\"\"\n\n @classmethod\n def get_site(cls, session, site_code):\n \"\"\"Retrieves a site instance from the database given its local site\n code.\n\n Args:\n session\n site_code\n Raises:\n SiteDoesNotExistError\n Returns:\n The loaded Site instance.\n \"\"\"\n site = session.query(Site).filter(Site.site_code == site_code).first()\n\n if site is None:\n raise errors.SiteDoesNotExistError()\n return site\n\n @classmethod\n def verify_site(cls, session, site_id, source_id):\n \"\"\"Utility method that ensures that the given site id exists and the\n site belongs to the source with the given source_id.\n\n Args:\n session: The database session to use.\n site_id: The site id to verify.\n source_id: The source that is requesting the verification.\n\n Raises:\n SiteNotInSourceError: If the site exists but is in a different\n source than the given one.\n SiteDoesNotExistError: If the given site id is not valid.\n \"\"\"\n site_source_id = session.query(Site.source_id).filter(\n Site.id == site_id).scalar()\n if site_source_id is None:\n raise errors.SiteDoesNotExistError()\n if site_source_id != source_id:\n raise errors.SiteNotInSourceError()\n\n @staticmethod\n def unique_value_query(query):\n \"\"\"Executes the query and asserts that the result\n is strictly one record.\n\n Note that the error is raised as a failed assert, this method\n should be used only when the error is expected to come from a\n programming error otherwise it would be hidden in production.\n \"\"\"\n try:\n return query.one()\n except sqlalchemy.orm.exc.NoResultFound:\n assert False, 'No records stored in the database.'\n except sqlalchemy.orm.exc.MultipleResultsFound:\n assert False, 'Multiple records stored in the database.'\n\n @staticmethod\n def unique_or_no_value_query(query,):\n \"\"\"Executes the query and asserts that the result is zero or one\n record.\n\n Note that the error is raised as a failed assert, this method\n should be used only when the error is expected to come from a\n programming error otherwise it would be hidden in production.\n \"\"\"\n try:\n return query.one()\n except sqlalchemy.orm.exc.NoResultFound:\n return None\n except sqlalchemy.orm.exc.MultipleResultsFound:\n assert False, 'Multiple records stored in the database'\n","sub_path":"imomo/managers/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":3128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"431090153","text":"import SocketServer \nimport threading\nimport argparse\nimport jedi\nimport json\nimport sys\nimport os\n\nimport transports\nimport constants\nfrom utils import echo\nfrom handlers import preinit, initialize, didOpen, hover, definition, references\n\n\ndef serve(args):\n\tif args.stdio:\n\t\tmode = \"stdio\"\n\telse:\n\t\tmode = \"sockets\";\n\n\tif mode == \"stdio\":\n\t\techo(\"Python language server using stdio transport...\")\n\t\tserver = transports.StdioTransport()\n\t\twhile True:\n\t\t\tserver.handle(sys.stdin, sys.stdout)\n\n\telif mode == \"sockets\":\n\t\techo(\"Python language server listening on {}:{}\".format(args.host, args.port))\n\t\thost = args.host\n\t\tport = int(args.port)\n\n\t\ttry:\n\t\t\tserver = SocketServer.TCPServer((host, port), transports.SocketTransport)\n\t\t\tserver.serve_forever()\n\t\t\t\n\t\texcept KeyboardInterrupt:\n\t\t\tserver.shutdown()\n\t\t\tsys.exit()\n\n\telse:\n\t\techo(\"Invalid mode '{}'\".format(mode))\n\n\ndef query(args):\n\tif args.path == \"\":\n\t\techo(\"ls-python: path is 
empty\")\n\t\tsys.exit(2)\n\n\telif args.line < 1:\n\t\techo(\"ls-python: line is not valid\")\n\t\tsys.exit(2)\n\n\telif args.column < 0:\n\t\techo(\"ls-python: column is not valid\")\n\t\tsys.exit(2)\n\n\tif args.subcmd == \"hover\":\n\t\thover(args)\n\n\telif args.subcmd == \"definition\":\n\t\tdefinition(args)\n\n\telif args.subcmd == \"references\":\n\t\treferences(args)\n\n\telse:\n\t\techo(\"Sorry, I don't understand..\")\n\n\ndef addSourceArgs(parser):\n\t# TODO: Look into a cleaner way of doings this, i.e. groups or something\n\tparser.add_argument('line', help='The line to perform actions on (starting with 1).', nargs='?', default=1, type=int)\n\tparser.add_argument('column', help='The column of the cursor (starting with 0).', nargs='?', default=0, type=int)\n\tparser.add_argument('path', help='The path of the file in the file system.', nargs='?', default=constants.default_path)\n\n\ndef main():\n\tparser = argparse.ArgumentParser(description=\"Python Jedi\")\n\n\t# Allow preinitialization (importation of costly packages) to be controlled\n\tsubparsers = parser.add_subparsers(help=\"commands\")\n\tserver_parser = subparsers.add_parser('serve', help=\"Run as a server\")\n\n\t# Server mode args\n\tserver_parser.add_argument(\"--pre\", help='', default=\"none\")\n\tserver_parser.add_argument(\"--stdio\", action='store_true', help='Runs the server over stdio', default=False)\n\tserver_parser.add_argument(\"--host\", help='The port to host the language server on', nargs='?', default=constants.default_host)\n\tserver_parser.add_argument(\"--port\", help='The hostname to listen on', nargs='?', default=constants.default_port)\n\tserver_parser.set_defaults(func=serve)\n\n\t# 'hover' args\n\thover_parser = subparsers.add_parser('hover', help=\"Hover mode\")\n\taddSourceArgs(hover_parser)\n\n\thover_parser.set_defaults(func=hover)\n\n\targs = parser.parse_args()\n\n\tif args.pre is not \"none\":\n\t\tpreinit(args)\n\n\targs.func(args)\n\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"src/python/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"186500393","text":"from functional_tests.test_utils.helper import UITestCase\nfrom integrator.models import Integration\nimport json\nimport mock\nimport logging\n\nlogging.disable(logging.CRITICAL)\n\nclass IndexPageTest(UITestCase):\n def test_create_integration_button_rendering(self):\n self.browser.get(self.live_server_url + '/')\n self.wait_till_element_is_clickable('add_integration')\n self.browser.find_element_by_id('add_integration')\n\n def test_display_of_existing_integrations(self):\n integration_data = json.loads(open(self.test_data_directory + 'helpscout_integration_form.json').read())\n integration = Integration(**integration_data)\n with mock.patch('integrator.views.ui.integration.views.Integration') as integration_mock:\n integration_mock.objects.all = mock.Mock()\n conf = {'return_value': [integration]}\n integration_mock.objects.all.configure_mock(**conf)\n self.browser.get(self.live_server_url + '/')\n integration_list_element = self.browser.find_element_by_id('helpscout')\n self.assertIn('integration_detail', integration_list_element.get_attribute('class'))\n\n def test_navigation_index_to_create_integration_page(self):\n self.browser.get(self.live_server_url + '/')\n self.wait_till_element_is_clickable('add_integration')\n\n add_integration = 
self.browser.find_element_by_id('add_integration')\n add_integration.click()\n\n self.browser.back()\n add_integration = self.browser.find_element_by_id('add_integration')\n\n","sub_path":"functional_tests/ui_tests/test_index_page.py","file_name":"test_index_page.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"648144800","text":"'''\nFilename : gcd.py\nAuthor: Pan Yisheng\nDescription: implement two euclidean function\n\n1. gcd() for compute the greatest common divisor\n2. euclideanExtendedGCD(a,b) for solve ax+by = gcd(a,b)\n'''\n\nimport os\nimport sys\nimport time\n\n\ndef gcd(p,q):\n '''\n compute the greatest common divisor\n\n :param p: integer\n\n :param q: integer\n\n :return: integer\n '''\n\n while q != 0:\n (p,q) = (q,p % q)\n return p\n\ndef euclideanExtendedGCD(n1,n2):\n '''\n Implement the euclidean extend GCD algorithm\n solve the indeterminate equation \"a*x+b*y = gcd(a,b)\"\n\n :param n1: coefficient a (type integer)\n\n :param n2: coefficient b (type integer)\n\n :return: gcd(n1,n2), x , y\n '''\n u,v,s,t = 1, 0 ,0 ,1\n #Swap\n if (n2= {v} required.'.format(\n v=MYSQL_VERSION_REQUIRED))\n if target and target == \"testing\":\n self.db = config.get('database', 'test_db')\n else:\n self.db = config.get('database', 'db')\n \n self.user = config.get('database', 'user')\n\n self.drop_db(cursor)\n self.create_db(cursor)\n self.use_db(cursor)\n\n self.create_ranges(cursor)\n self.create_slots(cursor)\n\n self.create_trigger_bins_ranges(cursor)\n self.create_trigger_ains_slots(cursor)\n self.create_trigger_aupd_slots(cursor)\n self.create_function_alloc_realloc_slot(cursor)\n self.create_function_dealloc_slot(cursor)\n\n cursor.close()\n\n def drop_db(self, cursor):\n print(\"Dropping database {}\".format(self.db))\n cursor.execute('DROP DATABASE IF EXISTS {}'.format(self.db))\n self.connection.commit()\n\n def create_db(self, cursor):\n print(\"Creating database {}\".format(self.db))\n cursor.execute('CREATE SCHEMA {}'.format(self.db))\n self.connection.commit()\n\n def use_db(self, cursor):\n print(\"Making {} active\".format(self.db))\n cursor.execute('USE {}'.format(self.db))\n self.connection.commit()\n\n def create_ranges(self, cursor):\n from queries import create_ranges\n print(\"Creating range table\")\n cursor.execute(create_ranges)\n self.connection.commit()\n\n def create_slots(self, cursor):\n from queries import create_slots\n print(\"Creating slot table\")\n cursor.execute(create_slots)\n self.connection.commit()\n\n def create_trigger_bins_ranges(self, cursor):\n from queries import create_t_before_ins_ranges as bi_r\n print(\"Creating bins_ranges trigger\")\n cursor.execute(bi_r, self.user)\n self.connection.commit()\n\n def create_trigger_ains_slots(self, cursor):\n from queries import create_t_after_ins_slots as ai_s\n print(\"Creating ains_slots trigger\")\n cursor.execute(ai_s, self.user)\n self.connection.commit()\n\n def create_trigger_aupd_slots(self, cursor):\n from queries import create_t_after_upd_slots as au_s\n print(\"Creating aup_slots trigger\")\n cursor.execute(au_s, self.user)\n self.connection.commit()\n\n def create_function_alloc_realloc_slot(self, cursor):\n from queries import create_f_alloc_realloc_slot as f_al\n print(\"Creating alloc/realloc slot function\")\n cursor.execute(f_al, self.user)\n self.connection.commit()\n\n def create_function_dealloc_slot(self, cursor):\n from queries import create_f_dealloc_slot as f_dl\n print(\"Creating 
deallocate_slot function\")\n cursor.execute(f_dl, self.user)\n self.connection.commit()\n\n def mysql_version(self, cursor):\n from queries import version_check\n print(\"Checking version\")\n cursor.execute(version_check)\n data = cursor.fetchone()\n if 'innodb_version' in data.values():\n return data['Value']\n return None\n\n def mysql_version_okay(self, the_version):\n # the_version like '5.5.10'\n ver = the_version.split('.')\n need_ver = MYSQL_VERSION_REQUIRED.split('.')\n for i, ver_split in enumerate(ver):\n if int(ver_split) < int(need_ver[i]):\n return False\n print(\"Version okay, continuing\")\n return True\n\n\nif __name__ == \"__main__\":\n print(\"Creating or recreating database...\")\n creator = DatabaseCreator()\n print(\"Done.\\n\")\n","sub_path":"periastron/create_db.py","file_name":"create_db.py","file_ext":"py","file_size_in_byte":4251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"243390340","text":"\n# %%\nimport datetime as dt\n#%%\n\nimport glob\n\nimport matplotlib as plt\nimport numpy as np\nimport pandas as pd\nimport plotly.express as px\nimport seaborn as sns\nfrom datetime import timezone\n\n\n# Standard quick checks\ndef dfChkBasics(dframe, valCnt = False): \n cnt = 1\n print('\\ndataframe Basic Check function -')\n \n try:\n print(f'\\n{cnt}: info(): ')\n cnt+=1\n print(dframe.info())\n except: pass\n\n print(f'\\n{cnt}: describe(): ')\n cnt+=1\n print(dframe.describe())\n\n print(f'\\n{cnt}: dtypes: ')\n cnt+=1\n print(dframe.dtypes)\n\n try:\n print(f'\\n{cnt}: columns: ')\n cnt+=1\n print(dframe.columns)\n except: pass\n\n print(f'\\n{cnt}: head() -- ')\n cnt+=1\n print(dframe.head())\n\n print(f'\\n{cnt}: shape: ')\n cnt+=1\n print(dframe.shape)\n\n if (valCnt):\n print('\\nValue Counts for each feature -')\n for colname in dframe.columns :\n print(f'\\n{cnt}: {colname} value_counts(): ')\n print(dframe[colname].value_counts())\n cnt +=1\n\n# examples:\n#dfChkBasics(df)\n#%%\n## DC data clean up and shaping \n#read in data sets for DC\n#%%\n\n\nmar19= pd.read_csv(\"DC_Mar19.csv\")\nprint(\"mar\")\nprint(mar19.columns)\napril19= pd.read_csv(\"DC_Apr19.csv\")\nprint(\"apr\")\nprint(april19.columns)\nmay19= pd.read_csv(\"DC_May19.csv\")\nprint(\"may\")\nprint(may19.columns)\njune19= pd.read_csv(\"DC_Jun19.csv\")\nprint(\"june\")\nprint(june19.columns)\nmarch20= pd.read_csv(\"DC_Mar20.csv\")\nprint(\"mar\")\nprint(march20.columns)\napril20= pd.read_csv(\"DC_Apr20.csv\")\nprint(\"april\")\nprint(april20.columns)\nmay20=pd.read_csv(\"DC_May20.csv\")\nprint(\"May\")\nprint(may20.columns)\njun20= pd.read_csv(\"DC_jun20.csv\")\nend_stat=pd.read_csv(\"end.csv\")\nstart_stat=pd.read_csv(\"start.csv\")\n\n#%%\nprint(len(may20))\nprint(len(may19))\nprint(len(april19))\nprint(len(april20))\nprint(len(mar19))\nprint(len(march20))\n#%%\n\n#print data sets to take a look # seems that april has different columns names\n\n\nprint(\"_____break_______\")\n#Rename columns \napril20.rename(columns={'started_at': 'Start date', 'ended_at': 'End date','start_station_name': 'Start station','start_station_id': 'Start station number','end_station_name': 'End station','end_station_id': 'End station number','member_casual': 'Member type', }, inplace=True)\nmay20.rename(columns={'started_at': 'Start date', 'ended_at': 'End date','start_station_name': 'Start station','start_station_id': 'Start station number','end_station_name': 'End station','end_station_id': 'End station number','member_casual': 'Member type', }, 
inplace=True)\njun20.rename(columns={'started_at': 'Start date', 'ended_at': 'End date','start_station_name': 'Start station','start_station_id': 'Start station number','end_station_name': 'End station','end_station_id': 'End station number','member_casual': 'Member type', }, inplace=True)\n\n#%%\n#check for column renaming \napril20_head= april20.head()\nprint(april20_head)\n\n\n\n#%%\nprint(april20.head())\n#%%\n\n#%%\n#make the df's ready for concatination \nframes=[mar19, april19, may19, june19, march20, april20, may20, jun20]\n\n#concat all the dataframes and drop column that won't be used \ndc_data = pd.concat(frames)\n \n\n#%%\n#dc_data=dc_data.merge(april20, on=\"Start station\", how=\"inner\")\n\ndc_data=dc_data.merge(start_stat, on=\"Start station\", how=\"left\")\n\ndc_data=dc_data.merge(end_stat, on=\"End station\", how=\"left\")\nprint(\"complete - ready to continue\") \n\n\n\n#null_data=dc_data.isnull.sum()\n\n#print(\"ready\")\n#print(null_data)\n\n\n\n#%% # Function to get percents of missing values \ndef missing_values_table(df):\n # Utility function, identify missing data and show percentages.\n mis_val = df.isnull().sum()\n mis_val_percent = 100 * df.isnull().sum() / len(df)\n mis_val_table = pd.concat([mis_val, mis_val_percent], axis=1)\n mis_val_table_ren_columns = mis_val_table.rename(\n columns = {0 : 'Missing Values', 1 : '% of Total Values'})\n mis_val_table_ren_columns = mis_val_table_ren_columns[mis_val_table_ren_columns.iloc[:,1] != 0].sort_values('% of Total Values', ascending=False).round(1)\n print(\"Your selected dataframe has \" + str(df.shape[1]) + \" columns.\\nThere are \" + str(mis_val_table_ren_columns.shape[0]) + \" columns that have missing values.\")\n return mis_val_table_ren_columns\n\n\n\n\n#%%\n#function to determine if data is missing \nmissing_values_table(dc_data)\n\n\n\n# %%\nprint(len(dc_data))\n\n# %%\n#dc_data=dc_data.drop(columns=[\"end_lat\", \"end_lng\", \"start_lat\", \"start_lng\"])\t \n# drop small na's\ndc_data=dc_data.dropna(subset=[\"End station number\"])\ndc_data=dc_data.dropna(subset=[\"Start date\"])\ndc_data=dc_data.dropna(subset=[\"End date\"])\ndc_data=dc_data.dropna(subset=[\"Member type\"])\ndc_data=dc_data.dropna(subset=[\"end_lat_y\"])\ndc_data=dc_data.dropna(subset=[\"end_lng_y\"])\ndc_data=dc_data.dropna(subset=[\"start_lat_y\"])\ndc_data=dc_data.dropna(subset=[\"start_lng_y\"])\ndc_data=dc_data.dropna(subset=[\"Start station number\"])\ndc_data=dc_data.dropna(subset=[\"Start station\"])\n\n# %%\nmissing_values_table(dc_data)\n\n\n\n\n#%%\ndfChkBasics(dc_data)\nprint(len(dc_data))\n# %%\n# standardize categorical columns/ and date time columns create new categorical columns\ndc_data.rename(columns={\"Start date\": \"start_date\", \"End date\": \"end_date\",}, inplace=True)\ndc_data[\"Member type\"].replace({\"member\": \"Member\", \"casual\": \"Casual\"}, inplace=True)\n\n\n#%%\ndef create_dto(row, colname):\n# for index, row in dc_data.iterrows():\n if type(row[colname]) is not str:\n return \"Unknown\"\n else:\n # Try the various known time formats.\n dtFormat = [\n '%Y-%m-%d %H:%M',\n '%Y-%m-%d %H:%M:%S',\n '%m/%d/%y %H:%M',\n ]\n # save cell data to local variable\n cell_contents = row[colname]\n # Drop decimal timestamp precision, if it exists.\n cell_contents = cell_contents.split('.')[0]\n for i in dtFormat:\n try:\n dto = dt.datetime.strptime(cell_contents,i)\n return (\n dto,\n dto.strftime(\"%Y-%m-%d\"),\n dto.strftime(\"%H:%M\")\n )\n except ValueError:\n pass\n else:\n print(\"Failed to parse: 
{:s}\".format(cell_contents))\ndc_data['start_dto'], dc_data['start_date_formatted'], dc_data['start_time_formatted'] \\\n = zip(*dc_data.apply(lambda row: create_dto(row,\"start_date\"), axis=1))\ndc_data['end_dto'], dc_data['end_date_formatted'], dc_data['end_time_formatted'] \\\n = zip(*dc_data.apply(lambda row: create_dto(row,\"end_date\"), axis=1))\n \nprint(dc_data.tail())\n\ndc_data['weekday'] = dc_data.apply(lambda row: row[\"start_dto\"].weekday() < 5, axis=1)\n\n#%%\n\nprint(len(dc_data))\n# %%\ndef determine_pandemic(row):\n# for index, row in dc_data.iterrows():\n dto = row[\"start_dto\"]\n if dto.year == 2020:\n return True\n return False\ndc_data['pandemic'] = dc_data.apply(lambda row: determine_pandemic(row), axis=1)\n#%%\ndef determine_commuter(row):\n# for index, row in dc_data.iterrows():\n dto = row[\"start_dto\"]\n weekday=row[\"weekday\"]\n if (dto.hour in range(6, 10) or dto.hour in range(16, 19)) and weekday:\n return True\n return False\ndc_data['commuter'] = dc_data.apply(lambda row: determine_commuter(row), axis=1)\n\n#%%\nprint(dc_data.head())\n#%%\ndc_data['Duration'] = dc_data.apply(lambda row: (row[\"end_dto\"] - row[\"start_dto\"]).total_seconds(), axis=1)\n#%%\n# drop excessive duration \ndc_data=dc_data[dc_data[\"Duration\"]<18000]\ndc_data=dc_data[dc_data[\"Duration\"]>60]\n\nprint(len(dc_data))\n\n\n\n#%%\nmissing_values_table(dc_data)\n\n\n#%%\n# add year/month column \ndc_data['Month_Year'] = dc_data['end_dto'].dt.strftime('%Y-%m')\n\ndc_data['day_of_week'] = dc_data['end_dto'].dt.day_name()\nprint(\"complete - ready to continue\") \n#%%\nmissing_values_table(dc_data)\n#%%\nprint(dc_data.columns)\n\n\n#%%\n# function to make commuter/pandemic column \ndef make_pandemic_commuter(row):\n if row['pandemic'] and row['commuter']:\n return \"Pandemic Commuter\"\n elif row['pandemic'] and not row['commuter']:\n return \"Pandemic Noncommuter\"\n elif not row['pandemic'] and row['commuter']:\n return \"Nonpandemic Commuter\"\n elif not row['pandemic'] and not row['commuter']:\n return \"Nonpandemic Noncommuter\"\ndc_data['pandemic-commuter'] = dc_data.apply(lambda row: make_pandemic_commuter(row), axis=1)\n\n\n#%%\n# function to make weekend column\ndef make_pandemic_weekend(row):\n if row['pandemic'] and row['weekday']:\n return \"Pandemic Weekday\"\n elif row['pandemic'] and not row['weekday']:\n return \"Pandemic Nonweekday\"\n elif not row['pandemic'] and row['weekday']:\n return \"Nonpandemic Weekday\"\n elif not row['pandemic'] and not row['weekday']:\n return \"Nonpandemic Nonweekday\"\ndc_data['pandemic-weekday'] = dc_data.apply(lambda row: make_pandemic_weekend(row), axis=1)\n\n\n#%%\n\ndc_data.to_csv(\"dc_data.csv\")\n#%%\ncolum=dc_data[dc_data[\"Start station\"]== \"Yuma St & Tenley Circle NW\"]\n#%%\n\n\n#%%\nlen(colum)\n\n\n\n\n\n# %%\n","sub_path":"Bike_preprocessing.py","file_name":"Bike_preprocessing.py","file_ext":"py","file_size_in_byte":8895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"507062686","text":"import os\r\nimport shutil\r\n\r\n#enter the name of the folder to be sorted\r\npath = input(\"enter the name od the directory to be sorted: \")\r\n\r\n#list of all the files and folders in the directory\r\nlist_of_files = os.listdir(path)\r\n\r\n#go through each and every file\r\nfor file in list_of_files:\r\n name,ext = os.path.splitext(file)\r\n\r\n #store the extension type\r\n ext = ext[1:]\r\n\r\n #if it is a directory, go to the next loop\r\n if ext == '':\r\n continue\r\n\r\n 
#move the file to the folder name ext \r\n if os.path.exists(path + '/'+ext):\r\n shutil.move(path+'/'+file,path+'/'+ext+'/'+file)\r\n else: \r\n os.makedirs(path+'/'+ext)\r\n shutil.move(path+'/'+file,path+'/'+ext+'/'+file)","sub_path":"fileOrganiser.py","file_name":"fileOrganiser.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"412951616","text":"import sys\n\n\nclass RecordsBuilder(object):\n \"\"\"\n This class creates the batches with suitable delivery size\n to a system in a form of an array.\n \"\"\"\n def make_batches(self, records):\n batch = []\n temp_array = []\n for record in records:\n if (sys.getsizeof(record)) >= 1000000: # 1MB = 1000000 bytes. I tested with 35.\n continue\n else:\n if len(temp_array) < 500 and sys.getsizeof(temp_array) <= 5000000: # I tested with <5 and <=80\n temp_array.append(record)\n else:\n batch.append(temp_array)\n temp_array = []\n temp_array.append(record)\n\n batch.append(temp_array)\n return batch\n\n","sub_path":"Array of Records/src/batch_creator.py","file_name":"batch_creator.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"431537503","text":"# coding=utf-8\r\nfrom odoo import api, fields, models, _\r\n\r\nclass AccountInvoice(models.Model):\r\n _inherit = \"account.invoice\"\r\n\r\n @api.one\r\n @api.depends('invoice_line_ids.price_subtotal', 'tax_line_ids.amount', 'currency_id', 'company_id', 'date_invoice', 'type')\r\n def _compute_amount(self):\r\n round_curr = self.currency_id.round\r\n self.amount_untaxed = round(sum(line.price_subtotal for line in self.invoice_line_ids))\r\n self.amount_tax = round(sum(round_curr(line.amount) for line in self.tax_line_ids))\r\n self.amount_total = round(self.amount_untaxed + self.amount_tax)\r\n amount_total_company_signed = self.amount_total\r\n amount_untaxed_signed = self.amount_untaxed\r\n if self.currency_id and self.company_id and self.currency_id != self.company_id.currency_id:\r\n currency_id = self.currency_id.with_context(date=self.date_invoice)\r\n amount_total_company_signed = currency_id.compute(self.amount_total, self.company_id.currency_id)\r\n amount_untaxed_signed = currency_id.compute(self.amount_untaxed, self.company_id.currency_id)\r\n sign = self.type in ['in_refund', 'out_refund'] and -1 or 1\r\n self.amount_total_company_signed = amount_total_company_signed * sign\r\n self.amount_total_signed = self.amount_total * sign\r\n self.amount_untaxed_signed = amount_untaxed_signed * sign\r\n\r\nAccountInvoice()","sub_path":"beta-dev1/laborindo_round_tax_so_po/models/invoice.py","file_name":"invoice.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"416923323","text":"import math\nimport numpy as np\n\n\nclass WindowConfiguration:\n def __init__(self,\n left_duration=0.1,\n left_apply_window_start=True,\n left_apply_window_end=True,\n left_window_function='Hanning',\n right_duration=0.1,\n right_apply_window_start=True,\n right_apply_window_end=True,\n right_window_function='Hanning'):\n \"\"\"\n\n :param left_duration: (Optional) Duration of the window in seconds, for the left channel. If zero, no window\n will be created for this channel. 
Default: 0.1s\n :param left_apply_window_start: (Optional) True if the window should be applied to the **start** of the sound\n for the left channel, False otherwise. Default: True\n :param left_apply_window_end: (Optional) True if the window should be applied to the **end** of the sound for\n the left channel, False otherwise. Default: True\n :param left_window_function: (Optional) Window function that should be used for the left channel. Possible values\n accepted: 'Hanning', 'Hamming', 'Blackman', 'Bartlett'. Default: 'Hanning\"\n :param right_duration: (Optional) Duration of the window in seconds, for the right channel. If zero, no window\n will be created for this channel. Default: 0.1s\n :param right_apply_window_start: (Optional) True if the\n window should be applied to the **start** of the sound, for the right channel, False otherwise. Default: True\n :param right_apply_window_end: (Optional) True if the window\n should be applied to the **end** of the sound for the right channel, False otherwise. Default: True\n :param right_window_function: (Optional) Window function that should be used for the left channel.\n Possible values accepted: 'Hanning', 'Hamming', 'Blackman', 'Bartlett'. Default: 'Hanning\"\n \"\"\"\n self.left_duration = left_duration\n self.right_duration = right_duration\n self.left_apply_window_start = left_apply_window_start\n self.right_apply_window_start = right_apply_window_start\n self.left_apply_window_end = left_apply_window_end\n self.right_apply_window_end = right_apply_window_end\n self.left_window_function = left_window_function\n self.right_window_function = right_window_function\n\n\ndef generate_sound(filename=None, fs=96000, duration=1, frequency_left=1000, frequency_right=1000, window_configuration:WindowConfiguration=None):\n \"\"\"\n Helper method to dynamically generated a sound that can be used in with the Sound Card module.\n\n :param filename: (Optional)\n :param fs: (Optional) number of samples per second (standard)\n :param duration: (Optional) sound duration in seconds\n :param frequency_left: (Optional) number of cycles per second (Hz) (frequency of the sine wave for the left channel)\n :param frequency_right: (Optional) number of cycles per second (Hz) (frequency of the sine wave for the right channel)\n :param window_configuration: (Optional) WindowConfiguration object to apply to the generated sound.\n\n :return: Returns the **flatten** generated sound as a numpy array (as np.int8)\n\n \"\"\"\n\n amplitude24bits = math.pow(2, 31) - 1\n\n samples = np.arange(0, duration, 1 / fs)\n wave_left = amplitude24bits * np.sin(2 * math.pi * frequency_left * samples)\n wave_right = amplitude24bits * np.sin(2 * math.pi * frequency_right * samples)\n\n if window_configuration:\n wave_left = generate_window(fs,\n wave_left,\n window_configuration.left_duration,\n window_configuration.left_apply_window_start,\n window_configuration.left_apply_window_end,\n window_configuration.left_window_function)\n wave_right = generate_window(fs,\n wave_right,\n window_configuration.right_duration,\n window_configuration.right_apply_window_start,\n window_configuration.right_apply_window_end,\n window_configuration.right_window_function)\n\n stereo = np.stack((wave_left, wave_right), axis=1)\n\n wave_int = stereo.astype(np.int32)\n\n if filename:\n with open(filename, 'wb') as f:\n wave_int.tofile(f)\n\n return wave_int.flatten()\n\n\ndef generate_window(fs, wave_int, duration, apply_start, apply_end, window_function):\n \"\"\"\n\n :param fs: number of samples per 
second (standard)\n :param wave_int: base sound where the window will be applied\n :param duration: duration of the window (it will be the same on the start and end)\n :param apply_start: True if the window should be created at the start, False otherwise.\n :param apply_end: True if the window should be created at the end, False otherwise.\n :param window_function: window function to be generated. Possible values accepted:\n 'Hanning', 'Hamming', 'Blackman', 'Bartlett'. It will revert to 'Hanning' if an unknown option is given.\n\n :return: Returns the modified sound with the window applied to it.\n \"\"\"\n len_fade = int(duration * fs)\n if window_function == 'Hanning':\n fade_io = np.hanning(len_fade * 2)\n elif window_function == 'Hamming':\n fade_io = np.hamming(len_fade * 2)\n elif window_function == 'Blackman':\n fade_io = np.blackman(len_fade * 2)\n elif window_function == 'Bartlett':\n fade_io = np.bartlett(len_fade * 2)\n else: # default\n fade_io = np.hanning(len_fade * 2)\n\n fadein = fade_io[:len_fade]\n fadeout = fade_io[len_fade:]\n win = np.ones(len(wave_int))\n if apply_start:\n win[:len_fade] = fadein\n if apply_end:\n win[-len_fade:] = fadeout\n wave_int = wave_int * win\n return wave_int\n","sub_path":"examples/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":6033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"114973835","text":"from setuptools import setup, find_packages\n\nimport os\n\nmodular_blocks = __import__('modular_blocks')\n\n\nsetup(\n name='django-modular-blocks',\n packages=find_packages(),\n author='Gabriel Pichot',\n author_email='gabriel.pichot@gmail.com',\n url='https://github.com/gpichot/django-modular-blocks',\n description=(\n 'Django Modular Blocks ease the integration of third'\n 'parties application as blocks in a page.'\n ),\n include_package_data=True,\n classifiers=[\n 'Framework :: Django',\n 'Programming Language :: Python',\n ],\n keywords=['modular', 'modules', ],\n install_requires=[\n 'Django >= 1.5',\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"333904769","text":"#!/usr/bin/env python\n\nimport sys, tempfile, os, shutil\n\ndef replace_license(filename, newlicense, oldlicenseid):\n sourcef = open(filename)\n (tempf, tempn) = tempfile.mkstemp()\n\n oldlicense_found = False\n license_written = False\n endcpr_found = False\n\n for line in sourcef:\n if not oldlicense_found and not license_written:\n oldlicense_found = line.startswith(oldlicenseid)\n if not oldlicense_found:\n os.write(tempf, line)\n elif oldlicense_found and not license_written:\n license_written = True\n os.write(tempf, newlicense)\n os.write(tempf, \"*/\\n\")\n elif license_written and not endcpr_found:\n endcpr_found = line.startswith(\"*/\")\n else:\n os.write(tempf, line)\n\n os.close(tempf)\n sourcef.close()\n shutil.move(tempn, filename)\n\nif __name__ == \"__main__\":\n licensef = open(sys.argv[1])\n oldlicenseid = sys.argv[2]\n newlicense = licensef.read()\n licensef.close()\n for fn in sys.argv[3:]:\n replace_license(fn, newlicense, oldlicenseid)\n","sub_path":"scripts/change_license.py","file_name":"change_license.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"645078946","text":"from flask import Flask, request, Response, 
render_template, json, jsonify, flash\nimport requests\nimport re\nimport itertools\nfrom flask_wtf.csrf import CSRFProtect\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, SubmitField\n\n#Key to use on webster api\nkey = '325142d6-6793-4d6f-8f67-8b23d76755e0'\n\n\n# Select field added in the template, validation inside of the function itself, not the form.\nclass WordForm(FlaskForm):\n    avail_letters = StringField(\"Letters\")\n    submit = SubmitField(\"Go\")\n\ncsrf = CSRFProtect()\napp = Flask(__name__)\napp.config[\"SECRET_KEY\"] = \"row the boat\"\ncsrf.init_app(app)\n\n@app.route('/')\n@app.route('/index')\ndef index():\n    form = WordForm()\n    return render_template(\"index.html\", form=form)\n\n\n@app.route('/words', methods=['POST','GET'])\ndef letters_2_words():\n\n\n    form = WordForm()\n    # Bail out early on an invalid submit; everything below relies on the form data.\n    if not form.validate_on_submit():\n        return render_template(\"index.html\", form=form)\n\n    letters = form.avail_letters.data\n    # This is just to show how to get the word length from the request form.\n    print(request.form.get('wordlength'))\n    word_length = request.form.get('wordlength')\n    pattern = request.form.get('pattern')\n    print(request.form.get('pattern'))\n    # Nothing to search for, or the pattern length contradicts the requested word length.\n    if letters == '' and pattern == '':\n        return render_template(\"index.html\", form=form)\n    elif word_length != 'default' and pattern != '' and len(pattern) != int(word_length):\n        return render_template(\"index.html\", form=form)\n\n    with open('sowpods.txt') as f:\n        good_words = set(x.strip().lower() for x in f.readlines())\n\n    #build the set and depending on the parameters passed in via the form, utilize word length or pattern.\n    word_set = set()\n    if letters != '':\n        for l in range(3,len(letters)+1):\n            for word in itertools.permutations(letters,l):\n                w = \"\".join(word)\n                if w in good_words:\n                    if word_length != 'default':\n                        if int(word_length) == len(w) and re.fullmatch(pattern,w) != None and pattern != '':\n                            word_set.add(w)\n                        elif int(word_length) == len(w) and pattern == '':\n                            word_set.add(w)\n                    else:\n                        if re.fullmatch(pattern,w) != None and pattern != '':\n                            word_set.add(w)\n                        elif pattern == '':\n                            word_set.add(w)\n    #Same Pattern, just go through all words if none are specified.\n    else:\n        for w in good_words:\n            if word_length != 'default':\n                if int(word_length) == len(w) and re.fullmatch(pattern,w) != None and pattern != '':\n                    word_set.add(w)\n                elif int(word_length) == len(w) and pattern == '':\n                    word_set.add(w)\n            else:\n                if re.fullmatch(pattern,w) != None and pattern != '':\n                    word_set.add(w)\n                elif pattern == '':\n                    word_set.add(w)\n\n\n    #sorting the word list first by alphabet and then by length.\n    wordlist = list(word_set)\n    wordlist = sorted(wordlist)\n    wordlist = sorted(wordlist,key=len,reverse=True)\n\n    return render_template('wordlist.html',\n                            wordlist=wordlist,\n                            name=\"CS4131\")\n\n\n#New Route to make requests from the flask app, this hides the api key from the users who can see the Javascript.\n@app.route('/def/<word>', methods=['GET'])\ndef getDef(word):\n    resp = requests.get(\"https://www.dictionaryapi.com/api/v3/references/collegiate/json/\" + word + \"?key=\" + key)\n    data = resp.json()\n    if type(data[0]) != dict:\n        return jsonify('No Def. 
Found')\n else:\n alt_def = data[0]['shortdef']\n return jsonify(alt_def)\n\n\n@app.route('/proxy')\ndef proxy():\n result = requests.get(request.args['url'])\n resp = Response(result.text)\n resp.headers['Content-Type'] = 'application/json'\n return resp\n\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"46246301","text":"\"\"\"\nderive_field_objects.py\n\nProduce file field-objects-raw.json given calibration-*camera.json, as well as\ncamera model info from stitching_info.json and field model info.\n\nRequires either \"swapped\" info to be present in the calibration JSON,\nor \"--swapped\" argument to be present, since field-objects-raw.json requires\n\"view\" and \"camera\" keys for each point in key \"pts\".\n\nIf a mapped field object is in neither camera's frame, then we pick the frame\nwhere it's less off-screen and take the closest point.\n\nExample usage:\n python3 derive_field_objects.py \\\n -lc .../calibration/calibration-lcamera.json \\\n -rc .../calibration/calibration-rcamera.json \\\n -f .../calibration/refined_field.json \\\n -s .../stitch/no_stitching_info_file.json \\\n -o .../stitch/field-objects-raw.json \\\n [optionally] --swapped False\n\"\"\"\n\nimport argparse\nimport json\nimport numpy as np\nimport os\n\nfrom src.mappers.pinhole_mapper import PinholeMapper\nfrom src.mappers.undistort_mapper import MLUndistortMapper\nfrom src.util.field_model import FieldModel\n\n\ndef create_pinhole_mapper(calibration_data_path):\n\n assert os.path.exists(calibration_data_path), \\\n \"failed to find calibration data {}\".format(calibration_data_path)\n with open(calibration_data_path, 'r') as jf:\n calibration_data = json.load(jf)\n\n if \"swapped\" in calibration_data:\n swapped = calibration_data[\"swapped\"]\n else:\n swapped = None\n\n K_p = np.array(calibration_data[\"pinhole_model\"][\"K\"])\n R_p = np.array(calibration_data[\"pinhole_model\"][\"R\"])\n T_p = np.array(calibration_data[\"pinhole_model\"][\"T\"])\n width = np.array(calibration_data[\"pinhole_model\"][\"w\"])\n height = np.array(calibration_data[\"pinhole_model\"][\"h\"])\n\n return PinholeMapper(K_p, R_p, T_p, (width, height)), swapped\n\n\ndef check_field_model_consistency(\n field_model_path, lcamera_calibration_path, rcamera_calibration_path):\n field_link = os.path.join(*field_model_path.split(\"/\")[-2:])\n field_resources_links = []\n for r in [lcamera_calibration_path, rcamera_calibration_path]:\n with open(r, 'r') as jf:\n json_data = json.load(jf)\n if \"field_link\" in json_data:\n field_resources_links.append(json_data[\"field_link\"])\n assert json_data[\"field_link\"] == field_link, \\\n \"field mismatch! 
resource {}: {} != {}\".format(\n r, json_data[\"field_link\"], field_link)\n\n\ndef pt_rect_distance(pt, w, h):\n\n dx = max(abs(pt[0] - .5 * w) - .5 * w, 0)\n dy = max(abs(pt[1] - .5 * h) - .5 * h, 0)\n distance = dx * dx + dy * dy\n\n return distance\n\n\ndef snap_to_frame(pt, w, h):\n\n new_pt = pt.copy()\n new_pt[0] = max(new_pt[0], 0)\n new_pt[0] = min(new_pt[0], w)\n new_pt[1] = max(new_pt[1], 0)\n new_pt[1] = min(new_pt[1], h)\n\n return new_pt\n\n\nif __name__ == \"__main__\":\n\n # Parse arguments:\n ap = argparse.ArgumentParser()\n ap.add_argument(\n \"-lc\", \"--lcamera_calibration_path\", required=True, type=str,\n help=\"Path to calibration-lcamera.json\")\n ap.add_argument(\n \"-rc\", \"--rcamera_calibration_path\", required=True, type=str,\n help=\"Path to calibration-rcamera.json\")\n ap.add_argument(\n \"-f\", \"--field_model_path\", required=True, type=str,\n help=\"Path to field model JSON\")\n ap.add_argument(\n \"-s\", \"--stitching_info_path\", required=True, type=str,\n help=\"Path to stitching info JSON (only need camera model)\")\n ap.add_argument(\n \"-o\", \"--output_path\", required=True, type=str,\n help=\"Path to output JSON\")\n ap.add_argument(\n \"--swapped\", required=False, type=bool,\n help=\"Optional argument to determine if cameras are swapped\"\n )\n args = vars(ap.parse_args())\n\n # Unpack inputs\n lcamera_pinhole_mapper, l_swapped = \\\n create_pinhole_mapper(args[\"lcamera_calibration_path\"])\n rcamera_pinhole_mapper, r_swapped = \\\n create_pinhole_mapper(args[\"rcamera_calibration_path\"])\n field = FieldModel(args[\"field_model_path\"])\n undistorter = MLUndistortMapper(\n data_filename=args[\"stitching_info_path\"],\n file_type=\"stitching_info\")\n with open(args[\"stitching_info_path\"], 'r') as jf:\n stitching_info = json.load(jf)\n w = stitching_info[\"input_size\"][\"width\"]\n h = stitching_info[\"input_size\"][\"height\"]\n\n # Validate inputs\n swapped = None\n if l_swapped is None and r_swapped is None and args[\"swapped\"] is not None:\n raise ValueError(\"Could not determine \\\"swapped\\\" \"\n \"info from calibration JSON\")\n else:\n if args[\"swapped\"] is not None:\n swapped = args[\"swapped\"]\n else:\n assert l_swapped == r_swapped, \"need l_swapped == r_swapped\"\n swapped = l_swapped\n check_field_model_consistency(\n args[\"field_model_path\"],\n args[\"lcamera_calibration_path\"],\n args[\"rcamera_calibration_path\"]\n )\n\n # Construct field objects in field coordinates\n x = .5 * field.touch_line_length\n y = .5 * field.goal_line_length\n field_points = np.array([\n [-x, -y, 0], # near left corner\n [-x, 0, 0], # left goal\n [-x, y, 0], # far left corner\n [0, -y, 0], # near center line\n [0, 0, 0], # center point\n [0, y, 0], # far center line\n [x, -y, 0], # near right corner\n [x, 0, 0], # right goal\n [x, y, 0], # far right corner\n ])\n\n # Map field objects to image coordinates\n lcamera_points_undistorted = lcamera_pinhole_mapper.map_pts(field_points)\n rcamera_points_undistorted = rcamera_pinhole_mapper.map_pts(field_points)\n lcamera_points = undistorter.map_pts_inv(lcamera_points_undistorted)\n rcamera_points = undistorter.map_pts_inv(rcamera_points_undistorted)\n if swapped:\n left_points = rcamera_points\n right_points = lcamera_points\n else:\n left_points = lcamera_points\n right_points = rcamera_points\n\n # Construct output data\n output_data = {}\n pts = []\n pts_index = 0\n while pts_index < 9:\n left_distance = pt_rect_distance(left_points[pts_index], w, h)\n right_distance = 
pt_rect_distance(right_points[pts_index], w, h)\n if left_distance == 0:\n cur_pt = {\n \"x\": int(round(left_points[pts_index][0])),\n \"y\": int(round(left_points[pts_index][1])),\n \"view\": \"left\",\n \"camera\": \"RCamera\" if swapped else \"LCamera\"\n }\n elif right_distance == 0:\n cur_pt = {\n \"x\": int(round(right_points[pts_index][0])),\n \"y\": int(round(right_points[pts_index][1])),\n \"view\": \"right\",\n \"camera\": \"LCamera\" if swapped else \"RCamera\"\n }\n else:\n if left_distance <= right_distance:\n temp_point = snap_to_frame(left_points[pts_index], w, h)\n cur_pt = {\n \"x\": int(round(temp_point[0])),\n \"y\": int(round(temp_point[1])),\n \"view\": \"left\",\n \"camera\": \"RCamera\" if swapped else \"LCamera\"\n }\n else:\n temp_point = snap_to_frame(right_points[pts_index], w, h)\n cur_pt = {\n \"x\": int(round(temp_point[0])),\n \"y\": int(round(temp_point[1])),\n \"view\": \"left\",\n \"camera\": \"RCamera\" if swapped else \"LCamera\"\n }\n print(\"Warning! snapping point to frame\")\n pts.append(cur_pt)\n\n pts_index += 1\n\n output_data[\"pts\"] = pts\n output_data[\"swapped\"] = int(swapped)\n assert len(pts) == 9, \"need 9 \\\"pts\\\", instead got {}\".format(len(pts))\n\n # Write output JSON\n if not os.path.exists(os.path.dirname(args[\"output_path\"])):\n os.makedirs(os.path.dirname(args[\"output_path\"]))\n with open(args[\"output_path\"], 'w') as jf:\n json.dump(output_data, jf, indent=4, sort_keys=True)\n\n","sub_path":"derive_field_objects.py","file_name":"derive_field_objects.py","file_ext":"py","file_size_in_byte":8100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"113675128","text":"from django.shortcuts import render\n\n# Create your views here.\nfrom django.db import connection\nimport sqlite3\nfrom pymongo import MongoClient\n\ndef ddata(request):\n data = request.GET.copy()\n with MongoClient(\"mongodb://172.17.0.3:27017/\") as client:\n result = list(client.ddb.ddetail.find({}))\n data['page_obj'] = result\n return render(request, 'board/ddata.html', context=data)\n\ndef listwithmongo(request):\n data = request.GET.copy()\n with MongoClient('mongodb://172.17.0.2:27017/') as client:\n mydb = client.mydb\n result = list(mydb.economic.find({}))\n data['page_obj'] = result\n return render(request, 'board/listwithmongo.html', context=data)\n\ndef listwithrawquery(request):\n data = request.GET.copy()\n # data = dict()\n # connection.row_factory = sqlite3.Row\n # cursor = connection.cursor()\n with sqlite3.connect(\"db.sqlite3\") as con:\n con.row_factory = sqlite3.Row\n cur = con.cursor();\tcur.execute(\"select * from economic\")\n data['rows'] = cur.fetchall()\n\n for row in data['rows']:\n print(f\"{row['title']}, {row['link']}\")\n\n return render(request, 'board/listwithrawquery.html', context=data)\n\nfrom django.core.paginator import Paginator\ndef listwithrawquerywithpaginator(request):\n data = request.GET.copy()\n # data = dict()\n # connection.row_factory = sqlite3.Row\n # cursor = connection.cursor()\n with sqlite3.connect(\"db.sqlite3\") as con:\n con.row_factory = sqlite3.Row\n cur = con.cursor();\tcur.execute(\"select * from economic\")\n contact_list = cur.fetchall()\n\n paginator = Paginator(contact_list, 5) # Show 15 contacts per page.\n\n page_number = request.GET.get('page')\n page_number = page_number if page_number else 1 \n data['page_obj'] = paginator.get_page(page_number)\n\n page_obj=data['page_obj']\n for row in page_obj:\n print(f\"{row['title']}, 
{row['link']}\")\n\n return render(request, 'board/listwithrawquerywithpaginator.html', context=data)\n\nfrom pymongo import MongoClient\nfrom board.mongopaginator import MongoPaginator\n\n# def listwithmongo(request):\n# data = request.GET.copy()\n# with MongoClient('mongodb://10.0.0.5:27017/') as client:\n# mydb = client.mydb\n# result = list(mydb.economic.find({}))\t\t\t# get Collection with find()\n \n# result_page = []\n# for info in result:\t\t\t\t\t\t# Cursor\n# # del info(_id)\n# temp = {'title':info['title'], 'link':info['link']}\n# result_page.append(temp)\n# print(type(info), info)\n# data['page_obj'] = result\n \n# return render(request, 'board/listwithmongo.html', context=data)\n\ndef listwithmongowithpaginator(request):\n data = request.GET.copy()\n with MongoClient('mongodb://192.168.0.6:27017/') as client:\n mydb = client.mydb\n contact_list = mydb.economic.find({})\t\t\t# get Collection with find()\n for info in contact_list:\t\t\t\t\t\t# Cursor\n print(info)\n\n paginator = MongoPaginator(contact_list, 5) # Show 15 contacts per page.\n\n page_number = request.GET.get('page', 1)\n data['page_obj'] = paginator.get_page(page_number)\n\n page_obj=data['page_obj']\n for row in page_obj:\n print(f\"{row['title']}, {row['link']}\")\n\n return render(request, 'board/listwithrawquerywithpaginator.html', context=data)\n\n\n# 구름 새컨테이너 생성\n# 이름 learn_django1\n# 지역 서울\n# 공개 private\n# 템플릿 깃허브\n# 소프트웨어 장고\n# 추가모듈 몽고디비설치 선택\n\n# 구름 새터미널에서 mongod 입력\n# 위에 goormide 옆에 window에서 new terminal window 선택\n# mongo 입력\n# show dbs\n# new terminal window 하나 더 만든다\n# ls 하고 cd datas/ 들어가서 ls 확인\n# python3 ./scrapingandinsertmongo.py 입력하면 에러가 날 텐데\n# pip3 install -U pip pymongo\n# pip3 install -U pip bs4\n# python3 ./scrapingandinsertmongo.py\n\n#두번째 터미널에서 \n# show mydb \n# show collections \n# economic.find 하는거 \n\n\n\n# 구름 장고 gui\n# 실행하면 에러가 뜰텐데 migrate && 까지 삭제해준다\n# 실행하고 url 카피해서 board/listwithmongo 붙여줘서 브라우저에서 연다\n\n\n\n","sub_path":"board/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"44011579","text":"# ThinkPython\r\n# drawing circles using TurtleWorld\r\n\r\nimport swampy\r\nfrom swampy.TurtleWorld import *\r\n\r\nworld = TurtleWorld()\r\nbob = Turtle()\r\nbob.delay = 0.01 # to make turtle move faster\r\n\r\nfrom math import *\r\n\r\n# Exercise 4.3.4\r\n# drawing an appoximate circle\r\n\r\n# defining a function \"polygon\" with three parameters\r\ndef polygon(t, length, n):\r\n for i in range(n):\r\n fd(t, length)\r\n lt(t, 360.0/n)\r\n\r\ndef circle(t, r):\r\n circum = 2*pi*r\r\n N = int(circum/3) + 1\r\n l = circum /N # lenght * N = 2 pi r (N -> should be big enough to give \"approximate\" circle\r\n polygon(t,l,N)\r\n\r\ncircle(bob,30)\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"4.3 - 4.py","file_name":"4.3 - 4.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"96704334","text":"from django.shortcuts import render\nfrom .models import Product, ProductImages, Category\nfrom django.core.paginator import Paginator\nfrom django.db.models import Count, Q\nfrom django.shortcuts import get_object_or_404\n# Create your views here.\n\ndef productlist(request, category_slug=None):\n category = None\n productlist = Product.objects.get_queryset().order_by('id')\n category_list = Category.objects.annotate(total_products=Count('product'))\n \n # category filter\n if 
category_slug :\n category =get_object_or_404(Category, slug=category_slug)\n productlist = productlist.filter(category=category)\n\n # search filter\n search_query = request.GET.get('q')\n if search_query :\n productlist = productlist.filter(\n Q(name__icontains = search_query) |\n Q(description__icontains = search_query) |\n Q(condition__icontains = search_query) |\n Q(brand__brand_name__icontains = search_query)\n )\n\n # pagination\n paginator = Paginator(productlist,1)\n page = request.GET.get('page')\n productlist = paginator.get_page(page)\n\n template = 'Product/product_list.html'\n context = { \n 'product_list' : productlist, \n 'category_list' : category_list,\n 'category' : category}\n return render(request, template, context)\n\ndef productdetail(request,product_slug):\n productdetail =get_object_or_404(Product, slug=product_slug)\n productimages = ProductImages.objects.filter(product=productdetail)\n context = { 'product_detail' : productdetail , 'product_images' : productimages }\n template = 'Product/product_detail.html'\n return render(request, template, context)","sub_path":"product/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"68222296","text":"import numpy as np\nimport cv2\n\ncapture = cv2.VideoCapture(0)\nRED = (0, 0, 255)\nGREEN = (0, 255, 0)\nYELLOW = (0, 255, 255)\nBLUE = (255, 0, 0)\nCYAN = (255, 255, 0)\nWHITE = (255, 255, 255)\n\n# Capture first frame\nret, frame = capture.read()\n\n# Define the codec and create VideoWriter object\nfourcc = cv2.VideoWriter_fourcc('X','V','I','D')\nwriter = cv2.VideoWriter('output.avi', fourcc, 20, (640, 480))\n\n# recorder toggle\nrecorder = False\n\nwhile(ret):\n\twindow_frame = frame.copy()\n\n\tcv2.putText(window_frame, \"Press [s] to start/stop recording.\", (0, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.45, GREEN, 1)\n\tcv2.putText(window_frame, \"Press [esc] to quit.\", (0, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.45, GREEN, 1)\n\n\tif recorder:\n\t\tcv2.putText(window_frame, \"[RECORDING]\", (0, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.45, RED, 2)\n\t\t\n\t\twriter.write(frame)\n\n\t# Display the resulting frame\n\tcv2.imshow('frame', window_frame)\n\n\t# press ESC to quit\n\tif cv2.waitKey(33) == 27:\n\t\tbreak\n\n\t# press 's' to toggle recording\n\tif cv2.waitKey(33) & 0xFF == ord('s'):\n\t\tif recorder == True:\n\t\t\trecorder = False\n\t\telse:\n\t\t\trecorder = True\n\n\t# Capture next frame\n\tret, frame = capture.read()\n\n\n# When everything done, release the capture\ncapture.release()\nwriter.release()\ncv2.destroyAllWindows()","sub_path":"examples/python/utilities/record_webcam.py","file_name":"record_webcam.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"566318185","text":"from pages.cdn_overview_page import cdnOverviewPage\nimport allure\nimport pytest\n\n@allure.title(\"点击证书总数,跳转正常\")\n@allure.title(\"#\")\ndef test_cert_total(login):\n '''用例描述:1.先登录\n 2.点击证书总数'''\n driver = login\n cdnOverviewPage(driver).click_cert_total()\n cdnOverviewPage(driver).check_cert_total()\n\n@allure.title(\"点击即将过期证书数,跳转正常\")\n@allure.title(\"#\")\ndef test_cert_expire(login):\n '''用例描述:1.先登录\n 2.点击证书即将过期数'''\n driver = login\n cdnOverviewPage(driver).click_cert_expire()\n cdnOverviewPage(driver).check_cert_total()\n\n@pytest.mark.skip(\"需调整\")\n@allure.title(\"验证证书总数,数量正常\")\n@allure.title(\"#\")\ndef 
test_cert_total_num(login,db_cert_total):\n '''用例描述:1.先登录\n 2.对比证书总量与数据库中证书数量'''\n driver = login\n cert_total = db_cert_total[0].get('count(*)')\n cdnOverviewPage(driver).check_cert_total_num(cert_total)\n\n@allure.title(\"验证即将过期证书数,数量正常\")\n@allure.title(\"#\")\n@pytest.mark.skip(\"验证证书即将过期数,实现方法待定\")\ndef test_cert_expire_num(login):\n '''用例描述:1.先登录\n 2.验证证书即将过期数'''\n pass\n\n\n","sub_path":"ConsoleClient/case/overview/test_cert_statistics.py","file_name":"test_cert_statistics.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"499944538","text":"from collections import Counter\n\n\ndef main():\n \"\"\"\n A python program to find the wordcount in a file for each line and then\n print the output.\n Finally store the output back to the file.\n\n Input:a file includes two line\n Python Course\n Deep Learning Course\n\n Output:\n Python: 1\n Course: 2\n Deep: 1\n Learning: 1\n \"\"\"\n file = open(\"input_file.txt\", \"r\", encoding=\"utf-8\")\n wordcount = Counter(file.read().split())\n f = open(\"output_file.txt\", \"w\")\n for item in wordcount.items():\n f.write(\"{}: {}\\n\".format(*item))\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"ICP2/3_word_count.py","file_name":"3_word_count.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"556424089","text":"#%% [markdown]\n# # 202 Tensor\n#\n# View more, visit my tutorial page: https://morvanzhou.github.io/tutorials/\n# My Youtube Channel: https://www.youtube.com/user/MorvanZhou\n#\n# Dependencies:\n# * torch: 0.4.1\n#\n# tensor in torch is to build a computational graph,\n# but this graph is dynamic compared with a static graph in Tensorflow or Theano.\n# So torch does not have placeholder, torch can just pass tensor to the computational graph.\n#\n\n#%%\nimport torch\n# from torch.autograd import Variable\n\n#%%\ntensor0 = torch.FloatTensor([[1, 2], [3, 4]]) # build a tensor\nprint(tensor0) # [torch.FloatTensor of size 2x2]\ntensor0.requires_grad_(True) # by requires_grad flag for compute gradients\nprint(tensor0) # [torch.FloatTensor of size 2x2]\n\nvariable = torch.tensor(\n [[1, 2], [3, 4]], dtype=torch.float,\n requires_grad=True) # build a tensor, usually for compute gradients\nprint(variable) # [torch.FloatTensor of size 2x2]\n#\n\n#%%\ntensor0.requires_grad_(False) # by requires_grad flag for close gradients\nt_out = torch.mean(tensor0 * tensor0) # x^2\nv_out = torch.mean(variable * variable) # x^2\nprint(t_out)\nprint(v_out)\n\n#%%\nv_out.backward() # backpropagation from v_out\n\n#%% [markdown]\n# $$ v_{out} = {{1} \\over {4}} sum(variable^2) $$\n#\n# the gradients w.r.t the variable,\n#\n# $$ {d(v_{out}) \\over d(variable)} = {{1} \\over {4}} 2 variable = {variable \\over 2}$$\n#\n# let's check the result pytorch calculated for us below:\n\n#%%\nvariable.grad\n\n#%%\nvariable # this is data in variable format\n\n#%%\nvariable.detach() # this is data in tensor format\n\n#%%\nvariable.detach().numpy() # numpy format\n\n#%% [markdown]\n# Note that we did `.backward()` on `v_out` but `variable` has been assigned new values on it's `grad`.\n#\n# As this line\n# ```\n# v_out = torch.mean(variable*variable)\n# ```\n# will make a new variable `v_out` and connect it with `variable` in computation 
graph.\n\n#%%\ntype(v_out)\n\n#%%\ntype(v_out.data)\n","sub_path":"tutorial-contents-vscode-0.4.1/202_tensor.py","file_name":"202_tensor.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"287119871","text":"import cv2\nimport numpy as np\nimport sys\n\nsys.setrecursionlimit(100000)\n\ndef region_grow (i,j,g_min,g_max,dir): \n if array_img[i,j]>g_min and array_img[i,j]1 and i1:\n region_grow(i-1,j,g_min,g_max,1)\n region_grow(i,j+1,g_min,g_max,3)\n region_grow(i+1,j,g_min,g_max,5)\n region_grow(i,j-1,g_min,g_max,7)\n region_grow(i+1,j+1,g_min,g_max,4)\n region_grow(i-1,j-1,g_min,g_max,8)\n region_grow(i+1,j-1,g_min,g_max,6)\n region_grow(i-1,j+1,g_min,g_max,2)\n \ndef chain (image): \n image [image == 255] = 1\n h_img,w_img=image.shape\n chain_array=np.array([8,1,2,7,0,3,6,5,4])\n chain_dir=np.array ([(-1,-1),(-1,0),(-1,1),(0,-1),(0,0),(0,1),(1,-1),(1,0),(1,1)])\n chain_dir1=np.array([(-1,0),(0,-1),(0,1),(-1,0),(0,0),(-1,0),(0,-1),(0,-1),(1,1)])\n chain_dir2=np.array([(0,-1),(0,1),(-1,0),(1,0),(0,0),(1,0),(0,1),(0,1),(1,0)])\n \n # getting starting point for the chain-code\n for x1 in range (2,w_img-2):\n for y1 in range (2,h_img-1):\n if image[y1,x1] != 1:\n a = image [y1-1,x1] + image [y1+1,x1] + image [y1,x1-1] + image [y1,x1+1] + image [y1-1,x1+1] + image [y1+1,x1+1] + image [y1-1,x1-1] + image [y1+1,x1-1] \n image_chain [y1,x1] = a\n for x1 in range (2,w_img-2):\n for y1 in range (2,h_img-1):\n if image[y1,x1] == 0:\n spx = x1\n spy = y1\n break\n if spx>0 or spy>0:\n break \n \n # getting chain code from labelled (region_grow) and smoothed (CV2.findContours) image\n sumarray_max=np.amax(image_chain[spy-1:spy+2, spx-1:spx+2])\n chain_list=np.array([h_img,w_img,spy,spx,0])\n while np.sum(image_chain) > 0 and sumarray_max > 0:\n image_chain[spy,spx]=0\n b=np.argmax(image_chain[spy-1:spy+2, spx-1:spx+2])\n sumarray_max=np.amax(image_chain[spy-1:spy+2, spx-1:spx+2])\n chain_list=np.append(chain_list,chain_array[b])\n y1,x1=chain_dir[b] \n y2,x2=chain_dir1[b]\n y3,x3=chain_dir2[b] \n image_chain[spy+y2,spx+x2]=0\n image_chain[spy+y3,spx+x3]=0\n spx=spx+x1\n spy=spy+y1\n return chain_list\n\ndef draw_chain (canvas_y, canvas_x, starty, startx, code, line):\n chain_dir=np.array ([(0,0),(-1,0),(-1,1),(0,1),(1,1),(1,0),(1,-1),(0,-1),(-1,-1)])\n canvas=np.zeros(shape=[canvas_y, canvas_x, 3], dtype=np.uint8)\n for i in code[2:]:\n y,x=chain_dir[i]\n y=y+starty\n x=x+startx\n canvas=cv2.line(canvas,(startx,starty),(x,y),(0,255,0),1)\n starty=y\n startx=x \n return canvas\n\ndef draw_roi(event,x,y,flags,param):\n global ix,iy,roi, img_test, img_start,buffer\n if event == cv2.EVENT_LBUTTONDOWN:\n img_test=img_start[y:y+120,x:x+180]\n roi=1 \n elif event == cv2.EVENT_MOUSEMOVE:\n if y+120 < h_img and x+180 < w_img:\n cv2.rectangle(img_start,(x,y),(x+180,y+120), (0,255,0),2)\n cv2.circle(img_start,(x+90,y+60),20,(0,255,255),2)\n cv2.line(img_start,(x+90,y+30),(x+90,y+90),(0,255,0),2)\n cv2.line(img_start,(x+60,y+60),(x+120,y+60),(0,255,0),2)\n cv2.imshow(windowName,img_start)\n img_start=np.copy(img)\n \n# get the \"region of Interest\" (ROI)\" by draw_roi()\n# it is necessary to left-click first, followed by pressing a key on keyboard\n# function has to be rewritten, because of insufficent method of moving the box \n\nwindowName=\"Sonobild\"\nimg = cv2.imread(\"BN-1.bmp\",0)\nimg_start = cv2.imread(\"BN-1.bmp\",0)\n\nh_img, w_img=img_start.shape\n#h_img, 
w_img=(0,200)\n#array_img=img[h_img:h_img+120,w_img:w_img+180]\n\nroi=0\ncv2.namedWindow(windowName)\ncv2.imshow(windowName, img_start)\ncv2.setMouseCallback(windowName, draw_roi)\ncv2.imshow(windowName, img_start)\nwhile roi==0:\n cv2.waitKey(0)\ncv2.destroyAllWindows()\narray_img=img_test\n\ncv2.namedWindow(\"ROI\")\ncv2.moveWindow(\"ROI\",0,0)\ncv2.imshow (\"ROI\",array_img)\n\n# draw the original image from comparision to segementation steps\ncv2.namedWindow(\"Orginal Image\")\ncv2.moveWindow(\"Orginal Image\",480,0)\ncv2.imshow (\"Orginal Image\",img)\n\n\n# get the mean value and the standard deviation of the central region of the ROI\nhight_img,width_img=array_img.shape\nlabelled=np.zeros (array_img.shape)\nstart_point_y=round(hight_img/2)\nstart_point_x=round(width_img/2)\nmean=np.mean(array_img[start_point_y-20:start_point_y+20,start_point_x-10:start_point_x+10])\nstd=np.std(array_img[start_point_y-10:start_point_y+10,start_point_x-10:start_point_x+10])\nupper_limit=mean+std\nif mean-2*std>0:\n lower_limit=mean-2*std\nelif mean-std<0:\n lower_limit=0\n\n# image segmentation step 1 by region growing using region_grow ()\ncontours=np.copy(array_img)\ncontours.fill(0)\ndirection=[120,180,start_point_y,start_point_x,0]\nwhile np.sum(labelled)==0:\n region_grow (start_point_y,start_point_x,lower_limit,upper_limit,0)\n np.sum(labelled)\n start_point_y=start_point_y-1 \ncv2.namedWindow(\"Region filling\")\ncv2.moveWindow(\"Region filling\",0,210)\ncv2.imshow (\"Region filling\",labelled)\n\n# image segmentation step 2 by \"filling the holes\" resulting in moderatly smoothing the surface \nnpMask=np.array(labelled,dtype=\"i8\")\n_,labelled_holes_filled, hierarchy = cv2.findContours(npMask, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)\ncv2.drawContours(contours, labelled_holes_filled, -1, (255, 0, 0 ),10)\ncontours = ~contours\ncv2.namedWindow(\"Filling the holes\")\ncv2.moveWindow(\"Filling the holes\",0,400)\ncv2.imshow (\"Filling the holes\",contours)\n\nmerged=cv2.add(array_img,contours)\n\ncv2.namedWindow(\"Extracted liver segment\")\ncv2.moveWindow(\"Extracted liver segment\",0,600)\ncv2.imshow (\"Extracted liver segment\",merged)\n\n# get chain-code of surface using chain () and draw chain-code by draw_chain ()\nimage_chain=np.zeros((120,180))\nchain_code=chain (contours)\nsurface_chain=draw_chain (120,180,32,2,chain_code[3:],1)\ncv2.namedWindow(\"chain_code\")\ncv2.moveWindow(\"chain_code\",1100,0)\ncv2.imshow (\"chain_code\",surface_chain)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n# Print chain code of surface from chain () and tissue from region_grow ()\nprint (\"chain code of surface: \",chain_code)\nprint (\"chain code of tissue: \", direction)","sub_path":"code/chaincode/chain_code_ROI.py","file_name":"chain_code_ROI.py","file_ext":"py","file_size_in_byte":6422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"474108436","text":"a = [ch1 for ch1 in range(30000, 50001)]\r\nb = [ch2 for ch2 in range(100001, 130002)]\r\nkolvo_par = 0\r\nmaxsum = 0\r\nif len(b) >= len(a):\r\n newb = [ch3 for ch3 in range(b[0], b[len(b) - 1] - (len(b) - len(a) - 1))]\r\n for i in range(0, len(newb)):\r\n if (newb[i] + a[i]) % 7 == 0 and newb[i] % 3 != 0 and newb[i] % 5 == 0 and a[i] % 3 == 0 and a[i] % 4 != 0 and a[i] % 6 != 0:\r\n kolvo_par+=1\r\n maxsum = max(maxsum, newb[i] + a[i])\r\nprint(kolvo_par, 
maxsum)\r\n","sub_path":"2_СРЕДНИЙ_ПОВЫШ.py","file_name":"2_СРЕДНИЙ_ПОВЫШ.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"202258778","text":"#!/usr/bin/env python3\r\n\r\n# Plater\r\n# -----\r\n# Easily create a starter file template for different project\r\n# -----\r\n# https://github.com/aquadzn/plater\r\n# William Jacques\r\n# -----\r\n# Licensed under MIT License\r\n# -----\r\n# cli.py\r\n# -----\r\n\r\nimport sys\r\n\r\nfrom . import plater\r\n\r\n\r\ndef main():\r\n \"\"\"\r\n Main function.\r\n \"\"\"\r\n # https://github.com/twintproject/twint/blob/master/twint/cli.py#L293\r\n version = \".\".join(str(v) for v in sys.version_info[:2])\r\n if float(version) < 3.6:\r\n print(\"Plater requires Python version 3.6+\")\r\n sys.exit(0)\r\n\r\n plater.generate_template(plater.get_args())\r\n","sub_path":"plater/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"594381522","text":"#!/usr/bin/env python\n\n# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration\n#James Walder\n\nfrom __future__ import print_function\n\n\n__doc__ =\"\"\"Simple Transform to understand what goes on\"\"\"\n\n\nfrom PyJobTransformsCore.trf import *\nfrom PyJobTransformsCore.trfutil import *\nfrom PyJobTransformsCore.full_trfarg import *\n#from RecJobTransforms.RecConfig import recConfig\n\nclass BeamSpotVertexTransform( JobTransform ):\n def __init__(self):\n JobTransform.__init__(self,\n authors = [ Author('James Walder','jwalder@cern.ch')],\n skeleton = 'InDetBeamSpotExample/skeleton.BeamSpotVertex.py',\n help = __doc__)\n #,config = recConfig)\n print (\"JW: In Init\")\n self.add( InputDPDFileArg(name='InputDESDM_BEAMSPOTFile'))\n self.add( NtupleFileArg(name='beamspotFile'))\n self.add( NtupleFileArg(name='monitoringFile'),default='mon.root')\n self.add( BasicStringArg(help = 'db filename', name='beamspotDb'), default = 'beamspot.db')\n self.add( ListOfStringsArg(name='jobConfig', help='jobConfig extra options'), default = '')\n self.add( JobOptionsArg(name='topOptions',help='InDetBeamSpotExampleJobOption config file'),\n default='InDetBeamSpotExample/VertexTemplate.py')\n self.add( ListOfStringsArg(name='preExec', help='intial options'), default = '')\n self.add( ListOfStringsArg(name='postExec', help='final options'), default = '')\n self.add( MaxEventsArg(), default ='-1' )\n\n \n def doPostRunActions(self):\n print (\"JW: in PostRun\")\n \n\nif __name__ == '__main__':\n print (\"JW: Run from Main\")\n trf = BeamSpotVertexTransform()\n sys.exit(trf.exeSysArgs().exitCode())\n","sub_path":"InnerDetector/InDetExample/InDetBeamSpotExample/bin/beamSpotT1_Vertex_trf.py","file_name":"beamSpotT1_Vertex_trf.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"168953781","text":"\"\"\"\n 539 - Minimum Time Difference\n @author oneshan\n @version 1.0 3/21/2017\n\"\"\"\n\n\nclass Solution(object):\n def findMinDifference(self, timePoints):\n \"\"\"\n :type timePoints: List[str]\n :rtype: int\n \"\"\"\n time = [int(timePoint[:2]) * 60 + int(timePoint[3:]) for timePoint in timePoints]\n time.sort()\n time.append(time[0] + 1440)\n\n minMin = 1440\n for i in range(len(time) - 1):\n minMin = min(minMin, time[i + 1] - time[i])\n\n return 
minMin\n","sub_path":"leetcode/539_MinimumTimeDifference.py","file_name":"539_MinimumTimeDifference.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"78305930","text":"#import sys\nimport copy\nfrom pprint import pprint\nimport itertools\n#sys.stdin = open('14502.txt', 'r')\ndef isWall(x, y):\n if x > N-1 or x < 0 :\n return True\n if y > M-1 or y < 0 :\n return True\n return False\ndef comb(k, now, temp):\n global result\n if k == 3:\n result.append(temp)\n else:\n for i in range(now+1, len(stack)+1):\n comb(k+1, i, temp + [i])\ndef DFS(x, y):\n dx = [-1, 0, 1, 0]\n dy = [0, 1, 0, -1]\n #check = []\n stack = [[x, y]]\n while True:\n if stack == []:\n #return check\n return\n x, y = stack.pop()\n for mode in range(4):\n test_x = x + dx[mode]\n test_y = y + dy[mode]\n if isWall(test_x, test_y) == False and clone_map[test_x][test_y] == 0: #and [test_x, test_y] not in check and [test_x, test_y] not in temp:\n stack.append([test_x, test_y])\n #check.append([test_x, test_y])\n clone_map[test_x][test_y] = 2\n\n#TC = int(input())\n#for test_case in range(1, TC+1):\nN, M = map(int, input().split())\ndatamap = [list(map(int, input().split())) for _ in range(N)]\n\nmax = 0\n\n\n\nstack = []\nvirus = []\n#zero_count = 0\nfor i in range(N):\n for j in range(M):\n if datamap[i][j] == 2:\n virus.append([i, j])\n elif datamap[i][j] == 0:\n stack.append([i, j])\n #zero_count +=1\n\nmax = 0\n#print(stack)\n#print(len(stack))\nzz = [i for i in range(len(stack))]\n#print(zz)\nd = list(itertools.combinations(zz, 3))\n\n#print(len(d))\nwhile True:\n #count = zero_count\n if d == []:\n break\n\n clone_map = copy.deepcopy(datamap)\n clone_virus = copy.deepcopy(virus)\n #pprint(clone_map)\n first, second, third = d.pop()\n #print(first, second, third)\n clone_map[stack[first][0]][stack[first][1]] = 1\n clone_map[stack[second][0]][stack[second][1]] = 1\n clone_map[stack[third][0]][stack[third][1]] = 1\n # temp = []\n #pprint(clone_map)\n for _ in range(len(virus)):\n virus_x, virus_y = clone_virus.pop()\n DFS(virus_x, virus_y)\n # virus_count = DFS(virus_x, virus_y)\n # temp += virus_count\n # if len(temp) + 3 == count:\n # break\n # for x, y in virus_count:\n # if clone_map[x][y] == 0:\n # clone_map[x][y] = 2\n #pprint(clone_map)\n # count -= (len(temp) + 3)\n count = 0\n for line in clone_map:\n count +=line.count(0)\n\n if count > max:\n max = count\n\nprint(max)","sub_path":"BOJ/14502. 연구소(8월버전).py","file_name":"14502. 연구소(8월버전).py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"582029277","text":"#!/usr/bin/python3\r\n\r\n\"\"\"\r\nA fun script for creating text surrounded by ASCII decoration boxes\r\n┌──────────────┐\r\n│ A normal box │\r\n└──────────────┘\r\n╔══════════════╗\r\n║ A double box ║\r\n╚══════════════╝\r\n▄▄▄▄▄▄▄▄▄▄▄▄▄▄\r\n█ A bold box █\r\n▀▀▀▀▀▀▀▀▀▀▀▀▀▀\r\n\r\nFuture Versions:\r\n Maybe use argparse (or sys.argv) to take a line of text as input with optional flags '-b' or '-d' for bold or double, respectively\r\n\r\n\"\"\"\r\n\r\n#In case someone tries to use this with Python 2.7... 
although it definitely won't work with all this unicode flying around\r\nfrom __future__ import division, print_function\r\n\r\nbb_v = chr(0x2588) # █\r\nbb_htop = chr(0x2584) # ▄\r\nbb_hbottom = chr(0x2580) # ▀\r\ndbar_h = chr(0x2550) # ═\r\ndbar_topleft = chr(0x2554) # ╔\r\ndbar_topright = chr(0x2557) # ╗\r\ndbar_v = chr(0x2551) # ║\r\ndbar_bottomleft = chr(0x255A) # ╚\r\ndbar_bottomright = chr(0x255D) # ╝\r\nbar_bottomright = chr(0x2518) # ┘\r\nbar_topleft = chr(0x250C) # ┌\r\nbar_h = chr(0x2500) # ─\r\nbar_v = chr(0x2502) # │\r\nbar_topright = chr(0x2510) # ┐\r\nbar_bottomleft = chr(0x2514) # └\r\n\r\ndef box(textline):\r\n    \"\"\"Output a line of text (textline) surrounded by a single-bar box. The result will be three lines.\"\"\"\r\n    return make_box(textline, bar_h, bar_h, bar_v, bar_v, bar_bottomleft, bar_bottomright, bar_topleft, bar_topright)\r\n\r\ndef box_double(textline):\r\n    \"\"\"Output a line of text (textline) surrounded by a double-bar box. The result is three lines\"\"\"\r\n    return make_box(textline, dbar_h, dbar_h, dbar_v, dbar_v, dbar_bottomleft, dbar_bottomright, dbar_topleft, dbar_topright)\r\n\r\ndef box_bold(textline):\r\n    \"\"\"Output a line of text (textline) surrounded by a bold box. The result will be three lines.\"\"\"\r\n    return make_box(textline, bb_htop, bb_hbottom, bb_v, bb_v, bb_hbottom, bb_hbottom, bb_htop, bb_htop)\r\n\r\ndef make_box(textline, ht, hb, vl, vr, bl, br, tl, tr):\r\n    \"\"\"A generic method for making a box of text with specific characters forming the box.\r\n    textline = a line of text (no line breaks)\r\n    ht = horizontal top character\r\n    hb = horizontal bottom character\r\n    vl = vertical left character\r\n    vr = vertical right character\r\n    bl = bottom left character\r\n    br = bottom right character\r\n    tl = top left character\r\n    tr = top right character\"\"\"\r\n    linelen = len(textline) + 4 # Pad the edges of the box with one space\r\n    l1 = [' ']*linelen\r\n    text_list = [a for a in textline] # Break up the string into a list\r\n    l1[2:-2] = text_list # Replace the text in l1 with the correct characters\r\n    l1[0] = vl # Vertical character\r\n    l1[-1] = vr # Done with l1\r\n    l0 = [ht]*linelen # top horizontal line\r\n    l0[0] = tl # top left corner\r\n    l0[-1] = tr # top right corner\r\n    l2 = [hb]*linelen # bottom horizontal line\r\n    l2[0] = bl # bottom left corner\r\n    l2[-1] = br # bottom right corner\r\n    t0 = ''.join(l0) + '\\n'\r\n    t1 = ''.join(l1) + '\\n'\r\n    t2 = ''.join(l2) + '\\n'\r\n    return t0 + t1 + t2\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    bbtext = box_bold(\"A bold box\")\r\n    dbtext = box_double(\"A double box\")\r\n    sbtext = box(\"A normal box\")\r\n    print(sbtext + dbtext + bbtext)\r\n\r\n    WRITE_FILE = False\r\n    if WRITE_FILE:\r\n        with open('asciiboxes.txt', 'wb') as ofile:\r\n            text = sbtext + dbtext + bbtext\r\n            ofile.write(text.encode('utf-8'))\r\n    else: pass\r\n","sub_path":"asciibox.py","file_name":"asciibox.py","file_ext":"py","file_size_in_byte":3964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"563411194","text":"import numpy as np\nfrom hashlib import sha1\nfrom numpy import all, array, uint8\nimport collections\n\nclass hashable(object):\n\n    def __init__(self, wrapped, tight=False):\n        r'''Creates a new hashable object encapsulating an ndarray.\n\n        wrapped\n            The wrapped ndarray.\n\n        tight\n            Optional. 
If True, a copy of the input ndaray is created.\n Defaults to False.\n '''\n self.__tight = tight\n self.__wrapped = array(wrapped) if tight else wrapped\n self.__hash = int(sha1(wrapped.view(uint8)).hexdigest(), 16)\n\n def __eq__(self, other):\n return all(self.__wrapped == other.__wrapped)\n\n def __hash__(self):\n return self.__hash\n\n def unwrap(self):\n r'''Returns the encapsulated ndarray.\n\n If the wrapper is \"tight\", a copy of the encapsulated ndarray is\n returned. Otherwise, the encapsulated ndarray itself is returned.\n '''\n if self.__tight:\n return array(self.__wrapped)\n\n return self.__wrapped\n\nclass Board:\n\n def __init__(self,rows,cols):\n self.rows = rows\n self.cols = cols\n\n # INTERFACE FOR MCST\n\n def init_representation(self):\n state = np.zeros((1 + (self.rows * self.cols * 2) + self.rows + self.cols), dtype=np.int8)\n state[0] = 1\n return state\n\n def current_player(self, state):\n return state.item(0)\n\n def next_state(self, state, move):\n new_state = np.copy(state)\n currentplayer = state.item(0)\n row,cols,ori = self.translate_to_coord(move)\n new_state[move] = currentplayer\n\n if self.is_box(new_state, row, cols, ori, currentplayer):\n new_state[0] = currentplayer\n else:\n new_state[0] = self.update_player(currentplayer)\n\n return new_state, move\n\n def legal_plays(self, state):\n plays = np.where(state == 0)[0]\n return plays\n\n def is_finished(self, state):\n plays = np.where(state == 0)[0]\n if len(plays) == 0:\n return True\n return False\n\n def register_state(self, state, row, cols, ori, player):\n new_state = np.copy(state)\n\n move = self.translate_to_move(row, cols, ori)\n new_state[move] = player\n\n if self.is_box(new_state, row, cols, ori, player):\n new_state[0] = player\n else:\n new_state[0] = self.update_player(player)\n\n return new_state, move\n\n # Utility Functions\n def winner(self, state):\n board = state[1:]\n win = collections.Counter(board).most_common()[0][0]\n return win\n\n def translate_to_move(self,row,cols,ori):\n move = (row * (self.cols * 2 + 1)) + cols + 1\n if ori == \"v\":\n move += self.cols\n return move\n\n def translate_to_coord(self, move):\n\n move2 = move - 1\n\n rows = move2 // (self.cols * 2 + 1)\n cols = move2 % (self.cols * 2 + 1)\n\n # move = 2 + (row * (self.cols * 2 + 1)) + cols\n # move = 2 + (row * (self.cols * 2 + 1)) + cols + self.cols\n\n if cols < self.cols:\n return rows, cols, \"h\"\n\n else:\n cols -= self.cols\n return rows, cols, \"v\"\n\n def update_player(self, player):\n new_player = (player + 1) % 2\n if new_player == 0:\n new_player = 2\n return new_player\n\n def is_box(self, new_state, row, cols, ori, player):\n if ori == \"h\":\n if row < self.rows:\n box1_a = new_state.item(self.translate_to_move(row, cols, \"v\"))\n box1_b = new_state.item(self.translate_to_move(row, cols + 1, \"v\"))\n box1_c = new_state.item(self.translate_to_move(row + 1, cols, ori))\n if box1_a == box1_b == box1_c == player:\n return True\n if row > 1:\n box2_a = new_state.item(self.translate_to_move(row - 1, cols, \"v\"))\n box2_b = new_state.item(self.translate_to_move(row - 1, cols + 1, \"v\"))\n box2_c = new_state.item(self.translate_to_move(row - 1, cols, ori))\n if box2_a == box2_b == box2_c == player:\n return True\n elif ori == \"v\":\n if cols > 1:\n box1_a = new_state.item(self.translate_to_move(row, cols - 1, \"h\"))\n box1_b = new_state.item(self.translate_to_move(row + 1, cols - 1, \"h\"))\n box1_c = new_state.item(self.translate_to_move(row, cols - 1, ori))\n if box1_a == box1_b == box1_c 
== player:\n return True\n if cols < self.cols:\n box2_a = new_state.item(self.translate_to_move(row, cols, \"h\"))\n box2_b = new_state.item(self.translate_to_move(row + 1, cols, \"h\"))\n box2_c = new_state.item(self.translate_to_move(row, cols + 1, ori))\n if box2_a == box2_b == box2_c == player:\n return True\n return False\n","sub_path":"baseline/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":5001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"233823508","text":"\"\"\"\n Created by alexandra at 16.04.19 17:36\n\n Calculate the Wannier spread functional using analytical solution\n to analize the localization of Wannier functions\n\n 20.06.19 Add steepest decent method for omega_D minimization\n\"\"\"\n\nimport numpy as np\nfrom math import pi\n# import json\n\n\ndef u1(kx, ky, kz):\n \"\"\"First component of eigenvector\"\"\"\n lamb = np.sqrt(\n np.power(np.sin(kx), 2) + t**2 * np.power(np.sin(ky), 2)\n + np.power(np.sin(kz), 2) + np.power(\n np.cos(kx) + np.cos(ky) + np.cos(kz) + h, 2))\n return np.divide(np.sin(kz) - 1j * (\n np.cos(kx) + np.cos(ky) + np.cos(kz) + h), lamb)\n\n\ndef u2(kx, ky, kz):\n \"\"\"Second component of eigenvector\"\"\"\n lamb = np.sqrt(\n np.power(np.sin(kx), 2) + t**2 * np.power(np.sin(ky), 2)\n + np.power(np.sin(kz), 2) + np.power(\n np.cos(kx) + np.cos(ky) + np.cos(kz) + h, 2))\n return np.divide(-np.sin(kx) + 1j * t * np.sin(ky), lamb)\n\n\ndef m_bshift(shift_axis, eigv1, eigv2):\n \"\"\"Calculate the M(k,b) for the shift in (bx, by, bz) direction for all\n (kx, ky, kz)\"\"\"\n bra_1 = np.conj(eigv1)\n bra_2 = np.conj(eigv2)\n ket_1 = np.roll(eigv1, -1, axis=shift_axis)\n ket_2 = np.roll(eigv2, -1, axis=shift_axis)\n return bra_1 * ket_1 + bra_2 * ket_2\n\n\ndef m_angle(b):\n \"\"\"Calculate the angle for all complex values of overlap matrix M\n in b direction\"\"\"\n return np.angle(Mdict[b]) + 0.0j\n\n\ndef rb(b):\n \"\"\"Calculate b*r as a function of b from M-overlap matrix\"\"\"\n return -np.sum(Mangledict[b]) / Nk**2 / (2 * pi)\n\n\ndef omega_d():\n \"\"\"Calculate gauge dependent part of Wannier spread functional\"\"\"\n omega = 0\n for ib in range(3):\n undersum = (np.power(Mangledict[ib] + (2 * pi / Nk) * rdict[ib], 2))\n omega += np.sum(undersum) / Nk / (2 * pi)**2\n return omega\n\n\ndef omega_i():\n \"\"\"Calculate gauge independent part of Wannier spread functional\"\"\"\n omega = 0\n for ib in range(3):\n undersum = 1 - np.conj(Mdict[ib]) * Mdict[ib]\n omega += np.sum(undersum) / Nk / (2 * pi)**2\n return omega\n\n\ndef steepestdecent(om, alpha, a, r):\n \"\"\"Steepest decent method for spread funct minimization\n with step alpha/3\n absolute error a\n and relative error r\"\"\"\n omnew = minimstep(om, alpha)\n count = 1\n while abs((omnew - om) / om) > r and abs(omnew) > a and count < 3:\n om = omnew\n omnew = minimstep(om, alpha)\n count += 1\n print(count) # , omnew, Mangledict[0][29, 5, 78], np.abs(Mdict[0][29, 5, 78]))\n return omnew\n\n\ndef minimstep(om, alpha):\n \"\"\"Step in minimization procedure with multiplier alpha/3\"\"\"\n global Mdict, rdict, Mangledict\n g = 0\n for ib in range(3):\n g = g + 1j * 4 * (Mangledict[ib] + (2 * pi / Nk) * rdict[ib])\n print(np.sum(np.abs(g)) / Nk**2)\n print(np.sum(np.abs(om_deriv())) / Nk**2)\n\n om = om - alpha / 3 * np.sum(np.power(np.abs(g), 2)) / Nk / (2 * pi)**2\n u_unitary = np.exp(alpha / 3 * g) # 1 - alpha / 3 * g\n for ib in range(3):\n Mdict[ib] = (np.conjugate(u_unitary) * Mdict[ib]\n * np.roll(u_unitary, 
-1, axis=ib))\n # mdict_check = Mdict[ib] + alpha / 3 * (-g + np.roll(g, -1, axis=ib)) * Mdict[ib]\n # print(np.max(np.abs(Mdict[ib] - mdict_check)))\n Mangledict[ib] = m_angle(ib)\n rdict[ib] = rb(ib)\n om_check = omega_d()\n print(om, om_check)\n return om\n\n\ndef om_deriv():\n deriv = np.empty((Nk, Nk, Nk), dtype=complex)\n for idkx in range(Nk):\n for idky in range(Nk):\n for idkz in range(Nk):\n dw = 0.01\n mangle = Mangledict\n mangle[0][idkx, idky, idkz] += (\n -mangle[0][idkx, idky, idkz] * dw * 1j)\n mangle[0][idkx - 1, idky, idkz] += (\n mangle[0][idkx, idky, idkz] * dw * 1j)\n mangle[1][idkx, idky, idkz] += (\n -mangle[1][idkx, idky, idkz] * dw * 1j)\n mangle[1][idkx, idky - 1, idkz] += (\n mangle[1][idkx, idky, idkz] * dw * 1j)\n mangle[2][idkx, idky, idkz] += (\n -mangle[2][idkx, idky, idkz] * dw * 1j)\n mangle[2][idkx, idky, idkz - 1] += (\n mangle[2][idkx, idky, idkz] * dw * 1j)\n\n r = {ib: rb(ib) for ib in range(3)}\n deriv[idkx, idky, idkz] = (\n (omega_d_loc(mangle, r) - omega_d()) / dw)\n\n return deriv\n\n\ndef omega_d_loc(mangle, r):\n omega = 0\n for ib in range(3):\n undersum = (np.power(mangle[ib] + (2 * pi / Nk) * r[ib], 2))\n omega += np.sum(undersum) / Nk / (2 * pi) ** 2\n return omega\n\n\nh = 3.1\nt = 1\nNk = 50\n\n# Set the meshgrid\nKx = np.linspace(0, 2*pi, Nk + 1)\nKy = np.linspace(0, 2*pi, Nk + 1)\nKz = np.linspace(0, 2*pi, Nk + 1)\n# Include the border of the BZ only once\nKx = Kx[0:-1]\nKy = Ky[0:-1]\nKz = Kz[0:-1]\n\n[KKx, KKy, KKz] = np.meshgrid(Kx, Ky, Kz, indexing='ij')\n\n# Calculate eigenvector on a grid\nU1 = u1(KKx, KKy, KKz)\nU2 = u2(KKx, KKy, KKz)\n\n# ImlogMbx = m_bshift(0, U1, U2)\n# ImlogMby = m_bshift(1, U1, U2)\n# ImlogMbz = m_bshift(2, U1, U2)\n\n# Create a dictionary of the overlap matrices for all K on grid\n# Keys of the dictionary correspond to different b\nMdict = {ib: m_bshift(ib, U1, U2) for ib in range(3)}\n\n# The dictionary of the angles of the matrices M\nMangledict = {ib: m_angle(ib) for ib in range(3)}\n\n# Calculate b*r for each b and write in the dictionary\nrdict = {ib: rb(ib) for ib in range(3)}\n\nprint(rdict)\n\n# Calculate initial spread functionals\nOmI = omega_i()\nOmD = omega_d()\n\n# Steepest decent:\n# Alpha = 0.2\n# epsa = 0.000001\n# epsr = 0.0001\n# OmDmin = steepestdecent(OmD, Alpha, epsa, epsr)\nOmDmin = OmD\n\n# with open(\n# 'Results/SpreadFunctional/'\n# 'WannierSpreadFunctional_h{0}t{1}_N{2}.txt'.format(\n# h, t, Nk), 'wb') as f:\n# json.dump(rdict, f)\n\nprint(OmD, OmDmin, OmI)\n","sub_path":"WannierSpreadFunctional.py","file_name":"WannierSpreadFunctional.py","file_ext":"py","file_size_in_byte":5957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"553031993","text":"#\n# Title:split_directory.py\n# Description:process eoddata split directory\n# Development Environment:OS X 10.9.3/Python 2.7.7\n# Legalise:Copyright (C) 2014 Digital Burro, INC.\n# Author:G.S. 
Cole (guycole at gmail dot com)\n#\nimport os\n\nimport parse_score\nimport pickle_helper\n\nfrom mythic_archive_core import eoddatafile\nfrom mythic_archive_core import mythic_archive_sql\n\n\nclass SplitDirectory:\n def __init__(self, taskId, rawDirectory, sqlWrapper):\n self.taskId = taskId\n self.splitDirectory = \"%s/Splits\" % rawDirectory\n self.sqlWrapper = sqlWrapper\n\n def execute(self):\n parseScore = parse_score.ParseScore()\n\n loadLog = mythic_archive_sql.LoadLog(self.sqlWrapper)\n\n rawFiles = os.listdir(self.splitDirectory)\n for fileName in rawFiles:\n if fileName == '.listing':\n continue\n if fileName == 'readme.txt':\n continue\n if fileName == 'terms.txt':\n continue\n\n fullName = \"%s/%s\" % (self.splitDirectory, fileName)\n\n pickleHelper = pickle_helper.PickleHelper()\n mustLoad = pickleHelper.modifyTest(self.taskId, fullName, self.sqlWrapper)\n if mustLoad:\n eodDataFile = eoddatafile.EodDataFile(fullName)\n loadLog.insertLog(self.taskId, eodDataFile.market, eodDataFile.directory, fileName, fullName, 0, 0, 0, 0, 0)\n parseScore.addSuccess()\n else:\n parseScore.addFailure()\n\n return parseScore\n","sub_path":"stage_aws/split_directory.py","file_name":"split_directory.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"316364778","text":"from django.shortcuts import render, HttpResponse\nfrom project.models import Project\nfrom .utils import get_top_projects_for_3_months\nimport locale\nimport sys\nfrom .utils import review_email_notification\nfrom project.models import Review\n\ndef index(request):\n # Last 3 projects\n last_three_projects = Project.objects.filter(is_posted=True).order_by('-id')[:4]\n last_three_projects_asc = reversed(last_three_projects)\n\n # Most popular projects (3 months)\n top_projects_ids = get_top_projects_for_3_months()\n top_projects = Project.objects.filter(id__in=top_projects_ids)\n\n # Paid query\n return render(request, 'index.html', locals())\n\n\ndef about(request):\n top_projects_ids = get_top_projects_for_3_months()\n top_projects = Project.objects.filter(id__in=top_projects_ids)\n return render(request, 'about.html', locals())\n\n\ndef custom404(request):\n return render(request, '404.html', status=404)\n\n\ndef custom500(request):\n # exception_type, exception_value, exception_traceback = sys.exc_info()\n return render(request, '500.html', status=500)\n\n\ndef view_locale(request):\n loc_info = \"getlocale: \" + str(locale.getlocale()) + \\\n \"
getdefaultlocale(): \" + str(locale.getdefaultlocale()) + \\\r\n               \"fs_encoding: \" + str(sys.getfilesystemencoding()) + \\\r\n               \"sys default encoding: \" + str(sys.getdefaultencoding())\r\n    \"
sys default encoding: \" + str(sys.getdefaultencoding())\n\n return HttpResponse(loc_info)\n\n\ndef test_email(request):\n review = Review.objects.get(pk=7)\n review_email_notification(review.project_id, review)\n return HttpResponse()","sub_path":"upgrademystartup/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"577517933","text":"# -*- coding: utf-8 -*-\n'''\n An implementation of sequence to sequence learning\n for performing ensemble morphosyntactic analyses\n'''\nfrom __future__ import print_function\nfrom keras.models import Sequential\nfrom keras import layers\nimport numpy as np\nfrom six.moves import range\nfrom prepare_data import SawarefData\nfrom vis import SawarefVis\nfrom character_table import colors, CharacterTable\n\n\nMYPATH = \"/morpho/output/\"\n# Parameters for the model and dataset.\nTRAINING_SIZE = 50000\nEPOCHS = 3\n# DIGITS = 3\n# REVERSE = True\n# Try replacing GRU, or SimpleRNN.\nRNN = layers.LSTM\nHIDDEN_SIZE = 128\nBATCH_SIZE = 128\nLAYERS = 1\nEMBEDDINGS = 100\nITERATIONS = 10\nREVERSE = False\n\nsawarefData = SawarefData(MYPATH, EMBEDDINGS)\n\nquestions, expected, _, SENTLEN = sawarefData.get2DSentenceJoinedFeatures(\n REVERSE, skipNAs=False)\nquestions = sawarefData.removeAlignment(questions, SENTLEN)\n# questions_padded = pad_sequences(questions)\n# expected_padded = pad_sequences(expected)\n\nctable_x = CharacterTable(\n set(\"-\").union(set([xx for x in questions for xx in x])))\n\nctable_y = CharacterTable(\n set(\"-\").union(set([xx for x in expected for xx in x])))\n\n# Maximum length of input is 'int + int' (e.g., '345+678'). Maximum length of\n# int is DIGITS.\n# MAXLEN = DIGITS + 1 + DIGITS\n\n\nprint('Total ayat questions:', len(questions))\n\nprint('Vectorization...')\nx = np.zeros((len(questions), SENTLEN,\n len(ctable_x.chars)), dtype=np.bool)\n# len(ctable_x.chars) + EMBEDDINGS), dtype=np.bool)\ny = np.zeros((len(expected), SENTLEN,\n len(ctable_y.chars)), dtype=np.bool)\nfor i, sentence in enumerate(questions):\n x[i] = ctable_x.encode(sentence, SENTLEN)\n # x[i] = np.concatenate((ctable_x.encode([sentence], SENTLEN),\n # np.array([embeddings[i]])), 1)\nfor i, sentence in enumerate(expected):\n y[i] = ctable_y.encode(sentence, SENTLEN)\n# Shuffle (x, y) in unison as the later parts of x will almost all be larger\n# digits.\nindices = np.arange(len(y))\nnp.random.shuffle(indices)\nx = x[indices]\ny = y[indices]\n\n# Explicitly set apart 10% for validation data that we never train over.\nsplit_at = len(x) - len(x) // 10\n(x_train, x_val) = x[:split_at], x[split_at:]\n(y_train, y_val) = y[:split_at], y[split_at:]\n\nprint('Training Data:')\nprint(x_train.shape)\nprint(y_train.shape)\n\nprint('Validation Data:')\nprint(x_val.shape)\nprint(y_val.shape)\n\nprint('Build model...')\nmodel = Sequential()\n# \"Encode\" the input sequence using an RNN, producing an output of HIDDEN_SIZE.\n# Note: In a situation where your input sequences have a variable length,\n# use input_shape=(None, num_feature).\nmodel.add(layers.Bidirectional(\n RNN(HIDDEN_SIZE),\n input_shape=(None, len(ctable_x.chars))))\n# input_shape=(None, len(ctable_x.chars) + EMBEDDINGS)))\n# As the decoder RNN's input, repeatedly provide with the last hidden state of\n# RNN for each time step. 
Repeat 'DIGITS + 1' times as that's the maximum\n# length of output, e.g., when DIGITS=3, max output is 999+999=1998.\nmodel.add(layers.Dropout(0.5))\nmodel.add(layers.RepeatVector(SENTLEN))\n# The decoder RNN could be multiple layers stacked or a single layer.\nfor _ in range(LAYERS):\n # By setting return_sequences to True, return not only the last output but\n # all the outputs so far in the form of (num_samples, timesteps,\n # output_dim). This is necessary as TimeDistributed in the below expects\n # the first dimension to be the timesteps.\n model.add(RNN(HIDDEN_SIZE, return_sequences=True))\n\n# Apply a dense layer to the every temporal slice of an input. For each of step\n# of the output sequence, decide which character should be chosen.\nmodel.add(layers.TimeDistributed(\n layers.Dense(len(ctable_y.chars))))\nmodel.add(layers.Activation('softmax'))\nmodel.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy', 'sparse_categorical_accuracy'])\nmodel.summary()\n\n\n# Train the model each generation and show predictions against the validation\n# dataset.\nfor iteration in range(1, ITERATIONS + 1):\n print()\n print('-' * 50)\n print('Iteration', iteration)\n history = model.fit(x_train, y_train,\n batch_size=BATCH_SIZE,\n epochs=EPOCHS,\n validation_data=(x_val, y_val))\n # Select 10 samples from the validation set at random so we can visualize\n # errors.\n\n for i in range(10):\n ind = np.random.randint(0, len(x_val))\n rowx, rowy = x_val[np.array([ind])], y_val[np.array([ind])]\n preds = model.predict_classes(rowx, verbose=0)\n q = ctable_x.decode(rowx[0])\n correct = ctable_y.decode(rowy[0])\n guess = ctable_y.decode(preds[0], calc_argmax=False)\n # print('Q', q[::-1] if REVERSE else q, end=' ')\n print('Q', q, end=' ')\n print('T', correct, end=' ')\n if correct == guess:\n print(colors.ok + '☑' + colors.close, end=' ')\n else:\n print(colors.fail + '☒' + colors.close, end=' ')\n print(guess)\n\n\ny_pred = []\ny_actual = []\nfor i in range(len(y_val)):\n y_actual.append(ctable_y.decode(y_val[i]))\n y_pred.append(ctable_y.decode(\n model.predict_classes(x_val[np.array([i])])[0],\n calc_argmax=False))\n # print(y_actual[i], y_pred[i])\n\nSawarefVis(y_actual, y_pred,\n ctable_y.chars)\n","sub_path":"main-sentence-seq.py","file_name":"main-sentence-seq.py","file_ext":"py","file_size_in_byte":5349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"502705865","text":"import json\nimport openpyxl\n\njsondata = open(\"路外停車資訊.json\", 'r', encoding='UTF-8')\n\ndata = jsondata.read()\ndata = json.loads(data)\n\n#宣告一個試算表\nworkbook = openpyxl.Workbook()\nworkbook.remove_sheet(workbook.get_sheet_by_name('Sheet'))\nworkbook.create_sheet('桃園市停車場資訊')\n\n#操作一個工作表\nsheet = workbook.get_sheet_by_name('桃園市停車場資訊')\n\n#print (len(data['parkingLots']))\n\n#寫入值\nsheet['A1'] = 'areaId'\nsheet['B1'] = 'areaName'\nsheet['C1'] = 'parkName'\nsheet['D1'] = 'totalSpace'\nsheet['E1'] = 'surplusSpace'\nsheet['F1'] = 'payGuide'\nsheet['G1'] = 'introduction'\nsheet['H1'] = 'address'\nsheet['I1'] = 'wgsX'\nsheet['J1'] = 'wgsY'\nsheet['K1'] = 'parkId'\nx = 0\nwhile x < len(data['parkingLots']):\n sheet.cell(row = x+2, column = 1).value = data['parkingLots'][x]['areaId']\n sheet.cell(row = x+2, column = 2).value = data['parkingLots'][x]['areaName']\n sheet.cell(row = x+2, column = 3).value = data['parkingLots'][x]['parkName']\n sheet.cell(row = x+2, column = 4).value = data['parkingLots'][x]['totalSpace']\n sheet.cell(row = x+2, column = 
5).value = data['parkingLots'][x]['surplusSpace']\n sheet.cell(row = x+2, column = 6).value = data['parkingLots'][x]['payGuide']\n sheet.cell(row = x+2, column = 7).value = data['parkingLots'][x]['introduction']\n sheet.cell(row = x+2, column = 8).value = data['parkingLots'][x]['address']\n sheet.cell(row = x+2, column = 9).value = data['parkingLots'][x]['wgsX']\n sheet.cell(row = x+2, column = 10).value = data['parkingLots'][x]['wgsY']\n sheet.cell(row = x+2, column = 11).value = data['parkingLots'][x]['parkId']\n x+=1\n\n#記得存檔歐\nworkbook.save('test1.xlsx')\n\n\n \n\n","sub_path":"d1_homework.py","file_name":"d1_homework.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"598529164","text":"#!/usr/bin/env python3\n\"\"\"Install mkcert and setup a CA.\n\nThis very simple script exists because we want it to be called along with all\nthe other scripts that require elevated permissions (sudo) and because it\nrequires a reboot after completion.\n\"\"\"\n\nimport os\nimport subprocess\n\n# Ensure we are using the best \"version\" of brew\nBREW86_PREFIX = \"/usr/local/bin/\"\nBREW_PREFIX = \"/opt/homebrew/bin/\"\nBREW_PREFIX = BREW_PREFIX if os.path.isdir(BREW_PREFIX) else BREW86_PREFIX\nBREW = BREW_PREFIX + \"brew\"\n\nresult = subprocess.run(['which', 'mkcert'], capture_output=True)\nif result.returncode != 0:\n # nss is a library that's required to make mkcert work with Firefox\n subprocess.run([BREW, 'install', 'mkcert', 'nss'], check=True)\n # The following will ask for your password\n subprocess.run(['mkcert', '-install'], check=True)\n\n print(\"\"\"\nYou have installed mkcert (used to make khanacademy.dev and \"Vitejs Directly\"\non localhost:8088 work).\n\nA CA has been added to your system and browser certificate trust stores.\n\nYou must REBOOT your machine for browsers to recognize new CA.\n\"\"\")\n","sub_path":"bin/install-mac-mkcert.py","file_name":"install-mac-mkcert.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"305347165","text":"import logging\n\nfrom django.utils import timezone\nfrom django.db import models\nfrom django.contrib.contenttypes.models import ContentType\n\nfrom . import fields\n\nlogger = logging.getLogger(__name__)\n\n\nclass Schedulable(models.Model):\n \"\"\"\n Abstract model that should be implemented\n by models that need to be scheduled.\n \"\"\"\n\n class Meta:\n abstract = True\n\n def get_scheduled_filter_args(self):\n \"\"\"\n Hook to provide the arguments to identify\n the object being operated on.\n \"\"\"\n\n return {\n 'pk': self.pk\n }\n\n def schedule(self, when=None, action=None, **kwargs):\n \"\"\"\n Schedule an update of this object.\n\n when: The date for the update.\n\n action: if provided it will be looked up\n on the implementing class and called with\n **kwargs. If action is not provided each k/v pair\n in kwargs will be set on self and then self\n is saved.\n\n kwargs: any other arguments you would like passed\n for this change. 
Saved as a json object so must cleanly\n serialize.\n \"\"\"\n\n # when is empty or passed, just save it now.\n if not when or when <= timezone.now():\n self.do_scheduled_update(action, **kwargs)\n else:\n ctype = ContentType.objects.get_for_model(self.__class__)\n Schedule(\n content_type=ctype,\n object_args=self.get_scheduled_filter_args(),\n when=when,\n action=action,\n json_args=kwargs\n ).save()\n\n def do_scheduled_update(self, action, **kwargs):\n \"\"\"\n Do the actual update.\n\n action: if provided it will be looked up\n on the implementing class and called with\n **kwargs. If action is not provided each k/v pair\n in kwargs will be set on self and then self\n is saved.\n\n kwargs: any other you passed for this update\n passed along to whichever method performs\n the update.\n \"\"\"\n\n action = getattr(self, action, None)\n if callable(action):\n return action(**kwargs)\n else:\n for k, v in kwargs.items():\n setattr(self, k, v)\n self.save()\n\n\nclass Schedule(models.Model):\n \"\"\"\n Model to store scheduled updates.\n \"\"\"\n\n content_type = models.ForeignKey(ContentType)\n object_args = fields.JSONField()\n\n when = models.DateTimeField()\n action = models.CharField(max_length=255, null=True)\n json_args = fields.JSONField()\n\n def do_updates(self):\n # Only run if we are ready\n if self.when <= timezone.now():\n klass = self.content_type.model_class()\n for obj in klass.objects.filter(**self.object_args):\n obj.do_scheduled_update(self.action, **self.json_args)\n self.delete()\n\n class Meta:\n app_label = 'scheduling'\n","sub_path":"scarlet/scheduling/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"101796969","text":"def my_round(number , num_digits):\n round_num = round(number , num_digits)\n number_string = str(number)\n number_length = len(number_string)\n \n dot_position = number_string.find('.')\n #print(dot_position)\n float_or_not = number_string.count('.')\n\n if(float_or_not):\n #print(\"Is float num\")\n num_of_digit_after_dot = number_length - dot_position -1\n #print(num_of_digit_after_dot)\n \n if(num_of_digit_after_dot < num_digits):\n #print(\"Add O's\")\n round_num = number_string + \"0\" * (num_digits - num_of_digit_after_dot)\n else:\n round_num = number_string[0 : dot_position + 1] + number_string[dot_position + 1 : dot_position + 1 + num_digits]\n\n else:\n round_num = number_string + \".\" + \"0\" * (num_digits)\n return round_num \n #return number_length\n\n#print(my_round(12.14444 , 8))\nassert my_round(10, 3) == \"10.000\"\nassert my_round(0.2345, 3) == \"0.234\"\nassert my_round(12.32, 5) == \"12.32000\"\n\nprint(\"my_round(10, 3) == \" + \"\\\"\" + my_round(10, 3) + \"\\\"\")\nprint(\"my_round(0.2345, 3) == \" + \"\\\"\" + my_round(0.2345, 3) + \"\\\"\")\nprint(\"my_round(12.32, 5) == \" + \"\\\"\" + my_round(12.32, 5) + \"\\\"\")","sub_path":"hw8/Q1.py","file_name":"Q1.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"303929252","text":"# -*- coding: utf-8 -*-\n\nimport re\nfrom six import iteritems\n\nfrom sphinx import addnodes\nfrom sphinx.directives import ObjectDescription\nfrom sphinx.domains import Domain, ObjType, Index\nfrom sphinx.locale import _, __\nfrom sphinx.roles import XRefRole\nfrom sphinx.util import logging\nfrom sphinx.util.nodes import make_refnode\n\n\nlogger = 
logging.getLogger(__name__)\n\npy_sig_re = re.compile(\n r'''^ ([\\w.]*\\.)? # class name(s)\n (\\w+) \\s* # thing name\n (?: \\(\\s*(.*)\\s*\\) # optional: arguments\n (?:\\s* -> \\s* (.*))? # return annotation\n )? $ # and nothing more\n ''', re.VERBOSE)\n\n\ndef _pseudo_parse_arglist(signode, arglist):\n paramlist = addnodes.desc_parameterlist()\n stack = [paramlist]\n try:\n for argument in arglist.split(','):\n argument = argument.strip()\n ends_open = ends_close = 0\n while argument.startswith('['):\n stack.append(addnodes.desc_optional())\n stack[-2] += stack[-1]\n argument = argument[1:].strip()\n while argument.startswith(']'):\n stack.pop()\n argument = argument[1:].strip()\n while argument.endswith(']') and not argument.endswith('[]'):\n ends_close += 1\n argument = argument[:-1].strip()\n while argument.endswith('['):\n ends_open += 1\n argument = argument[:-1].strip()\n if argument:\n stack[-1] += addnodes.desc_parameter(argument, argument)\n while ends_open:\n stack.append(addnodes.desc_optional())\n stack[-2] += stack[-1]\n ends_open -= 1\n while ends_close:\n stack.pop()\n ends_close -= 1\n if len(stack) != 1:\n raise IndexError\n except IndexError:\n signode += addnodes.desc_parameterlist()\n signode[-1] += addnodes.desc_parameter(arglist, arglist)\n else:\n signode += paramlist\n\n\nclass ComObjectBase(ObjectDescription):\n option_spec = dict()\n doc_field_types = []\n allow_nesting = False\n\n def get_signature_prefix(self, sig):\n return ''\n\n def needs_arglist(self):\n return False\n\n def handle_signature(self, sig, signode):\n m = py_sig_re.match(sig)\n if m is None:\n raise ValueError\n name_prefix, name, arglist, retann = m.groups()\n\n classname, fullname = (name_prefix.rstrip('.'), name_prefix + name) if name_prefix else ('', name)\n\n signode['class'] = classname\n signode['fullname'] = fullname\n\n if name_prefix:\n signode += addnodes.desc_addname(name_prefix, name_prefix)\n\n signode += addnodes.desc_name(name, name)\n if not arglist:\n if self.needs_arglist():\n signode += addnodes.desc_parameterlist()\n return fullname, name_prefix\n\n _pseudo_parse_arglist(signode, arglist)\n return fullname, name_prefix\n\n def get_index_text(self, modname, name):\n raise NotImplementedError('must be implemented in subclasses')\n\n def add_target_and_index(self, name_cls, sig, signode):\n fullname = name_cls[0]\n if fullname not in self.state.document.ids:\n signode['names'].append(fullname)\n signode['ids'].append(fullname)\n signode['first'] = (not self.names)\n self.state.document.note_explicit_target(signode)\n objects = self.env.domaindata['com-object']['objects']\n objects[fullname] = (self.env.docname, self.objtype)\n\n indextext = self.get_index_text(None, name_cls)\n if indextext:\n self.indexnode['entries'].append(('single', indextext, fullname, '', None))\n\n def before_content(self):\n prefix = None\n if self.names:\n (fullname, name_prefix) = self.names[-1]\n if name_prefix:\n prefix = name_prefix.strip('.')\n if prefix:\n self.env.ref_context['com-object'] = prefix\n\n def after_content(self):\n self.env.ref_context['com-object'] = None\n\n\nclass ComObjectClassmember(ComObjectBase):\n def needs_arglist(self):\n return self.objtype.endswith('method')\n\n def get_signature_prefix(self, sig):\n return ''\n\n def get_index_text(self, modname, name_cls):\n name, cls = name_cls\n result = ''\n if self.objtype == 'method':\n clsname, methname = name.rsplit('.', 1)\n result = _('{}() ({} method)'.format(methname, clsname))\n\n return result\n\n\nclass 
ComObjectXRefRole(XRefRole):\n def process_link(self, env, refnode, has_explicit_title, title, target):\n refnode['com-object'] = env.ref_context.get('com-object')\n if not has_explicit_title:\n title = title.lstrip('.')\n\n if target[0] == '.':\n target = target[1:]\n refnode['refspecific'] = True\n return title, target\n\n\nclass ComObjectIndex(Index):\n name = 'comobjectindex'\n localname = _('ComObject Index')\n shortname = _('comobjects')\n\n def generate(self, docnames=None):\n return [], False\n\n\nclass ComObjectDomain(Domain):\n name = 'com-object'\n label = 'ComObject'\n object_types = {\n 'method': ObjType(_('method'), 'meth'),\n }\n\n directives = {\n 'method': ComObjectClassmember\n }\n roles = {\n 'meth': ComObjectXRefRole(fix_parens=True),\n }\n initial_data = {\n 'objects': {}\n }\n indices = [\n ComObjectIndex,\n ]\n\n def clear_doc(self, docname):\n for fullname, (fn, _l) in list(self.data['objects'].items()):\n if fn == docname:\n del self.data['objects'][fullname]\n\n def find_obj(self, name, type, searchmode=0):\n if name[-2:] == '()':\n name = name[:-2]\n\n if not name:\n return []\n\n objects = self.data['objects']\n matches = []\n\n if searchmode == 1:\n objtypes = list(self.object_types) if type is None else self.objtypes_for_role(type)\n if objtypes is not None:\n searchname = '.' + name\n matches = [(oname, objects[oname]) for oname in objects\n if oname.endswith(searchname) and objects[oname][1] in objtypes]\n else:\n if name in objects:\n matches.append((name, objects[name]))\n return matches\n\n def resolve_xref(self, env, fromdocname, builder, type, target, node, contnode):\n searchmode = 1 if node.hasattr('refspecific') else 0\n matches = self.find_obj(target, type, searchmode)\n if not matches:\n return None\n elif len(matches) > 1:\n match_list = ', '.join(match[0] for match in matches)\n warning = 'more than one target found for cross-reference {}: {}'.format(repr(target), match_list)\n logger.warning(__(warning), type='ref', subtype='com-object', location=node)\n name, obj = matches[0]\n\n return make_refnode(builder, fromdocname, obj[0], name, contnode, name)\n\n def get_objects(self):\n for refname, (docname, type) in iteritems(self.data['objects']):\n yield (refname, refname, type, docname, refname, 1)\n\n def get_full_qualified_name(self, node):\n target = node.get('reftarget')\n return None if target is None else '.'.join(filter(None, [None, target]))\n\n\ndef setup(app):\n app.add_domain(ComObjectDomain)\n\n return {\n 'version': 'builtin',\n 'env_version': 1,\n 'parallel_read_safe': True,\n 'parallel_write_safe': True,\n }\n","sub_path":"docs/source/_extensions/CComDomain.py","file_name":"CComDomain.py","file_ext":"py","file_size_in_byte":7721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"487272617","text":"#!/usr/bin/env python\n\n# Picking up where `write_model.jl` left off\n\nimport argparse\n\nparser = argparse.ArgumentParser(description=\"Convert model visibilities in HDF5 format to ALMA NPZ save files.\")\nparser.add_argument(\"--fname-model\", default=\"model.hdf5\", help=\"The name of the model visibilities HDF5 file.\")\nparser.add_argument(\"--fname-resid\", default=\"resid.hdf5\", help=\"The name of the residual visibilities HDF5 file.\")\nparser.add_argument(\"--descending\", action=\"store_true\", help=\"Should the frequencies be packed in a descending order (e.g., 13CO)?\")\nparser.add_argument(\"--out-model\", default=\"model.vis.npz\", help=\"The output file for the 
model.\")\nparser.add_argument(\"--out-resid\", default=\"resid.vis.npz\", help=\"The output file for the residuals.\")\nargs = parser.parse_args()\n\n\nfrom astropy.io import fits\nimport h5py\nimport numpy as np\nimport shutil\n\ncc = 2.99792458e10 # [cm s^-1]\n\n# Read all of the data from the HDF5 file\nfid = h5py.File(args.fname_model, \"r\")\n\nlams = fid[\"lams\"][:] * 1e-6 # [m]\nuu = fid[\"uu\"][:,:] # [klam]\nvv = fid[\"vv\"][:,:] # [klam]\nreal = fid[\"real\"][:,:] # [Jy]\nimag = fid[\"imag\"][:,:] # [Jy]\nweight = fid[\"invsig\"][:,:]**2\nfid.close()\n\n# Convert u and v from kilo-lambda back to meters\nu = uu[0,:] * 1e3 * lams[0] # [m]\nv = vv[0,:] * 1e3 * lams[0] # [m]\n\n\n# This means we will have to reverse the order of the real, imaginary, and weights\n\n# This file has categories\n# ['Re', 'Wt', 'u', 'Im', 'v']\n\n# len(data[\"u\"]) => 24555\n# len(data[\"v\"]) => 24555\n# data['Re'].shape => (50, 24555)\n# data['Im'].shape => (50, 24555)\n# data['Wt'].shape => (50, 24555)\n\n# Sean delivered the data set as an NPZ file.\n# I kept everything in increasing *wavelength* order\n# He kept everything in increasing *frequency* order\n\nif args.descending:\n # Therefore, if he gave me a dataset that is with frequency decreasing, don't need to do anything (e.g. 13CO).\n print(\"Keeping the model in frequency descending order.\")\n np.savez(args.out_model, u=u, v=v, Re=real[:, :], Im=imag[:, :], Wt=weight[:, :] )\nelse:\n # But if he gave me a dataset with frequency increasing, then I need to flip the order here (e.g., 12CO).\n print(\"Flipping the model to frequency increasing order.\")\n np.savez(args.out_model, u=u, v=v, Re=real[::-1, :], Im=imag[::-1, :], Wt=weight[::-1, :] )\n\n\n# Now repeat everything for the residuals\n# Read all of the data from the HDF5 file\nfid = h5py.File(args.fname_resid, \"r\")\n\nlams = fid[\"lams\"][:] * 1e-6 # [m]\nuu = fid[\"uu\"][:,:] # [klam]\nvv = fid[\"vv\"][:,:] # [klam]\nreal = fid[\"real\"][:,:] # [Jy]\nimag = fid[\"imag\"][:,:] # [Jy]\nweight = fid[\"invsig\"][:,:]**2\nfid.close()\n\n# Convert u and v from kilo-lambda back to meters\nu = uu[0,:] * 1e3 * lams[0] # [m]\nv = vv[0,:] * 1e3 * lams[0] # [m]\n\nif args.descending:\n # Therefore, if he gave me a dataset that is with frequency decreasing, don't need to do anything (e.g. 
13CO).\n print(\"Keeping the residuals in frequency descending order.\")\n np.savez(args.out_resid, u=u, v=v, Re=real[:, :], Im=imag[:, :], Wt=weight[:, :] )\nelse:\n # But if he gave me a dataset with frequency increasing, then I need to flip the order here (e.g., 12CO).\n print(\"Flipping the residuals to frequency increasing order.\")\n np.savez(args.out_resid, u=u, v=v, Re=real[::-1, :], Im=imag[::-1, :], Wt=weight[::-1, :] )\n","sub_path":"scripts/write_ALMA.py","file_name":"write_ALMA.py","file_ext":"py","file_size_in_byte":3319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"177469290","text":"import socket\nimport re\nimport ssl,time,os,struct,tkinter\nimport tkinter.messagebox  # needed for the showinfo() calls below\n\nclass client_ssl:\n    def __init__(self,addr='127.0.0.1',port='6666'):\n        self.ssock = socket.create_connection((addr, port))\n        # All files currently on the server: now_subfiles holds subdirectories, now_files holds regular files.\n        self.now_files=[]\n        self.now_subfiles=[]\n        # Tracks the server-side path currently being operated on.\n        self.server_path='./'\n        self.client_path='./ClientCache/'\n\n    def login(self,username,password):\n        # Pack the header, then the user credentials, for login.\n        data_pack=username+';'+password\n        data_pack=data_pack.encode('utf-8')\n        head_info=struct.pack('II',5,len(data_pack))\n        self.ssock.send(head_info)\n        self.ssock.send(data_pack)\n        print('Logging in...')\n        buf = self.ssock.recv(8)\n\n        if buf: # Without this check we would fall through to the next statement as soon as the first transfer finished.\n            ret_type=struct.unpack('I',buf[0:4])[0]\n            data_size=struct.unpack('I',buf[4:8])[0]\n            if ret_type == 0:\n                return False\n            ret_data=self.ssock.recv(data_size).decode('utf-8')\n            all_subfiles,all_files=re.split(';',ret_data)\n            self.all_subfiles=re.split(':',all_subfiles)\n            self.all_files=re.split(':',all_files)\n            return True\n        else:\n            return False\n\n    def upload(self,file_path):\n        if os.path.isfile(file_path):\n\n            # First send an upload request packet.\n            file_name=os.path.basename(file_path).encode('utf-8')\n            req_head=struct.pack('II',12,len(file_name))\n            self.ssock.send(req_head)\n            self.ssock.send(file_name)\n\n            buf=self.ssock.recv(8)\n            if not buf:\n                print('no return from server!')\n                return False\n            ret_type=struct.unpack('I',buf[0:4])[0]\n            if ret_type != 1:\n                print('server denied request!')\n                return False\n            # The server granted the request, start transferring the file.\n            file_size=os.stat(file_path).st_size\n            send_head=struct.pack('II',16,file_size)\n            self.ssock.send(send_head)\n            # Send the file data in chunks.\n            fo = open(file_path, 'rb')\n            while True:\n                file_data = fo.read(1024)\n                if not file_data:\n                    break\n                self.ssock.send(file_data)\n            fo.close()\n            print('send over...')\n            tkinter.messagebox.showinfo('Notice', message='Upload succeeded')\n            #self.ssock.close()\n        else:\n            print('ERROR: not a regular file')\n\n    def update(self):\n        # Refresh the local copy of the current server-side directory listing.\n        req_head=struct.pack('II',13,0)\n        self.ssock.send(req_head)\n\n        buf=self.ssock.recv(8)\n        if buf:\n            # Read the server response.\n            ret_type=struct.unpack('I',buf[0:4])[0]\n            data_size=struct.unpack('I',buf[4:8])[0]\n            if ret_type == 0:\n                return False\n            # Split out subdirectory and file names and store them separately.\n            file_data=self.ssock.recv(data_size)\n            file_str=file_data.decode('utf-8')\n            subfiles,files=re.split(';',file_str)\n            self.now_subfiles=re.split(':',subfiles)\n            self.now_files=re.split(':',files)\n            print('Successfully refreshed directory listing')\n        else:\n            return False\n\n    def newsubfile(self,subfile_name):\n        return self.readsubfile(subfile_name,13)\n\n    def readsubfile(self,subfile_name,pack_type=12):\n        # Enter a subdirectory of the current server-side directory.\n        subfile_name_data=subfile_name.encode('utf-8')\n\n        req_head=struct.pack('II',pack_type,len(subfile_name_data))\n        self.ssock.send(req_head)\n        self.ssock.send(subfile_name_data)\n\n        buf=self.ssock.recv(8)\n        if buf:\n            # Read the server response.\n            ret_type=struct.unpack('I',buf[0:4])[0]\n            data_size=struct.unpack('I',buf[4:8])[0]\n            if ret_type == 0:\n                return False\n            # Split out subdirectory and file names and store them separately.\n            file_data=self.ssock.recv(data_size)\n            file_str=file_data.decode('utf-8')\n            subfiles,files=re.split(';',file_str)\n            self.now_subfiles=re.split(':',subfiles)\n            self.now_files=re.split(':',files)\n            print('Successfully read subdir: '+subfile_name)\n        else:\n            return False\n\n\n    def download(self, file_name):\n        # The header contains the file name and file size.\n        # First send a download request packet.\n\n        file_name_data=file_name.encode('utf-8')\n        req_head=struct.pack('II',13,len(file_name_data))\n        self.ssock.send(req_head)\n        self.ssock.send(file_name_data)\n\n        buf1=self.ssock.recv(8)\n        if not buf1:\n            print('no return from server!')\n            return False\n        ret_type=struct.unpack('I',buf1[0:4])[0]\n        if ret_type != 1:\n            print('server denied request!')\n            return False\n        buf2=self.ssock.recv(8)\n\n        if buf2: # Without this check we would fall through to the next statement as soon as the first transfer finished.\n            ret_type=struct.unpack('I',buf2[0:4])[0]\n            file_size=struct.unpack('I',buf2[4:8])[0]\n            if ret_type == 16:\n\n                file_path = os.path.join('./ClientDownload/', file_name)\n                print('file new name is %s, filesize is %s' % (file_path, file_size))\n                recvd_size = 0 # Number of bytes received so far.\n                file = open(file_path, 'wb')\n                print('start receiving...')\n                while recvd_size < file_size:\n                    if file_size - recvd_size > 1024:\n                        rdata = self.ssock.recv(1024)\n                        recvd_size += len(rdata)\n                    else:\n                        rdata = self.ssock.recv(file_size - recvd_size)\n                        recvd_size += len(rdata)\n                    file.write(rdata)\n                file.close()\n                print('receive done')\n\n                tkinter.messagebox.showinfo('Notice',message='Download succeeded: ' + file_name)\n                return True\n\n        else:\n            return False\n\n\n    def register(self,user_name,password):\n        # Pack the header, then the user credentials, for registration.\n        data_pack=user_name+';'+password\n        data_pack=data_pack.encode('utf-8')\n        head_info=struct.pack('II',4,len(data_pack))\n        self.ssock.send(head_info)\n        self.ssock.send(data_pack)\n        print('Under registration...')\n        buf = self.ssock.recv(8)\n\n        if buf: # Without this check we would fall through to the next statement as soon as the first transfer finished.\n            ret_type=struct.unpack('I',buf[0:4])[0]\n            data_size=struct.unpack('I',buf[4:8])[0]\n            if ret_type == 0:\n                return False\n            ret_data=self.ssock.recv(data_size).decode('utf-8')\n            all_subfiles,all_files=re.split(';',ret_data)\n            self.all_subfiles=re.split(':',all_subfiles)\n            self.all_files=re.split(':',all_files)\n            return True\n        else:\n            return False\n\n\n\n\nif __name__ == \"__main__\":\n    client = client_ssl()\n    filepath = 'SecureTransfer-master/Project/cer/server/server.crt'\n    client.login('lindada','lindada')\n    client.upload(filepath)\n","sub_path":"client_socket_no_ssl.py","file_name":"client_socket_no_ssl.py","file_ext":"py","file_size_in_byte":7577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"74396931","text":"import math\r\nimport numpy as np\r\n\r\nx1 = -1.3\r\nx2 = math.sqrt(3)\r\nx3 = (1+2+3)*4/5\r\n\r\nx = [x1, x2, x3]\r\n\r\nx4 = abs(x1)\r\n\r\nx.append(x4)\r\n\r\ny = [1, 2, 3, 4]\r\n\r\n# Transpose of y (a no-op for a 1-D array)\r\naux_y = np.transpose(y)\r\n\r\nprint(np.concatenate((x, y), axis=0))","sub_path":"EPC0/EPC0_q1.py","file_name":"EPC0_q1.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"86107426","text":"# Copyright 2020 Soil, Inc.\n# Copyright 2011 OpenStack Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport functools\nimport six\n\nfrom soil.api import extensions\nfrom soil.api import server\nfrom soil.api.server import wsgi\nfrom soil.api import versions\nfrom soil.api.v1.openstack.compute import instances\nfrom soil.api.v1.openstack.network import networks\nfrom soil.api.v1.vmware import vcenter\n\n\ndef _create_controller(main_controller, action_controller_list):\n \"\"\"This is a helper method to create controller with a\n list of action controller.\n \"\"\"\n\n controller = wsgi.Resource(main_controller())\n for ctl in action_controller_list:\n controller.register_actions(ctl())\n return controller\n\n\nversion_controller = functools.partial(_create_controller,\n versions.VersionsController, [])\n\ninstances_controller = functools.partial(_create_controller,\n instances.InstancesController, [])\n\nnetworks_controller = functools.partial(_create_controller,\n networks.NetworksController, [])\n\nvcenter_controller = functools.partial(_create_controller,\n vcenter.vCenterController, [])\n\n\nROUTE_LIST = (\n ('', '/'),\n ('/', {\n 'GET': [version_controller, 'all']\n }),\n ('/versions', {\n 'GET': [version_controller, 'index']\n }),\n ('/osp/instances', {\n 'GET': [instances_controller, 'index'],\n 'POST': [instances_controller, 'create'],\n }),\n ('/osp/networks', {\n 'POST': [networks_controller, 'create'],\n }),\n ('/vmware/vcenter', {\n 'GET': [vcenter_controller, 'index']\n })\n)\n\n\nclass APIRouter(server.APIRouter):\n ExtensionManager = extensions.ExtensionManager\n\n def _setup_routes(self, mapper):\n for path, methods in ROUTE_LIST:\n if isinstance(methods, six.string_types):\n mapper.redirect(path, methods)\n continue\n\n for method, controller_info in methods.items():\n controller = controller_info[0]()\n action = controller_info[1]\n mapper.create_route(path, method, controller, action)\n","sub_path":"soil/soil/api/v1/router.py","file_name":"router.py","file_ext":"py","file_size_in_byte":2769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"379372766","text":"# -*- coding: utf-8 -*-\nimport math\nimport numpy as np\nimport chainer, os, collections, six\nfrom chainer import cuda, Variable, optimizers, serializers, function, optimizer\nfrom chainer.utils import type_check\nfrom chainer import functions as F\nfrom chainer import links as L\nimport aae\nfrom aae import activations\n\nclass Conf(aae.Conf):\n\tdef __init__(self):\n\t\tsuper(Conf, self).__init__()\n\t\t# number of category\n\t\tself.ndim_y = 10\n\nclass AAE(aae.AAE):\n\n\tdef build_discriminator_z(self):\n\t\tconf = self.conf\n\n\t\tdiscriminator_z_attributes = {}\n\t\tdiscriminator_z_units = zip(conf.discriminator_z_hidden_units[:-1], conf.discriminator_z_hidden_units[1:])\n\t\tdiscriminator_z_units += [(conf.discriminator_z_hidden_units[-1], 2)]\n\t\tfor i, (n_in, n_out) in enumerate(discriminator_z_units):\n\t\t\tdiscriminator_z_attributes[\"layer_%i\" % i] = L.Linear(n_in, n_out, wscale=conf.wscale)\n\t\t\tif conf.batchnorm_before_activation:\n\t\t\t\tdiscriminator_z_attributes[\"batchnorm_%i\" % i] = 
L.BatchNormalization(n_out)\n\t\t\telse:\n\t\t\t\tdiscriminator_z_attributes[\"batchnorm_%i\" % i] = L.BatchNormalization(n_in)\n\t\tdiscriminator_z_attributes[\"layer_merge_z\"] = L.Linear(conf.ndim_z, conf.discriminator_z_hidden_units[0], wscale=conf.wscale)\n\t\tdiscriminator_z_attributes[\"layer_merge_y\"] = L.Linear(conf.ndim_y, conf.discriminator_z_hidden_units[0], wscale=conf.wscale)\n\t\tif conf.batchnorm_before_activation:\n\t\t\tdiscriminator_z_attributes[\"batchnorm_merge\"] = L.BatchNormalization(conf.discriminator_z_hidden_units[0])\n\t\telse:\n\t\t\tdiscriminator_z_attributes[\"batchnorm_merge\"] = L.BatchNormalization(conf.ndim_z)\n\n\t\tdiscriminator_z = SoftmaxClassifier(**discriminator_z_attributes)\n\t\tdiscriminator_z.n_layers = len(discriminator_z_units)\n\t\tdiscriminator_z.activation_function = conf.discriminator_z_activation_function\n\t\tdiscriminator_z.apply_dropout = conf.discriminator_z_apply_dropout\n\t\tdiscriminator_z.apply_batchnorm = conf.discriminator_z_apply_batchnorm\n\t\tdiscriminator_z.apply_batchnorm_to_input = conf.discriminator_z_apply_batchnorm_to_input\n\t\tdiscriminator_z.batchnorm_before_activation = conf.batchnorm_before_activation\n\n\t\tif conf.gpu_enabled:\n\t\t\tdiscriminator_z.to_gpu()\n\n\t\treturn discriminator_z\n\n\tdef loss_generator_x_z(self, x, y, noise=None):\n\t\txp = self.xp\n\n\t\t# We fool discriminator into thinking that z_fake comes from the true prior distribution. \n\t\tif isinstance(self.generator_x_z, aae.UniversalApproximatorGenerator):\n\t\t\tz_fake = self.generator_x_z(x, test=False, apply_f=True, noise=noise)\n\t\telse:\n\t\t\tz_fake = self.generator_x_z(x, test=False, apply_f=True)\n\t\tp_fake = self.discriminator_z(z_fake, y, test=False, softmax=False)\n\n\t\t# 0: Samples from true distribution\n\t\t# 1: Samples from generator\n\t\tloss = F.softmax_cross_entropy(p_fake, Variable(xp.zeros(p_fake.data.shape[0], dtype=np.int32)))\n\n\t\treturn loss\n\n\tdef train_generator_x_z(self, x, y, noise=None):\n\t\tloss = self.loss_generator_x_z(x, y, noise=noise)\n\n\t\tself.zero_grads()\n\t\tloss.backward()\n\t\tself.update_generator()\n\n\t\tif self.gpu_enabled:\n\t\t\tloss.to_cpu()\n\n\t\treturn float(loss.data)\n\n\tdef loss_discriminator_z(self, x, y, z_true, noise=None):\n\t\txp = self.xp\n\n\t\t# z_true came from true prior distribution\n\t\tp_true = self.discriminator_z(z_true, y, test=False, softmax=False)\n\n\t\t# 0: Samples from true distribution\n\t\t# 1: Samples from generator\n\t\tloss_true = F.softmax_cross_entropy(p_true, Variable(xp.zeros(p_true.data.shape[0], dtype=np.int32)))\n\n\t\t# z_fake was generated by generator\n\t\tif isinstance(self.generator_x_z, aae.UniversalApproximatorGenerator):\n\t\t\tz_fake = self.generator_x_z(x, test=False, apply_f=True, noise=noise)\n\t\telse:\n\t\t\tz_fake = self.generator_x_z(x, test=False, apply_f=True)\n\t\tp_fake = self.discriminator_z(z_fake, y, test=False, softmax=False)\n\t\tloss_fake = F.softmax_cross_entropy(p_fake, Variable(xp.ones(p_fake.data.shape[0], dtype=np.int32)))\n\n\t\treturn loss_true + loss_fake\n\n\tdef train_discriminator_z(self, x, y, z_true, noise=None):\n\t\tloss = self.loss_discriminator_z(x, y, z_true, noise=noise)\n\n\t\tself.zero_grads()\n\t\tloss.backward()\n\t\tself.update_discriminator()\n\n\t\tif self.gpu_enabled:\n\t\t\tloss.to_cpu()\n\n\t\treturn float(loss.data) / 2.0\n\nclass SoftmaxClassifier(aae.SoftmaxClassifier):\n\n\tdef forward_one_step(self, z, y, test=False):\n\t\tf = activations[self.activation_function]\n\n\t\tif 
self.apply_batchnorm_to_input:\n\t\t\tif self.batchnorm_before_activation:\n\t\t\t\tmerged_input = f(self.batchnorm_merge(self.layer_merge_z(z) + self.layer_merge_y(y), test=test))\n\t\t\telse:\n\t\t\t\tmerged_input = f(self.layer_merge_z(self.batchnorm_merge(z, test=test)) + self.layer_merge_y(y))\n\t\telse:\n\t\t\tmerged_input = f(self.layer_merge_z(z) + self.layer_merge_y(y))\n\n\t\tchain = [merged_input]\n\t\t\n\t\tfor i in range(self.n_layers):\n\t\t\tu = chain[-1]\n\t\t\tif self.batchnorm_before_activation:\n\t\t\t\tu = getattr(self, \"layer_%i\" % i)(u)\n\t\t\tif i == 0:\n\t\t\t\tif self.apply_batchnorm_to_input:\n\t\t\t\t\tu = getattr(self, \"batchnorm_%d\" % i)(u, test=test)\n\t\t\telif i == self.n_layers - 1:\n\t\t\t\tif self.apply_batchnorm and self.batchnorm_before_activation == False:\n\t\t\t\t\tu = getattr(self, \"batchnorm_%d\" % i)(u, test=test)\n\t\t\telse:\n\t\t\t\tif self.apply_batchnorm:\n\t\t\t\t\tu = getattr(self, \"batchnorm_%d\" % i)(u, test=test)\n\t\t\tif self.batchnorm_before_activation == False:\n\t\t\t\tu = getattr(self, \"layer_%i\" % i)(u)\n\t\t\tif i == self.n_layers - 1:\n\t\t\t\toutput = u\n\t\t\telse:\n\t\t\t\toutput = f(u)\n\t\t\t\tif self.apply_dropout:\n\t\t\t\t\toutput = F.dropout(output, train=not test)\n\t\t\tchain.append(output)\n\n\t\treturn chain[-1]\n\n\tdef __call__(self, z, y, test=False, softmax=True):\n\t\toutput = self.forward_one_step(z, y, test=test)\n\t\tif softmax:\n\t\t\treturn F.softmax(output)\n\t\treturn output","sub_path":"aae_regularized.py","file_name":"aae_regularized.py","file_ext":"py","file_size_in_byte":5603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"645498380","text":"from Bio import SeqIO\nfrom Bio import SeqUtils\nfrom os.path import exists\nfrom _collections import defaultdict\nfrom multiprocessing import Pool\n\nimport os\nimport re\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport itertools as it\n\n\nSNP_METH_DIR = \"/home/raian/Bioinformatics_institute/RESEARCH/\" \\\n \"spring2015_bees/arat/data/snp_and_methylation/\"\n \nSNP_FILENAME = sys.argv[1] #SNP_METH_DIR + \"ct_snps.bed\" # sys.argv[0]\nMETH_FILENAME = sys.argv[2] #SNP_METH_DIR + \"497.methratio.txt\" # sys.argv[1] \nAMEL_FILENAME = sys.argv[3] #SNP_METH_DIR + \"amel.fasta\"\nWINDOW_SIZE = 1000\n\ndef occurences_chh(string):\n count = 0\n ind = 0\n while ind < WINDOW_SIZE - 3:\n if string[ind] == 'C' or string[ind] == 'G':\n count += 1\n ind += 1\n return count \n\n\ndef occurences_chg(string):\n count = 0\n ind = 0\n while ind < WINDOW_SIZE - 3:\n if string[ind] == 'C' and string[ind+2] == 'G':\n count += 1\n ind += 1 \t\n\n return count \n\n\ndef occurrences_cg(string):\n count = 0\n ind = 0\n while ind < WINDOW_SIZE - 2:\n if string[ind] == 'C' and string[ind+1] == 'G':\n count += 1\n ind += 2\n else:\n ind += 1 \t\n\n return count \n\n\ndef process_record(seq, name, snpl, methl, pstdout):\n print(\"INFO: Processing %s...\" % name)\n sys.stdout.flush()\n\n snp_cg_meth = []\n snp_chg_meth = []\n snp_chh_meth = []\n\n seq_len = len(seq)\n start = 0\n end = WINDOW_SIZE\n \n while end <= seq_len:\n snp_cnt = 0\n meth_cg_cnt = 0\n meth_chg_cnt = 0\n meth_chh_cnt = 0\n\n seq_window = seq[start:end]\n seqw_gc = SeqUtils.GC(seq_window)\n\n for line_split in snpl:\n try:\n if start <= (int(line_split[1]) - 1) < end:\n snp_cnt += 1\n elif (int(line_split[1]) - 1) >= end:\n break\n except:\n print(line_split) \n \n for line_split in methl:\n if start <= (int(line_split[1]) - 1) < 
end and float(line_split[4]) > 0:\n if line_split[3] == \"CG\":\n meth_cg_cnt += 1 \n elif line_split[3] == \"CHG\":\n meth_chg_cnt += 1\n else:\n meth_chh_cnt += 1 \n elif (int(line_split[1]) - 1) >= end:\n break \n \n cpg_cnt = occurrences_cg(seq_window)\n chg_cnt = occurences_chg(seq_window)\n chh_cnt = occurences_chh(seq_window) \n\n if cpg_cnt != 0:\n meth_ratio = meth_cg_cnt/(cpg_cnt*1.0)\n if meth_ratio <= 1.0:\n snp_cg_meth.append((meth_ratio, snp_cnt, seqw_gc)) \n else:\n print(\"Meth level > 1.0\") \n else:\n snp_cg_meth.append((0, snp_cnt, seqw_gc)) \n\n if chg_cnt != 0:\n meth_ratio = meth_chg_cnt/(chg_cnt*1.0)\n if meth_ratio <= 1.0:\n snp_chg_meth.append((meth_ratio, snp_cnt, seqw_gc)) \n else:\n print(\"Meth level > 1.0\") \n else:\n snp_chg_meth.append((0, snp_cnt, seqw_gc)) \n \n if chh_cnt != 0:\n meth_ratio = meth_chh_cnt/(chh_cnt*1.0)\n if meth_ratio <= 1.0:\n snp_chh_meth.append((meth_ratio, snp_cnt, seqw_gc)) \n else:\n print(\"Meth level > 1.0\") \n else:\n snp_chh_meth.append((0, snp_cnt, seqw_gc)) \n \n start += WINDOW_SIZE\n end += WINDOW_SIZE \n\n print(\"INFO: %s has been processed...\" % name)\n sys.stdout.flush()\n return (snp_cg_meth, snp_chg_meth, snp_chh_meth) \n\n\ndef build_scatter(snp_meth_level_info, context):\n print(\"Building the scatter plot for %s context...\" % context)\n plt.figure()\n plt.scatter([val[0] for val in snp_meth_level_info],\n [val[1] for val in snp_meth_level_info])\n plt.xlabel(\"Methylation level\")\n plt.ylabel(\"#C->T\")\n plt.title(\"%s context\" % context)\n plt.savefig(\"snp_data/%s.snp.meth.png\" % context)\n\n print(\"Writing regression data...\")\n with open(\"snp_data/%s.regr.txt\" % context, \"w\") as outp:\n for (meth, snp, gc) in snp_meth_level_info:\n outp.write(\"%s\\t%s\\t%s\\n\" % (meth, snp, gc)) \n \n\ndef main(): \n snp_cg_meth = []\n snp_chg_meth = []\n snp_chh_meth = []\n\n print(\"Reading SNP and methylation data...\")\n \n with open(SNP_FILENAME, \"r\") as snpf:\n snp_lines = snpf.readlines()\n with open(METH_FILENAME, \"r\") as methf:\n meth_lines = methf.readlines()[1:] \n\n\n print(\"Splitting SNPs by chr id...\")\n\n snpl = dict()\n \n prev_line = snp_lines[0].split()\n chr_snp = [prev_line]\n cnt = 0\n\n for line in snp_lines[1:]:\n lsplit = line.split()\n if prev_line[0] == lsplit[0]:\n chr_snp.append(lsplit)\n else: \n snpl[chr_snp[-1][0]] = chr_snp[:] \n chr_snp = [lsplit]\n prev_line = lsplit\n\n if len(chr_snp) > 0:\n snpl[chr_snp[-1][0]] = chr_snp \n\n print(\"Splitting methylation by chr id...\") \n\n methl = defaultdict(list) \n for line in meth_lines:\n methl[line.split()[0]].append(line.split()) \n\n p = Pool(processes=8)\n print(\"Applying...\")\n output = [p.apply_async(process_record, args=(record.seq, record.name, \n snpl[record.name], methl[record.name], \n sys.stdout)) \n for record in SeqIO.parse(open(AMEL_FILENAME), \"fasta\")]\n print(\"Getting individual results...\") \n results = [o.get() for o in output]\n\n print(\"Collecting results...\")\n for (cg, chg, chh) in results:\n snp_cg_meth += cg\n snp_chg_meth += chg\n snp_chh_meth += chh \n\n del results\n\n print(\"Results have been collected...\")\n\n print(\"Building scatter plots...\")\n build_scatter(snp_cg_meth, \"CG\")\n build_scatter(snp_chg_meth, \"CHG\")\n build_scatter(snp_chh_meth, \"CHH\") \n \nif __name__ == \"__main__\":\n 
main()","sub_path":"scripts/count_snp_and_methylation_level.py","file_name":"count_snp_and_methylation_level.py","file_ext":"py","file_size_in_byte":6336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"39129469","text":"import gym\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.misc\nimport tensorflow as tf\nfrom PythonClass.gym_portpolio2.experience_buffer import experience_buffer\nfrom PythonClass.gym_portpolio2.dqn import Qnetwork\nfrom PythonClass.gym_portpolio2.skiing import skiing\nimport os\n\n\n\n\ndef processState(states):\n    return np.reshape(states,[7056])\n\ndef updateTargetGraph(tfVars,tau):\n    # Build ops that slowly move the target network toward the primary network:\n    # the first half of tfVars belongs to the primary network, the second half\n    # to the target network.\n    total_vars = len(tfVars)\n    op_holder = []\n    for idx,var in enumerate(tfVars[0:total_vars//2]):\n        op_holder.append(tfVars[idx+total_vars//2].assign((var.value()*tau) + ((1-tau)*tfVars[idx+total_vars//2].value())))\n    return op_holder\n\ndef updateTarget(op_holder,sess):\n    for op in op_holder:\n        sess.run(op)\n\ngame = skiing()\nbatch_size = 32 #How many experiences to use for each training step.\nupdate_freq = 4 #How often to perform a training step.\ny = .99 #Discount factor on the target Q-values\nstartE = 1 #Starting chance of random action\nendE = 0.1 #Final chance of random action\nannealing_steps = 10000. #How many steps of training to reduce startE to endE.\nnum_episodes = 10000 #How many episodes of game environment to train network with.\npre_train_steps = 10000 #How many steps of random actions before training begins.\nmax_epLength = 5000 #The max allowed length of our episode.\nload_model = True #Whether to load a saved model.\ntest_model = True #Exit after \"done\" flag is True\npath = \"./dqn\" #The path to save our model to.\nh_size = 512 #The size of the final convolutional layer before splitting it into Advantage and Value streams.\ntau = 0.001 #Rate to update target network toward primary network\n\ntf.reset_default_graph()\nmainQN = Qnetwork(h_size)\ntargetQN = Qnetwork(h_size)\n\ninit = tf.initialize_all_variables()\n\nsaver = tf.train.Saver()\n\ntrainables = tf.trainable_variables()\n\ntargetOps = updateTargetGraph(trainables, tau)\n\nmyBuffer = experience_buffer()\n\n# Set the rate of random action decrease.\ne = startE\nstepDrop = (startE - endE) / annealing_steps\n\n# create lists to contain total rewards and steps per episode\njList = []\nrList = []\ntotal_steps = 0\n\n# Make a path for our model to be saved in.\nif not os.path.exists(path):\n    os.makedirs(path)\n\nwith tf.Session() as sess:\n    if load_model == True:\n        print('Loading Model...')\n        ckpt = tf.train.get_checkpoint_state(path)\n        saver.restore(sess, ckpt.model_checkpoint_path)\n    sess.run(init)\n    updateTarget(targetOps, sess)  # Set the target network to be equal to the primary network.\n    for i in range(num_episodes):\n        episodeBuffer = experience_buffer()\n        # Reset environment and get first new observation\n        s = game.reset()\n        s = processState(s)\n        d = False\n        rAll = 0\n        j = 0\n        # The Q-Network\n        while j < max_epLength:  # End the episode after max_epLength steps.\n            j += 1\n            # Choose an action by greedily (with e chance of random action) from the Q-network\n            if np.random.rand(1) < e or total_steps < pre_train_steps:\n                a = np.random.randint(0, 3)\n            else:\n                a = sess.run(mainQN.predict, feed_dict={mainQN.scalarInput: [s]})[0]\n            s1, r, d, _ = game.step(a)\n            game.env.render()\n\n            s1 = processState(s1)\n            total_steps += 1\n            episodeBuffer.add(\n                np.reshape(np.array([s, a, r, s1, d]), [1, 5]))  # Save the 
experience to our episode buffer.\n\n if total_steps > pre_train_steps:\n if e > endE:\n e -= stepDrop\n\n if total_steps % (update_freq) == 0:\n trainBatch = myBuffer.sample(batch_size) # Get a random batch of experiences.\n # Below we perform the Double-DQN update to the target Q-values\n Q1 = sess.run(mainQN.predict, feed_dict={mainQN.scalarInput: np.vstack(trainBatch[:, 3])})\n Q2 = sess.run(targetQN.Qout, feed_dict={targetQN.scalarInput: np.vstack(trainBatch[:, 3])})\n end_multiplier = -(trainBatch[:, 4] - 1)\n doubleQ = Q2[range(batch_size), Q1]\n targetQ = trainBatch[:, 2] + (y * doubleQ * end_multiplier)\n # Update the network with our target values.\n _ = sess.run(mainQN.updateModel,\n feed_dict={mainQN.scalarInput: np.vstack(trainBatch[:, 0]), mainQN.targetQ: targetQ,\n mainQN.actions: trainBatch[:, 1]})\n\n updateTarget(targetOps, sess) # Set the target network to be equal to the primary network.\n rAll += r\n s = s1\n\n if d == True:\n if test_model:\n game.env.monitor.close()\n break\n\n\n # Get all experiences from this episode and discount their rewards.\n myBuffer.add(episodeBuffer.buffer)\n jList.append(j)\n rList.append(rAll)\n # Periodically save the model.\n if i % 1000 == 0:\n saver.save(sess, path + '/model-' + str(i) + '.cptk')\n print(\"Saved Model\")\n if len(rList) % 10 == 0:\n print(total_steps, np.mean(rList[-10:]), e)\n saver.save(sess, path + '/model-' + str(i) + '.cptk')\nprint(\"Percent of succesful episodes: \" + str(sum(rList) / num_episodes) + \"%\")\n","sub_path":"PythonClass/gym_portpolio2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"75567285","text":"# -*- coding: utf-8 -*-\n\n# My simply CSHC module \n\nimport win32com.client\n\n# Motherboard, Controller, and Port Classes\nWin32Processor = {}\nWin32CacheMemory = {}\nWin32BaseBoard = {}\nWin32BIOS = {}\nWin32PhysicalMemory = {}\nWin32MemoryDevice = {}\n\nObjWMI = win32com.client.GetObject('winmgmts:')\n\n#dict Win32Processor = using class WMI Win32_Processor\nObjService = ObjWMI.InstancesOf('Win32_Processor')\nfor objItem in ObjService:\n if objItem.AddressWidth != None:\n Win32Processor['AddressWidth'] = objItem.AddressWidth\n if objItem.Architecture != None:\n Win32Processor['Architecture'] = objItem.Architecture\n if objItem.Availability != None:\n Win32Processor['Availability'] = objItem.Availability\n if objItem.Caption != None:\n Win32Processor['Caption'] = objItem.Caption\n if objItem.ConfigManagerErrorCode != None:\n Win32Processor['ConfigManagerErrorCode'] = \\\n objItem.ConfigManagerErrorCode\n if objItem.ConfigManagerUserConfig != None:\n Win32Processor['ConfigManagerUserConfig'] = \\\n objItem.ConfigManagerUserConfig\n if objItem.CpuStatus != None:\n Win32Processor['CpuStatus'] = objItem.CpuStatus\n if objItem.CreationClassName != None:\n Win32Processor['CreationClassName'] = objItem.CreationClassName\n if objItem.CurrentClockSpeed != None:\n Win32Processor['CurrentClockSpeed'] = objItem.CurrentClockSpeed\n if objItem.CurrentVoltage != None:\n Win32Processor['CurrentVoltage'] = objItem.CurrentVoltage\n if objItem.DataWidth != None:\n Win32Processor['DataWidth'] = objItem.DataWidth\n if objItem.Description != None:\n Win32Processor['Description'] = objItem.Description\n if objItem.DeviceID != None:\n Win32Processor['DeviceID'] = objItem.DeviceID\n if objItem.ErrorCleared != None:\n Win32Processor['ErrorCleared'] = objItem.ErrorCleared\n if 
objItem.ErrorDescription != None:\n Win32Processor['ErrorDescription'] = objItem.ErrorDescription\n if objItem.ExtClock != None:\n Win32Processor['ExtClock'] = objItem.ExtClock\n if objItem.Family != None:\n Win32Processor['Family'] = objItem.Family\n if objItem.InstallDate != None:\n Win32Processor['InstallDate'] = objItem.InstallDate\n if objItem.L2CacheSize != None:\n Win32Processor['L2CacheSize'] = objItem.L2CacheSize\n if objItem.L2CacheSpeed != None:\n Win32Processor['L2CacheSpeed'] = objItem.L2CacheSpeed\n if objItem.L3CacheSize != None:\n Win32Processor['L3CacheSize'] = objItem.L3CacheSize\n if objItem.L3CacheSpeed != None:\n Win32Processor['L3CacheSpeed'] = objItem.L3CacheSpeed\n if objItem.LastErrorCode != None:\n Win32Processor['LastErrorCode'] = objItem.LastErrorCode\n if objItem.Level != None:\n Win32Processor['Level'] = objItem.Level\n if objItem.LoadPercentage != None:\n Win32Processor['LoadPercentage'] = objItem.LoadPercentage\n if objItem.Manufacturer != None:\n Win32Processor['Manufacturer'] = objItem.Manufacturer\n if objItem.MaxClockSpeed != None:\n Win32Processor['MaxClockSpeed'] = objItem.MaxClockSpeed\n if objItem.Name != None:\n Win32Processor['Name'] = objItem.Name\n if objItem.NumberOfCores != None:\n Win32Processor['NumberOfCores'] = objItem.NumberOfCores\n if objItem.NumberOfLogicalProcessors != None:\n Win32Processor['NumberOfLogicalProcessors'] = \\\n objItem.NumberOfLogicalProcessors\n if objItem.OtherFamilyDescription != None:\n Win32Processor['OtherFamilyDescription'] = \\\n objItem.OtherFamilyDescription\n if objItem.PNPDeviceID != None:\n Win32Processor['PNPDeviceID'] = objItem.PNPDeviceID\n \n ListPMC = []\n Win32Processor['PowerManagementCapabilities'] = ListPMC\n objColl = objItem.PowerManagementCapabilities\n if objColl is None:\n ListPMC.append(1)\n else:\n for objValue in objColl:\n ListPMC.append(objValue)\n \n if objItem.PowerManagementSupported != None:\n Win32Processor['PowerManagementSupported'] = \\\n objItem.PowerManagementSupported\n if objItem.ProcessorId != None:\n Win32Processor['ProcessorId'] = objItem.ProcessorId\n if objItem.ProcessorType != None:\n Win32Processor['ProcessorType'] = objItem.ProcessorType\n if objItem.Revision != None:\n Win32Processor['Revision'] = objItem.Revision\n if objItem.Role != None:\n Win32Processor['Role'] = objItem.Role\n if objItem.SocketDesignation != None:\n Win32Processor['SocketDesignation'] = objItem.SocketDesignation\n if objItem.Status != None:\n Win32Processor['Status'] = objItem.Status\n if objItem.StatusInfo != None:\n Win32Processor['StatusInfo'] = objItem.StatusInfo\n if objItem.Stepping != None:\n Win32Processor['Stepping'] = objItem.Stepping\n if objItem.SystemCreationClassName != None:\n Win32Processor['SystemCreationClassName'] = \\\n objItem.SystemCreationClassName\n if objItem.SystemName != None:\n Win32Processor['SystemName'] = objItem.SystemName\n if objItem.UniqueId != None:\n Win32Processor['UniqueId'] = objItem.UniqueId\n if objItem.UpgradeMethod != None:\n Win32Processor['UpgradeMethod'] = objItem.UpgradeMethod\n if objItem.Version != None:\n Win32Processor['Version'] = objItem.Version\n if objItem.VoltageCaps != None:\n Win32Processor['VoltageCaps'] = objItem.VoltageCaps\n\n#dict Win32CacheMemory = using class WMI Win32_CacheMemory\nObjService = ObjWMI.InstancesOf('Win32_CacheMemory')\nfor objItem in ObjService:\n if objItem.Access != None:\n Win32CacheMemory['Access'] = objItem.Access\n if objItem.AdditionalErrorData != None:\n Win32CacheMemory['AdditionalErrorData'] = 
objItem.AdditionalErrorData\n\n    ListAddEr = []\n    Win32CacheMemory['AdditionalErrorData'] = ListAddEr\n    ObjColl = objItem.AdditionalErrorData\n    if ObjColl is None:\n        ListAddEr.append(1)\n    else:\n        for objValue in ObjColl:\n            ListAddEr.append(objValue)\n\n    if objItem.Associativity != None:\n        Win32CacheMemory['Associativity'] = objItem.Associativity\n    if objItem.Availability != None:\n        Win32CacheMemory['Availability'] = objItem.Availability\n    if objItem.BlockSize != None:\n        Win32CacheMemory['BlockSize'] = objItem.BlockSize\n    if objItem.CacheSpeed != None:\n        Win32CacheMemory['CacheSpeed'] = objItem.CacheSpeed\n    if objItem.CacheType != None:\n        Win32CacheMemory['CacheType'] = objItem.CacheType\n    if objItem.Caption != None:\n        Win32CacheMemory['Caption'] = objItem.Caption\n    if objItem.ConfigManagerErrorCode != None:\n        Win32CacheMemory['ConfigManagerErrorCode'] = \\\n            objItem.ConfigManagerErrorCode\n    if objItem.ConfigManagerUserConfig != None:\n        Win32CacheMemory['ConfigManagerUserConfig'] = \\\n            objItem.ConfigManagerUserConfig\n    if objItem.CorrectableError != None:\n        Win32CacheMemory['CorrectableError'] = objItem.CorrectableError\n    if objItem.CreationClassName != None:\n        Win32CacheMemory['CreationClassName'] = objItem.CreationClassName\n\n    ListData = []\n    Win32CacheMemory['CurrentSRAM'] = ListData\n    objColl = objItem.CurrentSRAM\n    if objColl is None:\n        ListData.append(1)\n    else:\n        for objValue in objColl:\n            ListData.append(objValue)\n\n    if objItem.Description != None:\n        Win32CacheMemory['Description'] = objItem.Description\n    if objItem.DeviceID != None:\n        Win32CacheMemory['DeviceID'] = objItem.DeviceID\n    if objItem.EndingAddress != None:\n        Win32CacheMemory['EndingAddress'] = objItem.EndingAddress\n    if objItem.ErrorAccess != None:\n        Win32CacheMemory['ErrorAccess'] = objItem.ErrorAccess\n    if objItem.ErrorAddress != None:\n        Win32CacheMemory['ErrorAddress'] = objItem.ErrorAddress\n    if objItem.ErrorCleared != None:\n        Win32CacheMemory['ErrorCleared'] = objItem.ErrorCleared\n    if objItem.ErrorCorrectType != None:\n        Win32CacheMemory['ErrorCorrectType'] = objItem.ErrorCorrectType\n    if objItem.ErrorData != None:\n        Win32CacheMemory['ErrorData'] = objItem.ErrorData\n    if objItem.ErrorDataOrder != None:\n        Win32CacheMemory['ErrorDataOrder'] = objItem.ErrorDataOrder\n    if objItem.ErrorDescription != None:\n        Win32CacheMemory['ErrorDescription'] = objItem.ErrorDescription\n    if objItem.ErrorInfo != None:\n        Win32CacheMemory['ErrorInfo'] = objItem.ErrorInfo\n    if objItem.ErrorMethodology != None:\n        Win32CacheMemory['ErrorMethodology'] = objItem.ErrorMethodology\n    if objItem.ErrorResolution != None:\n        Win32CacheMemory['ErrorResolution'] = objItem.ErrorResolution\n    if objItem.ErrorTime != None:\n        Win32CacheMemory['ErrorTime'] = objItem.ErrorTime\n    if objItem.ErrorTransferSize != None:\n        Win32CacheMemory['ErrorTransferSize'] = objItem.ErrorTransferSize\n    if objItem.FlushTimer != None:\n        Win32CacheMemory['FlushTimer'] = objItem.FlushTimer\n    if objItem.InstallDate != None:\n        Win32CacheMemory['InstallDate'] = objItem.InstallDate\n    if objItem.InstalledSize != None:\n        Win32CacheMemory['InstalledSize'] = objItem.InstalledSize\n    if objItem.LastErrorCode != None:\n        Win32CacheMemory['LastErrorCode'] = objItem.LastErrorCode\n    if objItem.Level != None:\n        Win32CacheMemory['Level'] = objItem.Level\n    if objItem.LineSize != None:\n        Win32CacheMemory['LineSize'] = objItem.LineSize\n    if objItem.Location != None:\n        Win32CacheMemory['Location'] = objItem.Location\n    if objItem.MaxCacheSize != None:\n        Win32CacheMemory['MaxCacheSize'] = 
objItem.MaxCacheSize\n if objItem.Name != None:\n Win32CacheMemory['Name'] = objItem.Name\n if objItem.NumberOfBlocks != None:\n Win32CacheMemory['NumberOfBlocks'] = objItem.NumberOfBlocks\n if objItem.OtherErrorDescription != None:\n Win32CacheMemory['OtherErrorDescription'] = \\\n objItem.OtherErrorDescription\n if objItem.PNPDeviceID != None:\n Win32CacheMemory['PNPDeviceID'] = objItem.PNPDeviceID\n \n ListCap = []\n Win32CacheMemory['PowerManagementCapabilities'] = ListCap\n objColl = objItem.PowerManagementCapabilities\n if objColl is None:\n ListCap.append(1)\n else:\n for objValue in objColl:\n ListCap.append(objValue)\n \n if objItem.PowerManagementSupported != None:\n Win32CacheMemory['PowerManagementSupported'] = \\\n objItem.PowerManagementSupported\n if objItem.Purpose != None:\n Win32CacheMemory['Purpose'] = objItem.Purpose\n if objItem.ReadPolicy != None:\n Win32CacheMemory['ReadPolicy'] = objItem.ReadPolicy\n if objItem.ReplacementPolicy != None:\n Win32CacheMemory['ReplacementPolicy'] = objItem.ReplacementPolicy\n if objItem.StartingAddress != None:\n Win32CacheMemory['StartingAddress'] = objItem.StartingAddress\n if objItem.Status != None:\n Win32CacheMemory['Status'] = objItem.Status\n if objItem.StatusInfo != None:\n Win32CacheMemory['StatusInfo'] = objItem.StatusInfo\n\n ListSRAM = []\n Win32CacheMemory['SupportedSRAM'] = ListSRAM\n objColl = objItem.SupportedSRAM\n if objColl is None:\n ListSRAM.append(1)\n else:\n for objValue in objColl:\n ListSRAM.append(objValue)\n \n if objItem.SystemCreationClassName != None:\n Win32CacheMemory['SystemCreationClassName'] = \\\n objItem.SystemCreationClassName\n if objItem.SystemLevelAddress != None:\n Win32CacheMemory['SystemLevelAddress'] = objItem.SystemLevelAddress\n if objItem.SystemName != None:\n Win32CacheMemory['SystemName'] = objItem.SystemName\n if objItem.WritePolicy != None:\n Win32CacheMemory['WritePolicy'] = objItem.WritePolicy\n\n#dict Win32BaseBoard = using class WMI Win32_BaseBoard\nObjService = ObjWMI.InstancesOf('Win32_BaseBoard')\nfor objItem in ObjService:\n if objItem.Caption != None:\n Win32BaseBoard['Caption'] = objItem.Caption \n if objItem.ConfigOptions != None:\n Win32BaseBoard['ConfigOptions'] = objItem.ConfigOptions \n if objItem.CreationClassName != None:\n Win32BaseBoard['CreationClassName'] = objItem.CreationClassName\n if objItem.Depth != None:\n Win32BaseBoard['Depth'] = objItem.Depth\n if objItem.Description != None:\n Win32BaseBoard['Description'] = objItem.Description\n if objItem.Height != None:\n Win32BaseBoard['Height'] = objItem.Height\n if objItem.HostingBoard != None:\n Win32BaseBoard['HostingBoard'] = objItem.HostingBoard\n if objItem.HotSwappable != None:\n Win32BaseBoard['HotSwappable'] = objItem.HotSwappable\n if objItem.InstallDate != None:\n Win32BaseBoard['InstallDate'] = objItem.InstallDate\n if objItem.Manufacturer != None:\n Win32BaseBoard['Manufacturer'] = objItem.Manufacturer\n if objItem.Model != None:\n Win32BaseBoard['Model'] = objItem.Model\n if objItem.Name != None:\n Win32BaseBoard['Name'] = objItem.Name\n if objItem.OtherIdentifyingInfo != None:\n Win32BaseBoard['OtherIdentifyingInfo'] = objItem.OtherIdentifyingInfo\n if objItem.PartNumber != None:\n Win32BaseBoard['PartNumber'] = objItem.PartNumber\n if objItem.PoweredOn != None:\n Win32BaseBoard['PoweredOn'] = objItem.PoweredOn\n if objItem.Product != None:\n Win32BaseBoard['Product'] = objItem.Product\n if objItem.Removable != None:\n Win32BaseBoard['Removable'] = objItem.Removable\n if objItem.Replaceable != 
None:\n Win32BaseBoard['Replaceable'] = objItem.Replaceable\n if objItem.RequirementsDescription != None:\n Win32BaseBoard['RequirementsDescription'] = \\\n objItem.RequirementsDescription\n if objItem.RequiresDaughterBoard != None:\n Win32BaseBoard['RequiresDaughterBoard'] = \\\n objItem.RequiresDaughterBoard\n if objItem.SerialNumber != None:\n Win32BaseBoard['SerialNumber'] = objItem.SerialNumber\n if objItem.SKU != None:\n Win32BaseBoard['SKU'] = objItem.SKU\n if objItem.SlotLayout != None:\n Win32BaseBoard['SlotLayout'] = objItem.SlotLayout\n if objItem.SpecialRequirements != None:\n Win32BaseBoard['SpecialRequirements'] = objItem.SpecialRequirements\n if objItem.Status != None:\n Win32BaseBoard['Status'] = objItem.Status\n if objItem.Tag != None:\n Win32BaseBoard['Tag'] = objItem.Tag\n if objItem.Version != None:\n Win32BaseBoard['Version'] = objItem.Version\n if objItem.Weight != None:\n Win32BaseBoard['Weight'] = objItem.Weight\n if objItem.Width != None:\n Win32BaseBoard['Width'] = objItem.Width\n\n#dict Win32BIOS = using class WMI Win32_BIOS \nObjService = ObjWMI.InstancesOf('Win32_BIOS')\nfor objItem in ObjService:\n \n ListBIOSChar = []\n Win32BIOS['BiosCharacteristics'] = ListBIOSChar\n objColl = objItem.BiosCharacteristics\n if objColl is None:\n ListBIOSChar.append(1)\n else:\n for objValue in objColl:\n ListBIOSChar.append(objValue)\n \n if objItem.BIOSVersion != None:\n Win32BIOS['BIOSVersion'] = objItem.BIOSVersion\n if objItem.BuildNumber != None:\n Win32BIOS['BuildNumber'] = objItem.BuildNumber\n if objItem.Caption != None:\n Win32BIOS['Caption'] = objItem.Caption\n if objItem.CodeSet != None:\n Win32BIOS['CodeSet'] = objItem.CodeSet\n if objItem.CurrentLanguage != None:\n Win32BIOS['CurrentLanguage'] = objItem.CurrentLanguage\n if objItem.Description != None:\n Win32BIOS['Description'] = objItem.Description\n if objItem.IdentificationCode != None:\n Win32BIOS['IdentificationCode'] = objItem.IdentificationCode\n if objItem.InstallableLanguages != None:\n Win32BIOS['InstallableLanguages'] = objItem.InstallableLanguages\n if objItem.InstallDate != None:\n Win32BIOS['InstallDate'] = objItem.InstallDate\n if objItem.LanguageEdition != None:\n Win32BIOS['LanguageEdition'] = objItem.LanguageEdition\n if objItem.ListOfLanguages != None:\n Win32BIOS['ListOfLanguages'] = objItem.ListOfLanguages\n if objItem.Manufacturer != None:\n Win32BIOS['Manufacturer'] = objItem.Manufacturer\n if objItem.Name != None:\n Win32BIOS['Name'] = objItem.Name\n if objItem.OtherTargetOS != None:\n Win32BIOS['OtherTargetOS'] = objItem.OtherTargetOS\n if objItem.PrimaryBIOS != None:\n Win32BIOS['PrimaryBIOS'] = objItem.PrimaryBIOS\n if objItem.ReleaseDate != None:\n Win32BIOS['ReleaseDate'] = objItem.ReleaseDate\n if objItem.SerialNumber != None:\n Win32BIOS['SerialNumber'] = objItem.SerialNumber\n if objItem.SMBIOSBIOSVersion != None:\n Win32BIOS['SMBIOSBIOSVersion'] = objItem.SMBIOSBIOSVersion\n if objItem.SMBIOSMajorVersion != None:\n Win32BIOS['SMBIOSMajorVersion'] = objItem.SMBIOSMajorVersion\n if objItem.SMBIOSMinorVersion != None:\n Win32BIOS['SMBIOSMinorVersion'] = objItem.SMBIOSMinorVersion\n if objItem.SMBIOSPresent != None:\n Win32BIOS['SMBIOSPresent'] = objItem.SMBIOSPresent\n if objItem.SoftwareElementID != None:\n Win32BIOS['SoftwareElementID'] = objItem.SoftwareElementID\n if objItem.SoftwareElementState != None:\n Win32BIOS['SoftwareElementState'] = objItem.SoftwareElementState\n if objItem.Status != None:\n Win32BIOS['Status'] = objItem.Status\n if objItem.TargetOperatingSystem != 
None:\n Win32BIOS['TargetOperatingSystem'] = objItem.TargetOperatingSystem\n if objItem.Version != None:\n Win32BIOS['Version'] = objItem.Version\n \n#dict Win32PhysicalMemory = using class WMI Win32_PhysicalMemory \nObjService = ObjWMI.InstancesOf('Win32_PhysicalMemory')\nfor objItem in ObjService:\n if objItem.BankLabel != None:\n Win32PhysicalMemory['BankLabel'] = objItem.BankLabel\n if objItem.Capacity != None:\n Win32PhysicalMemory['Capacity'] = objItem.Capacity\n if objItem.Caption != None:\n Win32PhysicalMemory['Caption'] = objItem.Caption\n if objItem.CreationClassName != None:\n Win32PhysicalMemory['CreationClassName'] = objItem.CreationClassName\n if objItem.DataWidth != None:\n Win32PhysicalMemory['DataWidth'] = objItem.DataWidth\n if objItem.Description != None:\n Win32PhysicalMemory['Description'] = objItem.Description\n if objItem.DeviceLocator != None:\n Win32PhysicalMemory['DeviceLocator'] = objItem.DeviceLocator\n if objItem.FormFactor != None:\n Win32PhysicalMemory['FormFactor'] = objItem.FormFactor\n if objItem.HotSwappable != None:\n Win32PhysicalMemory['HotSwappable'] = objItem.HotSwappable\n if objItem.InstallDate != None:\n Win32PhysicalMemory['InstallDate'] = objItem.InstallDate\n if objItem.InterleaveDataDepth != None:\n Win32PhysicalMemory['InterleaveDataDepth'] = \\\n objItem.InterleaveDataDepth\n if objItem.InterleavePosition != None:\n Win32PhysicalMemory['InterleavePosition'] = objItem.InterleavePosition\n if objItem.Manufacturer != None:\n Win32PhysicalMemory['Manufacturer'] = objItem.Manufacturer\n if objItem.MemoryType != None:\n Win32PhysicalMemory['MemoryType'] = objItem.MemoryType\n if objItem.Model != None:\n Win32PhysicalMemory['Model'] = objItem.Model\n if objItem.Name != None:\n Win32PhysicalMemory['Name'] = objItem.Name\n if objItem.OtherIdentifyingInfo != None:\n Win32PhysicalMemory['OtherIdentifyingInfo'] = \\\n objItem.OtherIdentifyingInfo\n if objItem.PartNumber != None:\n Win32PhysicalMemory['PartNumber'] = objItem.PartNumber\n if objItem.PositionInRow != None:\n Win32PhysicalMemory['PositionInRow'] = objItem.PositionInRow\n if objItem.PoweredOn != None:\n Win32PhysicalMemory['PoweredOn'] = objItem.PoweredOn\n if objItem.Removable != None:\n Win32PhysicalMemory['Removable'] = objItem.Removable\n if objItem.Replaceable != None:\n Win32PhysicalMemory['Replaceable'] = objItem.Replaceable\n if objItem.SerialNumber != None:\n Win32PhysicalMemory['SerialNumber'] = objItem.SerialNumber\n if objItem.SKU != None:\n Win32PhysicalMemory['SKU'] = objItem.SKU\n if objItem.Speed != None:\n Win32PhysicalMemory['Speed'] = objItem.Speed\n if objItem.Status != None:\n Win32PhysicalMemory['Status'] = objItem.Status\n if objItem.Tag != None:\n Win32PhysicalMemory['Tag'] = objItem.Tag\n if objItem.TotalWidth != None:\n Win32PhysicalMemory['TotalWidth'] = objItem.TotalWidth\n if objItem.TypeDetail != None:\n Win32PhysicalMemory['TypeDetail'] = objItem.TypeDetail\n if objItem.Version != None:\n Win32PhysicalMemory['Version'] = objItem.Version\n \n#dict Win32MemoryDevice = using class WMI Win32_MemoryDevice\nObjService = ObjWMI.InstancesOf('Win32_MemoryDevice')\nfor objItem in ObjService:\n if objItem.Access != None:\n Win32MemoryDevice['Access'] = objItem.Access\n \n ListAED = []\n Win32MemoryDevice['AdditionalErrorData'] = ListAED\n objColl = objItem.AdditionalErrorData\n if objColl is None:\n ListAED.append(0)\n else:\n for objValue in objColl:\n ListAED.append(objValue)\n \n if objItem.Availability != None:\n Win32MemoryDevice['Availability'] = 
objItem.Availability\n if objItem.BlockSize != None:\n Win32MemoryDevice['BlockSize'] = objItem.BlockSize\n if objItem.Caption != None:\n Win32MemoryDevice['Caption'] = objItem.Caption\n if objItem.ConfigManagerErrorCode != None:\n Win32MemoryDevice['ConfigManagerErrorCode'] = \\\n objItem.ConfigManagerErrorCode\n if objItem.ConfigManagerUserConfig != None:\n Win32MemoryDevice['ConfigManagerUserConfig'] = \\\n objItem.ConfigManagerUserConfig\n if objItem.CorrectableError != None:\n Win32MemoryDevice['CorrectableError'] = objItem.CorrectableError\n if objItem.CreationClassName != None:\n Win32MemoryDevice['CreationClassName'] = objItem.CreationClassName\n if objItem.Description != None:\n Win32MemoryDevice['Description'] = objItem.Description\n if objItem.DeviceID != None:\n Win32MemoryDevice['DeviceID'] = objItem.DeviceID\n if objItem.EndingAddress != None:\n Win32MemoryDevice['EndingAddress'] = objItem.EndingAddress\n if objItem.ErrorAccess != None:\n Win32MemoryDevice['ErrorAccess'] = objItem.ErrorAccess\n if objItem.ErrorAddress != None:\n Win32MemoryDevice['ErrorAddress'] = objItem.ErrorAddress\n if objItem.ErrorCleared != None:\n Win32MemoryDevice['ErrorCleared'] = objItem.ErrorCleared\n \n ListED = []\n Win32MemoryDevice['ErrorData'] = ListED\n objColl = objItem.ErrorData\n if objColl is None:\n ListED.append(0)\n else:\n for objValue in objColl:\n ListED.append(objValue)\n \n if objItem.ErrorDataOrder != None:\n Win32MemoryDevice['ErrorDataOrder'] = objItem.ErrorDataOrder\n if objItem.ErrorDescription != None:\n Win32MemoryDevice['ErrorDescription'] = objItem.ErrorDescription\n if objItem.ErrorGranularity != None:\n Win32MemoryDevice['ErrorGranularity'] = objItem.ErrorGranularity\n if objItem.ErrorInfo != None:\n Win32MemoryDevice['ErrorInfo'] = objItem.ErrorInfo\n if objItem.ErrorMethodology != None:\n Win32MemoryDevice['ErrorMethodology'] = objItem.ErrorMethodology\n if objItem.ErrorResolution != None:\n Win32MemoryDevice['ErrorResolution'] = objItem.ErrorResolution\n if objItem.ErrorTime != None:\n Win32MemoryDevice['ErrorTime'] = objItem.ErrorTime\n if objItem.ErrorTransferSize != None:\n Win32MemoryDevice['ErrorTransferSize'] = objItem.ErrorTransferSize\n if objItem.InstallDate != None:\n Win32MemoryDevice['InstallDate'] = objItem.InstallDate\n if objItem.LastErrorCode != None:\n Win32MemoryDevice['LastErrorCode'] = objItem.LastErrorCode\n if objItem.Name != None:\n Win32MemoryDevice['Name'] = objItem.Name\n if objItem.NumberOfBlocks != None:\n Win32MemoryDevice['NumberOfBlocks'] = objItem.NumberOfBlocks\n if objItem.OtherErrorDescription != None:\n Win32MemoryDevice['OtherErrorDescription'] = \\\n objItem.OtherErrorDescription\n if objItem.PNPDeviceID != None:\n Win32MemoryDevice['PNPDeviceID'] = objItem.PNPDeviceID\n\n ListPMC = []\n Win32MemoryDevice['PowerManagementCapabilities'] = ListPMC\n objColl = objItem.PowerManagementCapabilities\n if objColl is None:\n ListPMC.append(0)\n else:\n for objValue in objColl:\n ListPMC.append(objValue)\n \n if objItem.PowerManagementSupported != None:\n Win32MemoryDevice['PowerManagementSupported'] = \\\n objItem.PowerManagementSupported\n if objItem.Purpose != None:\n Win32MemoryDevice['Purpose'] = objItem.Purpose\n if objItem.StartingAddress != None:\n Win32MemoryDevice['StartingAddress'] = objItem.StartingAddress\n if objItem.Status != None:\n Win32MemoryDevice['Status'] = objItem.Status\n if objItem.StatusInfo != None:\n Win32MemoryDevice['StatusInfo'] = objItem.StatusInfo\n if objItem.SystemCreationClassName != None:\n 
Win32MemoryDevice['SystemCreationClassName'] = \\\n objItem.SystemCreationClassName\n if objItem.SystemLevelAddress != None:\n Win32MemoryDevice['SystemLevelAddress'] = objItem.SystemLevelAddress\n if objItem.SystemName != None:\n Win32MemoryDevice['SystemName'] = objItem.SystemName","sub_path":"pyCSHC.py","file_name":"pyCSHC.py","file_ext":"py","file_size_in_byte":25558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"616403966","text":"import numpy\r\nimport pandas as pd\r\nfrom sys import argv\r\nfrom argparse import ArgumentParser as Parse\r\nimport datetime\r\nimport Calculations\r\n\r\n# #open file\r\n# zonedf_path = r\"C:\\Users\\nick\\Documents\\GitHub\\NYC-Taxi-Tips\\taxi+_zone_lookup.csv\"\r\n\r\n# greendf_path = r\"C:\\Users\\nick\\Documents\\GitHub\\NYC-Taxi-Tips\\datafiles\\green_tripdata_2018-01.csv\"\r\n\r\n# yellowdf_path = r\"C:\\Users\\nick\\Documents\\GitHub\\NYC-Taxi-Tips\\datafiles\\yellow_tripdata_2018-01.csv\"\r\n\r\n# fhvdf_path = r\"C:\\Users\\nick\\Documents\\GitHub\\NYC-Taxi-Tips\\datafiles\\fhv_tripdata_2018-01.csv\"\r\n\r\n# saves link to mock zone csv\r\nzonedf = pd.read_csv(r\"C:\\Users\\nick\\Documents\\GitHub\\NYC-Taxi-Tips\\mock_zones.csv\")\r\n\r\n# creates dictionary of zone IDs associated with boroughs\r\nzone_id_dict = Calculations.init_zone_dict(zonedf)\r\n\r\n\r\n# -y -g -f include data from given service providers (y for yellow, etc.)\r\nparser = Parse(description='Given start and end borough, displays average cost.')\r\nparser.add_argument('-y', action=\"store_true\", default=False)\r\nparser.add_argument('-g', action=\"store_true\", default=False)\r\nparser.add_argument('-f', action=\"store_true\", default=False)\r\n\r\n# --start 00/00/0000 00:00:00 --end 00/00/0000 00:00:00\r\nparser.add_argument(\"--start\", nargs=2, action=\"store\")\r\nparser.add_argument(\"--end\", nargs=2, action=\"store\")\r\n\r\n# --sborough BoroughName --eborough BoroughName\r\nparser.add_argument(\"--sborough\", nargs=1, action=\"store\")\r\nparser.add_argument(\"--eborough\", nargs=1, action=\"store\")\r\n\r\n# parse arguments\r\nargs = vars(parser.parse_args()) \r\n\r\n# must select at least one data source. 
else, error\r\nif args[\"y\"] is False and args[\"g\"] is False and args[\"f\"] is False:\r\n print(\"error: must choose at least one list\")\r\n exit()\r\n\r\n# create datetime object for command line start information\r\nif \"start\" in args and args[\"start\"] is not None:\r\n year, month, day = map(int, args[\"start\"][0].split('-'))\r\n start_date = datetime.date(year, month, day)\r\n start_time = datetime.datetime.strptime(args[\"start\"][1],'%H:%M:%S').time()\r\n time_object_start = datetime.datetime.combine(start_date, start_time)\r\n # if end date is not given, make end date into the future\r\n if \"end\" in args and args[\"end\"] is None:\r\n end_date = datetime.datetime(2050, 1, 1)\r\n end_time = datetime.datetime.strptime(\"00:00:00\",'%H:%M:%S').time()\r\n time_object_end = datetime.datetime.combine(end_date, end_time)\r\n# create datetime object for command line end information\r\nif \"end\" in args and args[\"end\"] is not None:\r\n year, month, day = map(int, args[\"end\"][0].split('-'))\r\n end_date = datetime.date(year, month, day)\r\n end_time = datetime.datetime.strptime(args[\"end\"][1],'%H:%M:%S').time()\r\n time_object_end = datetime.datetime.combine(end_date, end_time)\r\n # if start date is not given, include all information from beginning\r\n if \"start\" in args and args[\"start\"] is None:\r\n start_date = datetime.datetime(1900, 1, 1)\r\n start_time = datetime.datetime.strptime(\"00:00:00\",'%H:%M:%S').time()\r\n time_object_start = datetime.datetime.combine(start_date, start_time)\r\n# read in yellow information\r\nif args[\"y\"] is True:\r\n tot_list = []\r\n tot_list.append(pd.read_csv(r\"C:\\Users\\nick\\Documents\\GitHub\\NYC-Taxi-Tips\\mock_yellow_data.csv\",index_col=0, header=0))\r\n df = pd.concat(tot_list, axis = 0, ignore_index = False)\r\n # filter by pick up date/time\r\n if ((\"start\" in args and args[\"start\"] is not None) or (\"end\" in args and args[\"end\"] is not None)):\r\n df['tpep_pickup_datetime'] = pd.to_datetime(df['tpep_pickup_datetime'])\r\n mask = (df['tpep_pickup_datetime'] > time_object_start) & (df['tpep_pickup_datetime'] <= time_object_end)\r\n df.loc[mask]\r\n df = df.loc[mask]\r\n # filter if start/end info is given\r\n df = Calculations.calc_start_end(df, args, zone_id_dict, 'PULocationID', 'DOLocationID') \r\n df.to_csv(\"filtered_yellow_data.csv\")\r\n# read in green information\r\nif args[\"g\"] is True:\r\n tot_list = []\r\n tot_list.append(pd.read_csv(r\"C:\\Users\\nick\\Documents\\GitHub\\NYC-Taxi-Tips\\mock_green_data.csv\",index_col=0, header=0))\r\n df = pd.concat(tot_list, axis = 0, ignore_index = False)\r\n # filter by pick up date/time\r\n if ((\"start\" in args and args[\"start\"] is not None) or (\"end\" in args and args[\"end\"] is not None)):\r\n df['lpep_pickup_datetime'] = pd.to_datetime(df['lpep_pickup_datetime'])\r\n mask = (df['lpep_pickup_datetime'] > time_object_start) & (df['lpep_pickup_datetime'] <= time_object_end)\r\n df.loc[mask]\r\n df = df.loc[mask]\r\n # filter if start/end info is given\r\n df = Calculations.calc_start_end(df, args, zone_id_dict, 'PULocationID', 'DOLocationID') \r\n df.to_csv(\"filtered_green_data.csv\")\r\n# read in for-hire information\r\nif args[\"f\"] is True:\r\n tot_list = []\r\n tot_list.append(pd.read_csv(r\"C:\\Users\\nick\\Documents\\GitHub\\NYC-Taxi-Tips\\mock_fhv_data.csv\",index_col=0, header=0))\r\n df = pd.concat(tot_list, axis = 0, ignore_index = False)\r\n # filter by pick up date/time\r\n if ((\"start\" in args and args[\"start\"] is not None) or (\"end\" in 
args and args[\"end\"] is not None)):\r\n df['Pickup_DateTime'] = pd.to_datetime(df['Pickup_DateTime'])\r\n mask = (df['Pickup_DateTime'] > time_object_start) & (df['Pickup_DateTime'] <= time_object_end)\r\n df.loc[mask]\r\n df = df.loc[mask]\r\n # filter if start/end info is given\r\n df = Calculations.calc_start_end(df, args, zone_id_dict, 'PUlocationID', 'DOlocationID') \r\n df.to_csv(\"filtered_fhv_data.csv\")\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"NYCTaxi.py","file_name":"NYCTaxi.py","file_ext":"py","file_size_in_byte":5572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"537572179","text":"import sys\nimport os.path\n\nfrom PyQt5.QtCore import QTimer\nfrom PyQt5.QtQml import QQmlApplicationEngine\nfrom PyQt5.QtGui import QGuiApplication\n\nfrom controllers.main_controller import MainController\n\n\ndef main():\n app = QGuiApplication(sys.argv)\n qml_engine = QQmlApplicationEngine()\n\n main_controller = MainController(app)\n context = qml_engine.rootContext()\n context.setContextProperty(\"main\", main_controller)\n\n this_directory = os.path.dirname(os.path.abspath(__file__))\n qml_path = os.path.join(this_directory, 'qml/main.qml')\n qml_engine.load(qml_path)\n\n main_window = qml_engine.rootObjects()[0]\n main_window.show()\n\n QTimer.singleShot(0, main_controller.startup)\n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"projectdir/plotapp/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"222226642","text":"import cv2\nimport numpy as np\nimport math\n\n\n#img=cv2.imread('hough_simple_1.pgm', cv2.IMREAD_GRAYSCALE)\n\nsobelx = np.array([[1,2,0,-2,-1], [2,3,0,-3,-2],[3,5,0,-5,-3], [2,3,0,-3,-2], [1,2,0,-2,-1]])\nsobely = np.array([[-1,-2,-3,-2,-1], [-2,-3,-5,-3,-2], [0,0,0,0,0], [2,3,5,3,2], [1,2,3,2,1]])\n\n#used sobel 5x5 matrix\n\ndef p5(image_in):\n #gaussian = cv2.getgaussiankernel(image_in, )\n dst1 = cv2.filter2D(image_in, -1, sobelx)\n dst2 = cv2.filter2D(image_in, -1, sobely)\n\n for i in range(image_in.shape[0]):\n for j in range(image_in.shape[1]):\n xmag = dst1[i,j]**2\n ymag = dst2[i,j]**2\n magnitude = math.sqrt(xmag+ymag)\n image_in.itemset((i,j), magnitude)\n \n","sub_path":"Assignment1/p5.py","file_name":"p5.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"596849207","text":"\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nfrom params import *\nfrom .Model import *\n\n\n\n\n\n\n# Train the model\n_, nb_img_tain, nb_img_test, testingPartition, trainedclassifier, _, trainConfusionMat = model.train(10)\n\n# Test the model\ntestErrorMat = model.test(testingPartition, trainedclassifier)\n\n\n\ndef plot_trainConfusionMatrix(trainConfusionMat, nb_img_tain):\n \n trainCM = trainConfusionMat/nb_img_tain\n\n fig, ax = plt.subplots(1, figsize=(10,10))\n sns.heatmap(trainCM, annot=True)\n ax.set_xlabel('model predictions', fontsize=10)\n ax.set_ylabel('actual', fontsize=10)\n plt.title(\"Training data confusion matrix\", fontsize=15)\n plt.show()\n\n\n\n\n\ndef plot_testErrorMatrix(testErrorMat, nb_img_test):\n \n testEM = testErrorMat/nb_img_test\n\n fig, ax = plt.subplots(1, figsize=(10,10))\n sns.heatmap(testEM, annot=True)\n ax.set_xlabel('model predictions', fontsize=10)\n ax.set_ylabel('actual', fontsize=10)\n plt.title(\"Test data error 
matrix\", fontsize=15)\n plt.show()","sub_path":"src/confusionMatrix.py","file_name":"confusionMatrix.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"35144164","text":"from odoo import api, fields, models, _\nfrom odoo.addons import decimal_precision as dp\nfrom odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT, float_compare\nfrom odoo.exceptions import RedirectWarning, UserError, ValidationError\n\n\nclass SaleOrderLine(models.Model):\n _inherit = 'sale.order.line'\n\n state = fields.Selection(selection_add=[\n ('waitingapproval', 'Wait Approval'),\n ])\n\n\nclass SaleOrder(models.Model):\n _inherit = 'sale.order'\n\n def _check_proposal(self):\n\n if self.user_has_groups('sales_team.group_sale_manager'):\n return False\n else:\n ICPSudo = self.env['ir.config_parameter'].sudo()\n if ICPSudo.get_param('fal_config_setting.fal_is_proposal'):\n return True\n else:\n return False\n\n @api.multi\n def action_propose(self):\n if self._check_proposal():\n if self.user_has_groups('sales_team.group_sale_manager'):\n self.action_confirm()\n else:\n self.action_wait()\n\n # keep wizard if want to show list of restriction\n # view = self.env.ref('fal_sale_approval.view_fal_sale_proposal_wizard')\n # return {\n # 'name': _('Propose Quotation?'),\n # 'type': 'ir.actions.act_window',\n # 'view_type': 'form',\n # 'view_mode': 'form',\n # 'res_model': 'fal.sale.proposal.wizard',\n # 'views': [(view.id, 'form')],\n # 'view_id': view.id,\n # 'target': 'new',\n # }\n else:\n self.action_confirm()\n\n state = fields.Selection(selection_add=[\n ('waitingapproval', 'Wait Approval'),\n ])\n\n @api.multi\n def action_wait(self):\n orders = self.filtered(lambda s: s.state in ['draft', 'sent'])\n return orders.write({\n 'state': 'waitingapproval',\n })\n","sub_path":"fal_sale_approval/models/sale_order.py","file_name":"sale_order.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"535943285","text":"# GUARDA A POSIÇÃO DAS COLUNAS NA TABELA DE ASSOCIADOS\nIDASSOCIADO = 1\nTITULO = 2\nNOME = 3\nTRATAMENTO = 4\nDTINS = 5\nMATR = 6\nDTDESLIG = 7\nDTMATR = 8\nIDCATEG_SOC = 9\nIDSITUACAO = 10\nDTSITUACAO = 11\nSEXO = 12\nNATURALIDADE = 13\nDTNASC = 14\nENDERECO = 15\nBAIRRO = 16\nIDCIDADE = 17\nCEP = 18\nTEL1 = 19\nTEL2 = 20\nTEL3 = 21\nEMAIL = 22\nMES_ANIV = 23\nIDENTIDADE = 24\nORGAO_EMISSOR = 25\nIDEST_CIVIL = 26\nCPF = 27\nENVIA_BOLETO = 28\nNOME_PAI = 29\nNOME_MAE = 30\nVIA = 31\nATIVA_MSG = 32\nMSG = 33\nCONTADOR_MSG = 34\nDIA_VENC_COB = 35\nDTULTATU = 36\nOBS = 37\nTEL_TRAB = 38\nRAMAL_TRAB = 39\nENDERECO_COB = 40\nBAIRRO_COB = 41\nIDCIDADE_COB = 42\nCEP_COB = 43\nENDERECO_COM = 44\nBAIRRO_COM = 45\nIDCIDADE_COM = 46\nCEP_COM = 47\nEMAIL_COM = 48\nNACIONALIDADE = 49\nREFERENCIA = 50\nOCUPACAO = 51\nNOME_EMP = 52\nRENDA = 53\nBANCOS = 54\nDT_VAL = 55\nDT_RECAD = 56\nFLG_TEMP = 57\nIDASSOC_RESP = 58\nDTADM = 59\nFOTO = 60\nFLG_VALIDADE = 61\nSEQ_MATR = 62\nFLG_CONSELHEIRO = 63\nIDPROFISSAO = 64\nIDSETOR_CONS = 65\nIDCONDICAO = 66\nSITUACAO_CONS = 67\nOBS_CONS = 68\nFLG_RECAD = 69\nTERMO = 70\nAC = 71\nIDLAYOUT = 72\nAG_VAL = 73\nCC_CART = 74\nFLG_ISENTO = 75\nFLG_MES_SEGUINTE = 76\nEXTRA = 77\nMATR_OPC = 78\nMSG_TELA = 79\nATIVA_MSG_TELA = 80\nIDTIPO_TEL1 = 81\nIDTIPO_TEL2 = 82\nIDTIPO_TEL3 = 83\nIDCARGO_PROF = 84\nIDESCOLARIDADE = 85\nTP_END_CORRESP = 86\nIDENTIF_PROF = 87\nDATA_VINCULACAO = 88\nFLG_BLOQUEAR_CONSUMO = 
89\nMANDATO_INI_CONS = 90\nMANDATO_FIM_CONS = 91\nMES_COB = 92\nVIA_ESTAC = 93\nMATR_ESTAC = 94\nNOME_ABREV = 95\nIDASSOC_VINCULADO = 96\nCPF_CC = 97\nEMAIL_CC = 98\nLINK_ERP = 99\nFLG_MES_ANTERIOR = 100","sub_path":"modulos/colunas_origem_associados.py","file_name":"colunas_origem_associados.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"204023451","text":"\"\"\"\ndoc string goes here\n\"\"\"\n\n__all__ = ['PurePythonStorage']\n\n# Standard library imports.\n\n# Related third party imports.\n\n# Local application/library specific imports.\nfrom currency_exchange.core.entities import CurrencyExchangeRate, CurrencyEntity\nfrom .base import BaseStorage\n\n\nclass DictStorage:\n def __init__(self):\n self.providers = {}\n self.currencies = {}\n self.rates = {}\n\n\nclass PurePythonStorage(BaseStorage):\n _connection = None\n\n @property\n def connection(self):\n if self._connection is None:\n self._connection = DictStorage()\n return self._connection\n\n def migrate(self):\n pass\n\n def drop_tables(self):\n pass\n\n def store_rates(self, exchange_rates):\n assert all([isinstance(obj, CurrencyExchangeRate) for obj in exchange_rates])\n\n for rate in exchange_rates:\n provider_name = rate.get_provider()\n provider_data = self.connection.rates.setdefault(provider_name, {})\n\n on_date = rate.get_on_date()\n per_date_data = provider_data.setdefault(on_date, {})\n\n from_code = rate.get_from_currency().get_code()\n to_code = rate.get_to_currency().get_code()\n\n per_date_data[(from_code, to_code)] = rate.get_rate()\n\n def get_currencies(self, provider_names, currency_codes):\n result = {}\n\n for code in currency_codes:\n data = self.connection.currencies.get(code)\n\n if data:\n currency = CurrencyEntity(data[0], data[1])\n else:\n currency = CurrencyEntity(code, code)\n\n result[code] = currency\n\n return result\n\n def get_rates(self, provider_names, currency_code, to_currencies=None, on_date=None):\n result = []\n currency_pairs = [(currency_code, to_currency) for to_currency in to_currencies]\n currencies = self.get_currencies(None, to_currencies + [currency_code])\n\n for prov_name in provider_names:\n provider_data = self.connection.rates.get(prov_name)\n\n if provider_data:\n on_date_data = provider_data.get(on_date)\n\n if on_date_data:\n for pair in currency_pairs:\n rate = on_date_data.get(pair)\n\n if rate:\n from_currency = currencies.get(pair[0])\n to_currency = currencies.get(pair[1])\n exchange_rate = CurrencyExchangeRate(prov_name, from_currency, to_currency, rate, on_date)\n result.append(exchange_rate)\n\n return result\n\n def store_currencies(self, currencies):\n assert all([isinstance(obj, CurrencyEntity) for obj in currencies])\n\n for currency in currencies:\n self.connection.currencies[currency.get_code()] = (currency.get_name(), currency.get_code())\n","sub_path":"currency_exchange/core/storage/python_storage.py","file_name":"python_storage.py","file_ext":"py","file_size_in_byte":2922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"510009295","text":"import boto3\nfrom botocore.exceptions import ClientError\nfrom inventorycalculator.errors import S3StorageError\nfrom OneTicketLogging import elasticsearch_logger\n\n\n_logger = elasticsearch_logger(__name__)\n\n\nclass S3Storage:\n def __init__(self, bucket_name: str):\n self._bucket_name = bucket_name\n self._client = boto3.client('s3')\n\n def upload(self, key: 
str, data: str):\n try:\n self._client.put_object(\n Body=data.encode(),\n Key=key,\n Bucket=self._bucket_name\n )\n except ClientError as e:\n _logger.error(e)\n raise S3StorageError('Unable to upload given data')\n\n def get(self, key: str) -> str:\n try:\n return self._client.get_object(\n Key=key,\n Bucket=self._bucket_name\n )['Body'].read().decode('utf-8')\n except ClientError as e:\n _logger.error(e)\n raise S3StorageError(f'Resource not exists by given key:{key}')\n","sub_path":"inventorycalculator/core/storages/s3_storage.py","file_name":"s3_storage.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"147712842","text":"def main(filename='neko.txt.mecab'):\n analyzed = []\n\n with open(filename, mode='r', encoding='utf-8') as f:\n for line in f:\n item = line.split('\\t')\n if item[0] != 'EOS\\n':\n e = item[1].split(',')\n analyzed.append({'surface': item[0], 'base': e[6], 'pos': e[0], 'pos1': e[1]})\n\n verb = set([s['base'] for s in analyzed if s['pos'] == '動詞'])\n\n for i in verb:\n print(i)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"nlp32.py","file_name":"nlp32.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"53460463","text":"#!/usr/bin/env python3\n# declarative.py - declarative design\n\nlines = open(\"prices.txt\")\nfields = [line.split() for line in lines]\ncost = sum([float(field[1]) * float(field[2]) for field in fields])\nprint(cost)\n\n#####################################\n#\n# $ declarative.py\n# 262633.5\n#\n","sub_path":"py3/pgms/sec1/declarative.py","file_name":"declarative.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"91725931","text":"import MixedSort as mx\nfrom time import time\nimport random\n\ndef count_elapsed_time(f):\n \"\"\"\n Decorator.\n Execute the function and calculate the elapsed time.\n Print the result to the standard output.\n \"\"\"\n\n def wrapper():\n # Start counting.\n start_time = time()\n # Take the original function's return value.\n ret = f()\n # Calculate the elapsed time.\n elapsed_time = time() - start_time\n print(\"Elapsed time: %0.10f seconds.\" % elapsed_time)\n return ret\n\n return wrapper\n\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n\n prueba = []\n for i in range(60):\n prueba.append(random.randint(0,200))\n print(\"Lista desordenada\\n\" + str(prueba))\n listaPruebaOrdenada = mx.bucketSort(prueba)\n print(\"Lista ordenada\\n\" +str(listaPruebaOrdenada))\n numeros = []\n for i in range(1000000):\n numeros.append(random.randint(0,100000000))\n start_time = time()\n mx.bucketSort(numeros)\n elapsed_time = time() - start_time\n print(\"Numero de datos \" + str(len(numeros)))\n print(\"Tiempo transcurrido: %.10f segundos.\" % elapsed_time)\n\n\n\n# See PyCharm help at https://www.jetbrains.com/help/pycharm/\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"137813525","text":"class Node:\n # O (1)\n def __init__(self, initial_data):\n self.data = initial_data\n self.next = None\n\n\nclass LinkedList:\n # O(1)\n def __init__(self):\n self.head = None\n self.tail = None\n\n # O(1)\n def append(self, new_node):\n if self.head is None:\n 
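# empty list: the new node becomes both head and tail\n            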
self.head = new_node\n            self.tail = new_node\n        else:\n            self.tail.next = new_node\n            self.tail = new_node\n\n    # O(1)\n    def prepend(self, new_node):\n        if self.head is None:\n            self.head = new_node\n            self.tail = new_node\n        else:\n            new_node.next = self.head\n            self.head = new_node\n\n    # O(1)\n    def insert_after(self, current_node, new_node):\n        if self.head is None:\n            self.head = new_node\n            self.tail = new_node\n        elif current_node is self.tail:\n            self.tail.next = new_node\n            self.tail = new_node\n        else:\n            new_node.next = current_node.next\n            current_node.next = new_node\n\n    # O(1)\n    def remove_after(self, current_node):\n        if (current_node is None) and (self.head is not None):\n            succeeding_node = self.head.next\n            self.head = succeeding_node\n            if succeeding_node is None:\n                self.tail = None\n        elif current_node.next is not None:\n            # link past the removed node (current_node.next), not back to it\n            succeeding_node = current_node.next.next\n            current_node.next = succeeding_node\n            if succeeding_node is None:\n                self.tail = current_node\n\n\n\"\"\"\nFinish the methods needed for the Queue class, and the breadth first search in the graph\n\"\"\"\n\n\nclass Queue:\n    #O(1)\n    def __init__(self):\n        self.list = LinkedList()\n\n    #O(1)\n    def push(self, new_item):\n        self.list.append(new_item) # Insert as list tail (end of queue)\n\n    #O(1)\n    def pop(self):\n        popped_item = self.list.head # Copy list head (front of queue)\n        self.list.remove_after(None) # Remove list head\n        return popped_item\n","sub_path":"queue.py","file_name":"queue.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"149023476","text":"import sys\n\nfrom PyQt5.QtWidgets import QMainWindow,QApplication,QVBoxLayout,QWidget,QMessageBox\nfrom PyQt5.QtGui import QColor,QPainter,QFont\nfrom PyQt5.QtCore import QPoint,pyqtSignal,QCoreApplication\n\n\nclass Board(QWidget):\n\n    ending_signal = pyqtSignal(str)\n\n    def __init__(self):\n        super().__init__()\n        self.init_game()\n\n    def init_game(self):\n        self.win = None\n        self.turn = 'black'\n        self.black_piece_pos = []\n        self.white_piece_pos = []\n        self.ending_signal.connect(self.show_ending_message)\n        self.update()\n\n    def paintEvent(self, e):\n        qp = QPainter()\n        qp.begin(self)\n        qp.setBrush(QColor(255,215,0))\n        qp.drawRect(0,0,self.width()-1,self.height()-1)\n        points_list =[x for x in range(32,962,62)]\n        for i in points_list:\n            qp.drawLine(i,32,i,900)\n            qp.drawLine(32,i,900,i)\n        qp.setPen(QColor('black'))\n        qp.setFont(QFont('SimSun',10))\n        string_1 = [str(x) for x in range(1,16)]\n        string_2 = 'abcdefghijklmno'\n        for i,j in zip(points_list,string_1):\n            qp.drawText(i,933,j)\n        for i,j in zip(points_list,string_2):\n            qp.drawText(930,i,j)\n        qp.setBrush(QColor('black'))\n        for pos in self.black_piece_pos:\n            qp.drawEllipse(pos,30,30)\n        qp.setBrush(QColor('white'))\n        for pos in self.white_piece_pos:\n            qp.drawEllipse(pos,30,30)\n        qp.end()\n\n    def mousePressEvent(self, e):\n        if e.pos().x() < 962 and e.pos().y() < 962:\n            pos = self.cal_pos(e.pos())\n            if (pos not in self.black_piece_pos) and (pos not in self.white_piece_pos):\n                if self.turn == 'black':\n                    self.black_piece_pos.append(pos)\n                    self.check_win(pos)\n                elif self.turn == 'white':\n                    self.white_piece_pos.append(pos)\n                    self.check_win(pos)\n\n            self.update()\n\n    def change_turn(self):\n        if self.turn == 'black':\n            self.turn = 'white'\n        elif self.turn == 'white':\n            self.turn = 'black'\n\n    def check_win(self,pos):\n        x, y= pos.x(),pos.y()\n        condition= {}\n        condition[1] = [QPoint(x+62,y),QPoint(x+62*2,y),QPoint(x+62*3,y),QPoint(x+62*4,y)]\n        condition[2] = 
[QPoint(x+62,y),QPoint(x+62*2,y),QPoint(x+62*3,y),QPoint(x-62,y)]\n condition[3] = [QPoint(x + 62, y), QPoint(x + 62 * 2, y), QPoint(x -62, y), QPoint(x - 62 * 2, y)]\n condition[4] = [QPoint(x + 62, y), QPoint(x - 62, y), QPoint(x - 62 * 2, y), QPoint(x - 62 * 3, y)]\n condition[5] = [QPoint(x - 62*4, y), QPoint(x - 62 * 3, y), QPoint(x - 62 * 2, y), QPoint(x - 62 * 1, y)]\n condition[6] = [QPoint(x,y+62),QPoint(x,y+62*2),QPoint(x,y+62*3),QPoint(x,y+62*4)]\n condition[7] = [QPoint(x,y+62),QPoint(x,y+62*2),QPoint(x,y+62*3),QPoint(x,y-62)]\n condition[8] = [QPoint(x , y+ 62), QPoint(x , y+ 62 * 2), QPoint(x , y-62), QPoint(x , y- 62 * 2)]\n condition[9] = [QPoint(x , y+ 62), QPoint(x, y - 62), QPoint(x, y - 62 * 2), QPoint(x , y- 62 * 3)]\n condition[10] = [QPoint(x , y- 62*4), QPoint(x , y- 62 * 3), QPoint(x , y- 62 * 2), QPoint(x , y- 62 * 1)]\n condition[11] = [QPoint(x+62,y+62),QPoint(x+62*2,y+62*2),QPoint(x+62*3,y+62*3),QPoint(x+62*4,y+62*4)]\n condition[12] = [QPoint(x+62,y+62),QPoint(x+62*2,y+62*2),QPoint(x+62*3,y+62*3),QPoint(x-62,y-62)]\n condition[13] = [QPoint(x + 62, y+ 62), QPoint(x + 62 * 2, y + 62 * 2), QPoint(x -62, y-62), QPoint(x - 62 * 2, y- 62 * 2)]\n condition[14] = [QPoint(x + 62, y+ 62), QPoint(x - 62, y- 62), QPoint(x - 62 * 2, y- 62 * 2), QPoint(x - 62 * 3, y- 62 * 3)]\n condition[15] = [QPoint(x - 62*4, y- 62*4), QPoint(x - 62 * 3, y- 62 * 3), QPoint(x - 62 * 2, y- 62 * 2), QPoint(x - 62 * 1, y - 62 * 1)]\n condition[16] = [QPoint(x-62,y+62),QPoint(x-62*2,y+62*2),QPoint(x-62*3,y+62*3),QPoint(x-62*4,y+62*4)]\n condition[17] = [QPoint(x-62,y+62),QPoint(x-62*2,y+62*2),QPoint(x-62*3,y+62*3),QPoint(x+62,y-62)]\n condition[18] = [QPoint(x -62, y+ 62), QPoint(x - 62 * 2, y + 62 * 2), QPoint(x +62, y-62), QPoint(x + 62 * 2, y- 62 * 2)]\n condition[19] = [QPoint(x - 62, y+ 62), QPoint(x + 62, y- 62), QPoint(x + 62 * 2, y- 62 * 2), QPoint(x + 62 * 3, y- 62 * 3)]\n condition[20] = [QPoint(x + 62*4, y- 62*4), QPoint(x + 62 * 3, y- 62 * 3), QPoint(x + 62 * 2, y- 62 * 2), QPoint(x + 62 * 1, y - 62 * 1)]\n for k,i in condition.items():\n count = 0\n if self.turn == 'black':\n for piece in i:\n if piece in self.black_piece_pos:\n count += 1\n if count == 4:\n self.win = 'black'\n elif self.turn == 'white':\n for piece in i:\n if piece in self.white_piece_pos:\n count += 1\n if count == 4:\n self.win = 'white'\n if not self.win:\n self.change_turn()\n elif self.win == 'black':\n self.ending_signal.emit('black win!')\n elif self.win == 'white':\n self.ending_signal.emit('white win!')\n\n\n def show_ending_message(self,ms):\n msg = QMessageBox(QMessageBox.Information,'Game over!',ms,QMessageBox.NoButton,self)\n msg.addButton('&Replay',QMessageBox.AcceptRole)\n msg.addButton('&Exit',QMessageBox.RejectRole)\n if msg.exec() == QMessageBox.AcceptRole:\n self.init_game()\n else:\n QCoreApplication.quit()\n\n @staticmethod\n def cal_pos(pos):\n a,b = pos.x(),pos.y()\n if a % 62-32 <31:\n a = a // 62 *62+32\n else:\n a = (a //62+1)*62+32\n if b % 62-32 <31:\n b = b// 62 *62+32\n else:\n b = (b //62+1)*62+32\n\n return QPoint(a,b)\n\n\n\n\nclass Game(QMainWindow):\n def __init__(self):\n super().__init__()\n self.board = Board()\n\n l = QVBoxLayout()\n l.addWidget(self.board)\n w = QWidget()\n w.setLayout(l)\n w.setFixedHeight(1000)\n w.setFixedWidth(1000)\n self.setCentralWidget(w)\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n game = Game()\n game.show()\n 
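# enter the Qt event loop; sys.exit propagates its return code to the OS\n    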
sys.exit(app.exec())","sub_path":"wzq.py","file_name":"wzq.py","file_ext":"py","file_size_in_byte":6330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"244679038","text":"from selenium import webdriver\nimport requests\n\ndef get_links(url):\n \"\"\"Find all links on page at the given url.\n Return a list of all link addresses, as strings.\n \"\"\"\n links=[]\n browser = webdriver.Firefox()\n browser.get(url)\n elements = browser.find_elements_by_tag_name('a')\n for ele in elements:\n links.append(ele.get_attribute('href'))\n browser.close()\n return links\n\n\n\n\ndef invalid_urls(urllist):\n invalid_links = []\n for url in urllist:\n r = requests.head(url)\n if r.status_code == 404:\n invalid_links.append(url)\n return invalid_links \n\nlinks = get_links(\"https://cpske.github.io/ISP/\")\nfor link in links:\n print(\"Link: \"+link)\ninvalid_links = invalid_urls(links) \nfor link in invalid_links:\n print(\"Invalid: \" + link)\n","sub_path":"polls/tests/link_invalid_url_test.py","file_name":"link_invalid_url_test.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"321742832","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\n\nimport sys\nimport os\nimport argparse\nfrom lib_mean import create_output_file, calc_monthly_mean, update_file_history\n\n\n\"\"\"\nWhat this script does:\n\nCalculate monthly means over a number of years from CICE output.\n\nHow to run this script:\n\n./cice_monthly_mean.py 01/iceh.nc 02/iceh.nc out.nc --vars ih\n\nWhere 01/iceh.nc 02/iceh.nc contain a number of monthly averaged fields.\nAfterwards out.nc will contain a mean of all Jan, Feb, Mar, etc.\n\nIn order to get a yearly mean using the output of this script one can just do:\n\nncra -v out.nc yearly_mean.nc\n\n\"\"\"\n\ndef main():\n\n parser = argparse.ArgumentParser()\n parser.add_argument('input_files', nargs='+', help=\"\"\"\n The input data files in NetCDF format. These files can\n be given in any order. They MUST appear before any other\n arguments/options.\"\"\")\n parser.add_argument('output_file', default='ice_monthly.nc', help=\"\"\"\n The name of the output file.\"\"\")\n parser.add_argument('--vars', default=None, nargs='+',\n help='A list of the variables to average.')\n parser.add_argument('--copy_vars', default=[], nargs='+',\n help=\"\"\"A list of the variables copy across but not\n included in the averaging.\"\"\")\n\n args = parser.parse_args()\n\n create_output_file(args.input_files[0], args.vars + args.copy_vars,\n args.output_file)\n calc_monthly_mean(args.input_files, args.vars, args.output_file)\n update_file_history(args.output_file, ' '.join(sys.argv))\n\n return 0\n\nif __name__ == '__main__':\n sys.exit(main())\n","sub_path":"analyse/cice_monthly_mean.py","file_name":"cice_monthly_mean.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"329209520","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @Time : 2017/10/2 9:10\n# @File : hello.py\n# @Software: PyCharm\n__author__ = 'Arteezy'\n' a test module '\nimport sys\n\n\ndef test():\n args = sys.argv\n if len(args) == 1:\n print('Hello, world!')\n elif len(args) == 2:\n print('Hello, %s!' 
% args[1])\n else:\n print('Too many arguments!')\n\n\nif __name__ == '__main__':\n test()\n","sub_path":"JetProjects/PyCharm/learn-python3/samples/module/hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"259777224","text":"n = int(input())\na = list(map(int, input().split()))\ndp = [0]*n\nfor i in range(n):\n value = 0\n for j in range(i):\n if a[i] > a[j]:\n value = max(value, dp[j])\n dp[i] = value + 1\nprint(max(dp))\n","sub_path":"baekjoon/11053_가장긴증가하는부분수열.py","file_name":"11053_가장긴증가하는부분수열.py","file_ext":"py","file_size_in_byte":219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"335077282","text":"# The MIT License (MIT)\n\n# Copyright (c) 2021-2022 Krux contributors\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\nfrom collections import deque\nimport os\nimport time\nimport pygame as pg\nimport cv2\nfrom kruxsim import events\nfrom kruxsim.mocks.board import BOARD_CONFIG\n\nCOMMANDS = [\"press\", \"touch\", \"qrcode\", \"screenshot\", \"wait\", \"include\", \"x\"]\n\n\nclass SequenceExecutor:\n def __init__(self, sequence_filepath):\n self.filepath = sequence_filepath\n self.command = None\n self.command_params = []\n self.command_fn = None\n self.command_timer = 0\n self.key = None\n self.key_checks = 0\n self.touch_pos = None\n self.touch_checks = 0\n self.camera_image = None\n commands = load_commands(self.filepath)\n if commands[0][0] == \"wait\" and BOARD_CONFIG[\"krux\"][\"display\"][\"touch\"]:\n commands = commands[0:1] + [(\"press\", [\"BUTTON_A\"])] + commands[1:]\n commands.append((\"wait\", [\"1\"]))\n self.commands = deque(commands)\n\n def execute(self):\n if self.command_fn:\n if time.time() - self.command_timer > 0.1:\n print(\"Executing (%s, %r)\" % (self.command, self.command_params))\n self.command_timer = 0\n self.command_fn()\n self.command_fn = None\n self.command_params = []\n elif self.commands:\n cmd, params = self.commands.popleft()\n self.command_timer = time.time()\n self.command = cmd\n self.command_params = params\n if cmd == \"press\":\n self.command_fn = self.press_key\n elif cmd == \"touch\":\n self.command_fn = self.touch\n elif cmd == \"qrcode\":\n self.command_fn = self.show_qrcode\n elif cmd == \"screenshot\":\n self.command_fn = self.request_screenshot\n elif cmd == \"wait\":\n self.command_timer += float(params[0])\n self.command_fn = self.wait\n\n def 
press_key(self):\n key = self.command_params[0]\n self.key = None\n self.key_checks = 0\n if key == \"BUTTON_A\":\n self.key = pg.K_RETURN\n elif key == \"BUTTON_B\":\n self.key = pg.K_DOWN\n elif key == \"BUTTON_C\":\n self.key = pg.K_UP\n\n def touch(self):\n self.touch_pos = (self.command_params[0], self.command_params[1])\n self.touch_checks = 0\n\n def show_qrcode(self):\n filename = self.command_params[0]\n self.camera_image = cv2.imread(\n os.path.join(os.path.dirname(self.filepath), \"qrcodes\", filename),\n cv2.IMREAD_COLOR,\n )\n\n def request_screenshot(self):\n filename = self.command_params[0]\n pg.event.post(pg.event.Event(events.SCREENSHOT_EVENT, {\"filename\": filename}))\n\n def wait(self):\n pass\n\n\ndef load_commands(sequence_filepath):\n commands = []\n\n # If the sequence doesn't exist, it may be board-specific; look for it within a subfolder named for the board\n filepath = sequence_filepath\n if not os.path.exists(filepath):\n filepath = os.path.join(\n os.path.dirname(sequence_filepath),\n BOARD_CONFIG[\"type\"],\n os.path.basename(sequence_filepath),\n )\n\n with open(filepath, \"r\") as sequence_file:\n raw_commands = sequence_file.readlines()\n for raw_command in raw_commands:\n if not any(raw_command.startswith(cmd) for cmd in COMMANDS):\n continue\n num_times = 1\n if raw_command.startswith(\"x\"):\n num_times = int(raw_command[1:].split()[0])\n raw_command = raw_command.split(\" \", 1)[1]\n cmd_parts = raw_command.strip().split()\n cmd = cmd_parts[0]\n params = cmd_parts[1:] if len(cmd_parts) > 1 else []\n for _ in range(num_times):\n if cmd == \"include\":\n commands.extend(\n load_commands(\n os.path.join(os.path.dirname(sequence_filepath), params[0])\n )\n )\n else:\n commands.append((cmd, params))\n return commands\n","sub_path":"simulator/kruxsim/sequence.py","file_name":"sequence.py","file_ext":"py","file_size_in_byte":5266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"234821729","text":"\"\"\"Unit tests for inventory Managment\"\"\"\n# pylint: disable=import-error\nfrom unittest import TestCase\nimport io\nimport sys\nfrom unittest.mock import patch\nfrom inventory_management.electric_appliances_class import ElectricAppliances\nfrom inventory_management.furniture_class import Furniture\nfrom inventory_management.inventory_class import Inventory\nfrom inventory_management.main import main_menu, get_price, add_new_item, item_info, exit_program, FULL_INVENTORY\nfrom inventory_management.market_prices import get_latest_price\n\n\nclass TestInventoryManagement(TestCase):\n \"\"\"Class for housing the tests\"\"\"\n def test_inventory(self):\n \"\"\"best the base inventory class\"\"\"\n test = Inventory(0, 'desc', 1, 2)\n self.assertEqual(test.return_as_dictionary(),\n {'product_code': 0,\n 'description': 'desc',\n 'market_price': 1,\n 'rental_price': 2})\n\n def test_electric_appliances(self):\n \"\"\"test electric appliances class\"\"\"\n test = ElectricAppliances(777, 'Look with your special eyes', 999, 888, 'MYBRAND', 2)\n self.assertEqual(test.return_as_dictionary(),\n {'product_code': 777,\n 'description': 'Look with your special eyes',\n 'market_price': 999,\n 'rental_price': 888,\n 'brand': 'MYBRAND',\n 'voltage': 2})\n\n def test_furniture(self):\n \"\"\"test out the furniture class\"\"\"\n test = Furniture(17, 'desc', 354, 144, 'bronze', 'yuge')\n self.assertEqual(test.return_as_dictionary(),\n {'product_code': 17,\n 'description': 'desc',\n 'market_price': 354,\n 'rental_price': 144,\n 
'material': 'bronze',\n 'size': 'yuge'})\n\n def test_main_menu(self):\n \"\"\"tests for main menu function\"\"\"\n self.assertTrue(main_menu('1'), 'add_new_item')\n self.assertTrue(main_menu('2'), 'item_info')\n self.assertTrue(main_menu('q'), 'exit_program')\n\n capturedOutput = io.StringIO()\n sys.stdout = capturedOutput\n with patch('builtins.input', side_effect=['popcorn','q']):\n main_menu()\n sys.stdout = sys.__stdout__\n self.assertIn('Please choose from the following options', capturedOutput.getvalue())\n self.assertIn('1. Add a new item to the inventory', capturedOutput.getvalue())\n self.assertIn('2. Get item information', capturedOutput.getvalue())\n self.assertIn('q. Quit', capturedOutput.getvalue())\n\n\n def test_get_price(self):\n \"\"\"test the get price function\"\"\"\n self.assertEqual(None, get_price(134))\n\n\n def test_add_new_item(self):\n inventory = ['0', 'desc', '1', 'n', 'n']\n furniture = [17, 'desc', 354, 'y', 'bronze', 'XL']\n electric = [777, 'desc', 999, 'n', 'y', 'MYBRAND', 2]\n\n with patch('builtins.input', side_effect=inventory):\n add_new_item()\n\n self.assertEqual(FULL_INVENTORY['0'], {'product_code': '0', 'description': 'desc',\n 'market_price': 24, 'rental_price': '1'})\n\n with patch('builtins.input', side_effect=furniture):\n add_new_item()\n\n self.assertEqual(FULL_INVENTORY[17], {'product_code': 17, 'description': 'desc',\n 'market_price': 24, 'rental_price': 354,\n 'material': 'bronze', 'size': 'XL'})\n\n with patch('builtins.input', side_effect=electric):\n add_new_item()\n\n self.assertEqual(FULL_INVENTORY[777], {'product_code': 777, 'description': 'desc',\n 'market_price': 24, 'rental_price': 999,\n 'brand': 'MYBRAND', 'voltage': 2})\n\n def test_item_info(self):\n \"\"\" asert equal \"\"\"\n new_furniture = ['159', 'desc', '52', 'n', 'n']\n with patch('builtins.input', side_effect=new_furniture):\n add_new_item()\n capturedOutput = io.StringIO()\n sys.stdout = capturedOutput\n with patch('builtins.input', side_effect=['159']):\n item_info()\n sys.stdout = sys.__stdout__\n self.assertEqual(capturedOutput.getvalue(),\n 'product_code:159\\ndescription:desc\\nmarket_price:24\\nrental_price:52\\n')\n\n capturedOutput = io.StringIO()\n sys.stdout = capturedOutput\n with patch('builtins.input', side_effect=[56456456498406]):\n item_info()\n sys.stdout = sys.__stdout__\n self.assertEqual(capturedOutput.getvalue(), 'Item not found in inventory\\n')\n\n\n def test_exit_program(self):\n \"\"\" assert raises \"\"\"\n with self.assertRaises(SystemExit):\n exit_program()\n\n\n def test_get_latest_price(self):\n \"\"\"test the get latest price function\"\"\"\n self.assertEqual(24, get_latest_price(134))\n","sub_path":"students/thomas_sulgrove/lesson01/assignment/test_unit.py","file_name":"test_unit.py","file_ext":"py","file_size_in_byte":5142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"551876751","text":"\"\"\"\nThe Python standard library's 'calendar' module allows you to\nrender a calendar to your terminal.\nhttps://docs.python.org/3.6/library/calendar.html\n\nWrite a program that accepts user input of the form\n `14_cal.py month [year]`\nand does the following:\n - If the user doesn't specify any input, your program should\n print the calendar for the current month. 
The 'datetime'\n module may be helpful for this.\n - If the user specifies one argument, assume they passed in a\n month and render the calendar for that month of the current year.\n - If the user specifies two arguments, assume they passed in\n both the month and the year. Render the calendar for that\n month and year.\n - Otherwise, print a usage statement to the terminal indicating\n the format that your program expects arguments to be given.\n Then exit the program.\n\"\"\"\n\nimport sys\nimport calendar\nfrom datetime import datetime\nimport time\n\n# Still need to include some of the rules at the top regarding if 1 argument assume it is month and print\n# Calendar of that month\n\nvalid_months = [\n 'january', 'february', 'march', \n 'april', 'may', 'june', 'july', \n 'august', 'september', 'october', \n 'november', 'december', \n 'jan', 'feb', 'mar', 'apr', 'jun', \n 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']\n\ntry:\n month, year = input(\"Enter a month and a year(1900 - 2020): \").split()\nexcept: \n print(\"Pleae enter a valid Month and Year(1900 - 2020) \\ne.g.: February 2020\")\n print(\"The current Month and Year:\", datetime.today().strftime(\"%B\"), datetime.today().strftime(\"%Y\"))\nelse: \n if (len(month) > 0) and (1900 <= int(year) <= 2020):\n if month.lower() in valid_months:\n print('You entered', month.capitalize(), year)\n else: \n print('You entered an invalid month')\n else:\n print(\"Pleae enter a valid Month and Year(1900 - 2020) \\ne.g.: February 2020\")\n print(\"The current Month and Year:\", datetime.today().strftime(\"%B\"), datetime.today().strftime(\"%Y\"))\n","sub_path":"src/14_cal.py","file_name":"14_cal.py","file_ext":"py","file_size_in_byte":1969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"509630649","text":"import torch\n\nimport torchvision\nimport tqdm\n\nfrom TOOLS import gan_losses\nfrom TOOLS import sinkhorn_autodiff\n\n\ndef train_func(data_loader, G, D, G_ema, ema, z_train, g_optimizer, d_optimizer, z_sample, train_dict, args, myargs):\n def train(**kwargs):\n for i, (imgs, _) in enumerate(tqdm.tqdm(data_loader)):\n train_dict['batches_done'] += 1\n step = train_dict['batches_done']\n summary = {}\n summary_d_logits_mean = {}\n summary_wd = {}\n\n G.train()\n G_ema.train()\n\n imgs = imgs.cuda()\n bs = imgs.size(0)\n\n z_train.sample_()\n f_imgs = G(z_train[:bs])\n\n # train D\n with torch.no_grad():\n sinkhorn_d = sinkhorn_autodiff.sinkhorn_loss(x=imgs.view(bs, -1), y=f_imgs.view(bs, -1).detach(),\n epsilon=args.sinkhorn_eps, niter=args.sinkhorn_niter,\n cuda=True, pi_detach=args.sinkhorn_pi_detach)\n summary_wd['D_sinkhorn_d'] = sinkhorn_d.item()\n\n r_logit = D(imgs)\n r_logit_mean = r_logit.mean()\n f_logit = D(f_imgs.detach())\n f_logit_mean = f_logit.mean()\n summary_d_logits_mean['D_r_logit_mean'] = r_logit_mean.item()\n summary_d_logits_mean['D_f_logit_mean'] = f_logit_mean.item()\n\n # Wasserstein-1 Distance\n wd = r_logit_mean - f_logit_mean\n gp = gan_losses.wgan_gp_gradient_penalty(imgs.data, f_imgs.data, D)\n d_loss = -wd + gp * 10.0 + torch.relu(wd - sinkhorn_d.item())\n summary_wd['wd'] = wd.item()\n summary['gp'] = gp.item()\n summary['d_loss'] = d_loss.item()\n\n D.zero_grad()\n d_loss.backward()\n d_optimizer.step()\n\n if step % args.n_critic == 0:\n # train G\n z_train.sample_()\n f_imgs = G(z_train)\n\n sinkhorn_d = sinkhorn_autodiff.sinkhorn_loss(x=imgs.view(imgs.size(0), -1), y=f_imgs.view(f_imgs.size(0), -1),\n epsilon=args.sinkhorn_eps, 
niter=args.sinkhorn_niter,\n cuda=True, pi_detach=args.sinkhorn_pi_detach)\n summary_wd['G_sinkhorn_d'] = sinkhorn_d.item()\n\n f_logit = D(f_imgs)\n f_logit_mean = f_logit.mean()\n g_loss = - f_logit_mean + args.lambda_sinkhorn * sinkhorn_d\n summary_d_logits_mean['G_f_logit_mean'] = f_logit_mean.item()\n summary['g_loss'] = g_loss.item()\n\n D.zero_grad()\n G.zero_grad()\n g_loss.backward()\n g_optimizer.step()\n\n # end iter\n ema.update(train_dict['batches_done'])\n\n if i % args.sample_every == 0:\n # sample images\n G.eval()\n G_ema.eval()\n G_z = G(z_sample)\n merged_img = torchvision.utils.make_grid(G_z, normalize=True, pad_value=1, nrow=16)\n myargs.writer.add_images('G_z', merged_img.view(1, *merged_img.shape), train_dict['batches_done'])\n # G_ema\n G_ema_z = G_ema(z_sample)\n merged_img = torchvision.utils.make_grid(G_ema_z, normalize=True, pad_value=1, nrow=16)\n myargs.writer.add_images('G_ema_z', merged_img.view(1, *merged_img.shape), train_dict['batches_done'])\n # x\n merged_img = torchvision.utils.make_grid(imgs, normalize=True, pad_value=1, nrow=16)\n myargs.writer.add_images('x', merged_img.view(1, *merged_img.shape), train_dict['batches_done'])\n # checkpoint\n myargs.checkpoint.save_checkpoint(checkpoint_dict=myargs.checkpoint_dict, filename='ckpt.tar')\n # summary\n for key in summary:\n myargs.writer.add_scalar('train_vs_batch/%s'%key, summary[key], train_dict['batches_done'])\n myargs.writer.add_scalars('train_vs_batch', summary_d_logits_mean, train_dict['batches_done'])\n myargs.writer.add_scalars('wd', summary_wd, train_dict['batches_done'])\n\n G.train()\n elif train_dict['batches_done'] <= 20000:\n for key in summary:\n myargs.writer.add_scalar('train_vs_batch/%s' % key, summary[key], train_dict['batches_done'])\n myargs.writer.add_scalars('train_vs_batch', summary_d_logits_mean, train_dict['batches_done'])\n myargs.writer.add_scalars('wd', summary_wd, train_dict['batches_done'])\n return train","sub_path":"BigGAN-PyTorch-1-exp-master/DCGAN/train_func_wgan_gp_sinkhorn.py","file_name":"train_func_wgan_gp_sinkhorn.py","file_ext":"py","file_size_in_byte":4219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"186859379","text":"import socket\nimport uuid\nimport time\n\nimport proto_orchestrator_classes as poc\nfrom proto_orchestrator_classes import node\nfrom proto_orchestrator_classes import pool\n\n\n# The prototype orchestrator server\n\nnodes_per_pool = 2\nchildren_per_pool = 2\n\n# orchestrator socket\norch_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\norch_sock.bind((\"127.0.0.1\", 5005))\n\npoc.main_socket = orch_sock\n\nnext_node_port = 5050\n\n# Tree\nroot_pool = pool(None)\n\n\n# Tree Insertion for new nodes:\n# - search lowest level (top down)\n# - Find pool with min number of nodes\n# IF number of nodes in pool < N\n# - Insert into this pool\n# ELSE\n# - create new level with M children for each pool in higher level\n# - insert into pool with min number of entries\n\ndef get_lowest_level():\n # traverse tree to find leaves\n current_pool_list = [root_pool]\n current_children_pools = root_pool.children\n while len(current_children_pools) > 0:\n if len(current_children_pools) > len(current_pool_list):\n current_pool_list = current_children_pools\n else:\n raise Exception(\"Child list shorter than parent... 
This is not a tree\")\n next_children_pools = []\n for children_pool in current_children_pools:\n next_children_pools += children_pool.children\n current_children_pools = next_children_pools\n return current_pool_list\n\n\ndef get_insert_pool(the_node: node):\n lowest_tree_level = get_lowest_level()\n # get minimum from level\n min_used_pool = min(lowest_tree_level, key=lambda x: len(x.members))\n # check level for space\n if len(min_used_pool.members) < nodes_per_pool:\n # Insert into this pool\n return min_used_pool\n else:\n # Create new level\n for lt_pool in lowest_tree_level:\n new_children = [pool(lt_pool) for _ in range(children_per_pool)]\n # Add children to the parent\n lt_pool.add_children(new_children)\n \n # Make recursive call to insert node\n print(\"Inserting recursive\")\n return get_insert_pool(the_node)\n\nprint(\"Started up, entering main loop\")\n\nwhile True:\n data, adr = orch_sock.recvfrom(1024)\n msg = data.decode('UTF-8')\n \n print(\"Message received\")\n\n node_ip = \"127.0.0.1\"\n node_port = next_node_port\n node_uuid = uuid.uuid4()\n\n orch_sock.sendto(str.encode(\"initial_info[\"+node_uuid.hex+\",\"+node_ip+\",\"+str(node_port)+\"]\"), adr)\n # Replace this by some ACK message \n time.sleep(1)\n\n # make node object\n the_node = node(node_uuid, node_ip, node_port)\n \n insert_pool = get_insert_pool(the_node)\n\n insert_pool.introduce_pool_to_member(the_node)\n insert_pool.introduce_parents_to_member(the_node)\n insert_pool.introduce_children_to_member(the_node)\n\n insert_pool.add_member(the_node)\n\n next_node_port += 1\n\n# a pool is a interconnected list\n# each pool has a fully connected parent pool\n\n\n","sub_path":"src_prototype/proto_orchestrator.py","file_name":"proto_orchestrator.py","file_ext":"py","file_size_in_byte":2928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"598912214","text":"# define the configuration (hyperparameters) for the residual autoencoder\n# for this type of network.\n\n\n# NETWORK MODEL NAME\nnetwork_model = 'SR_RGB_bicubic'\n\ndata_path = '/home/mz/Documents/PyCharm Projects/Data/'\n# data_path = 'H:\\\\trainData\\\\'\n# CURRENT TRAINING DATASET\ntraining_data = [\n # 'lf_benchmark_HSV.hdf5',\n data_path + 'lf_patch_synthetic_rgb_sr_1.hdf5',\n # data_path + 'lf_patch_synthetic_sr_2.hdf5',\n # data_path + 'lf_patch_synthetic_sr_3.hdf5',\n # data_path + 'lf_patch_synthetic_sr_4.hdf5',\n # data_path + 'lf_patch_synthetic_sr_5.hdf5',\n # data_path + 'lf_patch_synthetic_sr_6.hdf5',\n # data_path + 'lf_patch_synthetic_sr_7.hdf5',\n]\n\n# NETWORK LAYOUT HYPERPARAMETERS\n\n# general config params\nconfig = {\n # flag whether we want to train for RGB (might require more\n # changes in other files, can't remember right now)\n # 'ColorSpace' : 'YUV',\n # 'ColorSpace' : 'YCBCR',\n # 'ColorSpace' : 'LAB',\n 'ColorSpace' : 'RGB',\n 'VisibleGPU' :'0,1',\n # maximum layer which will be initialized (deprecated)\n 'max_layer' : 100,\n\n # this will log every tensor being allocated,\n # very spammy, but useful for debugging\n 'log_device_placement' : False,\n}\n\n# encoder for 48 x 48 patch, 9 views, RGB\nD = 9\nH = 48\nW = 48\nnviews = 9\nH_HR = 96\nW_HR = 96\n\nC = 3\n\ncv_pos = int((nviews-1)/2)\n\n# Number of features in the layers\nL = 16\nL0 = 24\nL1 = 32 #32\nL2 = 64 #64\nL3 = 96 #96\nL4 = 128 #128\nL5 = 160 #160\nL6 = 192 #192\n\n\n# fraction between encoded patch and decoded patch. 
e.g.\n# the feature maps of decoded patch are 3 times as many\n# as the encoded patch, then patch_weight = 3\npatch_weight = 3\n\n# Encoder stack for downwards convolution\n\nlayer_config = [\n {\n # 'id': 'YUV',\n # 'id': 'YCBCR',\n # 'id': 'LAB',\n 'id': 'RGB',\n 'channels' : C,\n 'start' : 0,\n 'end': 3,\n 'layout': [\n # for h and s channels\n { 'conv' : [ 3,3,3, C, L0 ],\n 'stride' : [ 1,1,1, 1, 1 ]\n }],\n 'upscale':[\n { 'conv' : [ 3, 3, 3, L, L0 ], #L_hs*patch_weight+L_hs\n 'stride' : [ 1, 1, 1, 1, 1 ]\n }],\n 'final': [\n { 'conv' : [ 1, 1, C*patch_weight, C ],\n 'stride' : [ 1, 1, 1, 1 ]\n },]\n\n },\n]\n\n# chain of dense layers to form small bottleneck (can be empty)\nlayers = dict()\n\nlayers['encoder_3D'] = [\n { 'conv' : [ 3,3,3, L0, L1 ],\n 'stride' : [ 1,1, 2,2, 1 ]\n },\n # resolution now 9 x 24 x 24\n { 'conv' : [ 3,3,3, L1, L1 ],\n 'stride' : [ 1,1, 1,1, 1 ]\n },\n { 'conv' : [ 3,3,3, L1, L2 ],\n 'stride' : [ 1,2, 1,1, 1 ]\n },\n # resolution now 5 x 24 x 24\n { 'conv' : [ 3,3,3, L2, L2 ],\n 'stride' : [ 1,1, 1,1, 1 ]\n },\n { 'conv' : [ 3,3,3, L2, L3 ],\n 'stride' : [ 1,1, 2,2, 1 ]\n },\n # resolution now 5 x 12 x 12\n { 'conv' : [ 3,3,3, L3, L3 ],\n 'stride' : [ 1,1, 1,1, 1 ]\n },\n { 'conv' : [ 3,3,3, L3, L4 ],\n 'stride' : [ 1,1, 2,2, 1 ]\n },\n # resolution now 5 x 6 x 6\n { 'conv' : [ 3,3,3, L4, L4 ],\n 'stride' : [ 1,1, 1,1, 1 ]\n },\n { 'conv' : [ 3,3,3, L4, L5 ],\n 'stride' : [ 1,2, 2,2, 1 ]\n },\n # resolution now 3 x 6 x 6\n { 'conv' : [ 3,3,3, L5, L5 ],\n 'stride' : [ 1,1, 1,1, 1 ]\n },\n # {'conv': [3, 3, 3, L5, L6],\n # 'stride': [1, 1, 2, 2, 1]\n # },\n # # resolution now 3 x 3 x 3\n # {'conv': [3, 3, 3, L6, L6],\n # 'stride': [1, 1, 1, 1, 1]\n # },\n\n ]\n\nlayers[ 'upscale'] = [\n { 'conv' : [ 3, 3, L0, L*patch_weight + L],\n 'stride' : [ 1, 2, 2, 1 ]\n },\n {'conv': [3, 3, C*patch_weight, L0+C],\n 'stride': [1, 1, 1, 1]\n },\n]\n\nlayers[ 'autoencoder_nodes' ] = []\nlayers[ '2D_decoder_nodes' ] = []\nlayers[ 'preferred_gpu' ] = 0\n# if skip-connections are used\npinhole_connections = True\n\n# 3D ENCODERS\nencoders_3D = [\n {\n # 'id': 'YUV',\n # 'id': 'YCBCR',\n # 'id': 'LAB',\n 'id': 'RGB',\n 'channels': C,\n 'preferred_gpu' : 0,\n },\n]\n#\n# 2D DECODERS\n#\n# Each one generates a 2D upsampling pathway next to the\n# two normal autoencoder pipes.\n#\n# Careful, takes memory. 
Remove some from training if limited.\n#\ndecoders_2D = [\n {\n # 'id': 'YUV',\n # 'id': 'YCBCR',\n # 'id': 'LAB',\n 'id': 'RGB',\n 'channels': C,\n 'preferred_gpu' : 1,\n 'loss_fn': 'L2',\n 'train': True,\n 'weight': 1.0,\n },\n]\n\n# MINIMIZERS\nminimizers = [\n\n # center view super resolution\n {\n # 'id': 'YUV_min',\n # 'id': 'YCBCR_min',\n # 'id': 'LAB_min',\n 'id': 'RGB_min',\n # 'losses_2D' : [ 'YUV' ],\n # 'losses_2D' : [ 'YCBCR' ],\n # 'losses_2D' : [ 'LAB' ],\n 'losses_2D' : [ 'RGB' ],\n 'optimizer' : 'Adam',\n 'preferred_gpu' : 1,\n 'step_size' : 1e-4,\n },\n ]\n\n\n# TRAINING HYPERPARAMETERS\ntraining = dict()\n\n# subsets to split training data into\n# by default, only 'training' will be used for training, but the results\n# on mini-batches on 'validation' will also be logged to check model performance.\n# note, split will be performed based upon a random shuffle with filename hash\n# as seed, thus, should be always the same for the same file.\n#\ntraining[ 'subsets' ] = {\n 'validation' : 0.05,\n 'training' : 0.95,\n}\n\n\n# number of samples per mini-batch\n# reduce if memory is on the low side,\n# but if it's too small, training becomes ridicuously slow\ntraining[ 'samples_per_batch' ] = 20\n\n# log interval (every # mini-batches per dataset)\ntraining[ 'log_interval' ] = 5\n\n# save interval (every # iterations over all datasets)\ntraining[ 'save_interval' ] = 100\n\n# noise to be added on each input patch\n# (NOT on the decoding result)\ntraining[ 'noise_sigma' ] = 0.0\n\n# decay parameter for batch normalization\n# should be larger for larger datasets\ntraining[ 'batch_norm_decay' ] = 0.9\n# flag whether BN center param should be used\ntraining[ 'batch_norm_center' ] = False\n# flag whether BN scale param should be used\ntraining[ 'batch_norm_scale' ] = False\n# flag whether BN should be zero debiased (extra param)\ntraining[ 'batch_norm_zero_debias' ] = False\n\neval_res = {\n 'h_mask': 90,\n 'w_mask': 90,\n 'm': 10,\n 'min_mask': 0.1,\n 'result_folder': \"./results/\",\n 'test_data_folder': \"H:\\\\testData\\\\\"\n}","sub_path":"SR ColorSpace Bicubic VarHisto/config_autoencoder.py","file_name":"config_autoencoder.py","file_ext":"py","file_size_in_byte":7022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"257764226","text":"import sqlite3\nimport json\n# import codecs\nimport os\n\ncx = sqlite3.connect(os.path.join(os.path.expanduser('~')+\"/AppData/Local/Netease/CloudMusic/Library/webdb.dat\"))\ncx.row_factory = sqlite3.Row\n\ndef getPlaylist():\n cu = cx.cursor()\n cu.execute(\"select * from web_playlist\")\n playlists = []\n for item in cu.fetchall():\n playlist = (item[\"pid\"],getPlaylistNameFromJson(item[\"playlist\"]))\n playlists.append(playlist)\n playlists = list(map(lambda x: (x[0], x[1].decode('GBK', 'ignore')), playlists))\n\n print('Listing playlists:')\n print('========= START ==========')\n\n for i, _ in enumerate(playlists):\n print(i, _[1])\n if i % 10 == 0 and i > 0:\n input('Press enter to continue')\n\n print('========== END ===========')\n\n try:\n index = int(input('Choose playlist you wanna export:'))\n playlist = playlists[index]\n except (IndexError, ValueError):\n print('Invalid input, exiting...')\n exit(1)\n return playlist\n\n\n\ndef getPlayListMusic(pid):\n cu = cx.cursor()\n cu.execute(\"select * from web_playlist_track where pid=?\",[pid])\n musics = []\n for item in cu.fetchall():\n musics.append(item[\"tid\"]);\n return musics\n\ndef getOfflineMusicDetail(tid):\n 
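# look up a downloaded track by id; returns (display name, relative path) or None\n    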
cu=cx.cursor()\n cu.execute(\"select * from web_offline_track where track_id=?\",[tid])\n music = cu.fetchone()\n if music is None:\n return None\n detail = (getMusicNameFromJson(music[\"detail\"]), music[\"relative_path\"])\n return detail\n\ndef writePlaylistToFile(pid, playlistName):\n file = open(os.path.join(playlistName + \".m3u\"), \"w\", encoding='utf-8')\n count = 0\n try:\n file.writelines(\"#EXTM3U\")\n musicIds = getPlayListMusic(pid)\n for tid in musicIds:\n if tid is not None:\n detail = getOfflineMusicDetail(tid)\n if detail is not None:\n count = count + 1\n file.writelines(u\"\\n#EXTINF:\" + detail[0] + u\"\\n\" + detail[1])\n except Exception as e:\n raise\n else:\n pass\n finally:\n file.close()\n if count <= 0:\n os.remove(playlistName + \".m3u\")\n\ndef getPlaylistNameFromJson(jsonStr):\n playlistDetail = json.loads(jsonStr)\n return playlistDetail[\"name\"].encode(\"GBK\", 'ignore');\n\ndef getMusicNameFromJson(jsonStr):\n musicDetail = json.loads(jsonStr)\n return musicDetail[\"name\"];\n\ndef main():\n pid, name = getPlaylist()\n writePlaylistToFile(pid, name)\n\nif __name__ == '__main__':\n main()","sub_path":"create_playlist3.py","file_name":"create_playlist3.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"238331060","text":"from django.test import TestCase\n\nfrom .models import Location, Dog\n\n# Create your tests here.\n\nclass SearchTestCase(TestCase):\n \n fixtures=['test_data']\n\n def setUp(self):\n #Location.objects.create(city=\"Corvallis\", state=\"OR\", zipcode=97333)\n pass\n\n def test_can_lookup_location(self):\n\n location = Location.objects.get(zipcode=97333)\n \n self.assertEqual(location.city, \"Corvallis\")\n self.assertEqual(location.state, \"OR\")\n\n def test_dog_by_location_lookup(self):\n\n location = Location.objects.get(zipcode=97333)\n self.assertIsNotNone(location)\n\n dogs = Dog.objects.filter(location=location)\n self.assertIsNotNone(dogs)\n\n def test_different_dog_location_lookup(self):\n\n location_1 = Location.objects.get(zipcode=97333)\n self.assertIsNotNone(location_1)\n\n location_2 = Location.objects.get(zipcode=97331)\n self.assertIsNotNone(location_2)\n\n dogs_1 = Dog.objects.filter(location=location_1)\n self.assertIsNotNone(dogs_1)\n\n dogs_2 = Dog.objects.filter(location=location_2)\n self.assertIsNotNone(dogs_2)\n\n self.assertNotEqual(dogs_1, dogs_2)\n","sub_path":"matchmaking/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"525148282","text":"import logging\n\n\nclass MyLog:\n logging.basicConfig(level=logging.INFO, filename='bank_main.log', filemode='a',\n format='%(name)s - %(levelname)s - %(message)s')\n formatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n # Log Levels: Info, Warning, Error, Critical, Debug, etc...\n # Handlers are used to better manage where a logger will log to.\n console_handler = logging.StreamHandler()\n console_handler.setFormatter(formatter)\n logger = logging.getLogger(__name__)\n logger.addHandler(console_handler)\n\n @staticmethod\n def info_log(message=None):\n if message is not None:\n MyLog.logger.info(message)\n else:\n MyLog.logger.info(\"We went to the next step in the program\")\n\n @staticmethod\n def warning_log(message=None):\n if message is not None:\n MyLog.logger.warning(message)\n else:\n 
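# no message supplied: emit a generic fallback at the matching level\n            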
MyLog.logger.warning(\"Warning! Must have valid values!\")\n\n    @staticmethod\n    def error_log(message=None):\n        if message is not None:\n            MyLog.logger.error(message)\n        else:\n            MyLog.logger.error(\"An error has occurred!\")\n\n\ndef _test():\n    MyLog().info_log(\"The test log was successful!\")\n\n\nif __name__ == '__main__':\n    _test()\n","sub_path":"cust_logging/my_logger.py","file_name":"my_logger.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"440084647","text":"import time\n\nfrom selenium import webdriver\nfrom webdriver_manager.chrome import ChromeDriverManager\n\ndriver = webdriver.Chrome(ChromeDriverManager().install())\nURL = \"https://ambitious-sky-0d3acbd03.azurestaticapps.net/k3.html\"\n\ndriver.get(URL)\n\ntext_input = driver.find_element_by_id(\"title\")\nerror_message = driver.find_element_by_xpath(\"/html/body/form/span\")\ntext_error_message_illegal = \"Only a-z and 0-9 characters allewed\"  # (sic) must match the page's own message exactly\ntext_error_message_length = \"Title should be at least 8 characters; you entered 4.\"\n\ntest_data = [\"abcd1234\", \"teszt233@\", \"abcd\"]\n\n\ndef fill_and_return_error(text):\n    time.sleep(2)\n    text_input.clear()\n    text_input.send_keys(text)\n    return error_message.text\n\n#* Valid input case:\n# * title: abcd1234\n# * No validation error message\ndef test_positive():\n    assert fill_and_return_error(test_data[0]) == \"\"\n\n#* Illegal characters case:\n# * title: teszt233@\n# * Only a-z and 0-9 characters allewed.\ndef test_illegal():\n    assert fill_and_return_error(test_data[1]) == text_error_message_illegal\n#* Too-short input case:\n# * title: abcd\n# * Title should be at least 8 characters; you entered 4.\n\ndef test_short():\n    assert fill_and_return_error(test_data[2]) == text_error_message_length\n","sub_path":"testproject/k3.py","file_name":"k3.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"306139926","text":"import calendar\nimport datetime\nfrom datetime import datetime as dt\nfrom fbprophet import Prophet\nimport pandas as pd\n\ndef days_in_month(month: dt):\n    # calendar.monthrange returns (first weekday, number of days); using it\n    # avoids the January crash of month.replace(month=month.month-1)\n    return calendar.monthrange(month.year, month.month)[1]\n\nclass ProphetModel():\n    def __init__(self, df: pd.DataFrame):\n        '''\n        df - DataFrame with columns named 'y' and 'ds'\n        '''\n\n        self.prophet = Prophet()\n        self.prophet.fit(df)\n        self.df = df\n\n    def predict(self, month: dt):\n        '''\n        month - datetime of the first of the month to predict\n        return - predicted spending for that month\n        '''\n        num_days = days_in_month(month)\n        latest = pd.to_datetime(self.df.ds.tail(1).item())\n        # days to extend past the last observation so the forecast\n        # covers the whole target month\n        num_future = (month - latest).days + num_days\n\n        future = self.prophet.make_future_dataframe(periods=num_future)\n        future['cap'] = 8.5\n        fcst = self.prophet.predict(future)\n\n        return sum(fcst.yhat.tail(num_days))\n\nif __name__ == \"__main__\":\n    data = pd.read_csv(\"sample_data.csv\")\\\n        .rename({\"Price\":\"y\", \"Date\":\"ds\"}, axis=1)\n    data.ds = data.ds.apply(lambda d: dt.strptime(d, \"%Y-%m-%d\"))\n    print(data.tail())\n    p = ProphetModel(data)\n\n    # predicted = p.predict(dt.strptime(\"2030-12-01\", \"%Y-%m-%d\"))\n    predicted = p.predict(dt.combine(datetime.date(2022, 12, 1), dt.min.time()))\n    print(predicted)\n\n\nimport os\ndef init_Prophet():\n    data_path = str(os.path.dirname(os.path.abspath(__file__))) + '/sample_data.csv'\n    data = pd.read_csv(data_path).rename({\"Price\":\"y\", 
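The month-length helper above relies on calendar.monthrange; a quick standard-library check that it handles the January boundary and leap years correctly:

import calendar
from datetime import datetime

for first in (datetime(2022, 1, 1), datetime(2022, 2, 1), datetime(2024, 2, 1)):
    print(first.strftime('%Y-%m'), calendar.monthrange(first.year, first.month)[1])
# 2022-01 31 / 2022-02 28 / 2024-02 29 (leap year)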
\"Date\":\"ds\"}, axis=1)\n data.ds = data.ds.apply(lambda d: dt.strptime(d, \"%Y-%m-%d\"))\n return ProphetModel(data)\n\n","sub_path":"Server/app/Predict/prophetmodel.py","file_name":"prophetmodel.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"600620617","text":"import logging\n\n\n# 输出到文件\nlogging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(module)s %(name)s %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n filename='output.log',\n filemode='w')\n\nlogger = logging.getLogger(__name__)\n\n\nlogger.info('This is a log info')\nlogger.debug('Debugging')\nlogger.warning('Warning exists')\nlogger.info('Finish')\n","sub_path":"log/basic_file.py","file_name":"basic_file.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"522726380","text":"#!/usr/bin/python\n\nimport sys\n\ndef split_patch(p):\n inf = {}\n with open(p,'r') as f:\n p_buf = f.readlines()\n diff_index = [i for i in range(len(p_buf)) if p_buf[i].startswith('diff')] + [len(p_buf)]\n for i in range(len(diff_index)-1):\n st = diff_index[i]\n ed = diff_index[i+1]\n #First get the changed source file\n fp = None\n fn = None\n for j in range(st,ed):\n if fp is not None and fn is not None:\n break\n if p_buf[j].startswith('---'):\n fn = p_buf[j][6:].strip()\n elif p_buf[j].startswith('+++'):\n fp = p_buf[j][6:].strip()\n inf[(fn,fp)] = []\n #Get @@ of this diff\n at_index = [j for j in range(st,ed) if p_buf[j].startswith('@@')] + [ed]\n for j in range(len(at_index)-1):\n inf[(fn,fp)].append(''.join(p_buf[at_index[j]:at_index[j+1]]))\n return inf\n\n#sys.argv[1]: patch list\n#user input:\n#p: patch exists\n#n: patch not exists\n#x: patch doesn't apply\n#d: next diff\n#a: next @@\ndef build():\n with open(sys.argv[1],'r') as pl:\n res_vec = []\n for p in pl:\n p = p.strip()\n if p[0] == '#':\n continue\n cve = p[p.rfind('/')+1:]\n p_inf = split_patch(p)\n print('>>>>>>>>>>>>>>>>>' + cve + '>>>>>>>>>>>>>>>>>')\n for k in p_inf:\n for at in p_inf[k]:\n print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')\n print(k[0])\n print(k[1])\n print(at)\n s = input('-->')\n if s[0] in ('p','n','d','x'):\n break\n if s[0] in ('p','n','x'):\n break\n if not s[0] in ('p','n','x'):\n print('!! 
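The basic_file.py record above routes every message to output.log via basicConfig. A complementary sketch (not from the source) that logs to a file and the console at different levels using explicit handlers:

import logging

logger = logging.getLogger('demo')
logger.setLevel(logging.DEBUG)

file_handler = logging.FileHandler('output.log', mode='w')
file_handler.setLevel(logging.DEBUG)       # everything goes to the file
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.WARNING)  # only warnings and above on screen

formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s')
for handler in (file_handler, console_handler):
    handler.setFormatter(formatter)
    logger.addHandler(handler)

logger.debug('file only')
logger.warning('file and console')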
All @@ sections have been iterated, plz make a p/n/x decision')\n s = input('-->')\n res_vec += [(cve,s[0].capitalize())]\n print('==============================================')\n for t in res_vec:\n print('%s %s' % t)\n print('==============================================')\n\nif __name__ == '__main__':\n build()\n","sub_path":"tools/gt_builder.py","file_name":"gt_builder.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"376440253","text":"import sqlite3\nimport json\n\nfrom flask import g, current_app\n\nimport reimu.config\n\n\ndef init_db():\n pass\n\n\nclass RowObject(object):\n \"\"\"Table row object.\"\"\"\n def __init__(self, row, columns):\n for i, column in enumerate(columns):\n setattr(self, column, row[i])\n\n\ndef connect():\n \"\"\"Connect to database from application.\"\"\"\n g.db = sqlite3.connect(current_app.config['DATABASE'])\n\n\ndef disconnect():\n \"\"\"Close application's database connection.\"\"\"\n db = getattr(g, 'db', None)\n if db is not None:\n db.close()\n\n\ndef select(query, arguments=(), single=False, row_type='object'):\n \"\"\"Select one or more rows from database.\"\"\"\n cursor = g.db.cursor()\n cursor.execute(query, arguments)\n rows = cursor.fetchall()\n\n # Convert rows to desired type\n columns = [col[0] for col in cursor.description]\n if row_type == 'object':\n rows = [RowObject(row, columns) for row in rows]\n elif row_type == 'dict':\n rows = [dict(zip(columns, row)) for row in rows]\n\n # Unpack list if needed\n if single:\n return rows[0] if len(rows) else None\n else:\n return rows if len(rows) else []\n\n\ndef count(table):\n \"\"\"Count all rows in given table.\"\"\"\n cursor = g.db.cursor()\n cursor.execute('SELECT COUNT() FROM {};'.format(table))\n result = cursor.fetchone()[0]\n return result\n\n\ndef update(query, arguments=()):\n \"\"\"Update a row\"\"\"\n cursor = g.db.cursor()\n cursor.execute(query, arguments)\n g.db.commit()\n\n\ndef insert(query, arguments=()):\n \"\"\"Insert a row, return it's id\"\"\"\n cursor = g.db.cursor()\n cursor.execute(query, arguments)\n g.db.commit()\n return cursor.lastrowid\n","sub_path":"reimu/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"8038227","text":"\"\"\"\nThis script controls an SDI-12 relay in order to irrigate based on soil moisture readings.\nThere is a setup file associated with this script\n\nA Tekbox TBSRB01 4 channel SDI-12 controlled latching relay is employed.\n\nSoil moisture is checked once when function check_and_irrigate is called - by default, that is once a day at 6AM.\nMeasurement labeled 'SoilMoist50' by default measures soil moisture.\nIf soil moisture is below limit (0.30 by default), relays are triggered over an extended time period:\n * Relay 1 is activated and left active for an hour\n * After the hour, relay 1 is deactivated, relay 2 activated, and left active for an hour.\n * Etc. until all four relays have been active for an hour one at a time.\n * The process starts at 6AM and ends at 10AM.\n\nIrrigation is aborted (all relays deactivated) if system is stopped.\nWhen system boots up, relays are all deactivated.\nIf the script crashes, an attempt is made to deactivate all relays.\n\nCheck script status (LinkComm scripts tab) to see updates on script activity. Unfortunately, the status only updates once the function completes (4 hours). 
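A usage sketch for the reimu/db helpers above. The 'users' table and its columns are made-up examples, and a connected g.db (an active request context after connect()) is assumed:

# Fetch one row as an attribute object (row_type='object' is the default).
row = select('SELECT id, name FROM users WHERE id = ?', (42,), single=True)
if row is not None:
    print(row.id, row.name)

# Fetch all rows as dictionaries instead.
for r in select('SELECT id, name FROM users', row_type='dict'):
    print(r['id'], r['name'])

# insert() returns the new row id; update() commits in place.
new_id = insert('INSERT INTO users (name) VALUES (?)', ('alice',))
update('UPDATE users SET name = ? WHERE id = ?', ('bob', new_id))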
There is no real time status :(\nWhenever a relay is switched, an event is written to the log:\n \"Relay, 0\" means all relays deactivated\n \"Relay, 1\" means relay 1 active, all other inactive\n \"Relay, 2\" means relay 2 active, all other inactive etc.\n\nCheck script variables below to change:\n * label of measurement that checks soil moisture\n * soil moisture limit that triggers irrigation\n * how long to irrigate for\n * SDI address of relay\n\"\"\"\n\n# what is the name of the soil moisture measurement that triggers relays\nsoil_moisture_meas = \"SoilMoist50\"\n\n# what is the limit for the soil moisture required to trigger irrigation?\nmoisture_limit = 0.30\n\n# how long to irrigate for each time in seconds\nirrigation_period_sec = 3600 # one hour\n\n# what SDI-12 address is the Tekbox TBSRB01 relay on?\nrelay_addy = 3\n\n# the SDI-12 bus that the relay is on\nrelay_bus = \"PORT1\"\n\nimport re\nimport utime\nfrom sl3 import *\n\n\nclass Sdi12Error(Exception):\n pass\n\n\ndef sdi_bus_valid(sdi_bus):\n \"\"\"\n Routine checks whether the provided parameter is a SDI-12 bus\n\n :param sdi_bus: string indicating bus: \"Port1\", \"Port2\", or \"RS485\"\n :return: True if provided parameter is a valid bus\n :rtype: Boolean\n \"\"\"\n bus_upper = sdi_bus.upper()\n if (\"PORT1\" in bus_upper) or (\"PORT2\" in bus_upper) or (\"RS485\" in bus_upper):\n return True\n else:\n return False\n\n\ndef sdi_send_command_get_reply(cmd_to_send, sdi_bus=\"Port1\"):\n \"\"\"\n Sends provided command out on the specified SDI-12 bus, gets reply from the sensor.\n\n :param cmd_to_send: the command to send on the SDI-12 bus, e.g. \"0M!\"\n :param sdi_bus: string indicating bus: \"Port1\", \"Port2\", or \"RS485\"\n :return: sensor reply, or \"No reply\"\n :rtype: str\n \"\"\"\n if sdi_bus_valid(sdi_bus):\n reply = command_line('!SDI {} {}'.format(sdi_bus, cmd_to_send), 128)\n if \"Got reply: \" in reply:\n reply = reply.replace(\"Got reply:\", \"\")\n else:\n raise Sdi12Error(\"No such bus\", sdi_bus)\n\n reply = reply.strip()\n return reply\n\n\ndef update_status(status):\n \"\"\" update the status that we show the user\"\"\"\n update = ascii_time(utime.time()) + ' ' + status\n print(update)\n\n\ndef check_reply(reply):\n \"\"\"\n checks the reply from the relay. 
it must say aX_OK\n if it does not, error is logged and False returned\n :param reply: SDI-12 reply from relay\n :type reply: str\n :return: True if no error\n :rtype: bool\n \"\"\"\n if 'X_OK' in reply:\n return True\n else:\n return False\n\n\nTIME_UP = 1\nSTOPPED = 0\n\ndef wait_time_or_stop(end_time, sleep_period_sec=5):\n \"\"\"\n Waits until provided end time or until recording is stopped\n :param end_time: when the wait should end\n :type end_time: u_time.time()\n :param sleep_period_sec: how long to sleep for when checking for recording\n :type sleep_period_sec: int seconds\n :return: TIME_UP (1) or STOPPED (0)\n :rtype: int\n \"\"\"\n ret_val = TIME_UP\n\n while(utime.time() < end_time):\n if setup_read(\"Recording\").upper() == \"OFF\":\n ret_val = STOPPED\n break\n utime.sleep(sleep_period_sec)\n\n return ret_val\n\n\ndef relay_control(relay_index):\n \"\"\"\n either opens all relays (relay_index = 0), or\n closes one relay and opens all others (1 <= relay_index <=4)\n updates status with result\n\n :param relay_index: which relay to close (0 means open all)\n :type relay_index: int\n :return: True if AOK\n :rtype: bool\n \"\"\"\n # format up the command:\n # 0 means open relay - stop irrigating\n # 1 means close relay - start irrigating\n if relay_index == 1:\n sdi_cmd = '{}XSR,1,0,0,0'.format(relay_addy)\n elif relay_index == 2:\n sdi_cmd = '{}XSR,0,1,0,0'.format(relay_addy)\n elif relay_index == 3:\n sdi_cmd = '{}XSR,0,0,1,0'.format(relay_addy)\n elif relay_index == 4:\n sdi_cmd = '{}XSR,0,0,0,1'.format(relay_addy)\n else:\n # open all relays\n sdi_cmd = '{}XSR,0,0,0,0'.format(relay_addy)\n\n reply = sdi_send_command_get_reply(sdi_cmd, relay_bus)\n if check_reply(reply):\n update_status(\"Relay {} activated\".format(relay_index))\n reading = Reading(label=\"Relay\", time=utime.time(),\n etype='E', value=relay_index, quality='G')\n reading.write_log()\n return True\n\n else:\n update_status(\"Relay activation failure, SDI: {}\".format(reply))\n reading = Reading(label=\"Relay\", time=utime.time(),\n etype='E', value=relay_index, quality='B')\n reading.write_log()\n return False\n\n\n@TASK\ndef relay_all_deactivate():\n \"\"\"\n Issues command to open all relays (stops irrigation)\n Connect this to a Script Task\n \"\"\"\n relay_control(0)\n\n\ndef irrigate():\n \"\"\"\n Irrigates by controlling relays over a 5 hour period\n :return: True if irrigation completes, False if aborted\n :rtype: bool\n \"\"\"\n time_tracker = utime.time()\n result = True\n\n for relay_index in range(1,5):\n\n if not relay_control(relay_index):\n result = False # failed to issue command\n break\n\n # update time for next end\n time_tracker = time_tracker + irrigation_period_sec\n\n if not wait_time_or_stop(time_tracker):\n # system was stopped\n update_status(\"System stopped. Irrigation aborted.\")\n result = False # failed to issue command\n break\n\n # deactivate all relays\n relay_all_deactivate()\n return result\n\n\n@TASK\ndef check_and_irrigate():\n \"\"\"\n Routine will check soil moisture and irrigate if appropriate\n Connect this to a Script Task\n \"\"\"\n\n try:\n # check soil moisture\n moisture = measure(soil_moisture_meas)\n if moisture.quality != 'G':\n # failed to measure moisture\n update_status(\"Moisture measurement failed\")\n\n elif moisture.value < moisture_limit:\n # proceed to irrigate\n if is_being_tested():\n # script testing should not irrigate\n update_status(\"Script test will not irrigate! Soil moisture: {}\".format(moisture.value))\n else:\n update_status(\"Irrigating! 
Soil moisture: {}\".format(moisture.value))\n irrigate() # note that irrigate will update status\n\n else: # no need to irrigate now\n update_status(\"No need to irrigate. Soil moisture: {}\".format(moisture.value))\n\n except:\n # if script breaks, deactivate all relays\n relay_all_deactivate()\n","sub_path":"projects/Irrigation_Vineyard/Vineyard_script_a1.py","file_name":"Vineyard_script_a1.py","file_ext":"py","file_size_in_byte":7725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"74546105","text":"import numpy as np\r\nimport pandas as pd\r\nfrom sklearn.cluster import KMeans\r\nimport matplotlib.pyplot as plt\r\ndata=pd.read_csv('q5.csv',header=0,index_col='person_id')\r\nwant_data=pd.DataFrame(data.loc[:,['ttl_txn','ttl_to','avg_to_per_qty']])\r\n#print(want_data)\r\nwant_data_zs=1.0*(want_data-want_data.mean())/want_data.std()\r\n#print(want_data_zs)\r\n\r\n# elbow rule to select K\r\n'''\r\nfrom scipy.spatial.distance import cdist\r\nK=range(1,10)\r\nmeandistortions=[]\r\nfor k in K:\r\n kmeans=KMeans(n_clusters=k)\r\n kmeans.fit(want_data_zs)\r\n meandistortions.append(sum(np.min(\r\n cdist(want_data_zs,kmeans.cluster_centers_,\r\n 'euclidean'),axis=1))/want_data_zs.shape[0])\r\nplt.plot(K,meandistortions,'bx-')\r\nplt.xlabel('k')\r\nplt.ylabel('average disortion')\r\nplt.show()\r\n'''\r\nk=3\r\niteration=500\r\nmodel=KMeans(n_clusters=k,n_jobs=4,max_iter=iteration)\r\nmodel.fit(want_data_zs)\r\n\r\nr1=pd.Series(model.labels_).value_counts()\r\nr2=pd.DataFrame(model.cluster_centers_)\r\nr=pd.concat([r2,r1],axis=1)\r\nr.columns=list(want_data.columns)+['number of cluster']\r\nprint(r)\r\n\r\nr=pd.concat([want_data,pd.Series(model.labels_,index=want_data.index)],axis=1)\r\nr.columns=list(want_data.columns)+['cluster group']\r\noutputfile='data_type.csv'\r\nr.to_csv(outputfile)\r\n\r\ndef density_plot(data,title):\r\n import matplotlib.pyplot as plt\r\n plt.figure()\r\n for i in range(len(data.iloc[0])):\r\n (data.iloc[:,i]).plot(kind='kde',label=data.columns[i],linewidth=2)\r\n plt.ylabel('density')\r\n plt.xlabel('num of people')\r\n plt.title('density curve of group %s ' %title)\r\n plt.legend()\r\n return plt\r\ndef density_plot(data):\r\n import matplotlib.pyplot as plt\r\n p=data.plot(kind='kde',linewidth=2,subplots=True,sharex=False)\r\n [p[i].set_ylabel('density') for i in range(k)]\r\n plt.legend()\r\n return plt\r\n\r\npic_output='gd_'\r\nfor i in range(k):\r\n density_plot(want_data[r['cluster group']==i]).savefig('%s%s.png' %(pic_output,i))\r\n","sub_path":"Q5/Q5.py","file_name":"Q5.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"22047670","text":"import numpy as np\r\nimport optimization.multivariable_calculus as mvc\r\n\r\ndef best(f, criticalPoints):\r\n globalOptimum = criticalPoints[0]\r\n optimumOutput = f(globalOptimum)\r\n for i in range(1, len(criticalPoints)):\r\n contender = f(criticalPoints[i])\r\n if contender < optimumOutput:\r\n globalOptimum = criticalPoints[i]\r\n optimumOutput = contender\r\n return globalOptimum\r\n\r\ndef identityMatrix(n):\r\n I = []\r\n for i in range(n):\r\n I.append([])\r\n for j in range(n):\r\n I[i].append(1 if i == j else 0)\r\n return I\r\n\r\ndef lineSearch(f, n, grad, xk, pk):\r\n # Given a function and a in which direction to travel,\r\n # lineSearch solves for the optimal distance to travel to not under- or over-shoot.\r\n # Backtracking line search will initialize 
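relay_control() above encodes the relay state into an SDI-12 extended command. A dry run of the exact command strings it builds for address 3, with no hardware I/O:

relay_addy = 3  # same address as in the script above
for relay_index in range(5):
    states = ['1' if relay_index == i + 1 else '0' for i in range(4)]
    print('{}XSR,{}'.format(relay_addy, ','.join(states)))
# 3XSR,0,0,0,0  <- index 0: all relays open (irrigation off)
# 3XSR,1,0,0,0  <- index 1: relay 1 closed, others open
# ... up to 3XSR,0,0,0,1 for relay 4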
alpha, the distance to travel, as a high number.\r\n # alpha will be iteratively lessened until the Armijo-Goldstein condition is satisfied.\r\n alpha = 1e2\r\n control = 0.5 # 0 < control < 1 is a control parameter for the Armijo-Goldstein condition. See https://en.wikipedia.org/wiki/Backtracking_line_search.\r\n lesseningFactor = 0.5 # 0 < lesseningFactor < 1 is multiplied into alpha at each iteration to lessen it.\r\n m = pk.dot(grad.T).tolist()[0] # local slope in direction pk\r\n t = control * m # Store this value for later access in the condition.\r\n fxk = f(xk.tolist()[0])\r\n # Armijo set control and lesseningFactor to 1/2 in his original paper, as done here.\r\n # Now, lessen alpha until the condition is satisfied. Break after 40 steps in case something went wrong.\r\n for i in range(40):\r\n # If the Armijo-Goldstein condition is met, terminate. Otherwise, lessen alpha.\r\n if f((xk[0] + alpha*pk).tolist()) <= fxk + alpha*t:\r\n break\r\n alpha = alpha * lesseningFactor\r\n return alpha\r\n\r\ndef BFGS(V, sk, yk):\r\n ykskT = yk.dot(sk.T)\r\n return (1 + yk.dot(V).dot(yk.T)/ykskT)*(sk.T).dot(sk)/ykskT - (V.dot(yk.T).dot(sk) + (sk.T).dot(yk).dot(V))/ykskT #(1 + (yk.T).dot(V).dot(yk)/skTyk)*(sk.dot(sk.T))/skTyk - (sk.dot(yk.T) * V+ V.dot(yk).dot(sk.T))/skTyk\r\n\r\ndef DFP(V, sk, yk):\r\n VykT = V.dot(yk.T)\r\n return (sk.T).dot(sk)/yk.dot(sk.T) - (VykT.dot(yk).dot(V))/yk.dot(VykT)\r\n\r\ndef optimize(f, n, convergence = 1e-6, trials=1, maxSteps = 100, lowerBound = -1, upperBound = 1):\r\n criticalPoints = []\r\n convergenceSquared = convergence**2\r\n for trial in range(trials):\r\n # Initial guess for optimum, to be optimized\r\n xk = np.array([[np.random.random() * (upperBound-lowerBound) + lowerBound for i in range(n)]])#np.array([0, -0.1])\r\n # This runs a combination of the DFP and BFGS Quasi-Newton methods for optimization.\r\n V = np.array(identityMatrix(n)) # Initially, the inverse Hessian is approximated with the identity matrix.\r\n for i in range(maxSteps):\r\n grad = np.array([mvc.gradient(f, xk.tolist()[0])])\r\n # Calculate direction by Newton's Method with an approximated inverse Hessian.\r\n pk = -V.dot(grad[0])\r\n # Perform line search to calculate step size, alpha.\r\n alpha = lineSearch(f, n, grad, xk, pk) # Calculate to find next point after step.\r\n sk = alpha*pk # step at iteration k\r\n # If the step size is small enough, terminate search. Further computation would be wasteful.\r\n if sk.dot(sk) < convergenceSquared:\r\n continue\r\n sk = np.array([sk.tolist()])\r\n xk_next = xk + sk\r\n # Now, for the new xk, update the approximate inverse Hessian with BFGS.\r\n yk = np.array([mvc.gradient(f, xk_next.tolist()[0])]) - grad\r\n xk = xk_next\r\n #### Update the inverse Hessian with a combination of DFP and BFGS. 
For more details, see en.wikipedia.org/wiki/Broyden–Fletcher–Goldfarb–Shanno_algorithm.\r\n            ####t = (2*alpha - 1)/alpha\r\n            ####V = V + t * DFP(V, sk, yk) + (1-t) * BFGS(V, sk, yk)\r\n            V = V + BFGS(V, sk, yk)\r\n        criticalPoints.append(xk.tolist()[0])\r\n    return best(f, criticalPoints)\r\n","sub_path":"optimization/quasi_newton.py","file_name":"quasi_newton.py","file_ext":"py","file_size_in_byte":4140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"553548841","text":"# Prediction\nfrom collections import defaultdict\nimport math\n#def predict_all(model_file,input_file):\n    #load w from model_file\n    #model_file is not created yet: it is loaded by the test program\n\n\ndef predict_one(w,phi):\n    score = 0\n    for name,value in phi.items():\n        if name in w:\n            score += value * w[name]\n    if score >= 0:\n        return 1\n    else:\n        return -1\n\ndef create_features(x):\n    phi = defaultdict(lambda :0)\n    words = x.strip().split()\n    for word in words:\n        phi['UNI:' + word] += 1\n    return phi\n\"\"\"\ndef update_weights(w,phi,y):\n    c = 0.0001\n    for name,value in w.items():\n        if abs(value) < c:\n            w[name] = 0\n        else:\n            w[name] -= sign(value) * c\n    for name,value in phi.items():\n        w[name] += value * y\n    return w\n\"\"\"\ndef sign(x):\n    if x >= 0:\n        return 1\n    else:\n        return -1\n\nsigmoid = defaultdict(lambda :0)\n\ndef sigm(x,word):\n    if x >= 0:\n        sigmoid[word] += (math.exp(x)/(1+math.exp(x))**2)\n        return sigmoid[word]\n    else:\n        sigmoid[word] -= (math.exp(x)/(1+math.exp(x))**2)\n        return sigmoid[word]\n\n\nif __name__ == '__main__':\n    w = defaultdict(lambda :0)\n    l = 20 #iteration? : number of training passes?\n    margin = 20\n    c = 0.0001\n    for i in range(l):\n        with open('../../data/titles-en-train.labeled','r') as t_f:\n            for line in t_f:\n                phi = defaultdict(lambda :0)\n                y,x = line.strip().split('\\t') #y is int , x is words\n                y = float(y)\n                for word,value in create_features(x).items():\n                    phi[word] = value\n                    val = w[word] * phi[word] * y\n                    if val <= margin:\n                        if abs(w[word]) < c:\n                            w[word] = 0\n                        else:\n                            w[word] += sigm(w[word],word) * c\n#                            w[word] -= sign(w[word]) * c\n                        w[word] += phi[word] * y\n    with open('model_file.txt','w') as m_f:\n#        for line in m_f:\n        for word,value in w.items():\n            m_f.write('{}\\t{}\\n'.format(word,value))\n","sub_path":"yohta/tutorial06/train_svm.py","file_name":"train_svm.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"591751686","text":"import numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\ndef _as_mat(x):\n    if x.ndim == 2:\n        return x\n    return x.reshape(len(x), -1)\n\n\nclass MaxoutFunction(function.Function):\n\n    def check_type_forward(self, in_types):\n        type_check.expect(\n            in_types.size() >= 2,\n            in_types.size() <= 3\n        )\n        x_type, w_type = in_types[:2]\n\n        type_check.expect(\n            x_type.dtype.kind == 'f',\n            w_type.dtype.kind == 'f',\n            x_type.ndim >= 2,\n            w_type.ndim == 3,\n            type_check.prod(x_type.shape[1:]) == w_type.shape[0]\n        )\n\n        if in_types.size().eval() == 3:\n            b_type = in_types[2]\n            type_check.expect(\n                b_type.dtype.kind == 'f',\n                b_type.ndim == 2,\n                b_type.shape == w_type.shape[1:]\n            )\n\n    def forward(self, inputs):\n        xp = cuda.get_array_module(*inputs)\n        x = _as_mat(inputs[0])\n        W = inputs[1]\n        ys = xp.tensordot(x, W, axes=1)\n        if len(inputs) == 3:\n            ys += inputs[2]\n        self.argmax = xp.argmax(ys, axis=1)\n        return xp.max(ys, axis=1),\n\n    def backward(self, inputs, grad_outputs):\n        gy = grad_outputs[0]\n        x = _as_mat(inputs[0])\n        W = 
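For reference, the inverse-Hessian update that the BFGS() helper in quasi_newton.py above mirrors, written here in the usual column-vector convention (the code works with row vectors, so its transposes appear flipped):

% Standard BFGS update of the approximate inverse Hessian H_k,
% with step s_k and gradient difference y_k:
H_{k+1} = H_k
        + \frac{\left(s_k^{\top} y_k + y_k^{\top} H_k y_k\right) s_k s_k^{\top}}{\left(s_k^{\top} y_k\right)^{2}}
        - \frac{H_k y_k s_k^{\top} + s_k y_k^{\top} H_k}{s_k^{\top} y_k}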
inputs[1]\n\n        xp = cuda.get_array_module(*inputs)\n        # gradient of z = xW + b\n        gz = xp.zeros((gy.shape[0], W.shape[1], gy.shape[1]), x.dtype)\n        if xp == numpy:\n            idx0 = xp.arange(len(gy))[:, None]\n            idx1 = xp.arange(gy.shape[1])\n            gz[idx0, self.argmax, idx1] = gy\n        else:\n            gz_r = xp.rollaxis(gz, 1)\n            cuda.elementwise(\n                'T gy, S argmax, int32 n', 'raw T gz',\n                'gz[argmax * n + i] = gy', 'maxout_bwd'\n            )(gy, self.argmax, gz_r.size // len(gz_r), gz_r)\n        gx = xp.tensordot(gz, W, ((1, 2), (1, 2))).reshape(inputs[0].shape)\n        gW = xp.tensordot(x, gz, (0, 0))\n\n        if len(inputs) == 3:\n            gb = gz.sum(axis=0)\n            return gx, gW, gb\n        else:\n            return gx, gW\n\n\ndef maxout(x, W, b=None):\n    \"\"\"Non-parameterized Maxout activation function\n\n    It accepts two or three arguments: an input minibatch ``x``,\n    a weight tensor ``W``, and optionally a bias matrix ``b``\n    and computes\n\n    .. math::\n\n       Y_{i} = \\\\mathrm{max}_{j} (x^{T}W_{\\\\cdot ij} + b_{ij}).\n\n    Here, :math:`x` is an input vector and :math:`W_{\\\\cdot ij}`\n    is a sub-vector extracted from :math:`W` by fixing second\n    and third dimensions to :math:`i` and :math:`j`, respectively.\n    Minibatch dimension is omitted in the above equation.\n\n    Args:\n        x (~chainer.Variable): Input variable. Its first dimension is assumed\n            to be the *minibatch dimension*. The other dimensions are treated\n            as one concatenated dimension whose size must be ``N``.\n        W (~chainer.Variable): Weight variable of shape ``(N, C, M)``.\n        b (~chainer.Variable): Bias variable (optional) of shape ``(C, M)``.\n    Returns:\n        ~chainer.Variable: Variable holding :math:`Y`.\n\n    .. seealso:: :class:`~chainer.links.Maxout`\n    \"\"\"\n\n    if b is None:\n        return MaxoutFunction()(x, W)\n    else:\n        return MaxoutFunction()(x, W, b)\n","sub_path":"chainer/functions/activation/maxout.py","file_name":"maxout.py","file_ext":"py","file_size_in_byte":3318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"44427736","text":"# -*- coding: utf-8 -*-\nfrom Public.requests import requ\nfrom branch.log import Log\nfrom branch.operate_db import Operate_db\nfrom config.readyaml import Getyaml\n\nreques=requ()\nclass TestApi(object):\n\tdef __init__(self,url,key,connent,fangshi,param_place,assertdata):\n\t\tself.url = url\n\t\tself.key = key\n\t\tself.connent = connent\n\t\tself.fangshi = fangshi\n\t\tself.param_place = param_place\n\t\tself.assertdata = assertdata\n\n\tdef get_param(self):\n\t\tif self.param_place != 'database':\n\t\t\treturn self.connent\n\t\telse:\n\t\t\t# get the database name\n\t\t\tself.database = Getyaml(yamlparam=\"interface_db\",interface=self.url).port_db()\n\t\t\tLog().info('Database involved in the current interface: %s' % self.database)\n\t\t\t# perform the database operation\n\t\t\tpost_data = Operate_db(self.database,self.url).Perform()\n\t\t\tLog().info('Data format: %s' % post_data)\n\t\t\treturn post_data\n\n\tdef testapi(self):\n\t\tif self.fangshi=='POST':\n\t\t\t#self.parem = {'key': self.key, 'info': self.connent}\n\t\t\tself.response=reques.post(self.url, self.get_param(), self.assertdata)\n\t\telif self.fangshi==\"GET\":\n\t\t\tself.parem = {'key': self.key, 'info': self.connent}\n\t\t\tself.response = reques.get(self.url, self.get_param())\n\t\telif self.fangshi == \"PUT\":\n\t\t\t#self.parem = {'key': self.key, 'info': self.connent}\n\t\t\tself.response = reques.putfile(self.url, self.get_param(), self.assertdata)\n\t\telif self.fangshi == \"DELETE\":\n\t\t\tself.parem = {'key': self.key, 'info': self.connent}\n\t\t\tself.response = reques.delfile(self.url, self.get_param())\n\t\treturn 
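A shape sanity check for the maxout forward pass above, using plain NumPy in place of chainer's xp (batch, input, pool and output sizes are arbitrary example values):

# Mirrors xp.tensordot(x, W, axes=1) followed by a max over the pool axis.
import numpy as np

batch, n_in, pool, n_out = 4, 6, 3, 5
x = np.random.randn(batch, n_in).astype(np.float32)
W = np.random.randn(n_in, pool, n_out).astype(np.float32)   # (N, C, M)
b = np.random.randn(pool, n_out).astype(np.float32)         # (C, M)

z = np.tensordot(x, W, axes=1) + b   # (batch, pool, n_out)
y = z.max(axis=1)                    # (batch, n_out): max over the C pool units
print(y.shape)                       # (4, 5)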
self.response\n\t# def getJson(self):\n\t# \tjson_data = self.testapi()\n\t# \treturn json_data","sub_path":"Public/select_request.py","file_name":"select_request.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"138253277","text":"from .layers import *\n\nclass ExtraTransition():\n def __init__(self,\n stage_id,\n num_channels,\n num_branches,\n num_out_channels):\n self.scope = str(stage_id) + 'ExtraTransition'\n self.num_channels = num_channels\n self.num_branches = num_branches\n self.num_out_channels = num_out_channels\n\n def forward(self, input):\n with tf.variable_scope(self.scope):\n _out = []\n for i in range(self.num_branches):\n # if tf.shape(input[i])[3].asnumpy != self.num_out_channels:\n if i < self.num_branches - 1:\n _tmp_out = slim.conv2d(input[i], num_outputs=self.num_out_channels * pow(2, i),\n kernel_size=[3,3], stride=1, activation_fn=tf.nn.relu,\n normalizer_fn=batch_norm)\n else:\n _tmp_out = input[i]\n _out.append(_tmp_out)\n return _out","sub_path":"hr_rnet/transition.py","file_name":"transition.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"436569996","text":"from cloudmusic_server.models import Song, SONG_LOAD, SONG_STAGE, SONG_LOAD_OK,\\\n SONG_OK\n\n\ndef update_status(song_id):\n song = Song.objects.get(pk=song_id)\n if song.status == SONG_LOAD:\n song.status = SONG_STAGE\n elif song.status == SONG_LOAD_OK:\n song.status = SONG_OK\n song.save()\n\n\nCAMEL_CASE_BLACKLIST = ['DJ', 'of', 'a', 'vs', 'and', 'feat', 'the', 'for']\n\ndef parse_title(song, title):\n title = title.strip()\n if '-' in title:\n artist, name = title.split('-', 1)\n \n name = name.strip()\n name = name[0].upper() + name[1:]\n \n song.artist = _fix_artist(artist)\n song.name = name\n else:\n song.name = title.strip()\n\ndef _fix_artist(artist):\n artist = artist.strip()\n \n parts = artist.split(' ')\n new_artist = ''\n for part in parts:\n if part == ' ' or len(part) == 0:\n continue\n \n part = part.strip()\n \n has_point = False\n if part[-1] == '.':\n has_point = True\n part = part[:-1]\n \n in_blacklist = False\n for word in CAMEL_CASE_BLACKLIST:\n if part.lower() == word.lower():\n part = word\n in_blacklist = True\n break\n \n if not in_blacklist:\n part = part[0].upper() + part[1:].lower()\n \n new_artist += part\n if has_point:\n new_artist += '.'\n new_artist += ' '\n \n return new_artist[:-1] # remove last whitespace\n\n ","sub_path":"server/cloudmusic_server/backend/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"74680434","text":"import pygame, pygame.gfxdraw, time\n\nimport things\n\nSHOW_TIME = False\n\nclass PygView(object):\n \n def __init__(self, layout, config):\n \"\"\"Initialize pygame, window, background, font,...\n default arguments \n \"\"\"\n self.radius = config['radius']\n self.aspect = config['aspect']\n self.w = config['width']\n self.h = int(self.w / self.aspect)\n self.fps = float(config['fps'])\n self.RESTITUTION = float(config['restitution'])\n\n pygame.init()\n pygame.display.set_caption(\"Press ESC to quit\")\n self.screen = pygame.display.set_mode((self.w, self.h), pygame.DOUBLEBUF)\n self.background = pygame.Surface(self.screen.get_size()).convert() \n self.background.fill((255, 255, 255)) # fill background white\n \n 
self.clock = pygame.time.Clock()\n self.playtime = 0\n self.time_font = pygame.font.SysFont('monospace', 17)\n self.layout = layout\n\n self.make_objects()\n\n def make_objects(self):\n self.paddle = None\n self.balls = [things.Ball( (200, 150), (-2.1, 1.74), self.radius, self)]\n self.brickMap = None\n\n def fix_bg(self, contour = None, maxcost = None):\n \"\"\"painting on the surface\"\"\"\n pygame.display.flip()\n self.screen.blit(self.background, (0, 0)) \n\n def stash_balls(self):\n for ball in self.balls:\n ball.update()\n pygame.gfxdraw.filled_circle(self.background, int(ball.pos[0]), int(ball.pos[1]), ball.radius, (145, 120, 170))\n\n def run(self):\n \"\"\"The mainloop\n \"\"\"\n running = True\n while running:\n # handle any events\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False \n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n running = False\n # Clean the background\n self.background.fill((255, 255, 255)) # fill background white\n if SHOW_TIME:\n self.playtime += self.clock.tick(self.fps) / 1000.0\n self.draw_text(self.time_font, \"FPS: {:6.3} {:6.3} sec.\".format(self.clock.get_fps(), self.playtime), (15, self.h - 15))\n # get all game-state from cpp-engine\n \n # update all objects' position, color, etc.\n self.stash_balls()\n\n # finally, update screen\n pygame.display.flip()\n self.screen.blit(self.background, (0, 0)) \n pygame.quit()\n\n def draw_text(self, font, txt, pos, color=(0,0,0)):\n t = font.render(txt, True, (0, 0, 0))\n self.screen.blit(t, pos)\n\n def transform(self, coords):\n # coords must lie in [0.0, 1.0] ~ x, y\n return (int(coords[0] * self.w), int(coords[1] * self.h))\n\n# call with width of window and fps\nconfig = {'width' : 400,\n 'height' : 400 * 9 / 16,\n 'friction' : 0,\n 'restitution' : 1,\n 'radius' : 8,\n 'aspect' : 16.0/9,\n 'fps' : 30}\nlayout = \"\"\nmyWin = PygView(layout, config)\nmyWin.fix_bg()\n#time.sleep(2)\n\nmyWin.run()\n#myWin.run()","sub_path":"cpp/smashit/sim.py","file_name":"sim.py","file_ext":"py","file_size_in_byte":3238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"122496930","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Feb 23 11:00:36 2020\r\n\r\n@author: Ismail\r\n\"\"\"\r\n\r\nimport h5py\r\nimport cv2\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.patches as patches\r\nfrom PIL import Image\r\n\r\nfilename = '../data/coco_val2017_vg_detector_features_adaptive.h5'\r\nID = int(input(\"enter image id to visualize\"))\r\nimage = cv2.imread('im'+str(ID)+'.jpg')\r\n\r\nwith h5py.File(filename, 'r') as f:\r\n # List all groups\r\n print(\"Keys: %s\" % f.keys())\r\n a_group_key = list(f.keys())[0]\r\n\r\n # Get the data\r\n data = list(f[a_group_key])\r\n index = 0\r\n # for i in range(4500):\r\n # if (f['image_id'][i]==ID):\r\n # index = i\r\n # break\r\n print (f['image_id'][index])\r\n boxes = np.array(f['boxes'][index])\r\n boxes = boxes.reshape(boxes.size//4,4)\r\n feat = np.array(f['features'][index])\r\n feat = feat.reshape(feat.size//2048,2048)\r\n print(boxes.shape)\r\n print (f['height'][index],f['width'][index],np.amax(boxes,axis=0))\r\n # print(feat.shape, boxes.shape)\r\n raise Exception()\r\n # print(boxes)\r\n fig,ax = plt.subplots(1)\r\n ax.imshow(image)\r\n \r\n cmap = plt.get_cmap('gnuplot')\r\n colors = [cmap(i) for i in np.linspace(0, 1, boxes.shape[0])]\r\n \r\n for i,box in enumerate(boxes):\r\n \t#if f['features'][index][i]>=0.5:\r\n\t # Create a 
Rectangle patch\r\n\t rect = patches.Rectangle((box[0],box[1]),box[2]-box[0],box[3]-box[1],linewidth=1,edgecolor=colors[i],facecolor='none')\r\n\t # Add the patch to the Axes\r\n\t ax.add_patch(rect)\r\n\t plt.text(box[0],box[1],'weight: '+str(f['features'][index][i]),color='red')\r\nplt.savefig('features_im'+str(ID)+'_t.png')\t\r\nplt.show()\r\n ","sub_path":"getImages/visFeatures.py","file_name":"visFeatures.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"301224524","text":"# -*- coding: utf-8 -*-\n\n'''\nЗадание 25.2c\n\nСкопировать класс CiscoTelnet из задания 25.2b и изменить метод send_config_commands добавив проверку команд на ошибки.\n\nУ метода send_config_commands должен быть дополнительный параметр strict:\n* strict=True значит, что при обнаружении ошибки, необходимо сгенерировать исключение ValueError\n* strict=False значит, что при обнаружении ошибки, надо только вывести на стандартный поток вывода сообщене об ошибке\n\nМетод дожен возвращать вывод аналогичный методу send_config_set у netmiko (пример вывода ниже).\nТекст исключения и ошибки в примере ниже.\n\nПример создания экземпляра класса:\nIn [1]: from task_25_2c import CiscoTelnet\n\nIn [2]: r1_params = {\n ...: 'ip': '192.168.100.1',\n ...: 'username': 'cisco',\n ...: 'password': 'cisco',\n ...: 'secret': 'cisco'}\n\nIn [3]: r1 = CiscoTelnet(**r1_params)\n\nIn [4]: commands_with_errors = ['logging 0255.255.1', 'logging', 'i']\nIn [5]: correct_commands = ['logging buffered 20010', 'ip http server']\nIn [6]: commands = commands_with_errors+correct_commands\n\nИспользование метода send_config_commands:\n\nIn [7]: print(r1.send_config_commands(commands, strict=False))\nПри выполнении команды \"logging 0255.255.1\" на устройстве 192.168.100.1 возникла ошибка -> Invalid input detected at '^' marker.\nПри выполнении команды \"logging\" на устройстве 192.168.100.1 возникла ошибка -> Incomplete command.\nПри выполнении команды \"i\" на устройстве 192.168.100.1 возникла ошибка -> Ambiguous command: \"i\"\nconf t\nEnter configuration commands, one per line. 
End with CNTL/Z.\nR1(config)#logging 0255.255.1\n ^\n% Invalid input detected at '^' marker.\n\nR1(config)#logging\n% Incomplete command.\n\nR1(config)#i\n% Ambiguous command: \"i\"\nR1(config)#logging buffered 20010\nR1(config)#ip http server\nR1(config)#end\nR1#\n\nIn [8]: print(r1.send_config_commands(commands, strict=True))\n---------------------------------------------------------------------------\nValueError Traceback (most recent call last)\n in \n----> 1 print(r1.send_config_commands(commands, strict=True))\n\n...\n\nValueError: При выполнении команды \"logging 0255.255.1\" на устройстве 192.168.100.1 возникла ошибка -> Invalid input detected at '^' marker.\n\n'''\n\nimport telnetlib\nimport time\nimport clitable\nimport re\n\n\nclass CiscoTelnet:\n def __init__(self, **kwargs):\n self.t = telnetlib.Telnet(kwargs['ip'])\n self.t.read_until(b'Username:')\n self._write_line(kwargs['username'])\n self.t.read_until(b'Password:')\n self._write_line(kwargs['password'])\n self._write_line('enable')\n self._write_line(kwargs['secret'])\n self._write_line('terminal length 0')\n # Just to clear buffer\n self.t.read_until(b'terminal length 0')\n self.t.read_until(b'#')\n\n def _write_line(self, command):\n self.t.write(command.encode('utf-8') + b'\\n')\n\n def _parse_command_dynamic(self, command_output, attributes_dict, index_file='index', templ_path='templates'):\n cli_table = clitable.CliTable(index_file, templ_path)\n cli_table.ParseCmd(command_output, attributes_dict)\n headers = list(cli_table.header)\n result = [dict(zip(headers, list(row))) for row in cli_table]\n return result\n\n def send_show_command(self, command, templates, parse):\n self._write_line(command)\n time.sleep(3)\n command_output = self.t.read_very_eager().decode('utf-8')\n if parse:\n attributes = {'Command': command, 'Vendor': 'cisco_ios'}\n result = self._parse_command_dynamic(command_output, attributes, templ_path=templates)\n else:\n result = command_output\n return result\n\n def send_config_commands(self, commands, strict=False):\n result = ''\n if type(commands) == str:\n commands = [commands]\n self._write_line('conf t')\n for command in commands:\n self._write_line(command)\n time.sleep(3)\n command_output = self.t.read_very_eager().decode('utf-8')\n error = re.search(r'^% (.+)\\n', command_output, flags=re.M)\n if error:\n error_msg = (f'При выполнении команды \"{command}\" на устройстве {self.t.host}'\n f' возникла ошибка -> {error.group(1)}')\n if strict:\n raise ValueError(error_msg)\n print(error_msg)\n result += command_output\n return result\n\n\ndef main():\n r1_params = {\n 'ip': '192.168.100.1',\n 'username': 'cisco',\n 'password': 'cisco',\n 'secret': 'cisco'}\n\n test = CiscoTelnet(**r1_params)\n # print(test.send_show_command('sh ip int bri', 'templates', True))\n commands_with_errors = ['logging 0255.255.1', 'logging', 'i']\n correct_commands = ['logging buffered 20010', 'ip http server']\n commands = commands_with_errors + correct_commands\n # print(test.send_config_commands(commands, strict=False))\n print(test.send_config_commands(commands, strict=True))\n test.t.close()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"exercises/25_oop_basics/task_25_2c.py","file_name":"task_25_2c.py","file_ext":"py","file_size_in_byte":5800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"192638787","text":"from product import Product\n\ndef create_products_from_file():\n data = open('data.txt', 'r')\n products_list = data.read().splitlines()\n 
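A quick offline check of the IOS error pattern used by send_config_commands above. Note that real device output uses CRLF line endings, so the captured group can include a trailing '\r' unless the output is normalized first:

import re

sample = 'R1(config)#logging\r\n% Incomplete command.\r\n\r\nR1(config)#'
match = re.search(r'^% (.+)\n', sample.replace('\r\n', '\n'), flags=re.M)
print(match.group(1))  # -> Incomplete command.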
\n    new_product_list = []\n    for item in products_list:\n        item = item.split(',')\n        new_product_list.append(item)\n    \n    product_dict = {}\n    for item in new_product_list:\n        product_dict[int(item[0])] = item[1], float(item[2]), int(item[3])\n\n    products_names = []\n    for item in product_dict.keys():\n        products_names.append('product'+str(item))\n    \n    products = []\n    for p_id, p_values in zip(products_names, product_dict.items()):\n        p_id = Product(p_values[1][0], p_values[1][1], p_values[1][2])\n        products.append(p_id)\n\n    data.close()\n    return products\n\n\ndef save_products_to_file(products):\n    products = products\n    product_list = []\n    for product in products:\n        product_list.append(str((product.id)-3) + ',' + product.name + ',' + str(product.price) + ',' + str(product.amount))\n    data = open('data.txt', 'w')\n    for item in product_list:\n        data.write(item+'\\n')\n    data.close()","sub_path":"fileio.py","file_name":"fileio.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"228312483","text":"from config import cfg\nfrom glob import glob\nfrom os.path import join, exists\nfrom os import makedirs\nimport pandas as pd\nimport urllib.request as request\n\n\ndef read_data():\n    dfs = []\n    files = glob(join(cfg.external_data_path, \"*.csv\"))\n    for file in files:\n        df = pd.read_csv(file)\n        df.dropna(inplace=True)\n        df.drop_duplicates(subset=\"image-src\", keep=False, inplace=True)\n        df[\"height\"] = pd.to_numeric(df[\"height\"], downcast=\"float\")\n        df[\"weight\"] = pd.to_numeric(df[\"weight\"], downcast=\"float\")\n        df.reset_index(drop=True, inplace=True)\n        data = df.drop(\n            [column for column in df.columns if column not in cfg.useful_columns], 1)\n        dfs.append(data)\n\n    frame = pd.concat(dfs, axis=0, ignore_index=True)\n    frame.drop_duplicates(subset=\"image-src\", keep=False, inplace=True)\n    frame.reset_index(drop=True, inplace=True)\n    frame.to_csv(join(cfg.intermediate_data_path,\n                      \"unclean_combined_annotation.csv\"), index=False)\n    _ = frame.info()\n    return frame\n\n\ndef check_url(dataframe):\n    dataframe['image-src'] = cfg.web+dataframe['image-src']\n    index_of_bad_urls = []\n    for index, url in enumerate(dataframe['image-src']):\n        try:\n            request.urlopen(url)\n        except:\n            print(f\"{url} is not good!\")\n            index_of_bad_urls.append(index)\n    # print(index_of_bad_urls)\n    dataframe.drop(index_of_bad_urls, inplace=True)\n    dataframe.reset_index(drop=True, inplace=True)\n    dataframe.to_csv(join(cfg.intermediate_data_path,\n                          \"combined_annotation.csv\"), index=False)\n    _ = dataframe.info()\n    return dataframe\n\n\ndef crawl_data_from_frame(dataframe=None):\n    if dataframe is None:  # a DataFrame has no truth value, so test for None explicitly\n        filename = join(cfg.intermediate_data_path, \"combined_annotation.csv\")\n        dataframe = pd.read_csv(filename)\n    if not exists(cfg.raw_test_data_path):\n        makedirs(cfg.raw_test_data_path)\n    bmi = dataframe['weight'] / \\\n        ((dataframe['height']/100)*(dataframe['height']/100))\n    dataframe['bmi'] = bmi\n    height = dataframe['height']/100\n    dataframe['height'] = height\n    for index, url in enumerate(dataframe['image-src']):\n        images_name = str(index).zfill(4) + \".jpg\"\n        raw_path_for_file = join(cfg.raw_test_data_path, images_name)\n        cropped_path_for_file = join(cfg.cropped_data_path, images_name)\n        request.urlretrieve(url, raw_path_for_file)\n        dataframe.iloc[index, 2] = cropped_path_for_file\n\n    cols = ['image-src', 'height', 'weight', 'bmi']\n    dataframe = dataframe[cols]\n    dataframe.rename(columns={'image-src': 'Path',\n                              'bmi': \"BMI\"}, 
inplace=True)\n dataframe.to_csv(join(cfg.test_data_path,\n \"annotation.csv\"), index=False)\n _ = dataframe.info()\n\n\ndef crawl_data():\n dataframe = read_data()\n clean_dataframe = check_url(dataframe=dataframe)\n crawl_data_from_frame(dataframe=clean_dataframe)\n\n\nif __name__ == \"__main__\":\n crawl_data()\n","sub_path":"src/data/scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":3033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"400438908","text":"# Tuples are used to store multiple items in a single variable.\n# Tuple is one of 4 built-in data types in Python used to store collections of data\n# Tuple items are ordered, unchangeable, and allow duplicate values.\n\n\na = (5, 4, 3, 2, 1)\nprint(type(a))\nfor i in range(len(a)):\n for j in range(0, 5):\n no = int(input(\"guess a no\"))\n if a[i] < no:\n print(\"guess smaller than\", no)\n elif a[i] > no:\n print(\"guess greater than\", no)\n else:\n print(\"correct guess you Win the game\")\n break\n print(\"want to leave press 0 if not press 1 /t\")\n ch = int(input(\"enter for leave\"))\n if ch == 0:\n break\n else:\n continue\n","sub_path":"exercise/GuessNo/GuessNo.py","file_name":"GuessNo.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"570522738","text":"\"\"\"Support for ReCollect Waste sensors.\"\"\"\nfrom __future__ import annotations\n\nfrom aiorecollect.client import PickupType\n\nfrom homeassistant.components.sensor import (\n SensorDeviceClass,\n SensorEntity,\n SensorEntityDescription,\n)\nfrom homeassistant.config_entries import ConfigEntry\nfrom homeassistant.const import CONF_FRIENDLY_NAME\nfrom homeassistant.core import HomeAssistant, callback\nfrom homeassistant.helpers.entity_platform import AddEntitiesCallback\nfrom homeassistant.helpers.update_coordinator import (\n CoordinatorEntity,\n DataUpdateCoordinator,\n)\n\nfrom .const import CONF_PLACE_ID, CONF_SERVICE_ID, DOMAIN, LOGGER\n\nATTR_PICKUP_TYPES = \"pickup_types\"\nATTR_AREA_NAME = \"area_name\"\n\nSENSOR_TYPE_CURRENT_PICKUP = \"current_pickup\"\nSENSOR_TYPE_NEXT_PICKUP = \"next_pickup\"\n\nSENSOR_DESCRIPTIONS = (\n SensorEntityDescription(\n key=SENSOR_TYPE_CURRENT_PICKUP,\n name=\"Current pickup\",\n ),\n SensorEntityDescription(\n key=SENSOR_TYPE_NEXT_PICKUP,\n name=\"Next pickup\",\n ),\n)\n\n\n@callback\ndef async_get_pickup_type_names(\n entry: ConfigEntry, pickup_types: list[PickupType]\n) -> list[str]:\n \"\"\"Return proper pickup type names from their associated objects.\"\"\"\n return [\n t.friendly_name\n if entry.options.get(CONF_FRIENDLY_NAME) and t.friendly_name\n else t.name\n for t in pickup_types\n ]\n\n\nasync def async_setup_entry(\n hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback\n) -> None:\n \"\"\"Set up ReCollect Waste sensors based on a config entry.\"\"\"\n coordinator = hass.data[DOMAIN][entry.entry_id]\n\n async_add_entities(\n [\n ReCollectWasteSensor(coordinator, entry, description)\n for description in SENSOR_DESCRIPTIONS\n ]\n )\n\n\nclass ReCollectWasteSensor(CoordinatorEntity, SensorEntity):\n \"\"\"ReCollect Waste Sensor.\"\"\"\n\n _attr_device_class = SensorDeviceClass.DATE\n _attr_has_entity_name = True\n\n def __init__(\n self,\n coordinator: DataUpdateCoordinator,\n entry: ConfigEntry,\n description: SensorEntityDescription,\n ) -> None:\n \"\"\"Initialize the sensor.\"\"\"\n super().__init__(coordinator)\n\n 
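A worked example of the BMI computation used in crawl_data_from_frame above (heights arrive in centimetres, hence the division by 100):

height_cm, weight_kg = 180.0, 75.0
bmi = weight_kg / ((height_cm / 100) * (height_cm / 100))
print(round(bmi, 2))   # 23.15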
self._attr_extra_state_attributes = {}\n self._attr_unique_id = f\"{entry.data[CONF_PLACE_ID]}_{entry.data[CONF_SERVICE_ID]}_{description.key}\"\n self._entry = entry\n self.entity_description = description\n\n @callback\n def _handle_coordinator_update(self) -> None:\n \"\"\"Respond to a DataUpdateCoordinator update.\"\"\"\n self.update_from_latest_data()\n self.async_write_ha_state()\n\n async def async_added_to_hass(self) -> None:\n \"\"\"Handle entity which will be added.\"\"\"\n await super().async_added_to_hass()\n self.update_from_latest_data()\n\n @callback\n def update_from_latest_data(self) -> None:\n \"\"\"Update the state.\"\"\"\n if self.entity_description.key == SENSOR_TYPE_CURRENT_PICKUP:\n try:\n event = self.coordinator.data[0]\n except IndexError:\n LOGGER.error(\"No current pickup found\")\n return\n else:\n try:\n event = self.coordinator.data[1]\n except IndexError:\n LOGGER.info(\"No next pickup found\")\n return\n\n self._attr_extra_state_attributes.update(\n {\n ATTR_PICKUP_TYPES: async_get_pickup_type_names(\n self._entry, event.pickup_types\n ),\n ATTR_AREA_NAME: event.area_name,\n }\n )\n self._attr_native_value = event.date\n","sub_path":"homeassistant/components/recollect_waste/sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":3670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"515953214","text":"#KochDrawV2.py\nimport turtle as t\ndef koch(size, n):\n if n == 0:\n t.fd(size)\n else:\n for angle in [0, 60, -120, 60]:\n t.left(angle)\n koch(size/3, n-1)\ndef main():\n t.setup(600,600)\n t.penup()\n t.goto(-200,100)\n t.pendown()\n t.pensize(2)\n level = 3 #3阶科赫曲线\n for i in range(3):\n koch(400,level)\n t.right(120)\n t.hideturtle()\nmain()\n","sub_path":"week_5/KochDrawV2.py","file_name":"KochDrawV2.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"108024313","text":"#! 
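In the Koch-curve script above, each koch() call at n > 0 replaces one stroke with four, so one side of the snowflake consists of 4**n strokes and main()'s three sides give 3 * 4**n in total:

for n in range(4):
    print(n, 4 ** n, 3 * 4 ** n)
# n=3 (the level used in main): 64 strokes per side, 192 for the snowflake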
/usr/bin/python\n\n# George-Cristian Muraru, 342C4\n\n### Query privacy \n# Non-adaptive approach - injects files that could break all futures queries\n# The token t is deterministically computed and sent to the server\n# The server sends back the file identifiers of that files that contain that word\n# The server can tell when quaries repeat -> learn the query pattern and the\n# file-access pattern\n\n### The goal of the server is to determine the keywords corresponding to the\n#tokens -> violates query privacy -> and then file privacy\n\nfrom Crypto.Cipher import DES\nimport time\nimport matplotlib.pyplot as plt\n\nfrom math import log\nimport time\nimport random\nimport string\n\nkey = \"bsearcha\"\naes = DES.new(key)\n\n\nserver_files = []\n\ndef random_word(length):\n\tword = ''.join([random.choice(string.lowercase + string.uppercase) for i in range(length)])\n\tword = word.rjust(16)\n\treturn word\n\n\ndef enc_file(f_in):\n\treturn set([aes.encrypt(mess) for mess in f_in])\n\ndef enc(m):\n\treturn aes.encrypt(m)\n\ndef dec(m):\n\treturn aes.decrypt(m)\n\n\ndef populate_vocabulary(voc_size):\n\tK = set()\n\n\twhile len(K) < voc_size:\n\t\tK.add(random_word(random.randint(4,16)))\n\n\tprint (\"Added voc with size {0}\".format(voc_size))\n\tenc_K = [enc(word) for word in K]\n\treturn (list(K), enc_K)\n\ndef send_word_to_client(word):\n\treturn enc(word)\n\ndef byte2bin(bval, length):\n\treturn bin(bval)[2:].zfill(length)\n\ndef inject_files(K):\n\tF = [set() for _ in range(int(log(len(K), 2)))]\n\n\tfor i in range(len(K)):\n\t\tfor i_file, bit in enumerate(byte2bin(i, int(log(len(K), 2)))):\n\t\t\tif bit == '1':\n\t\t\t\tF[i_file].add(K[i])\n\n\tF_enc = [enc_file(f_in) for f_in in F]\n\n\treturn (F, F_enc)\n\ndef recover(F_enc, K, query_word):\n\tindex_word = 0\n\n\tfor i in range(len(F_enc)):\n\t\tif query_word in F_enc[i]:\n\t\t\tindex_word += pow(2, len(F_enc) - i - 1)\n\n\treturn (K[index_word], index_word)\n\n\nif __name__ == \"__main__\":\n\ttimes = []\n\tvoc = [64, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072]\n\n\t# Test\n\tfor vocabulary_size in voc:\n\t\tK, K_enc = populate_vocabulary(vocabulary_size)\n\t\tF, F_enc = inject_files(K)\n\n\t\ttotal = 0\n\t\tfor i in range(10000):\n\t\t\tindex = random.choice(range(len(K)))\n\t\t\tquery_word = K_enc[index]\n\n\t\t\tstart = time.time()\n\t\t\tword, index_word = recover(F_enc, K, query_word)\n\t\t\tend = time.time()\n\t\t\ttotal += (end - start)\n\n\t\t\tassert index_word == index\n\t\t\tassert word == K[index]\n\n\t\ttimes.append(total / 10000.0)\n\t\n\tprint (voc)\n\tprint (times)\n\tplt.ylabel(\"Average time to find the word\")\n\tplt.xlabel(\"Number of words in the vocabulary\")\n\tplt.plot(voc, times, \"-o\")\n\tplt.show()\n\n","sub_path":"binary_attack.py","file_name":"binary_attack.py","file_ext":"py","file_size_in_byte":2545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"285486504","text":"\"\"\"Using word frequencies to create a summary.\r\n\"\"\"\r\n\r\nimport argparse\r\nimport json\r\nimport string\r\nimport random\r\nimport pprint\r\n\r\nfrom nltk import pos_tag\r\nfrom nltk.collocations import BigramAssocMeasures\r\nfrom nltk.collocations import BigramCollocationFinder \r\nfrom nltk.corpus import wordnet\r\nfrom nltk.tokenize import word_tokenize\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.corpus import words as nltk_words\r\nfrom nltk.stem import WordNetLemmatizer\r\nfrom nltk.probability import FreqDist\r\n\r\nimport 
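A crypto-free dry run of the index-recovery idea implemented by inject_files()/recover() above: word i is placed in injected file j exactly when bit j of i is 1, so the membership bits spell the index back out.

# Toy vocabulary of 8 words needs log2(8) = 3 injected files.
K = ['w%d' % i for i in range(8)]
F = [set() for _ in range(3)]
for i, w in enumerate(K):
    for j, bit in enumerate(bin(i)[2:].zfill(3)):
        if bit == '1':
            F[j].add(w)

# Recover the index of a queried word from file membership alone.
target = 'w5'
index = sum(2 ** (len(F) - j - 1) for j in range(len(F)) if target in F[j])
print(index, K[index])   # -> 5 w5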
constants\r\n\r\n\r\n###########################\r\n# PART OF SPEECH TAG TRANSLATOR FROM `pos_tag` TAGS to `wordnet` TAGS\r\n###########################\r\n# source for tags: https://pythonprogramming.net/natural-language-toolkit-nltk-part-speech-tagging/\r\n# NB: wordnet has a ADV_SAT tag, but I have no idea what that is\r\nDEFAULT_TAG = wordnet.NOUN\r\n\r\nPOS_TRANSLATOR = {\r\n 'CC': DEFAULT_TAG, # coordinating conjunction\r\n 'CD': DEFAULT_TAG, # cardinal digit\r\n 'DT': DEFAULT_TAG, # determiner\r\n 'EX': DEFAULT_TAG, # existential there (like: \"there is\" ... think of it like \"there exists\")\r\n 'FW': DEFAULT_TAG, # foreign word\r\n 'IN': DEFAULT_TAG, # preposition/subordinating conjunction\r\n 'JJ': wordnet.ADJ, # adjective 'big'\r\n 'JJR': wordnet.ADJ, # adjective, comparative 'bigger'\r\n 'JJS': wordnet.ADJ, # adjective, superlative 'biggest'\r\n 'LS': DEFAULT_TAG, # list marker 1)\r\n 'MD': wordnet.VERB, # modal could, will\r\n 'NN': wordnet.NOUN, # noun, singular 'desk'\r\n 'NNS': wordnet.NOUN, # noun plural 'desks'\r\n 'NNP': wordnet.NOUN, # proper noun, singular 'Harrison'\r\n 'NNPS': wordnet.NOUN, # proper noun, plural 'Americans'\r\n 'PDT': wordnet.ADJ, # predeterminer 'all the kids'\r\n 'POS': DEFAULT_TAG, # possessive ending parent's\r\n 'PRP': DEFAULT_TAG, # personal pronoun I, he, she\r\n 'PRP$': DEFAULT_TAG, # possessive pronoun my, his, hers\r\n 'RB': wordnet.ADV, # adverb very, silently,\r\n 'RBR': wordnet.ADV, # adverb, comparative better\r\n 'RBS': wordnet.ADV, # adverb, superlative best\r\n 'RP': wordnet.ADV, # particle give up\r\n 'TO': DEFAULT_TAG, # to go 'to' the store.\r\n 'UH': DEFAULT_TAG, # interjection errrrrrrrm\r\n 'VB': wordnet.VERB, # verb, base form take\r\n 'VBD': wordnet.VERB, # verb, past tense took\r\n 'VBG': wordnet.VERB, # verb, gerund/present participle taking\r\n 'VBN': wordnet.VERB, # verb, past participle taken\r\n 'VBP': wordnet.VERB, # verb, sing. present, non-3d take\r\n 'VBZ': wordnet.VERB, # verb, 3rd person sing. present takes\r\n 'WDT': DEFAULT_TAG, # wh-determiner which\r\n 'WP': DEFAULT_TAG, # wh-pronoun who, what\r\n 'WP$': DEFAULT_TAG, # possessive wh-pronoun whose\r\n 'WRB': wordnet.ADV # wh-abverb where, when\r\n}\r\n\r\n\r\ndef parse_arguments():\r\n \"\"\"Parses command-line arguments.\r\n\r\n Returns:\r\n - args (argparse.Namespace): The parsed arguments\r\n \"\"\"\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('-f', '--file', type=str, help='The path to the JSON file containing processed text')\r\n parser.add_argument('-w', '--num_words', type=int, help='The number of frequent words to print out', default=20)\r\n parser.add_argument('-c', '--num_collocations', type=int, help='The number of collocations to print out',\r\n default=10)\r\n parser.add_argument('-cw', '--collocation_window', type=int, help='The window for searching for collocations',\r\n default=5)\r\n return parser.parse_args()\r\n# End of parse_arguments()\r\n\r\n\r\ndef load_records(file, preview_records=False):\r\n \"\"\"Loads the records from the JSON file. 
Also filters out empty records.\r\n\r\n Params:\r\n - file (str): The path to the JSON file\r\n\r\n Returns:\r\n - records (list): The contents of the JSON file\r\n \"\"\"\r\n with open(file, 'r') as json_file:\r\n records = json_file.readlines()\r\n records = [json.loads(record) for record in records]\r\n records = list(filter(lambda record: record[constants.TEXT] != '', records))\r\n if preview_records:\r\n print(\"=====Random Sample of Records=====\")\r\n pprint.pprint(random.choices(records, k=10))\r\n return records\r\n# End of load_records()\r\n\r\n\r\ndef tokenize_records(records):\r\n \"\"\"Tokenizes the records into word lists. Filters out any stopwords in the list.\r\n\r\n Params:\r\n - records (list): The non-empty records from the JSON file\r\n\r\n Returns:\r\n - tokenized_records (list>): The tokenized text content of the records\r\n \"\"\"\r\n contents = map(lambda record: record[constants.TEXT], records)\r\n tokenized_records = [word_tokenize(record.lower()) for record in contents]\r\n lemmatized_records = lemmatize_words(tokenized_records)\r\n lemmatized_words = list()\r\n for lemmatized_record in lemmatized_records:\r\n lemmatized_words.extend(lemmatized_record)\r\n return lemmatized_words\r\n# End of tokenize_records()\r\n\r\n\r\ndef lemmatize_words(records):\r\n \"\"\"Lemmatizes the words in the tokenized sentences.\r\n\r\n Lemmatization works best when the words are tagged with their corresponding part of speech, so the words are first\r\n tagged using nltk's `pos_tag` function.\r\n\r\n NB: There is a good chance that this tagging isn't 100% accurate. For that matter, lemmatization isn't always 100%\r\n accurate.\r\n\r\n Params:\r\n - records (list>): The word-tokenized records\r\n\r\n Returns:\r\n - lemmatized_records (list)): The lemmatized words from all the records\r\n \"\"\"\r\n print('Length of tagged_records: {:d}'.format(len(records)))\r\n print('Total number of words: {:d}'.format(sum([len(record) for record in records])))\r\n tagged_records = map(lambda record: pos_tag(record), records)\r\n tagged_records = filter_stopwords(tagged_records)\r\n lemmatizer = WordNetLemmatizer()\r\n lemmatized_records = list()\r\n for record in tagged_records:\r\n try:\r\n lemmatized_record = list(map(lambda word: lemmatizer.lemmatize(word[0], POS_TRANSLATOR[word[1]]), record))\r\n except Exception as err:\r\n print(record)\r\n raise err\r\n lemmatized_records.append(lemmatized_record)\r\n print('Total number of words after filtering: {:d}'.format(len(lemmatized_records)))\r\n return lemmatized_records\r\n# End of lemmatize_words()\r\n\r\n\r\ndef filter_stopwords(tagged_records):\r\n \"\"\"Filters stopwords, punctuation, and contractions from the tagged records. 
This is done after tagging to make\r\n sure that the tagging is as accurate as possible.\r\n\r\n Params:\r\n - tagged_records (list>>): The records, with each word tagged with its part of speech\r\n\r\n Returns:\r\n - filtered_records (list>>): The records, with unimportant words filtered out\r\n \"\"\"\r\n print('Filtering stopwords')\r\n stop_words = list(stopwords.words('english'))\r\n stop_words.extend(string.punctuation)\r\n stop_words.extend(constants.CONTRACTIONS)\r\n stop_words.extend(constants.MYSQL_STOPWORDS)\r\n dictionary_words = set(nltk_words.words())\r\n\r\n def not_dictionary_word(word): \r\n return word[0] not in dictionary_words and word[1] not in ['NNP', 'NNPS']\r\n\r\n filtered_records = [filter(lambda word: word[0] not in stop_words, record) for record in tagged_records]\r\n filtered_records = [filter(lambda word: not_dictionary_word, record) for record in filtered_records]\r\n filtered_records = [filter(lambda word: not word[0].replace('.', '', 1).isdigit(), record)\r\n for record in filtered_records] # see https://stackoverflow.com/a/23639915/5760608\r\n filtered_records = [list(filter(lambda word: word[1] in POS_TRANSLATOR.keys(), record))\r\n for record in filtered_records]\r\n return filtered_records\r\n# End of filter_stopwords()\r\n\r\n\r\ndef extract_frequent_words(records, num_words, no_counts=False):\r\n \"\"\"Stems the words in the given records, and then counts the words using NLTK FreqDist.\r\n\r\n Stemming is done using the English Snowball stemmer as per the recommendation from \r\n http://www.nltk.org/howto/stem.html\r\n\r\n NB: There is also a Lancaster stemmer available, but it is apparently very aggressive and can lead to a loss of\r\n potentially useful words (source: https://stackoverflow.com/a/11210358/5760608)\r\n\r\n Params:\r\n - records (list): The tokenized records from the JSON file\r\n - num_words (int): The number of words to extract\r\n - no_counts (bool): If True, frequent words will not include the word counts\r\n\r\n Returns:\r\n - frequent_words (list or list>): The list of most frequent words\r\n \"\"\"\r\n word_counts = FreqDist(records)\r\n frequent_words = word_counts.most_common(num_words)\r\n if no_counts:\r\n frequent_words = [word[0] for word in frequent_words]\r\n print(\"=====The {:d} Most Frequent Words=====\".format(num_words))\r\n print(frequent_words)\r\n return frequent_words\r\n# End of extract_frequent_words()\r\n\r\n\r\ndef extract_collocations(records, num_collocations, collocation_window, compare_collocations = False):\r\n \"\"\"Extracts the most common collocations present in the records.\r\n\r\n Params:\r\n - records (list>): The tokenized and lemmatized records from the JSON file\r\n - num_collocations (int): The number of collocations to show\r\n - collocation_window (int): The text window within which to search for collocations\r\n\r\n Returns:\r\n - best_collocations (list>): The highest scored collocations present in the records\r\n \"\"\"\r\n bigram_measures = BigramAssocMeasures()\r\n bigram_finder = BigramCollocationFinder.from_words(records, window_size=collocation_window)\r\n bigram_finder.apply_freq_filter(min_freq=3)\r\n best_collocations = bigram_finder.nbest(bigram_measures.raw_freq, num_collocations)\r\n print(\"=====The {:d} Most Frequent Collocations=====\".format(num_collocations))\r\n pprint.pprint(best_collocations)\r\n if compare_collocations:\r\n print(\"=====The {:d} Best Collocations (Pointwise Mutual Information)=====\".format(num_collocations))\r\n 
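extract_collocations ranks bigrams by raw frequency; the alternative association measures printed below weight co-occurrence differently (PMI, for instance, favours rare but exclusive pairs). A toy run of the same finder API on made-up tokens (assumes nltk is installed):

from nltk.collocations import BigramAssocMeasures, BigramCollocationFinder

tokens = "strong tea strong tea weak tea strong coffee".split()
measures = BigramAssocMeasures()
finder = BigramCollocationFinder.from_words(tokens, window_size=2)
print(finder.nbest(measures.raw_freq, 2))  # most frequent pairs first
print(finder.nbest(measures.pmi, 2))       # PMI reorders in favour of exclusivity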
pprint.pprint(bigram_finder.nbest(bigram_measures.pmi, num_collocations))\r\n        print(\"=====The {:d} Best Collocations (Student's t test)=====\".format(num_collocations))\r\n        pprint.pprint(bigram_finder.nbest(bigram_measures.student_t, num_collocations))\r\n        print(\"=====The {:d} Best Collocations (Chi-square test)=====\".format(num_collocations))\r\n        pprint.pprint(bigram_finder.nbest(bigram_measures.chi_sq, num_collocations))\r\n        print(\"=====The {:d} Best Collocations (Mutual Information)=====\".format(num_collocations))\r\n        pprint.pprint(bigram_finder.nbest(bigram_measures.mi_like, num_collocations))\r\n        print(\"=====The {:d} Best Collocations (Likelihood Ratios)=====\".format(num_collocations))\r\n        pprint.pprint(bigram_finder.nbest(bigram_measures.likelihood_ratio, num_collocations))\r\n        print(\"=====The {:d} Best Collocations (Poisson Stirling)=====\".format(num_collocations))\r\n        pprint.pprint(bigram_finder.nbest(bigram_measures.poisson_stirling, num_collocations))\r\n        print(\"=====The {:d} Best Collocations (Jaccard Index)=====\".format(num_collocations))\r\n        pprint.pprint(bigram_finder.nbest(bigram_measures.jaccard, num_collocations))\r\n        print(\"=====The {:d} Best Collocations (Phi-square test)=====\".format(num_collocations))\r\n        pprint.pprint(bigram_finder.nbest(bigram_measures.phi_sq, num_collocations))\r\n        print(\"=====The {:d} Best Collocations (Fisher's Exact Test)=====\".format(num_collocations))\r\n        pprint.pprint(bigram_finder.nbest(bigram_measures.fisher, num_collocations))\r\n        print(\"=====The {:d} Best Collocations (Dice's Coefficient)=====\".format(num_collocations))\r\n        pprint.pprint(bigram_finder.nbest(bigram_measures.dice, num_collocations))\r\n    return best_collocations\r\n# End of extract_collocations()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    args = parse_arguments()\r\n    records = load_records(args.file, False)\r\n    tokenized_records = tokenize_records(records)\r\n    extract_frequent_words(tokenized_records, args.num_words, True)\r\n    extract_collocations(tokenized_records, args.num_collocations, args.collocation_window, False)\r\n","sub_path":"wordcount.py","file_name":"wordcount.py","file_ext":"py","file_size_in_byte":12393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"190565767","text":"import pandas as pd \nimport numpy as np\nimport urllib3\n# from bs4 import BeautifulSoup\nimport yfinance as yf\nimport matplotlib.pyplot as plt\nimport time\ntimestr = time.strftime(\"%Y%m%d-%H%M%S\")\n\n# Reference article (in Chinese): building your own US stock database with Python, US stocks edition\n# https://medium.com/ai%E8%82%A1%E4%BB%94/%E7%94%A8-python-%E6%89%93%E9%80%A0%E8%87%AA%E5%B7%B1%E7%9A%84%E8%82%A1%E5%B8%82%E8%B3%87%E6%96%99%E5%BA%AB-%E7%BE%8E%E8%82%A1%E7%AF%87-e3e896659fd6\n\n# The 100 most popular US stocks by trading volume on the most recent trading day\nurl = 'https://finance.yahoo.com/u/yahoo-finance/watchlists/most-watched/'\ndata = pd.read_html(url, flavor=\"bs4\")[1]\ndata.columns = ['symbol','name','price','changePrice','changePercent','Time','vol','avgVol','MarketCap']\n# a = sorted(a, key=lambda a_entry: a_entry[1]) \n# data1 = data[np.argsort(data[::, 0])]\n# print(data)\n\ndef p2f(x):\n    # parse a percent string such as '+3.05%' into a fraction; 0 if unparsable\n    # (the original isdigit() gate rejected every string containing '%' or '+')\n    try:\n        return float(x.strip('%').replace('+', '')) / 100\n    except (ValueError, AttributeError):\n        return 0\n\n\ndef Money2int(x):\n    m = {'k':3, 'K': 3, 'M': 6, 'B': 9, 'T': 12}\n    if x.isdigit():\n        return float(x)\n    return (float(x[:-1]) * 10 ** m[x[-1]] )\n\ndef over3(x):\n    A = Money2int(x[6])\n    B = 
Money2int(x[7])\n    P = p2f(x[4])\n    L = ( A>B and P>=0.03 )\n    return L\n\ndef sort3to10(x):\n    A = Money2int(x[6])\n    B = Money2int(x[7])\n    P = p2f(x[4])\n    L = ( A>B and ( P>=0.03 and P<= 0.1))\n    return L\n\ndef big10(x):\n    A = Money2int(x[6])\n    B = Money2int(x[7])\n    P = p2f(x[4])\n    L = ( A>B and P > 0.1)\n    return L\n\ndef setFunc( func ):\n    newlist = list( filter(func, data.values) )\n    if newlist:\n        d = np.row_stack(newlist)\n        return pd.DataFrame({'Symbol': d[:, 0], 'Name': d[:, 1], 'price': d[:, 2], 'Change': d[:, 3], 'Change%': d[:, 4], 'TTM': d[:, 8], 'Vol': d[:, 5], 'avgVol(3m)': d[:, 6] })\n    else:\n        return 'None in list'\n\n# dataframe = pd.DataFrame.from_records(d)\n# dataset.boxplot()\nprint(setFunc(big10))\nprint(setFunc(sort3to10))\n\n\nOver3 = setFunc(over3)\nprint(timestr)\n# Over3.to_excel('over3.xlsx', sheet_name='A'+timestr)\n# data=data[np.argsort(data[:,0])]\n# stk_list = data.Symbol\n","sub_path":"Stock/MostWatch.py","file_name":"MostWatch.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"434322796","text":"#!python\n# -*- coding: utf-8 -*-#\n###########################################################################\n# Author : Bhishan Poudel; Physics Graduate Student, Ohio University\n# Date : Sep 21, 2017\n# Last update :\n###########################################################################\n\"\"\"\n:Topic: Ridge Regression with Gradient Descent\n\n:Ref: http://hyperanalytic.net/ridge-regression\n\n:Algorithm::\n\n    grad_ols = (h-t).T @ X / N\n    grad_ridge = (grad_ols + shrinkage * w ) # shrinkage /N for some cases.\n    w = w - l_rate * grad_ridge\n\n\"\"\"\n# Imports\nimport numpy as np\nfrom sklearn import datasets\nfrom scipy import stats\n\ndef ridge_batch_grad_desc(X, t, shrinkage, iters, l_rate):\n    \"\"\"Calculate the weight vector using ridge regression (L2 norm).\n\n    Args:\n        X (matrix): Design matrix with bias term.\n\n        t (column vector): Target column vector (shape = samples, 1).\n\n        shrinkage (float): L2 regularization shrinkage hyperparameter.\n\n        iters (int): Number of iterations.\n\n        l_rate (float): Learning rate for the gradient descent algorithm.\n\n    \"\"\"\n    X = np.array(X)\n    t = np.array(t)\n    t = t.reshape(len(t), 1)\n    N = len(t)\n    w = np.ones(X.shape[1])\n    w = w.reshape(1, len(w))\n\n    print(\"x.shape = {}\".format(X.shape))\n    print(\"t.shape = {}\".format(t.shape))\n    print(\"w.shape = {}\".format(w.shape))\n    print(\"shrinkage = {}\".format(shrinkage))\n    print(\"iters = {}\".format(iters))\n    print(\"l_rate = {}\".format(l_rate))\n    for i in range(0, iters):\n        h = X @ w.T\n        MSE = np.square(h - t).mean()\n        print(\"iteration:\", i, \"MSE:\", MSE)\n        grad_ols = (h-t).T @ X / N\n        grad_ridge = (grad_ols + shrinkage * w )  # shrinkage /N for some cases.\n        w = w - l_rate * grad_ridge\n\n    # make w a row vector\n    w = w.reshape(1, X.shape[1])  # shape = (1, features + 1)\n    return w\n\ndef main():\n    \"\"\"Run main function.\"\"\"\n    diabetes = datasets.load_diabetes()\n\n    X = diabetes.data\n    y = diabetes.target\n    # standardize first, then prepend the bias column; np.append would flatten\n    # both arrays and scramble the rows when reshaped, and z-scoring a constant\n    # column would divide by zero\n    Z = stats.zscore(X, axis=0)\n    Z = np.column_stack((np.ones(len(Z)), Z))\n    Y = stats.zscore(y)\n\n    w = ridge_batch_grad_desc(Z, Y, .1, 5000, .1)\n    print(w)\n\nif __name__ == \"__main__\":\n    
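As a cross-check on the gradient loop above: setting the record's grad_ridge to zero gives the closed form w = (XtX/N + lambda I)^-1 Xt t/N, which can be solved directly. A short sketch with the same normalization (synthetic data; names are illustrative):

import numpy as np

def ridge_closed_form(X, t, shrinkage):
    # stationary point of the record's loss: (X^T X / N + lambda*I) w = X^T t / N
    n_samples, n_features = X.shape
    A = X.T @ X / n_samples + shrinkage * np.eye(n_features)
    return np.linalg.solve(A, X.T @ t / n_samples)

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 4))
t = X @ np.array([1.0, -2.0, 0.5, 0.0]) + 0.1 * rng.normal(size=100)
print(ridge_closed_form(X, t, 0.1))  # should sit near the true coefficients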
main()\n","sub_path":"Machine_Learning_Univ_Course_(2017Fall)/Extra_hw/Extra_hw01/ridge_regression/ridge_BGD.py","file_name":"ridge_BGD.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"344237348","text":"'''\nhttps://qiita.com/seradaihyo/items/006c5f1c86314a3b7a38\nScraping Wikipedia with Python\n'''\n#\n# import requests,bs4\n#\n# # Get the URL\n#\n#\n# url = 'https://ja.wikipedia.org/wiki/Python'\n# res = requests.get(url)\n#\n# # Get the HTML\n# soup = bs4.BeautifulSoup(res.text, 'html.parser')\n#\n# # Get the selected elements\n# index = soup.select('#toc')\n#\n# # Print them\n# for i in index:\n#     print(i.getText())\n\n\n'''\nAn approach: sketch the processing out in commented-out code first\n'''\n\n'''\nThe flow needed for scraping:\nget the HTML source code,\npick out what you need by class, string, and so on,\nloop over it with a for statement and print (as needed)\n'''\n\n# Get the required libraries\n# import requests\n# import pandas as pd\n# from bs4 import BeautifulSoup\n\n\n# Get the HTML source\n# url = 'https://ja.wikipedia.org/wiki/%E3%83%A1%E3%82%A4%E3%83%B3%E3%83%9A%E3%83%BC%E3%82%B8'\n# html = requests.get(url)\n# soup = BeautifulSoup(html.content, 'html.parser')\n#\n# # Test\n# # print('HTML source')\n# # print('soup only')\n# print(soup)\n# print('soup.prettify')\n# print(soup.prettify)\n\n\n# Extract the text; remove the non-text elements\n# for script in soup(['script', 'style']):\n#     script.decompose()\n\n\n# Get only the text, stripping all the tags\n# text = soup.get_text()\n\n\n# Put the text into a list line by line and strip the whitespace around each element\n# lines = []\n# for line in text.splitlines():\n#     lines.append(line.strip())\n#\n#\n# text = '\\n'.join(line for line in lines if line)\n\n\n\n'''\nA program to check whether a given movie can be watched for free on Amazon Prime Video\n'''\n\n'''\nFetch Wikipedia's \"on this day\" once a day\n'''\n\n'''\n* Practice:\nfetch the headlines and URLs of new articles\nfrom Nikkei Business\n'''\n\n# import requests\n# from bs4 import BeautifulSoup\n# import re\n#\n# urlName = 'https://business.nikkei.com/'\n# url = requests.get(urlName)\n# soup = BeautifulSoup(url.content, 'html.parser')\n#\n# elems = soup.find_all('span')\n# for elem in elems:\n#     try:\n#         string = elem.get('class').pop(0)  # take 'class' out of the span\n#         if string in 'category':  # 'in' checks whether the string is contained\n#             print(elem.string)\n#             title = elem.find_next_sibling('h3')  # get the sibling element\n#             print(title.text.replace('\\n',''))\n#             r = elem.find_previous('a')\n#             print(urlName + r.get('href'), '\\n')\n#     except:\n#         print('error')\n\n\n\n\n# Class name we want to fetch: mainpage-content-text\n\n\n# Fetch the headlines and URLs of new articles from Nikkei Business online. https://business.nikkei.com/\n\n# import requests\n# from bs4 import BeautifulSoup\n# import re\n#\n# # Get the URL and the HTML content\n# urlName = 'https://business.nikkei.com/'\n# url = requests.get(urlName)\n# soup = BeautifulSoup(url.content, 'html.parser')\n#\n#\n# # Parse the HTML with BeautifulSoup\n#\n# elems = soup.find_all('span')  # store every span element in elems\n#\n# for elem in elems:\n#     try:\n#         string = elem.get(\"class\").pop(0)  # take the class out of elem\n#         if string in 'category':  # if 'category' is in the string\n#             print(elem.string)  # print the text\n#             title = elem.find_next_sibling('h3')  # search for an h3 at the same depth with find_next_sibling()\n#             print(title.text.replace('\\n', ''))  # print the title\n#             r = elem.find_previous('a')  # look for the a tag with find_previous()\n#             print(urlName + r.get('href'), '\\n')\n#     except:\n#         pass\n\n\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\nimport datetime\n\ntoday = datetime.date.today()  # today's date\n\nurlName = 'https://ja.wikipedia.org/wiki/%E3%83%A1%E3%82%A4%E3%83%B3%E3%83%9A%E3%83%BC%E3%82%B8'\n\nurl = requests.get(urlName)\nsoup = BeautifulSoup(url.content, 'html.parser')\n\nelems = soup.select('.mainpage-onthisday')\n\n\nprint(today)\nprint(\"Today's events\")\n\n\n# collect the text; initialize the list once, outside the loop\nresult = []\nfor elem in elems:\n    
print(elem.text)\n result.append(elem.text)\n # /nのところで改行をしてリスト形式にする\n # printをするまえに上記を行えば更にきれいになる?\n\n# 結果を出力したい\n# with open('result.html', 'a', encoding='utf-8,') as f:\n# print(result, file=f)\n","sub_path":"wikipedia.py","file_name":"wikipedia.py","file_ext":"py","file_size_in_byte":4503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"171876013","text":"# skill points test\n\nprint(\"You have 10 skill points.\")\nprint(\"You can assign and unassign them into 4 characteristics: strength, health, wisdom and agility.\")\n\nskill_points = 10\ncharacteristics = {\"Strength\": 0, \"Health\": 0, \"Wisdom\": 0, \"Agility\": 0}\nuser_choice = None\nchar_choice = None\nMENU_MAIN = \"\"\"\\nMain menu:\n- enter 0 to exit\n- enter 1 to assign a skill point to one of characteristics\n- enter 2 to unassign a skill point from one of characteristics\n\"\"\"\nMENU_CHAR_ADD = \"\"\"\\nCharacteristics menu:\n- enter 1 to assign a skill point to Strength\n- enter 2 to assign a skill point to Health\n- enter 3 to assign a skill point to Wisdom\n- enter 4 to assign a skill point to Agility\n\"\"\"\nMENU_CHAR_DEL = \"\"\"\\nCharacteristics menu:\n- enter 1 to unassign a skill point from Strength\n- enter 2 to unassign a skill point from Health\n- enter 3 to unassign a skill point from Wisdom\n- enter 4 to unassign a skill point from Agility\n\"\"\"\n\nwhile user_choice != \"0\":\n print(\"\\nFree skill points:\", skill_points)\n print(\"Current characteristics\", characteristics)\n print(MENU_MAIN)\n user_choice = input(\"Choose the option: \")\n if user_choice == \"0\":\n print(\"Good bye.\")\n elif user_choice == \"1\":\n print(MENU_CHAR_ADD)\n char_choice = input(\"Choose the option: \")\n if skill_points == 0:\n print(\"No free SP.\")\n print(\"You can unassign skill points from current characteristics.\")\n elif char_choice == \"1\":\n characteristics[\"Strength\"] += 1\n skill_points -= 1\n elif char_choice == \"2\":\n characteristics[\"Health\"] += 1\n skill_points -= 1\n elif char_choice == \"3\":\n characteristics[\"Wisdom\"] += 1\n skill_points -= 1\n elif char_choice == \"4\":\n characteristics[\"Agility\"] += 1\n skill_points -= 1\n else:\n print(\"There is no such option.\")\n elif user_choice == \"2\":\n print(MENU_CHAR_DEL)\n char_choice = input(\"Choose the option: \")\n if char_choice == \"1\" and characteristics[\"Strength\"] != 0:\n characteristics[\"Strength\"] -= 1\n skill_points += 1\n elif char_choice == \"2\" and characteristics[\"Health\"] != 0:\n characteristics[\"Health\"] -= 1\n skill_points += 1\n elif char_choice == \"3\" and characteristics[\"Wisdom\"] != 0:\n characteristics[\"Wisdom\"] -= 1\n skill_points += 1\n elif char_choice == \"4\" and characteristics[\"Agility\"] != 0:\n characteristics[\"Agility\"] -= 1\n skill_points += 1\n else:\n print(\"There is no such option, or there are 0 skill points in chosen characteristics.\")\n else:\n print(\"There is no such option.\")\n\ninput(\"\\nPress Enter to exit.\")\n","sub_path":"test5/skill_points.py","file_name":"skill_points.py","file_ext":"py","file_size_in_byte":2801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"223733219","text":"#!/usr/bin/env python\n\nimport sys # used for receiving the command line arguments\nimport os # used for receiving the path of the directory with this python script\nimport operator # sorting dictionaries\nimport datetime # measuring 
duration\n\n####################################################################################################################\n#\n# START OF THE SCRIPT\n#\n####################################################################################################################\n\n# guard the existance of an input file from the command list arguments\nif len(sys.argv) < 2:\n print(\"Wrong number of arguments. Please pass a path to a file with encrypted text.\")\n exit()\n\n# receive the path to the input file\nPATH_INPUT_FILE = sys.argv[1]\n\n# receive the current working directory\nCURRENT_WORKING_DIRECTORY = os.getcwd()\n\nif PATH_INPUT_FILE[0] is not '/':\n PATH_INPUT_FILE = os.getcwd() + \"/\" + PATH_INPUT_FILE\n\n# guard the existence of the file at the given path\nif not os.path.isfile(PATH_INPUT_FILE):\n print(\"Error - File does not exist: \" + PATH_INPUT_FILE)\n exit(0)\n\n# store start time for measuring duration\nSTART_TIME = datetime.datetime.now()\n\n# defines the line length when printing a preview of the current decryption state of a text\nLINE_LENGTH = 150\n\n# read the encypted content\nTHE_CIPHERTEXT = open(PATH_INPUT_FILE).read()\nprint(\"preview ciphertext: \" + THE_CIPHERTEXT[:LINE_LENGTH])\n\n####################################################################################################################\n#\n# GLOBAL VARIABLES\n#\n####################################################################################################################\nENGLISH_CHAR_FREQUENCY = {\n \" \": 15.00, \"e\": 12.702, \"t\": 9.056, \"a\": 8.167, \"o\": 7.507, \"i\": 6.966, \"n\": 6.749, \"s\": 6.327, \"h\": 6.094,\n \"r\": 5.987, \"d\": 4.2530, \"l\": 4.025, \"c\": 2.782, \"u\": 2.758, \"m\": 2.406, \"w\": 2.360, \"f\": 2.228, \"g\": 2.015,\n \"y\": 1.974, \"p\": 1.9290, \"b\": 1.492, \"v\": 0.978, \"k\": 0.772, \"j\": 0.153, \"x\": 0.150, \"q\": 0.095, \"z\": 0.074}\n\nENGLISH_BIGRAM_FREQUENCY = {\n \"TH\": 2.71, \"EN\": 1.13, \"NG\": 0.89, \"HE\": 2.33, \"AT\": 1.12, \"AL\": 0.88, \"IN\": 2.03, \"ED\": 1.08,\n \"IT\": 0.88, \"ER\": 1.78, \"ND\": 1.07, \"AS\": 0.87, \"AN\": 1.61, \"TO\": 1.07, \"IS\": 0.86, \"RE\": 1.41,\n \"OR\": 1.06, \"HA\": 0.83, \"ES\": 1.32, \"EA\": 1.00, \"ET\": 0.76, \"ON\": 1.32, \"TI\": 0.99, \"SE\": 0.73,\n \"ST\": 1.25, \"AR\": 0.98, \"OU\": 0.72, \"NT\": 1.17, \"TE\": 0.98, \"OF\": 0.71}\n\nENGLISH_TRIGRAM_FREQUENCY = {\n \"THE\": 1.81, \"ERE\": 0.31, \"HES\": 0.24, \"AND\": 0.73, \"TIO\": 0.31, \"VER\": 0.24, \"ING\": 0.72, \"TER\": 0.30,\n \"HIS\": 0.24, \"ENT\": 0.42, \"EST\": 0.28, \"OFT\": 0.22, \"ION\": 0.42, \"ERS\": 0.28, \"ITH\": 0.21, \"HER\": 0.36,\n \"ATI\": 0.26, \"FTH\": 0.21, \"FOR\": 0.34, \"HAT\": 0.26, \"STH\": 0.21, \"THA\": 0.33, \"ATE\": 0.25, \"OTH\": 0.21,\n \"NTH\": 0.33, \"ALL\": 0.25, \"RES\": 0.21, \"INT\": 0.32, \"ETH\": 0.24, \"ONT\": 0.20}\n\n# variable for a list of letters that are save. save in a manner of that they shouldn't be swapped any more cause it is\n# very likely that they are at the right place in the decryption dictionary.\nSAVED_LETTERS = []\n\n# thresthold fur successfull decryption\nMIN_HIT_RATE = 0.9\n\n\n# it's a way more faster to search in keys of dictionaries than in a list. so this functions just loads every entry\n# of the english wordlist file a key in a dictionary. 
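The script builds its initial key further down by pairing characters with ENGLISH_CHAR_FREQUENCY purely by frequency rank. A tiny standalone version of that rank pairing (toy ciphertext and an abbreviated reference ranking; real texts need far more data for the ranks to be meaningful):

from collections import Counter

english_by_rank = [' ', 'e', 't', 'a']  # abbreviated reference ranking
cipher = "xzzyxzyxzzz"
cipher_by_rank = [c for c, _ in Counter(cipher).most_common()]
initial_key = dict(zip(cipher_by_rank, english_by_rank))
print(initial_key)  # {'z': ' ', 'x': 'e', 'y': 't'} for this toy input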
this dictionary is used for the hit rate later.\ndef read_wordlist_english():\n wordlist_file = open(CURRENT_WORKING_DIRECTORY + \"/Ue_1_A_1_Wordlist_English.txt\")\n wordlist = {}\n for word in wordlist_file.read().split('\\r\\n'):\n wordlist[word.lower()] = None\n wordlist_file.close()\n return wordlist\n\n\n# receive the english wordlist and store it in `ENGLISH_WORDLIST`\nENGLISH_WORDLIST = read_wordlist_english()\n\n\ndef get_words_from_wordlist_by_length(word_length):\n long_words = []\n for word in ENGLISH_WORDLIST.keys():\n if len(word) == word_length:\n long_words.append(word)\n\n return long_words\n\n\n# prints the first 1000 characters of the given string if it is longer than 1000 characters. Prints just the string\n# else.\ndef print_short(input_string, max_length=1000):\n if len(input_string) > max_length:\n print(input_string[:max_length])\n else:\n print(input_string)\n\n\n# auxiliary function to print current hit rate and so far decrypted text, or determines the script when given hit rate\n# is higher the `MIN_HIT_RATE`.\ndef print_stats(cleartext, hit_rate):\n if hit_rate >= MIN_HIT_RATE:\n print(\"\\n\\n\\t\\t--> FOUND KEY <--\\n\\n\")\n print(\"finished with a hit rate of: \" + str(hit_rate))\n print(\"\\ncleartext:\\n\" + cleartext[:400])\n print_duration()\n exit()\n print(str(hit_rate) + \" --> \" + cleartext[:LINE_LENGTH])\n\n\n# auxiliary funtion that prints the duration of the script execution.\ndef print_duration():\n duration = datetime.datetime.now() - START_TIME\n print(\"\\nduration \" + str(duration.total_seconds()) + \" s\\n\")\n\n\n# returns a float value between 0 and 1 describing the percentage of words that could be found in the english wordlist\ndef hit_rate(input_string):\n # split the given string into seperated words\n splitted_text = input_string.split(\" \")\n # stored the number of word found in the wordlist\n hits_count = 0\n # iterate through the word in the given string and check their existance in the wordlist\n for possible_word in splitted_text:\n if possible_word in ENGLISH_WORDLIST:\n hits_count += 1\n\n return float(hits_count * 1.0 / len(splitted_text))\n\n\ndef hit_rate_by_key(a_key):\n return hit_rate(decrypt(THE_CIPHERTEXT, a_key))\n\n\n####################################################################################################################\n#\n# CALCULATE FREQUENCIES\n#\n####################################################################################################################\n\n# returns a dictionary to\ndef char_frequency(input_string):\n some_dictionary = {}\n for character in input_string:\n if character in some_dictionary.keys():\n some_dictionary[character] += 1\n else:\n some_dictionary[character] = 1\n return some_dictionary\n\n\ndef ngram_frequency(input_string, factor):\n some_dictionary = {}\n for an_index in range(len(input_string) - factor):\n trigram = input_string[an_index: an_index + factor]\n if trigram in some_dictionary.keys():\n some_dictionary[trigram] += 1\n else:\n some_dictionary[trigram] = 1\n\n return some_dictionary\n\n\ndef bigram_frequency(input_string):\n return ngram_frequency(input_string, 2)\n\n\ndef trigram_frequency(input_string):\n return ngram_frequency(input_string, 3)\n\n\n####################################################################################################################\n#\n# AUXILIARY FUNCTIONS and SWAP FUNCTIONS\n#\n####################################################################################################################\n\n# takes an dictionary 
and returns a list of its keys ordered by their values (descending).\ndef dictionary_to_sorted_list_by_value(a_dictionary):\n    a_tuples_list = list(reversed(sorted(a_dictionary.items(), key=operator.itemgetter(1))))\n    a_sorted_list = [x[0] for x in a_tuples_list]\n    return a_sorted_list\n\n\n# swaps the given `value_1` and `value_2` in the given dictionary\ndef swap_dictionary(value_1, value_2, dictionary):\n    # avoid invalid swapping of the same character\n    if value_1 == value_2:\n        return dictionary\n    # avoid swapping of already identified characters\n    elif value_1 in SAVED_LETTERS or value_2 in SAVED_LETTERS:\n        return dictionary\n\n    # initialize both positions so a missing value cannot raise a NameError\n    position_a = position_b = None\n    for key, value in dictionary.items():\n        if value == value_1:\n            position_a = key\n        elif value == value_2:\n            position_b = key\n\n    if position_a is not None and position_b is not None:\n        dictionary[position_a] = value_2\n        dictionary[position_b] = value_1\n\n    return dictionary\n\n\ndef swap_and_check(character_1, character_2, input_key, current_hit_rate):\n\n    if character_1 == character_2:\n        return (current_hit_rate, input_key)\n\n    better_key_candidate = input_key.copy()\n\n    # use the function's own parameters, not the caller's globals\n    swap_dictionary(character_2, character_1, better_key_candidate)\n\n    hit_rate_1 = hit_rate_by_key(input_key)\n    hit_rate_2 = hit_rate_by_key(better_key_candidate)\n\n    if hit_rate_1 < hit_rate_2:\n        input_key = better_key_candidate.copy()\n\n    higher_hit_rate = max(hit_rate_1, hit_rate_2)\n\n    if current_hit_rate != higher_hit_rate:\n        print(\"swapping '\" + character_2 + \"' with '\" + character_1 + \"'\")\n        print_stats(decrypt(THE_CIPHERTEXT, input_key), higher_hit_rate)\n\n    return (higher_hit_rate, input_key)\n\n\n############################################################################\n# decrypts the given ciphertext with the given key.\ndef decrypt(ciphertext, key):\n    cleartext = \"\"\n    for a_character in ciphertext:\n        cleartext = cleartext + key[a_character]\n    return cleartext\n\n\ndef get_long_words(cleartext, word_length):\n    long_words = []\n    for word in cleartext.split(' '):\n        if len(word) == word_length and word not in long_words:\n            long_words.append(word)\n\n    return long_words\n\n\ndef get_words_by_length(ciphertext, space_character, word_length):\n    candidates = {}\n    for word in ciphertext.split(space_character):\n        if len(word) == word_length:\n            if word in candidates.keys():\n                candidates[word] += 1\n            else:\n                candidates[word] = 1\n    return candidates\n\n\ndef get_the_candidates(ciphertext, space_character):\n    return get_words_by_length(ciphertext, space_character, 3)\n\n\ndef get_of_candidates(ciphertext, space_character):\n    return get_words_by_length(ciphertext, space_character, 2)\n\n\n# weight the bigrams\n\ndef weight_bigrams(mapped_bigrams, input_weight_dictionary):\n    if input_weight_dictionary:\n        weight_dictionary = input_weight_dictionary\n    else:\n        weight_dictionary = {}\n\n    for bigrams_tuple in mapped_bigrams:\n\n        first_word = bigrams_tuple[0]\n        second_word = bigrams_tuple[1]\n\n        for an_index in range(len(first_word)):\n\n            first_character = first_word[an_index].lower()\n            second_character = second_word[an_index]\n\n            a_tuple = (first_character, second_character)\n\n            keys = weight_dictionary.keys()\n\n            if a_tuple in keys:\n                weight_dictionary[a_tuple] += 1\n            else:\n                weight_dictionary[a_tuple] = 1\n\n    return weight_dictionary\n\n\n# returns a dictionary counting all double-letter occurrences in the given text\ndef get_double_letters(input_text):\n    a_dictionary = {}\n    last_letter = ''\n    for a_character in input_text:\n        if a_character == last_letter:\n            double_letters = a_character + a_character\n            if 
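swap_and_check above is one step of a hill climb: transpose two letters of the key, keep the transposition only if the wordlist hit rate improves. The script walks candidate pairs systematically; a randomized variant of the same keep-if-better loop, as a compact sketch (score is a hypothetical stand-in for hit_rate_by_key):

import random

def hill_climb(key, score, steps=1000):
    # key: dict cipher_char -> plain_char; score: higher is better
    best = score(key)
    letters = list(key)
    for _ in range(steps):
        a, b = random.sample(letters, 2)
        key[a], key[b] = key[b], key[a]      # tentative swap
        s = score(key)
        if s >= best:
            best = s                         # keep the improvement
        else:
            key[a], key[b] = key[b], key[a]  # undo
    return key, best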
double_letters in a_dictionary.keys():\n a_dictionary[double_letters] += 1\n else:\n a_dictionary[double_letters] = 1\n last_letter = a_character\n\n return a_dictionary\n\n\n############################################################################\n# calculation of frequencies\n\n\n# initially create a first\nDICT_CHAR_FREQUENCY = char_frequency(THE_CIPHERTEXT)\n\n# create a list of characters sorted by their associated values in `dict_char_frequency_ciphertext`\nLIST_CHAR_FREQUENCY = dictionary_to_sorted_list_by_value(DICT_CHAR_FREQUENCY)\n\n# guessing the most frequent letter is a whitespace\nCIPHTER_WHITESPACE = dictionary_to_sorted_list_by_value(DICT_CHAR_FREQUENCY)[0]\nprint(\"\\npossible encrypted whitespace: '\" + CIPHTER_WHITESPACE + \"' occures \"\n + str(DICT_CHAR_FREQUENCY[CIPHTER_WHITESPACE]) + \" times in text\")\n\n# remove whitespaces before calculating ngrams\nCIPHERTEXT_WO_WHITESPACES = THE_CIPHERTEXT[:].replace(CIPHTER_WHITESPACE, \"\")\n\n# assuming the first 4 character from the frequency calculation are right\nSAVED_LETTERS.append(\" \")\n\n# the currently best fitting key dictionary\ncurrent_best_key = dict(zip(LIST_CHAR_FREQUENCY, dictionary_to_sorted_list_by_value(ENGLISH_CHAR_FREQUENCY)))\n\n# reference to the last highest hit rate\nlast_higher_hit_rate = 0\n\n####################################################################################################################\n#\n# INiTIAL STATISTICS BY FREQUENCY\n#\n####################################################################################################################\ninitial_cleartext = decrypt(THE_CIPHERTEXT, current_best_key)\ninitial_hit_rate = hit_rate(initial_cleartext)\nprint(\"\\ninitial hit rate --> \" + str(initial_hit_rate))\nprint(initial_cleartext[:LINE_LENGTH])\n# print(\"initial key: \")\n# pprint(current_best_key)\n\n\n####################################################################################################################\n#\n# SEARCH FOR 'the'\n#\n####################################################################################################################\nprint(\"\\ntrying to identify the word 'the'...\")\ndict_the_candidates = get_the_candidates(initial_cleartext, ' ')\nlist_the_candidates = dictionary_to_sorted_list_by_value(dict_the_candidates)\n\nfor an_index in range(1): # optionally increase range to iterate the first 'the' candidates list entries\n the_candidate = list_the_candidates[an_index]\n print(\"best 'the' candidate: '\" + the_candidate + \"'\")\n if the_candidate != \"the\":\n\n better_key_candidate = current_best_key.copy()\n\n if the_candidate[0] != \"t\":\n swap_dictionary(the_candidate[0], 't', better_key_candidate)\n if the_candidate[1] != \"h\":\n swap_dictionary(the_candidate[1], 'h', better_key_candidate)\n if the_candidate[2] != \"e\":\n swap_dictionary(the_candidate[2], 'e', better_key_candidate)\n\n hit_rate_1 = hit_rate_by_key(better_key_candidate)\n hit_rate_2 = hit_rate_by_key(current_best_key)\n\n if hit_rate_1 > hit_rate_2:\n\n SAVED_LETTERS.append(\"t\")\n SAVED_LETTERS.append(\"h\")\n SAVED_LETTERS.append(\"e\")\n\n current_best_key = better_key_candidate.copy()\n higher_hit_rate = max(hit_rate_1, hit_rate_2)\n if last_higher_hit_rate != higher_hit_rate:\n print_stats(decrypt(THE_CIPHERTEXT, current_best_key), higher_hit_rate)\n last_higher_hit_rate = higher_hit_rate\n break\n\n####################################################################################################################\n#\n# SEARCH FOR 
'of'\n#\n####################################################################################################################\nprint(\"\\ntrying to identify the word 'of'...\")\ndict_of_candidates = get_of_candidates(decrypt(THE_CIPHERTEXT, current_best_key), ' ')\nlist_of_candidates = dictionary_to_sorted_list_by_value(dict_of_candidates)\n\nfor an_index in range(1): # optionally increase range to iterate the first 'of' candidates list entries\n of_candidate = list_of_candidates[an_index]\n print(\"best 'of' candidate: '\" + of_candidate + \"'\")\n if of_candidate != \"of\":\n\n better_key_candidate = current_best_key.copy()\n\n if of_candidate[0] != \"o\":\n swap_dictionary(of_candidate[0], 'o', better_key_candidate)\n if of_candidate[1] != \"f\":\n swap_dictionary(of_candidate[1], 'f', better_key_candidate)\n\n hit_rate_1 = hit_rate_by_key(better_key_candidate)\n hit_rate_2 = hit_rate_by_key(current_best_key)\n\n if hit_rate_1 > hit_rate_2:\n\n SAVED_LETTERS.append(\"o\")\n SAVED_LETTERS.append(\"f\")\n\n current_best_key = better_key_candidate.copy()\n higher_hit_rate = max(hit_rate_1, hit_rate_2)\n if last_higher_hit_rate != higher_hit_rate:\n print_stats(decrypt(THE_CIPHERTEXT, current_best_key), higher_hit_rate)\n last_higher_hit_rate = higher_hit_rate\n break\n\n####################################################################################################################\n#\n# CHECK DOUBLE LETTERS\n#\n####################################################################################################################\nprint(\"\\ncheck double letters occurences...\\n\")\n\nDEPTH = 14 # range till 0 - 14 (length of the list double letter alternatives)\n\ndouble_letters_dictionary = get_double_letters(CIPHERTEXT_WO_WHITESPACES)\ndouble_letters_orderes_list = dictionary_to_sorted_list_by_value(double_letters_dictionary)[:DEPTH]\n\nfor double_letter_candidate in double_letters_orderes_list:\n for double_letter_candidate_alternative in [\"l\", \"s\", \"o\", \"t\", \"f\", \"p\", \"r\", \"m\", \"c\", \"n\", \"d\", \"g\", \"i\", \"b\"][\n :DEPTH]:\n character_1_to_swap = double_letter_candidate[0]\n character_2_to_swap = double_letter_candidate_alternative\n\n swap_result = swap_and_check(character_1_to_swap, character_2_to_swap, current_best_key, last_higher_hit_rate)\n last_higher_hit_rate = swap_result[0]\n current_best_key = swap_result[1]\n\n####################################################################################################################\n#\n# HANDLE N-GRAMS\n#\n####################################################################################################################\ncleartext_without_whitespaces = decrypt(THE_CIPHERTEXT, current_best_key).replace(\" \", \"\")\n\nprint(\"\\ncalculating bigrams\")\ndict_bigrams = bigram_frequency(cleartext_without_whitespaces)\nenglish_bigram_list = dictionary_to_sorted_list_by_value(ENGLISH_BIGRAM_FREQUENCY)\ncipher_bigram_list = dictionary_to_sorted_list_by_value(dict_bigrams)\n\nmapped_bigrams = zip(english_bigram_list, cipher_bigram_list)\nweight_bigrams_dictionary = weight_bigrams(mapped_bigrams, None)\n\nprint(\"\\ncalculating trigrams\")\ndict_trigrams = trigram_frequency(cleartext_without_whitespaces)\nenglish_trigram_list = dictionary_to_sorted_list_by_value(ENGLISH_TRIGRAM_FREQUENCY)\ncipher_trigram_list = dictionary_to_sorted_list_by_value(dict_trigrams)\n\nmapped_trigrams = zip(english_trigram_list, cipher_trigram_list)\nweight_trigrams_dictionary = weight_bigrams(mapped_trigrams, 
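The n-gram stage of this script pairs the cipher's most frequent bigrams and trigrams with English's by rank and lets each aligned character pair vote on a letter correspondence. A compact sketch of that rank-pairing vote on toy data (names and reference ranking are illustrative):

from collections import Counter

def ngram_counts(text, n):
    return Counter(text[i:i+n] for i in range(len(text) - n + 1))

english_rank = ['th', 'he', 'in']  # abbreviated reference ranking
cipher_rank = [g for g, _ in ngram_counts("qexqeyqei", 2).most_common(3)]

votes = Counter()
for eng, cip in zip(english_rank, cipher_rank):
    for e_ch, c_ch in zip(eng, cip):
        votes[(e_ch, c_ch)] += 1  # candidate mapping: e_ch could decrypt c_ch
print(votes.most_common())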
weight_bigrams_dictionary)\nweight_trigrams_list = dictionary_to_sorted_list_by_value(weight_trigrams_dictionary)\n\nprint(\"\\niterate ngrams list\")\nfor an_entry in weight_trigrams_list:\n\n character_1 = an_entry[0]\n character_2 = an_entry[1]\n\n if character_1 == character_2:\n continue\n\n if character_1 in SAVED_LETTERS or character_2 in SAVED_LETTERS:\n continue\n\n swap_result = swap_and_check(character_1, character_2, current_best_key, last_higher_hit_rate)\n last_higher_hit_rate = swap_result[0]\n current_best_key = swap_result[1]\n\n####################################################################################################################\n#\n# OPTIONALLY LONG WORDS CHECK\n#\n####################################################################################################################\nif \"-longWords\" in sys.argv:\n\n print(\"\\ndo long word check..\")\n\n LONG_WORD_LENGTH = 14\n\n long_words = get_words_from_wordlist_by_length(LONG_WORD_LENGTH)\n print(\"count words with a length of \" + str(LONG_WORD_LENGTH) + \" from wordlist: \" + str(len(long_words)))\n\n long_words_encrypted = get_long_words(decrypt(THE_CIPHERTEXT, current_best_key), LONG_WORD_LENGTH)\n print(\"count words with a length of \" + str(LONG_WORD_LENGTH) + \" from cleartext: \" + str(\n len(long_words_encrypted)) + \"\\n\")\n\n for long_word in long_words:\n\n for long_word_encrypted in long_words_encrypted:\n\n better_key_candidate = current_best_key.copy()\n\n already_swapped_characters = []\n\n for an_index in range(len(long_word_encrypted)):\n\n character_1_to_swap = long_word[an_index]\n character_2_to_swap = long_word_encrypted[an_index]\n\n if character_1_to_swap not in already_swapped_characters:\n if character_1_to_swap != character_2_to_swap:\n swap_dictionary(character_2_to_swap, character_1_to_swap, better_key_candidate)\n\n already_swapped_characters.append(character_1_to_swap)\n already_swapped_characters.append(character_2_to_swap)\n\n hit_rate_1 = hit_rate_by_key(current_best_key)\n hit_rate_2 = hit_rate_by_key(better_key_candidate)\n\n if hit_rate_1 < hit_rate_2:\n current_best_key = better_key_candidate.copy()\n\n higher_hit_rate = max(hit_rate_1, hit_rate_2)\n\n if last_higher_hit_rate != higher_hit_rate:\n print(\"swapping word '\" + long_word + \"' with '\" + long_word_encrypted + \"'\")\n print_stats(decrypt(THE_CIPHERTEXT, current_best_key), higher_hit_rate)\n\n last_higher_hit_rate = higher_hit_rate\n\n####################################################################################################################\n#\n# SWAP WEAKEST CHAR WITH NEIGHBOURS\n#\n####################################################################################################################\nprint(\"\\ncalculate current char frquency...\")\ncurrent_char_frequency = char_frequency(decrypt(THE_CIPHERTEXT, current_best_key))\n\nreversed_sorted_characters_list = list(reversed(dictionary_to_sorted_list_by_value(current_char_frequency)))\n\nprint(\"current char frequencies reversed: \" + str(reversed_sorted_characters_list) + \"\\n\")\n\nfor index_1 in range(len(reversed_sorted_characters_list) - len(SAVED_LETTERS) - 1):\n\n for index_2 in range(len(reversed_sorted_characters_list) - len(SAVED_LETTERS) - 1):\n\n for an_index in [0, 1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 7, -7, 8, -8]:\n\n better_key_candidate = current_best_key.copy()\n\n second_index = index_2 + an_index\n\n # avoid array out of bounds exception\n if not (second_index >= len(reversed_sorted_characters_list) or 
second_index < 0):\n\n character_1 = reversed_sorted_characters_list[index_2]\n character_2 = reversed_sorted_characters_list[second_index]\n\n swap_dictionary(character_1, character_2, better_key_candidate)\n\n hit_rate_1 = hit_rate_by_key(current_best_key)\n hit_rate_2 = hit_rate_by_key(better_key_candidate)\n\n if hit_rate_1 < hit_rate_2:\n current_best_key = better_key_candidate.copy()\n\n higher_hit_rate = max(hit_rate_1, hit_rate_2)\n\n if last_higher_hit_rate != higher_hit_rate:\n print(\"swapping '\" + character_1 + \"' with '\" + character_2 + \"'\")\n print_stats(decrypt(THE_CIPHERTEXT, current_best_key), higher_hit_rate)\n\n last_higher_hit_rate = higher_hit_rate\n\n####################################################################################################################\n#\n# MANUAL CHARACTERS\n#\n####################################################################################################################\n\nif len(sys.argv) > 2:\n print(\"\\nhandle manuel swapping of characters...\")\n manuel_swap_list = []\n for parameter_kandidate in sys.argv:\n if len(parameter_kandidate) == 1:\n manuel_swap_list.append(parameter_kandidate)\n\n if len(manuel_swap_list) > 0 and len(manuel_swap_list) % 2 == 0:\n print(\"characters to swap: \" + str(manuel_swap_list) + \"\\n\")\n an_index = 0\n while an_index < len(manuel_swap_list) - 1:\n\n character_1 = manuel_swap_list[an_index]\n character_2 = manuel_swap_list[an_index + 1]\n\n better_key_candidate = current_best_key.copy()\n\n swap_dictionary(character_1, character_2, better_key_candidate)\n\n hit_rate_1 = hit_rate_by_key(current_best_key)\n hit_rate_2 = hit_rate_by_key(better_key_candidate)\n\n if hit_rate_1 < hit_rate_2:\n current_best_key = better_key_candidate.copy()\n\n higher_hit_rate = max(hit_rate_1, hit_rate_2)\n\n if last_higher_hit_rate != higher_hit_rate:\n print(\"swapping '\" + character_1 + \"' with '\" + character_2 + \"'\")\n print_stats(decrypt(THE_CIPHERTEXT, current_best_key), higher_hit_rate)\n\n an_index += 2\n\nprint(\"Finished without fullfilling result:\")\nprint(\"\\nCurrent state:\")\nprint_stats(decrypt(THE_CIPHERTEXT, current_best_key), higher_hit_rate)\nprint_duration()\n","sub_path":"Uebung1/Ue_1_A_1_Decrypt.py","file_name":"Ue_1_A_1_Decrypt.py","file_ext":"py","file_size_in_byte":24502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"132966502","text":"#!/usr/bin/env python\n\nimport rospy\nfrom sensor_msgs.msg import Image # Image is the message type\nfrom cv_bridge import CvBridge # Package to convert between ROS and OpenCV Images\n\nimport sys\nimport argparse\nimport numpy as np\nimport cv2\nimport os, glob, time\n\nfrom detect_arrow_webcam import *\n\nROS_TOPIC = \"/realsense/color/image_raw\" #'/mrt/camera/color/image_raw'\n\n\nclass ImageSubscriber:\n\n \"\"\"Subscribes to ROS Topic and calls image_callback\"\"\"\n\n def __init__(self, image_topic):\n \"\"\"\n\n :image_topic: string\n\n \"\"\"\n rospy.init_node(\"image_sub\", anonymous=True)\n self.br = CvBridge()\n self.sub = rospy.Subscriber(image_topic, Image, self.image_callback)\n self.font = cv2.FONT_HERSHEY_SIMPLEX\n # Blue color in BGR\n self.color = (255, 255, 0)\n self.org = (50, 50)\n self.fontScale = 1\n self.thickness = 2\n self.vid_file = cv2.VideoWriter(\n \"arrow.mp4\", cv2.VideoWriter_fourcc(*\"MP4V\"), 10, (640, 480)\n )\n rospy.spin()\n print(\"all done!\")\n self.vid_file.release()\n cv2.destroyAllWindows()\n\n def image_callback(self, data):\n \"\"\"Converts 
ROS Image, passes to arrow_detect and displays detected\n\n :data: Image\n :returns: None\n\n \"\"\"\n cv_img = self.br.imgmsg_to_cv2(data)\n found, theta, orient, direction, output = arrow_detect(cv_img)\n print(\"shape: \", output.shape)\n\n if direction == 1:\n direction = \"Right\"\n elif direction is None:\n direction = \"not found\"\n else:\n direction = \"Left\"\n\n output = cv2.putText(\n output,\n direction,\n self.org,\n self.font,\n self.fontScale,\n self.color,\n self.thickness,\n cv2.LINE_AA,\n )\n\n self.vid_file.write(output)\n cv2.imshow(\"Arrow\", output)\n cv2.waitKey(20)\n # if cv2.waitKey(1) & 0xFF == ord('q'):\n # break\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-t\", \"--topic\", help=\"ROS Topic to subscribe to\", default=ROS_TOPIC\n )\n args = parser.parse_args()\n\n subscriber = ImageSubscriber(args.topic)\n # cv2.destroyAllWindows()\n","sub_path":"src/motion_plan/src/detect_arrow_ros.py","file_name":"detect_arrow_ros.py","file_ext":"py","file_size_in_byte":2315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"156190543","text":"################################################################\n# GPT2 Language Model\n################################################################\nimport sys\nimport os\n\nroot = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))\nif root not in sys.path:\n sys.path.append(root)\n\n\nimport pandas as pd\nimport numpy as np\nimport torch\nfrom pytorch_transformers import BertTokenizer, BertModel, BertForMaskedLM, WordpieceTokenizer, GPT2Tokenizer, GPT2Model\nfrom . import utils\nfrom .tokenizer import tokenize \n\n\nparameters = {'small':{'LAYER_COUNT':12, 'FEATURE_COUNT':768},\n 'medium':{'LAYER_COUNT':24, 'FEATURE_COUNT':1024}\n }\n\n\n\nclass GPT2(object):\n \"\"\"Container module for GPT2.\"\"\"\n\n def __init__(self, gpt2_model, language, name, loi, cuda=False):\n super(GPT2, self).__init__()\n # Load pre-trained model tokenizer (vocabulary)\n # Crucially, do not do basic tokenization; PTB is tokenized. Just do wordpiece tokenization.\n if gpt2_model not in ['small', 'medium']:\n raise ValueError(\"GPT2 model must be small or medium\")\n self.model = GPT2Model.from_pretrained('gpt2{}'.format('' if gpt2_model=='small' else '-medium'), output_hidden_states=True)\n self.tokenizer = GPT2Tokenizer.from_pretrained('gpt2{}'.format('' if gpt2_model=='small' else '-medium'))\n\n self.language = language\n self.LAYER_COUNT = parameters[gpt2_model]['LAYER_COUNT']\n self.FEATURE_COUNT = parameters[gpt2_model]['FEATURE_COUNT']\n self.name = name\n self.loi = np.array(loi) if loi else np.arange(parameters[gpt2_model]['LAYER_COUNT']) # loi: layers of interest\n self.cuda = cuda\n\n def __name__(self):\n return self.name\n\n\n def generate(self, path, language, textgrid):\n \"\"\" Input text should have one sentence per line, where each word and every \n symbol is separated from the following by a space. 
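The GPT2 record above pins the older pytorch_transformers package; with the current transformers library the same layer-activation extraction reads roughly as follows (a sketch; exact return types vary by library version):

import torch
from transformers import GPT2Model, GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
model = GPT2Model.from_pretrained("gpt2", output_hidden_states=True)
model.eval()

inputs = tokenizer("hello world", return_tensors="pt")
with torch.no_grad():
    out = model(**inputs)
# out.hidden_states: tuple of (n_layers + 1) tensors, each (batch, tokens, features)
acts = torch.stack(out.hidden_states[1:])  # drop the embedding layer, as the record does
print(acts.shape)  # e.g. torch.Size([12, 1, 2, 768]) for the 12-layer model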
No token should be included,\n as they are automatically integrated during tokenization.\n \"\"\"\n activations = []\n self.model.eval()\n iterator = tokenize(path, language, path_like=True, train=False)\n if self.cuda:\n self.model.to('cuda')\n for line in iterator:\n line = line.strip() # Remove trailing characters\n\n tokenized_text = self.tokenizer.tokenize(line)\n indexed_tokens = self.tokenizer.convert_tokens_to_ids(tokenized_text)\n mapping = utils.match_tokenized_to_untokenized(tokenized_text, line)\n\n # Convert inputs to PyTorch tensors\n tokens_tensor = torch.tensor([indexed_tokens]).to('cuda') if self.cuda else torch.tensor([indexed_tokens])\n\n with torch.no_grad():\n encoded_layers = self.model(tokens_tensor) # last_hidden_state, pooled_last_hidden_states, all_hidden_states\n # filtration\n if self.cuda:\n encoded_layers = encoded_layers.to('cpu')\n encoded_layers = np.vstack(encoded_layers[2][1:]) # retrieve all the hidden states (dimension = layer_count * len(tokenized_text) * feature_count)\n encoded_layers = encoded_layers[self.loi, :, :]\n activations += utils.extract_activations_from_tokenized(encoded_layers, mapping)\n \n result = pd.DataFrame(np.vstack(activations), columns=['layer-{}-{}'.format(layer, index) for layer in self.loi for index in range(self.FEATURE_COUNT)])\n return result\n\n","sub_path":"models/english/GPT2/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"5738429","text":"import asyncio\nimport unittest\n\nfrom unittest import mock\nfrom tests.aio_test_base import asynctest\n\nfrom aiohttp_cors.preflight_handler import _PreflightHandler\n\n\nclass TestPreflightHandler(unittest.TestCase):\n \"\"\"Unit tests for PreflightHandler\"\"\"\n\n def setUp(self):\n self.loop = asyncio.new_event_loop()\n\n def tearDown(self):\n self.loop.close()\n\n @asynctest\n @asyncio.coroutine\n def test_raises_when_handler_not_extend(self):\n request = mock.Mock()\n handler = _PreflightHandler()\n with self.assertRaises(NotImplementedError):\n yield from handler._get_config(request, 'origin', 'GET')\n","sub_path":"tests/unit/test_preflight_handler.py","file_name":"test_preflight_handler.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"407976603","text":"class Solution:\n def numUniqueEmails(self, emails):\n \"\"\"\n :type emails: List[str]\n :rtype: int\n \"\"\"\n uni_email = []\n for e in emails:\n local,domain = e.split('@')\n noplus = local.split('+')[0]\n nodot = noplus.replace('.','')\n new = nodot + '@' + domain\n if new not in uni_email:\n uni_email.append(new)\n #print(local,domain)\n return len(uni_email)\n\n\ninput = [\"test.email+alex@leetcode.com\",\"test.e.mail+bob.cathy@leetcode.com\",\"testemail+david@lee.tcode.com\"]\ns = Solution()\nprint(s.numUniqueEmails(input))\n","sub_path":"p929.py","file_name":"p929.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"156840390","text":"# -*- coding: utf-8 -*-\n# MegEngine is Licensed under the Apache License, Version 2.0 (the \"License\")\n#\n# Copyright (c) 2014-2021 Megvii Inc. 
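numUniqueEmails in the p929 record above tests list membership for every address, which is quadratic; a set gives the same normalization with constant-time inserts. A minimal alternative sketch, not the record's code:

def num_unique_emails(emails):
    seen = set()
    for e in emails:
        local, domain = e.split('@')
        local = local.split('+')[0].replace('.', '')  # drop the +suffix and dots
        seen.add(local + '@' + domain)
    return len(seen)

assert num_unique_emails(["a.b+c@x.com", "ab@x.com"]) == 1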
All rights reserved.\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nimport numpy as np\n\nfrom .._imperative_rt import make_const\nfrom .._imperative_rt.core2 import SymbolVar, Tensor\n\n\nclass Const:\n def __init__(self, value=None, *, dtype=None, device=None):\n self.value = np.asarray(value, dtype=dtype)\n self.dtype = dtype\n self.device = device\n\n def __call__(self, *reference):\n from ...tensor import Tensor\n\n device = self.device\n\n if len(reference) != 0:\n reference = reference[0]\n assert isinstance(\n reference, (SymbolVar, Tensor)\n ), \"Reference should be Tensor or VarNode\"\n\n if device is None:\n device = reference.device\n\n if isinstance(reference, SymbolVar):\n cls = type(reference)\n rst = cls(make_const(reference.graph, self.value, device, self.dtype))\n return (rst,)\n\n return (Tensor(self.value, self.dtype, self.device, True),)\n","sub_path":"imperative/python/megengine/core/ops/special.py","file_name":"special.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"133239178","text":"import os\n\nfrom flask_script import Manager, Shell\n\nfrom tracker_app import create_app\nfrom tracker_app.database.db_connection import DbHandler\n\ndb = DbHandler()\n\napp = create_app(os.getenv('APP_SETTINGS') or 'default')\nmanager = Manager(app)\n\n\ndef make_shell_context():\n return dict(app=app)\n\n\nmanager.add_command('shell', Shell(make_context=make_shell_context))\n\n# create db tables\n\n\n@manager.command\ndef create_db():\n \"\"\"Creates the db tables\"\"\"\n db.create_all()\n\n\n@manager.command\ndef drop_db():\n \"\"\"Drops the db tables\"\"\"\n db.drop_all()\n\n\nif __name__ == '__main__':\n manager.run()\n","sub_path":"manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"366342499","text":"import time\nimport math\nimport numpy as np\nfrom collections import Counter\nimport pprint\n\nfrom sklearn.tree import DecisionTreeClassifier\n\n# TODO: Debug - Pruning, CART Pruning\n# TODO: Try batch prediction and visualization\n# TODO: Support Continuous Data ; Feed sample-weight\n\n\n# Util\n\nclass Cluster:\n def __init__(self, data, labels, base=2):\n self._data = np.array(data).T\n self._counters = Counter(labels)\n self._labels = np.array(labels)\n self._cache = None\n self._base = base\n\n def ent(self, ent=None, eps=1e-12):\n _len = len(self._labels)\n if ent is None:\n ent = [_val for _val in self._counters.values()]\n return max(eps, -sum([_c / _len * math.log(_c / _len, self._base) for _c in ent]))\n\n def gini(self, p=None):\n if p is None:\n p = [_val for _val in self._counters.values()]\n return 1 - sum([(_p / len(self._labels)) ** 2 for _p in p])\n\n def con_chaos(self, idx, criteria=\"ent\"):\n if criteria == \"ent\":\n _method = lambda cluster: cluster.ent()\n elif criteria == \"gini\":\n _method = lambda cluster: cluster.gini()\n else:\n raise NotImplementedError(\"Conditional info criteria '{}' not defined\".format(criteria))\n data = self._data[idx]\n features = list(sorted(set(data)))\n self._cache = tmp_labels = [data == feature for feature in features]\n label_lst = [self._labels[label] for label in tmp_labels]\n rs = 0\n for data_label, tar_label in 
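The Cluster class in the CvDTree record reduces entropy and Gini impurity to the label counts. The two formulas on a toy label set, with the values worked by hand for reference:

import math
from collections import Counter

labels = ['a', 'a', 'b', 'c']
counts = Counter(labels).values()
n = len(labels)
ent = -sum(c / n * math.log2(c / n) for c in counts)  # 1.5 bits here
gini = 1 - sum((c / n) ** 2 for c in counts)          # 0.625 here
print(ent, gini)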
zip(tmp_labels, label_lst):\n tmp_data = self._data.T[data_label]\n _ent = _method(Cluster(tmp_data, tar_label, base=self._base))\n rs += len(tmp_data) / len(data) * _ent\n return rs\n\n def info_gain(self, idx, criteria=\"ent\", get_con_chaos=False):\n if criteria in (\"ent\", \"ratio\"):\n _con_chaos = self.con_chaos(idx)\n _gain = self.ent() - _con_chaos\n if criteria == \"ratio\":\n _gain = _gain / self.ent([np.sum(_cache) for _cache in self._cache])\n elif criteria == \"gini\":\n _con_chaos = self.con_chaos(idx, criteria=\"gini\")\n _gain = self.gini() - _con_chaos\n else:\n raise NotImplementedError(\"Info_gain criteria '{}' not defined\".format(criteria))\n return (_gain, _con_chaos) if get_con_chaos else _gain\n\n\n# Node\n\nclass CvDNode:\n def __init__(self, tree=None, max_depth=None, base=2, ent=None,\n depth=0, parent=None, is_root=True, prev_feat=\"Root\"):\n self._data = self.labels = None\n self._max_depth = max_depth\n self._base = base\n self._ent = ent\n self.criteria = None\n self.children = {}\n self.category = None\n\n self.tree = tree\n if tree is not None:\n tree.nodes.append(self)\n self.feature_dim = None\n self._depth = depth\n self.parent = parent\n self._is_root = is_root\n self._prev_feat = prev_feat\n self.weight = 0\n self.leafs = {}\n self.pruned = False\n\n def __getitem__(self, item):\n if isinstance(item, str):\n return getattr(self, \"_\" + item)\n return\n\n @property\n def key(self):\n return self._depth, self._prev_feat, id(self)\n\n @property\n def height(self):\n if self.category is not None:\n return 1\n return 1 + max([_child.height for _child in self.children.values()])\n\n @property\n def prev_feat(self):\n return self._prev_feat\n\n def copy(self):\n _new_node = self.__class__(\n None, self._max_depth, self._base, self._ent,\n self._depth, self.parent, self._is_root, self._prev_feat)\n _new_node.tree = self.tree\n _new_node.feature_dim = self.feature_dim\n _new_node.category = self.category\n _new_node.labels = self.labels\n _new_node.pruned = self.pruned\n if self.children:\n for key, node in self.children.items():\n _new_node.children[key] = node.copy()\n else:\n _new_node.category = self.category\n if self.leafs:\n for key, leaf in self.leafs.items():\n _new_node.leafs[key] = leaf.copy()\n return _new_node\n\n def feed_tree(self, tree):\n self.tree = tree\n self.tree.nodes.append(self)\n\n def feed_data(self, data, labels):\n self._data = np.array(data).T\n self.labels = np.array(labels)\n\n def stop(self, eps):\n if (\n self._data.shape[1] == 1 or (self._ent is not None and self._ent <= eps)\n or (self._max_depth is not None and self._depth >= self._max_depth)\n ):\n self._handle_terminate()\n return True\n return False\n\n def early_stop(self, max_gain, eps):\n if max_gain <= eps:\n self._handle_terminate()\n return True\n return False\n\n def crop(self, x=None):\n x = self._data if x is None else x\n _mask = np.ones(len(x), dtype=np.bool)\n _mask[self.feature_dim] = False\n return x[_mask]\n\n def get_class(self):\n _counter = Counter(self.labels)\n return max(_counter, key=(lambda key: _counter[key]))\n\n def get_threshold(self):\n if self.category is None:\n rs = 0\n for leaf in self.leafs.values():\n _cluster = Cluster(None, leaf, self._base)\n rs += len(leaf) * _cluster.ent()\n return Cluster(None, self.labels, self._base).ent() - rs / (self.weight - 1)\n return 0\n\n def _gen_children(self, features, new_data, con_chaos):\n for feat in set(features):\n _feat_mask = features == feat\n _new_node = self.__class__(\n self.tree, 
self._max_depth, self._base, ent=con_chaos,\n depth=self._depth + 1, parent=self, is_root=False, prev_feat=feat)\n self.children[feat] = _new_node\n _new_node.fit(new_data[:, _feat_mask].T, self.labels[_feat_mask])\n\n def _handle_terminate(self):\n self.tree.depth = max(self._depth, self.tree.depth)\n self.category = self.get_class()\n _parent = self\n while _parent is not None:\n _parent.leafs[self.key] = self.labels\n _parent.weight += 1\n _parent = _parent.parent\n\n def fit(self, data, labels, eps=1e-8):\n if data is not None and labels is not None:\n self.feed_data(data, labels)\n if self.stop(eps):\n return\n _cluster = Cluster(self._data.T, self.labels, self._base)\n _max_gain, _con_chaos = _cluster.info_gain(0, criteria=self.criteria, get_con_chaos=True)\n _max_feature = 0\n for i in range(1, len(self._data)):\n _tmp_gain, _tmp_con_chaos = _cluster.info_gain(i, criteria=self.criteria, get_con_chaos=True)\n if _tmp_gain > _max_gain:\n (_max_gain, _con_chaos), _max_feature = (_tmp_gain, _tmp_con_chaos), i\n if self.early_stop(_max_gain, eps):\n return\n self.feature_dim = _max_feature\n self._gen_children(self._data[_max_feature], self.crop(), _con_chaos)\n if self._is_root:\n self.tree.prune()\n\n def prune(self):\n self.category = self.get_class()\n dw = self.weight - 1\n self.weight = 1\n _pop_lst = [key for key in self.leafs]\n self.mark_pruned()\n _parent = self\n while _parent is not None:\n for _k in _pop_lst:\n _parent.leafs.pop(_k)\n _parent.leafs[self.key] = self.labels\n _parent.weight -= dw\n _parent = _parent.parent\n self.children = {}\n\n def mark_pruned(self):\n self.pruned = True\n if self.children is not None:\n for _child in self.children.values():\n _child.mark_pruned()\n\n def predict_one(self, x):\n if self.category is not None:\n return self.category\n try:\n return self.children[x[self.feature_dim]].predict_one(self.crop(x))\n except KeyError:\n return self.get_class()\n\n def predict(self, x):\n if self.category is not None:\n if self._is_root:\n return [self.category] * len(x)\n return self.category\n x = np.atleast_2d(x)\n return [self.predict_one(xx) for xx in x]\n\n def view(self, indent=4):\n print(\" \" * indent * self._depth, self)\n for _node in sorted(self.children.values()):\n _node.view()\n\n def __lt__(self, other):\n return self.prev_feat < other.prev_feat\n\n def __str__(self):\n if self.children:\n return \"CvDNode ({}) ({} -> {})\".format(\n self._depth, self._prev_feat, self.feature_dim)\n return \"CvDNode ({}) ({} -> class: {})\".format(\n self._depth, self._prev_feat, self.category)\n\n __repr__ = __str__\n\n\nclass ID3Node(CvDNode):\n def __init__(self, *args, **kwargs):\n CvDNode.__init__(self, *args, **kwargs)\n self.criteria = \"ent\"\n\n\nclass C45Node(CvDNode):\n def __init__(self, *args, **kwargs):\n CvDNode.__init__(self, *args, **kwargs)\n self.criteria = \"ratio\"\n\n\n# Tree\n\nclass CvDBase:\n def __init__(self, max_depth=None, node=None):\n self.nodes = []\n self.trees = []\n self._threshold_cache = None\n self._max_depth = max_depth\n if node is None:\n self.root = CvDNode(self, max_depth)\n else:\n self.root = node\n self.root.feed_tree(self)\n self.depth = 1\n\n @staticmethod\n def acc(y, y_pred):\n return np.sum(np.array(y) == np.array(y_pred)) / len(y)\n\n def copy(self):\n _new_tree = self.__class__(self._max_depth, node=self.root.copy())\n _new_tree.nodes = [_node.copy() for _node in self.nodes]\n _new_tree.depth = self.depth\n return _new_tree\n\n def fit(self, data=None, labels=None, eps=1e-8):\n self.root.fit(data, 
labels, eps)\n _arg = np.argmax([CvDBase.acc(labels, tree.predict(data)) for tree in self.trees])\n _tar_tree = self.trees[_arg]\n self.nodes = _tar_tree.nodes\n self.depth = _tar_tree.depth\n self.root = _tar_tree.root\n\n def prune(self):\n self.trees.append(self.copy())\n if self.depth <= 2:\n return\n _nodes = [_node for _node in self.nodes if _node.category is None]\n if self._threshold_cache is None:\n _thresholds = [_node.get_threshold() for _node in _nodes]\n else:\n _thresholds = self._threshold_cache\n _arg = np.argmin(_thresholds)\n _nodes[_arg].prune()\n _thresholds[_arg] = _nodes[_arg].get_threshold()\n self.depth = self.root.height\n for i in range(len(self.nodes) - 1, -1, -1):\n if self.nodes[i].pruned:\n self.nodes.pop(i)\n for i in range(len(_thresholds) - 1, -1, -1):\n if _nodes[i].pruned:\n _thresholds.pop(i)\n self._threshold_cache = _thresholds\n if self.depth > 2:\n self.prune()\n else:\n self.trees.append(self.copy())\n pass\n\n def predict_one(self, x):\n return self.root.predict_one(x)\n\n def predict(self, x):\n return self.root.predict(x)\n\n def view(self):\n self.root.view()\n\n def __str__(self):\n return \"CvDTree ({})\".format(self.depth)\n\n __repr__ = __str__\n\n\nclass ID3Tree(CvDBase):\n def __init__(self, *args, **kwargs):\n if \"node\" not in kwargs:\n CvDBase.__init__(self, node=ID3Node(), *args, **kwargs)\n else:\n CvDBase.__init__(self, *args, **kwargs)\n\n\nclass C45Tree(CvDBase):\n def __init__(self, *args, **kwargs):\n if \"node\" not in kwargs:\n CvDBase.__init__(self, node=C45Node(), *args, **kwargs)\n else:\n CvDBase.__init__(self, *args, **kwargs)\n\nif __name__ == '__main__':\n _data, _x, _y = [], [], []\n with open(\"data.txt\", \"r\") as file:\n for line in file:\n _data.append(line.split(\",\"))\n np.random.shuffle(_data)\n for line in _data:\n _y.append(line.pop(0))\n _x.append(line)\n _x, _y = np.array(_x).T, np.array(_y)\n for _i, line in enumerate(_x):\n _dic = {_c: i for i, _c in enumerate(set(line))}\n for _j, elem in enumerate(line):\n _x[_i][_j] = _dic[elem]\n _x = _x.T\n train_num = 5000\n x_train = _x[:train_num]\n y_train = _y[:train_num]\n x_test = _x[train_num:]\n y_test = _y[train_num:]\n\n _t = time.time()\n _tree = C45Tree()\n _tree.fit(x_train, y_train)\n _tree.view()\n _y_pred = _tree.predict(x_test)\n print(np.sum(_y_pred == y_test) / len(y_test))\n print(time.time() - _t)\n\n _t = time.time()\n _sk_tree = DecisionTreeClassifier()\n _sk_tree.fit(x_train, y_train)\n _y_pred = _tree.predict(x_test)\n print(np.sum(_y_pred == y_test) / len(y_test))\n print(time.time() - _t)\n","sub_path":"CvDTree/CvDTree.py","file_name":"CvDTree.py","file_ext":"py","file_size_in_byte":13004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"149705412","text":"import matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.ndimage as ndi\n\n\ndef quick_plot(image, median_smoothig=3):\n \"\"\"Display image with matplotlib.pyplot\n\n Parameters\n ----------\n image : Adorned image or numpy array\n Input image.\n\n Returns\n -------\n fig, ax\n Matplotlib figure and axis objects.\n \"\"\"\n fig, ax = plt.subplots(figsize=(10, 7))\n display_image = image.data\n if median_smoothig is not None:\n display_image = ndi.median_filter(display_image, size=median_smoothig)\n height, width = display_image.shape\n try:\n pixelsize_x = image.metadata.binary_result.pixel_size.x\n pixelsize_y = image.metadata.binary_result.pixel_size.y\n except AttributeError:\n extent_kwargs = 
[-(width / 2), +(width / 2), -(height / 2), +(height / 2)]\n ax.set_xlabel(\"Distance from origin (pixels)\")\n else:\n extent_kwargs = [\n -(width / 2) * pixelsize_x,\n +(width / 2) * pixelsize_x,\n -(height / 2) * pixelsize_y,\n +(height / 2) * pixelsize_y,\n ]\n ax.set_xlabel(\n \"Distance from origin (meters) \\n\" \"1 pixel = {} meters\".format(pixelsize_x)\n )\n ax.set_xlim(extent_kwargs[0], extent_kwargs[1])\n ax.set_ylim(extent_kwargs[2], extent_kwargs[3])\n ax.imshow(display_image, cmap=\"gray\", extent=extent_kwargs)\n return fig, ax\n\n\ndef select_point(image):\n \"\"\"Return location of interactive user click on image.\n\n Parameters\n ----------\n image : AdornedImage or 2D numpy array.\n\n Returns\n -------\n coords\n Coordinates of last point clicked in the image.\n Coordinates are in x, y format.\n Units are the same as the matplotlib figure axes.\n \"\"\"\n fig, ax = quick_plot(image)\n coords = []\n\n def on_click(event):\n print(event.ydata, event.xdata)\n coords.append(event.ydata)\n coords.append(event.xdata)\n\n fig.canvas.mpl_connect(\"button_press_event\", on_click)\n plt.show()\n return np.flip(coords[-2:], axis=0) # coordintes in x, y format\n\n\ndef _rectangles_overlap(bottomleft_1, topright_1, bottomleft_2, topright_2):\n \"\"\"Compare two rectangles and return True if they are overlapping.\n\n Parameters\n ----------\n bottomleft_1 : listlike, float\n x, y coordinate of bottom left corner of rectangle 1.\n topright_1 : listlike, float\n x, y coordinate of top right corner of rectangle 1.\n bottomleft_2 : listlike, float\n x, y coordinate of bottom left corner of rectangle 2.\n topright_2 : listlike, float\n x, y coordinate of top right corner of rectangle 2.\n\n Returns\n -------\n boolean\n True if rectangles are overlapping, False if they do not overlap.\n \"\"\"\n # check if bottom_left_1 is above top_right_2\n if bottomleft_1[1] > topright_2[1]:\n return False\n # check if bottom_left_2 is above top_right_1\n elif bottomleft_2[1] > topright_1[1]:\n return False\n # check if top_right_1 is to the left of bottom_left_2\n elif topright_1[0] < bottomleft_2[0]:\n return False\n # check if top_right_2 is to the left of bottom_left_1\n elif topright_2[0] < bottomleft_1[0]:\n return False\n # else, rectangles are overlapping\n else:\n return True\n\n\nclass InteractiveRectangle(object):\n def __init__(\n self,\n fig,\n ax,\n roi_size_x=1e-6,\n roi_size_y=1e-6,\n fov_x=None,\n fov_y=None,\n central_lamella_height=None,\n existing_fiducial=None,\n min_distance_from_lamella=0.0,\n ):\n \"\"\"Interactive tool for the user to click and set ROI position.\n\n Parameters\n ----------\n fig : matplotlib figure object\n Figure displaying ion beam image on real space axes.\n ax : matplotlib axes object\n Figure axes must be in real space units.\n roi_size_x : float, optional\n The size in real space of the ROI in x, by default 1e-6\n roi_size_y : float, optional\n The size in real space of the ROI in y, by default 1e-6\n fov_x : listlike, float, optional\n Field of view minimum and maximum in x, by default None\n fov_y : listlike, float, optional\n Field of view minimum and maximum in y, by default None\n central_lamella_height : float, optional\n Height of lamella region, by default None\n existing_fiducial : Matplotlib rectangle patch, optional\n min_distance_from_lamella : float, optional\n Separation between fiducial and lamella milling in real space,\n by default 0.\n \"\"\"\n self.fig = fig\n self.ax = ax\n self.roi_size_x = roi_size_x\n self.roi_size_y = roi_size_y\n 
self.field_of_view_x = fov_x\n self.field_of_view_y = fov_y\n self.central_lamella_height = central_lamella_height\n self.buffer = min_distance_from_lamella\n self.existing_fiducial = existing_fiducial\n self.coords = []\n\n self.rect = matplotlib.patches.Rectangle((0, 0), 0, 0, fill=False, color=\"y\")\n self.ax.add_artist(self.rect)\n if central_lamella_height:\n self.rect_lamella = matplotlib.patches.Rectangle(\n (0, 0), 0, 0, fill=False, color=\"c\"\n )\n self.ax.add_artist(self.rect_lamella)\n self.ax.set_title(\"Click to set the ROI marker\")\n if existing_fiducial:\n self.ax.add_artist(existing_fiducial)\n self.fig.canvas.mpl_connect(\"button_press_event\", self.on_click)\n\n def on_click(self, event):\n if event.inaxes is None:\n return\n # Ensure we are not too close to the edge\n if self.field_of_view_x:\n if (event.xdata - (self.roi_size_x / 2)) <= self.field_of_view_x[0]:\n print(\"Too close to the edge, please reselect.\")\n return\n elif (event.xdata + (self.roi_size_x / 2)) >= self.field_of_view_x[1]:\n print(\"Too close to the edge, please reselect.\")\n return\n if self.field_of_view_y:\n if (event.ydata - (self.roi_size_y / 2)) <= self.field_of_view_y[0]:\n print(\"Too close to the edge, please reselect.\")\n return\n elif (event.ydata + (self.roi_size_y / 2)) >= self.field_of_view_y[1]:\n print(\"Too close to the edge, please reselect.\")\n return\n print(event.xdata, event.ydata)\n self.coords = [event.xdata, event.ydata]\n self.rect.set_x(event.xdata - (self.roi_size_x / 2))\n self.rect.set_y(event.ydata - (self.roi_size_y / 2))\n self.rect.set_width(self.roi_size_x)\n self.rect.set_height(self.roi_size_y)\n # Also display the lamella itself, if appropriate\n if self.central_lamella_height:\n self.rect_lamella.set_x(event.xdata - (self.roi_size_x / 2))\n self.rect_lamella.set_y(event.ydata - (self.central_lamella_height / 2))\n self.rect_lamella.set_width(self.roi_size_x)\n self.rect_lamella.set_height(self.central_lamella_height)\n # # Ensure there is sufficent separation between the lamella & fiducial\n if self.existing_fiducial is not None:\n bottom_left_1 = np.array(self.existing_fiducial.xy) - self.buffer\n top_right_1 = np.array(self.existing_fiducial.xy) + np.array(\n [\n self.existing_fiducial.get_width() + self.buffer,\n self.existing_fiducial.get_height() + self.buffer,\n ]\n )\n bottom_left_2 = self.rect_lamella.xy\n top_right_2 = np.array(self.rect_lamella.xy) + np.array(\n [self.rect_lamella.get_width(), self.rect_lamella.get_height()]\n )\n if _rectangles_overlap(\n bottom_left_1, top_right_1, bottom_left_2, top_right_2\n ):\n print(\"Lamella too close to the fiducial marker\")\n return\n\n self.fig.canvas.draw()\n\n def show(self):\n plt.show()\n","sub_path":"autolamella/display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":8148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"22315624","text":"from django.db import models\nfrom django.utils import timezone\n\nfrom django.conf import settings\n\nDRAFT = 0\nHIDDEN = 1\nPUBLISHED = 2\n\nUPLOAD_TO = settings.MEDIA_ROOT + 'blog_image_uploads/'\n\n\ndef entries_published(queryset):\n \"\"\"Return only the entries published\"\"\"\n now = timezone.now()\n return queryset.filter(\n models.Q(start_publication__lte=now) | \\\n models.Q(start_publication=None),\n models.Q(end_publication__gt=now) | \\\n models.Q(end_publication=None),\n status=PUBLISHED)\n\n\nclass EntryPublishedManager(models.Manager):\n \"\"\"Manager to retrieve published 
entries\"\"\"\n\n def get_query_set(self):\n \"\"\"Return published entries\"\"\"\n return entries_published(\n super(EntryPublishedManager, self).get_query_set())\n\n def on_site(self):\n \"\"\"Return entries published on current site\"\"\"\n return super(EntryPublishedManager, self).get_query_set(\n )\n\n def search(self, pattern):\n \"\"\"Top level search method on entries\"\"\"\n try:\n return self.advanced_search(pattern)\n except:\n return self.basic_search(pattern)\n\n def advanced_search(self, pattern):\n \"\"\"Advanced search on entries\"\"\"\n from zinnia.search import advanced_search\n return advanced_search(pattern)\n\n def basic_search(self, pattern):\n \"\"\"Basic search on entries\"\"\"\n lookup = None\n for pattern in pattern.split():\n query_part = models.Q(content__icontains=pattern) | \\\n models.Q(excerpt__icontains=pattern) | \\\n models.Q(title__icontains=pattern)\n if lookup is None:\n lookup = query_part\n else:\n lookup |= query_part\n\n return self.get_query_set().filter(lookup)\n","sub_path":"blogger/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"487062019","text":"from lxml import etree as ET\n\nwith open('text.txt', \"a\") as mfile: \n parser = ET.XMLParser(recover=True)\n ttree = ET.parse('data.xml',parser=parser)\n root = ttree.getroot()\n for thread in root.findall('Page'):\n for post in thread.findall('Post'):\n if post.text == None:\n continue\n print(post.text)\n mfile.write(str(post.text) + '\\n\\n')\n print('Done')\n\n\n","sub_path":"apertium-tools/scrapers-misc/haos.ucoz.kz-forum2txt.py","file_name":"haos.ucoz.kz-forum2txt.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"99305750","text":"class Solution:\n def gardenNoAdj(self, N: int, paths: List[List[int]]) -> List[int]:\n # brute force way:\n # set flower type of first one = 1\n answer = [1] + [None] * (N-1)\n for garden in range(1, N+1): # start at the first\n # print(garden)\n neighbours = self.getNeighbour(garden, paths)\n # print(neighbours)\n for neighbour in neighbours:\n for flower in range(1, 5): # try flowers 1-4\n if self.checkNeighbours(flower, neighbour, paths, answer):\n answer[neighbour-1] = flower\n break # need to cancel search once first answer is found or it will keep running\n # print(\"ans\", answer)\n \n return answer\n\n def getNeighbour(self, node: int, paths):\n \"\"\"returns list of neighbours\n probably could be speed optimised to go through list in\n 1 pass\n \"\"\"\n neighbours = []\n for i in paths:\n if i[0] == node:\n neighbours.append(i[1]) \n # this is neighbours, NOT python index\n if i[1] == node: # paths are both ways\n neighbours.append(i[0])\n return neighbours\n\n \n def checkNeighbours(self, flower: int, neighbour: int, paths, answer):\n # return True if no neighbour of neighbour has this\n # else false\n NofN = self.getNeighbour(neighbour, paths) # get list of neighbours\n for i in NofN:\n if answer[i-1] == flower:\n return False\n return True","sub_path":"weekly-contest-136/flower-planting-with-no-adjacent v1.py","file_name":"flower-planting-with-no-adjacent v1.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"651553809","text":"import os\nimport re\nimport sys\nimport json\nimport time\nimport splunk.rest as sr\nfrom 
splunk.persistconn.application import PersistentServerConnectionApplication\n\nif sys.version_info.major == 2:\n sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), 'libs_py2'))\n sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), 'libs_py2', 'pura_libs_utils'))\nelif sys.version_info.major == 3:\n sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), 'libs_py3'))\n sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), 'libs_py3', 'pura_libs_utils'))\n\nfrom pura_libs_utils import pura_logger_manager as logger_manager\nfrom pura_libs_utils.pura_consts import *\nfrom pura_libs_utils import pura_utils as utils\nfrom pura_libs_utils import six\nfrom builtins import str\n\nlogging = logger_manager.setup_logging('pura_read_progress')\n\nif sys.platform == \"win32\":\n import msvcrt\n # Binary mode is required for persistent mode on Windows.\n msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)\n msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)\n msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)\n\n\nclass ReadProgressHandler(PersistentServerConnectionApplication):\n \"\"\"\n This is a REST handler base-class that makes implementing a REST handler easier.\n\n This works by resolving a name based on the path in the HTTP request and calls it.\n This class will look for a function that includes the HTTP verb followed by the path.abs\n\n For example, if a GET request is made to the endpoint is executed with the path /read_progress,\n then this class will attempt to run a function named get_read_progress().\n Note that the root path of the REST handler is removed. If a POST request is made to the endpoint\n is executed with the path /read_progress, then this class will attempt to execute post_read_progress().\n \"\"\"\n\n def __init__(self, command_line, command_arg):\n PersistentServerConnectionApplication.__init__(self)\n\n @classmethod\n def get_function_signature(cls, method, path):\n \"\"\"\n Get the function that should be called based on path and request method.\n\n :param cls: class\n :param method: type of call (get/post)\n :param path: the rest endpoint for which method is to be called\n\n :return name of the function to be called\n \"\"\"\n\n if len(path) > 0:\n components = path.split(\"pura\")\n path = components[1]\n return method + re.sub(r'[^a-zA-Z0-9_]', '_', path).lower()\n else:\n return method\n\n def handle(self, in_string):\n \"\"\"\n Handler function to call when REST endpoint is hit and process the call\n\n :param in_string: string of arguments\n\n :return Result of REST call\n \"\"\"\n try:\n\n logging.info(\"Handling a request\")\n\n # Parse the arguments\n args = utils.parse_in_string(in_string)\n\n # Get the user information\n self.session_key = args['session']['authtoken']\n self.user = args['session']['user']\n self.host = args['server']['hostname']\n\n # Get the method\n method = args['method']\n\n # Get the path and the args\n if 'rest_path' in args:\n path = args['rest_path']\n else:\n return utils.render_error_json(MESSAGE_NO_PATH_PROVIDED, 403)\n\n # Get the function signature\n function_name = self.get_function_signature(method, path)\n\n try:\n function_to_call = getattr(self, function_name)\n except AttributeError:\n function_to_call = None\n\n # Try to run the function\n if function_to_call is not None:\n logging.info(\"Executing function, name={}\".format(function_name))\n\n return function_to_call()\n\n else:\n logging.warn(\"A request could not be executed since the 
associated function is missing, name={}\"\n .format(function_name))\n return utils.render_error_json(MESSAGE_PATH_NOT_FOUND, 404)\n\n except Exception as exception:\n logging.exception(MESSAGE_FAILED_HANDLE_REQUEST)\n return utils.render_error_json(str(exception))\n\n def check_session_is_alive(self, scan_key):\n \"\"\"\n Function to check if session has timed-out\n\n :param scan_key: Scan key to fetch entry from KV store\n\n :return (True/False) Session is alive\n \"\"\"\n\n # Check if local directory exists\n if not os.path.isdir(LOCAL_DIR):\n os.makedirs(LOCAL_DIR)\n\n if not os.path.isdir(SESSION_PATH):\n os.makedirs(SESSION_PATH)\n\n file_path = os.path.join(SESSION_PATH, scan_key)\n if os.path.exists(file_path):\n logging.info(MESSAGE_SESSION_FILE_EXISTS.format(str(file_path)))\n try:\n os.remove(file_path)\n except Exception as e:\n logging.exception(MESSAGE_ERROR_REMOVING_SESSION_FILE.format(str(e)))\n return False\n return True\n\n def get_read_progress(self):\n \"\"\"\n Read progress from KV store.\n\n :return response for read progress REST call\n \"\"\"\n\n scan_report = dict()\n scan_report['status'] = PROGRESS_NEW\n scan_report['results'] = {}\n scan_report['message'] = MESSAGE_NO_SCAN_RESULTS\n scan_report['progress'] = 0\n scan_report['host_name'] = str(self.host)\n\n try:\n response, content = sr.simpleRequest('{}?output_mode=json'.format(kvstore_endpoint),\n sessionKey=self.session_key)\n except Exception:\n logging.exception(MESSAGE_EXCEPTION_READ_KVSTORE.format(self.user, self.host))\n return utils.render_error_json(MESSAGE_EXCEPTION_READ_KVSTORE.format(self.user, self.host))\n if response['status'] not in success_codes:\n logging.error(MESSAGE_ERROR_READING_PROGRESS.format(self.user, self.host))\n return utils.render_error_json(MESSAGE_ERROR_READING_PROGRESS.format(self.user, self.host))\n else:\n for entry in json.loads(content):\n if self.host == entry['host'] and self.user == entry['user'] and not entry['cancelled'] and not entry['returned']:\n scan_key = entry['_key']\n session_alive = self.check_session_is_alive(scan_key)\n if session_alive:\n scan_report.update({\n 'status': entry['status'],\n 'message': entry['message'],\n 'progress': entry['progress']\n })\n\n if scan_report['status'] == PROGRESS_COMPLETE:\n results = self.get_latest_results()\n scan_report.update({\n 'results': results\n })\n\n return utils.render_json(scan_report)\n else:\n key = entry['_key']\n entry['cancelled'] = True\n entry['progress'] = 100\n entry['returned'] = True\n entry['status'] = PROGRESS_COMPLETE\n try:\n response, _ = sr.simpleRequest('{}/{}?output_mode=json'.format(kvstore_endpoint, key),\n sessionKey=self.session_key, jsonargs=json.dumps(entry),\n method='POST', raiseAllErrors=True)\n except Exception:\n logging.exception(MESSAGE_EXCEPTION_DELETE_KVSTORE.format(self.user, self.host))\n return utils.render_error_json(MESSAGE_EXCEPTION_DELETE_KVSTORE.format(self.user,\n self.host))\n\n if response['status'] not in success_codes:\n logging.error(MESSAGE_ERROR_CANCEL_SCAN.format(self.user, self.host))\n return utils.render_error_json(MESSAGE_ERROR_CANCEL_SCAN.format(self.user,\n self.host))\n\n results = self.get_latest_results()\n scan_report.update({\n 'status': PROGRESS_ERROR,\n 'progress': 100,\n 'results': results,\n 'message': MESSAGE_UNAUTHORIZED_SCAN_TERMINATION})\n\n return utils.render_json(scan_report)\n else:\n results = self.get_latest_results()\n scan_report.update({\n 'status': PROGRESS_COMPLETE,\n 'progress': 100,\n 'results': results\n })\n return 
utils.render_json(scan_report)\n\n        return utils.render_error_json(MESSAGE_NO_ENTRY_FOUND, 404)\n\n    def get_latest_results(self):\n        \"\"\"\n        Fetch latest results for given user\n\n        :return latest results for given user based on timestamp\n        \"\"\"\n\n        # Check if local directory exists\n        if not os.path.isdir(LOCAL_DIR):\n            os.makedirs(LOCAL_DIR)\n\n        results = dict()\n        if not os.path.isdir(REPORT_PATH):\n            os.makedirs(REPORT_PATH)\n        list_reports = os.listdir(REPORT_PATH)\n\n        user_reports = list()\n        persistent_user_report = PERSISTENT_FILE_JSON.format(self.user)\n        for report in list_reports:\n            if self.user == report[:-16] and report != persistent_user_report:\n                user_reports.append(report)\n\n        latest_timestamp = 0\n        for report in user_reports:\n            timestamp = (report[:-5])[-10:]\n            if int(timestamp) > latest_timestamp:\n                latest_timestamp = int(timestamp)\n\n        for report in user_reports:\n            if str(latest_timestamp) in report:\n                report_file = os.path.join(REPORT_PATH, report)\n                with open(report_file, 'r') as file_handler:\n                    results = json.load(file_handler)\n                break\n\n        return results\n","sub_path":"apps/python_upgrade_readiness_app/bin/pura_read_progress.py","file_name":"pura_read_progress.py","file_ext":"py","file_size_in_byte":10547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"364195858","text":"# coding: utf-8\nimport logging\n'''\nLogging configuration module.\n'''\ndef config_logging(filename=None, format='%(asctime)s, %(levelname)s %(message)s', level=logging.INFO,\n                   logger=None, category=None, scribe_host='127.0.0.1', scribe_port=1464, console_log_level=None,\n                   name=None, propagate=True, backend=1, kafka_topic=None, scribe_format=None, multi_process_logger_kwargs={},\n                   scribe_log_level=logging.WARNING, databus_channel=None, databus_key=None, databus_format=None,\n                   sentry_dsn=None, sentry_level=logging.ERROR, file_format=None):\n    if logger is None:\n        logger = logging.getLogger()\n    # need a clean state, since some modules may have called logging functions already (i.e. 
logging.info)\n    # in that case, a default handler would have been appended, causing undesired output to stderr\n    for handler in logger.handlers:\n        logger.removeHandler(handler)\n    formatter = logging.Formatter(format)\n    logger.setLevel(level)\n    if not propagate:\n        logger.propagate = False\n    if filename:\n        if 'when' not in multi_process_logger_kwargs:\n            multi_process_logger_kwargs['when'] = 'midnight'\n        handler = MultiProcessRotatingFileHandler(filename=filename, **multi_process_logger_kwargs)\n        file_formatter = formatter\n        if file_format:\n            file_formatter = logging.Formatter(file_format)\n        handler.setFormatter(file_formatter)\n        logger.addHandler(handler)\n    # if category or kafka_topic:\n    #     scribe = ScribeLogHandler(category=category, backend=backend, \\\n    #                               kafka_topic=kafka_topic, host=scribe_host, port=scribe_port)\n    #     scribe_formatter = formatter\n    #     if scribe_format:\n    #         scribe_formatter = logging.Formatter(scribe_format)\n    #     scribe.setFormatter(scribe_formatter)\n    #     scribe.setLevel(scribe_log_level)\n    #     logger.addHandler(scribe)\n    # if databus_channel:\n    #     from pyutil.databus import DatabusLogHandler\n    #     databus = DatabusLogHandler(databus_channel, databus_key)\n    #     databus_formatter = formatter\n    #     if databus_format:\n    #         databus_formatter = logging.Formatter(databus_format)\n    #     databus.setFormatter(databus_formatter)\n    #     logger.addHandler(databus)\n    if console_log_level is not None:\n        ch = logging.StreamHandler()\n        formatter = logging.Formatter(format)\n        ch.setFormatter(logging.Formatter(format))\n        ch.setLevel(console_log_level)\n        logger.addHandler(ch)\n    # if sentry_dsn is not None:\n    #     import raven\n    #     from raven.handlers.logging import SentryHandler\n    #     from raven.transport.registry import TransportRegistry, default_transports\n    #     raven.Raven = None\n    #     raven.Client.logger = logging.getLogger('raven')\n    #     raven.Client._registry = TransportRegistry(transports=default_transports)\n    #     client = raven.Client(sentry_dsn)\n    #     handler = SentryHandler(client)\n    #     handler.setLevel(sentry_level)\n    #     logger.addHandler(handler)\n\n\nimport time\nfrom logging.handlers import TimedRotatingFileHandler\nimport os\n\nfrom filelock import FileLock\n\n\nMIDNIGHT = 24 * 60 * 60 # 00:00:00\nSECONDS_PER_DAY = 60 * 60 * 24\n\n\nclass MultiProcessRotatingFileHandler(TimedRotatingFileHandler):\n    def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, utc=False):\n        super(MultiProcessRotatingFileHandler, self).__init__(filename, when, interval, backupCount, encoding, False,\n                                                              utc)\n        d, f = os.path.split(filename)\n        self.lock_file_name = os.path.join(d, '.' + f)\n\n    def computeRollover(self, currentTime):\n        \"\"\"\n        Work out the rollover time based on the specified time.\n        Always roll over at whole-interval time boundaries.\n        \"\"\"\n        if self.when == 'MIDNIGHT' or self.when.startswith('W'):\n            if self.utc:\n                t = time.gmtime(currentTime)\n            else:\n                t = time.localtime(currentTime)\n            currentHour = t[3]\n            currentMinute = t[4]\n            currentSecond = t[5]\n            secondsToMidnight = MIDNIGHT - ((currentHour * 60 + currentMinute) * 60 +\n                                            currentSecond)\n            result = currentTime + secondsToMidnight\n            if self.when.startswith('W'):\n                day = t[6]\n                if day != self.dayOfWeek:\n                    if day < self.dayOfWeek:\n                        daysToWait = self.dayOfWeek - day\n                    else:\n                        daysToWait = self.dayOfWeek - day + 7\n                    result += SECONDS_PER_DAY * daysToWait\n        else:\n            result = currentTime + self.interval - currentTime % self.interval\n        return result\n\n    def doRollover(self):\n        \"\"\"\n        do a rollover; in this case, a date/time stamp is appended to the filename\n        when the rollover happens. 
However, you want the file to be named for the\n        start of the interval, not the current time.  If there is a backup count,\n        then we have to get a list of matching filenames, sort them and remove\n        the one with the oldest suffix.\n        Two cases when rolling over:\n        1. Check whether the file we would rename to already exists. If it does, another\n           process has already rolled over, so this process only needs to:\n           a) reopen the log file\n           b) update the rollover time\n        2. If it does not exist, this process was the first to grab the rollover lock,\n           so it must:\n           a) rename the file\n           b) delete old logs\n           c) reopen the log file\n           d) update the rollover time\n        \"\"\"\n        if self.stream:\n            self.stream.close()\n            self.stream = None\n        # get the time that this sequence started at and make it a TimeTuple\n        currentTime = int(time.time())\n        t = self.rolloverAt - self.interval\n        if self.utc:\n            timeTuple = time.gmtime(t)\n        else:\n            timeTuple = time.localtime(t)\n        dfn = self.baseFilename + \".\" + time.strftime(self.suffix, timeTuple)\n\n        with FileLock(self.lock_file_name):\n            if not os.path.exists(dfn):\n                self._rollover(dfn)\n            self.stream = self._open()\n            self._updateRolloverAt(currentTime)\n\n    def _rollover(self, dfn):\n        # Issue 18940: A file may not have been created if delay is True.\n        if os.path.exists(self.baseFilename):\n            os.rename(self.baseFilename, dfn)\n\n        if self.backupCount > 0:\n            for s in self.getFilesToDelete():\n                os.remove(s)\n\n    def _updateRolloverAt(self, currentTime):\n        \"\"\"\n        Update the time of the next rollover.\n        :param currentTime:\n        :return:\n        \"\"\"\n        newRolloverAt = self.computeRollover(currentTime)\n        while newRolloverAt <= currentTime:\n            newRolloverAt = newRolloverAt + self.interval\n        self.rolloverAt = newRolloverAt\n\n","sub_path":"backup/utilog.py","file_name":"utilog.py","file_ext":"py","file_size_in_byte":7011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"60309273","text":"from django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import PermissionDenied\nfrom django.db import transaction\nfrom django.shortcuts import render, redirect, HttpResponseRedirect, get_object_or_404, HttpResponse\nfrom django.urls import reverse, reverse_lazy\nfrom django.views.generic import View, TemplateView, DetailView, DeleteView\n\nfrom customers_app.forms import LoginForm, UserCreateForm, CustomerForm\nfrom customers_app.helpers import are_passwords_match, filter_and_sort_customers_by_query_params\nfrom customers_app.models import Customer, Photo\n\nURLS = {\n    'home_url': reverse_lazy('home'),\n    'list_url': reverse_lazy('customers-list'),\n    'voting_url': reverse_lazy('customers-voting'),\n    'auth_url': reverse_lazy('customers-auth'),\n    'logout_url': reverse_lazy('customers-logout'),\n    'create_url': reverse_lazy('customers-create'),\n}\n\n\nclass HomePage(TemplateView):\n    \"\"\"\n    Render home page with links to pages of project\n    \"\"\"\n    template_name = 'home.html'\n\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n        context.update({\n            'list_url': URLS['list_url'],\n            'voting_url': URLS['voting_url'],\n            'auth_url': URLS['auth_url'],\n            'create_url': URLS['create_url']\n        })\n\n        user = self.request.user\n        if user.is_authenticated:\n            context['detail_url'] = reverse_lazy('customers-detail', kwargs={'pk': user.customer.id})\n\n        return context\n\n    def get(self, request, *args, **kwargs):\n\n        user = request.user\n        if user.is_authenticated:\n            try:\n                user.customer\n            except User.customer.RelatedObjectDoesNotExist:\n                return HttpResponse('User is not a customer. If you are admin - create Customer model for yourself. 
'\n 'If you are not admin - say admin to create Customer model for you ')\n\n context = self.get_context_data(**kwargs)\n return self.render_to_response(context)\n\n\ndef logout_view(request):\n logout(request)\n return redirect('customers-auth')\n\n\nclass CustomersVotingView(TemplateView):\n \"\"\"\n Render template with photos and points for each photo.\n Allows to vote for certain photo by click on button near photo's points\n \"\"\"\n template_name = \"customers_voting.html\"\n\n def get_context_data(self, **kwargs):\n if not self.request.user.is_authenticated:\n raise PermissionDenied\n\n context = super().get_context_data(**kwargs)\n context.update({\n 'photos': Photo.objects.filter(customer__isnull=False),\n 'max_points': Photo.max_points\n })\n return context\n\n def get(self, request, *args, **kwargs):\n context = self.get_context_data(**kwargs)\n return self.render_to_response(context)\n\n def post(self, request, *args, **kwargs):\n context = self.get_context_data(**kwargs)\n\n photo = get_object_or_404(Photo, pk=request.POST.get('id_of_photo'))\n photo.add_point()\n\n return self.render_to_response(context)\n\n\nclass CustomersListView(TemplateView):\n \"\"\"\n Render template with information about customers\n Allows filter and sort customer's information by buttons\n \"\"\"\n template_name = \"customers_list.html\"\n\n def get_context_data(self, **kwargs):\n if not self.request.user.is_authenticated:\n raise PermissionDenied\n\n context = super().get_context_data(**kwargs)\n\n customers = Customer.objects.all()\n query_params = self.request.GET\n\n context['customers'] = filter_and_sort_customers_by_query_params(query_params, customers)\n return context\n\n\nclass CustomersDetailView(DetailView):\n \"\"\"\n Render template with information about certain customer\n \"\"\"\n template_name = \"customers_detail.html\"\n model = Customer\n\n def get_object(self, queryset=None):\n if not self.request.user.is_authenticated:\n raise PermissionDenied\n\n obj = super().get_object()\n return obj\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n pk = self.get_object().pk\n\n context.update({\n 'delete_url': reverse_lazy('customers-delete', kwargs={'pk': pk}),\n 'logout_url': URLS['logout_url']\n })\n return context\n\n\nclass CustomersDeleteView(DeleteView):\n \"\"\"\n Delete certain customer\n \"\"\"\n template_name = 'customers_confirm_delete.html'\n model = Customer\n success_url = URLS['home_url']\n\n def get_object(self, queryset=None):\n if not self.request.user.is_authenticated:\n raise PermissionDenied\n\n obj = super().get_object()\n if self.request.user.is_staff:\n return obj\n\n if self.request.user.customer != obj:\n raise PermissionDenied\n\n return obj\n\n\nclass CustomersCreateView(View):\n \"\"\"\n Render registration template\n \"\"\"\n user_form_class = UserCreateForm\n customer_form_class = CustomerForm\n\n template_name = 'customers_create.html'\n\n def get(self, request, *args, **kwargs):\n if not request.user.is_staff:\n raise PermissionDenied\n\n return render(request,\n self.template_name,\n {'user_form': self.user_form_class,\n 'customer_form': self.customer_form_class,\n 'auth_url': URLS['auth_url']\n }\n )\n\n @transaction.atomic()\n def post(self, request, *args, **kwargs):\n if not request.user.is_staff:\n raise PermissionDenied\n\n user_form = UserCreateForm(request.POST)\n\n customer_form = CustomerForm(request.POST, request.FILES or None)\n if user_form.is_valid() and customer_form.is_valid():\n if 
are_passwords_match(user_form):\n user_cleaned_data = user_form.cleaned_data\n user_cleaned_data.pop('confirm_password')\n\n user = User.objects.create_user(**user_cleaned_data)\n customer = customer_form.save(commit=False)\n\n photo = Photo.objects.create(photo=request.FILES['photo'])\n\n customer.user = user\n customer.photo = photo\n\n customer.save()\n\n return HttpResponseRedirect(reverse('customers-detail', kwargs={'pk': customer.pk}))\n\n return render(request, self.template_name, {'user_form': user_form, 'customer_form': customer_form})\n\n\nclass CustomersAuthView(View):\n \"\"\"\n Render authentication template\n \"\"\"\n template_name = 'customers_auth.html'\n form_class = LoginForm\n\n def get(self, request, *args, **kwargs):\n return render(request,\n self.template_name,\n {'form': self.form_class, 'create_url': URLS['create_url']}\n )\n\n def post(self, request, *args, **kwargs):\n user_form = LoginForm(request.POST)\n if user_form.is_valid():\n if are_passwords_match(user_form):\n user_cleaned_data = user_form.cleaned_data\n user = authenticate(username=user_cleaned_data['username'], password=user_cleaned_data['password'])\n if user:\n login(request, user)\n return redirect('home')\n else:\n msg = 'Invalid login or password'\n user_form.errors['__all__'] = user_form.error_class([msg])\n\n return render(request, self.template_name, {'form': user_form})\n","sub_path":"customers_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"102232211","text":"\"\"\"\nCheckerPlugin.py\n\nCopyright (c) 2008 by Panopta LLC\njason@panopta.com\n\nBase class for plugin functionality for remote service checks.\n\"\"\"\ncheckers = []\nimport cPickle\nfrom Queue import Queue\nfrom ConfigParser import ConfigParser\nimport DNS\nfrom datetime import datetime, timedelta\n\n\nclass DummySchedule:\n def __init__(self, **entries): \n self.__dict__.update(entries)\n\n\nclass CheckerPlugin(object):\n textkey = None\n\n def __init__(self, config=None, result_queue=None, **kwargs):\n \"\"\" Perform all initialization for the check plugin. 
\"\"\"\n        self.config = config\n        self.options = kwargs\n        if not result_queue:\n            self.__result_queue = Queue()\n            self.testing = True\n        else: self.__result_queue = result_queue\n\n    def check(self, schedule):\n        raise NotImplementedError\n    \n    @classmethod\n    def test(cls, schedule, dummy_data={}, **kwargs):\n        \"\"\" basic stub for the testing framework \"\"\"\n        \n        DNS.ParseResolvConf()\n        \n        config = ConfigParser()\n        config.read(\"config.cfg\")\n        \n        checker = cls(config, **kwargs)\n        schedule = DummySchedule(**schedule)\n        schedule.metadata = cPickle.dumps(dummy_data)\n        \n        if hasattr(schedule, \"fqdn\") and not kwargs.get(\"nodns\"):\n            # this is usually done in the DNSThread, but since we're not running a\n            # full-blown threaded checker, just resolve it ourselves now\n            r = DNS.Request(schedule.fqdn, qtype='A').req()\n            for a in r.answers:\n                if a['typename'] != 'A': continue\n                schedule.ip_address = a['data']\n                schedule.dns_ttl_expiration = datetime.now() + timedelta(seconds=int(a['ttl']))\n                break\n        \n        checker.check(schedule)\n        results = checker.test_results\n        \n        return bool(results[\"result\"]), results[\"result_time\"], results[\"result_duration\"]\n    \n    @property\n    def test_results(self):\n        return self.__result_queue.get()\n\n    def reportResults(self, schedule, result_time, result, duration, metadata=\"\"):\n        \"\"\" Submit the results to the appropriate handler. \"\"\"\n        results = {'schedule': schedule,\n                   'result': result,\n                   'result_time': result_time,\n                   'result_duration': duration,\n                   'metadata': metadata,\n                   'result_type': 'service',\n                   }\n\n        self.__result_queue.put(results)\n    \n    reportServiceResults = reportResults\n\n    def reportResourceResults(self, schedule, result_time, value, metadata=''):\n        \"\"\"Submit the results for a resource check\"\"\"\n        results = {'schedule': schedule,\n                   'value': value,\n                   'result_time': result_time,\n                   'metadata': metadata,\n                   'result_type': 'resource'}\n        self.__result_queue.put(results)\n\n    def loadMetadata(self, schedule): \n        if not schedule.metadata: return {} \n        return cPickle.loads(schedule.metadata)\n\n\n","sub_path":"MainComponents/build/appliance/build/tmp/src/checker_plugins/CheckerPlugin.py","file_name":"CheckerPlugin.py","file_ext":"py","file_size_in_byte":3093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"90154220","text":"#!/usr/bin/env python2.7\n\n'''\nHere, put a script that will split up the plotting function for running on a cluster. \nAsk how many to split into. \n\nCan run as the function \n./dataplotter.py barplot PARAMFILE cluster\n\nBy adding cluster - it will run on a cluster file. \n\nNeed to include a file - such as . \n\nRun on cluster by creating a template batch file. \nAnd include a submission file. \n'''\n\nimport sys, os\nimport subprocess\nimport time\n\ndef sherlock_cluster_submit(jobName, wd, outputDirPath, walltime, mem, proc, commandList, queue=\"normal\" ):\n    '''\n    #DESCRIPTION: Submits list of commands into batch file\n    #INPUT: 0. Job Name 1. Working directory 2. Error/Output directory 3. Memory (500mb), 4. List of Commands\n    #OUTPUT: Creates a batch file. 
The batch file will be stored in the error/output directory, with the jobname as the filename. The batch file will also be submitted.\n    '''\n    print(\"Calling RyoScript: sherlock_cluster_submit\")\n\n    print(\"\\tCreating submission file for: \", jobName)\n    submissionFileName = outputDirPath + \"/\" + jobName + \".sub\"\n    fileOUT = open(submissionFileName, \"w\")\n    outLine = '''#!/bin/bash\n\n#SBATCH --job-name=''' + jobName + '''\n#SBATCH --output=''' + outputDirPath + '''/''' + jobName + '''.out\n#SBATCH --error=''' + outputDirPath + '''/''' + jobName + '''.err\n#SBATCH --workdir=''' + wd + '''\n#SBATCH -p ''' + queue + '''\n#SBATCH --time=''' + walltime + '''\n#SBATCH --qos=normal\n#SBATCH --nodes=1\n#SBATCH --mem=''' + mem + '''\n#SBATCH --ntasks-per-node=''' + proc + '''\n\n''' + \"\\n\".join(commandList) + \"\\n\"\n    fileOUT.write(outLine)\n    fileOUT.close()\n\n    submitFlag = 1\n    if submitFlag !=0:\n        process = subprocess.Popen(['sbatch', submissionFileName], stdout= subprocess.PIPE)\n        stdout = process.communicate()\n        jobNumber = stdout[0].decode('utf-8').split(\".\")[0]\n        time.sleep(0.1) #Pause briefly to make sure the job is properly submitted\n    return jobNumber\n\n\ndef create_split(paramFilePath, expFilePath, outDir, numJobs=200):\n    \n    #Get gene list\n    geneList = list()\n    fileIN = open(expFilePath, \"U\")\n    fileIN.readline()\n    for i in fileIN:\n        geneList.append(i.rstrip().split(\"\\t\")[0])\n    fileIN.close()\n\n    if os.path.exists(outDir)==False:\n        os.mkdir(outDir)\n\n    numBatches = numJobs\n    numGenesPerSplit = len(geneList)/numBatches\n    splitNum = 1\n    outList = list()\n    for i in range(numBatches+1):\n        start = i*numGenesPerSplit\n        end = min((i+1)*numGenesPerSplit, len(geneList))\n        fileIN = open(paramFilePath)\n        outPath = outDir + \"/\" + paramFilePath.split(\"/\")[-1] + \".split.\" + str(i).zfill(4)\n        outList.append(outPath)\n        fileOUT = open( outPath, \"w\")\n        for i in fileIN:\n            i =i.rstrip().split(\"\\t\")\n            if i[0]!=\"GENES\" and i[0]!=\"genes\":\n                fileOUT.write(\"\\t\".join(i)+ \"\\n\")\n            else:\n                fileOUT.write(\"\\t\".join([i[0], \",\".join(geneList[start:end])])+ \"\\n\")\n        fileIN.close()\n        fileOUT.close()\n    return outList\n\ndef create_run_function(paramFiles, commandType=\"singlecell\"):\n\n    count = 0\n    for i in paramFiles:\n        count +=1\n        command = \"./dataplotter.py \" + commandType + \" \" + i\n        commandList = [\"module load python/2.7.5\", command]\n        sherlock_cluster_submit(\n            jobName = \"ryoTest\" + str(count) + i.replace(\"/\", \"_\"),\n            wd = \"/scratch/PI/hbfraser/ryo/barres/scripts/dataplotter\" ,\n            outputDirPath = \"/scratch/PI/hbfraser/ryo/barres/scripts/batch\",\n            walltime= \"02:00:00\",\n            mem = \"2G\",\n            proc = \"1\",\n            commandList = commandList,\n            queue = \"hbfraser\"\n        )\n\ndef rename_files(pathDir, outFile):\n    #Update the combined.figureList.txt\n    files = os.listdir(pathDir)\n    figureList = list()\n    for i in files:\n        oldNameSplit = i.split(\".\")\n        geneName = oldNameSplit[1]\n        newName = \"dombined.\" + geneName + \".png\"\n        os.system(\"mv -f \" + pathDir + \"/\" +i + \" \" + pathDir + \"/\" + newName)\n        figureList.append([geneName, newName])\n\ndef create_compiled_files(pathDir, outFile, startLetter = \"d\"):\n    #Update the combined\n    fileOUT = open(outFile, \"w\")\n    figureList = os.listdir(pathDir)\n\n    for i in figureList:\n        if i[0]==startLetter:\n            geneName = i.split(\".\")[1]\n            vec = [geneName, i]\n            fileOUT.write(\"\\t\".join(vec) + \"\\n\")\n    
fileOUT.close()\n\n","sub_path":"scripts/split_job.py","file_name":"split_job.py","file_ext":"py","file_size_in_byte":4531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"14908751","text":"from django.shortcuts import render\r\nfrom django.http import JsonResponse\r\nfrom django.http import HttpResponse\r\nfrom .forms import FahrerForm, Lieferform\r\nfrom .models import Fahrer, Fahrzeug, Kunde, Produkte, Lieferungen\r\nfrom .utils import render_to_pdf\r\nfrom django.db.models import Sum\r\nfrom django.db.models import Q\r\n\r\n# Create your views here.\r\n\r\ndef liefer_list(request):\r\n lieferlist = Lieferungen.objects.order_by('-datum_von')\r\n context = {'lieferlist': lieferlist}\r\n return render(request, 'collect/liefer_list.html', context)\r\n\r\ndef import_list(request):\r\n lieferlist = Lieferungen.objects.order_by('-datum_von').filter(id_aufnehmer__nachname='Scheuenstuhl')\r\n summe = Lieferungen.objects.order_by('-datum_von').filter(id_aufnehmer__nachname='Scheuenstuhl').aggregate(Sum('menge'))\r\n context = {'lieferlist': lieferlist,'summe':summe}\r\n return render(request, 'collect/import.html', context)\r\n\r\ndef filter_list(request, pk):\r\n lieferlist = Lieferungen.objects.order_by('-datum_von').filter(Q(id_aufnehmer__id=pk) | Q(id_abgeber__id=pk))\r\n summe = Lieferungen.objects.order_by('-datum_von').filter(id_aufnehmer__id=pk).aggregate(Sum('menge'))\r\n context = {'lieferlist': lieferlist,'summe':summe}\r\n return render(request, 'collect/import.html', context)\r\n\r\ndef test(request):\r\n if request.method == \"POST\":\r\n fahrer_form = FahrerForm(request.POST)\r\n if fahrer_form.is_valid():\r\n Fahrer = fahrer_form.save(commit=False)\r\n Fahrer.save()\r\n\r\n else:\r\n fahrer_form = FahrerForm()\r\n return render(request, 'collect/test.html', {'fahrerform': fahrer_form})\r\n\r\ndef lieferschein(request):\r\n if request.method == \"POST\":\r\n liefer_form = Lieferform(request.POST)\r\n if liefer_form.is_valid():\r\n Lieferungen = liefer_form.save(commit=False)\r\n Lieferungen.save()\r\n\r\n else:\r\n liefer_form = Lieferform()\r\n return render(request, 'collect/lieferschein.html', {'lieferform': liefer_form})\r\n\r\ndef liefer_form_data(request, pk):\r\n\r\n\r\n fahrzeug_form_data = Fahrzeug.objects.filter(pk=pk).values() #getting the liked posts\r\n fahrzeug_form_data = list(fahrzeug_form_data)\r\n return JsonResponse(fahrzeug_form_data, safe=False) # Sending an success response\r\n #return HttpResponse(is_private)\r\n\r\ndef validate_username(request, pk):\r\n username = request.GET.get('username', None)\r\n username = pk\r\n\r\n data = {\r\n 'is_taken': username\r\n }\r\n return JsonResponse(data)\r\n\r\n\r\n\r\ndef generatepdf(request, pk):\r\n pdfdata = Lieferungen.objects.filter(pk=pk).order_by('-datum_von')[0]\r\n npk_gesamt={}\r\n npk_gesamt[\"n\"] = int(pdfdata.menge * pdfdata.id_produkt.n)\r\n npk_gesamt[\"p\"] = int(pdfdata.menge * pdfdata.id_produkt.p)\r\n npk_gesamt[\"k\"] = int(pdfdata.menge * pdfdata.id_produkt.k)\r\n context = {'pdfdata': pdfdata,'npk_gesamt': npk_gesamt}\r\n pdf = render_to_pdf('collect/invoice.html', context)\r\n return HttpResponse(pdf, content_type='application/pdf')","sub_path":"collect/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"365494746","text":"def solution(start, end):\n if start > end:\n return\n\n division = end+1\n\n for i in 
range(start+1, end+1):\n        if res[start] < res[i]:\n            division = i\n            break\n    #print(division)\n\n    solution(start+1, division-1)\n    solution(division, end)\n    print(res[start])\n\n\nimport sys\nsys.setrecursionlimit(10 ** 9)\n\nres = []\ncnt = 0\n\nwhile cnt <= 10000:\n    try:\n        num = int(input())\n    except:\n        break\n    res.append(num)\n    cnt+=1\n\nsolution(0, len(res)-1)\n","sub_path":"NamHyeonJi_20210725/9_1_NamHyeonJi.py","file_name":"9_1_NamHyeonJi.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"448996487","text":"# We want to create a class for an object that behaves like a triangle, that has flexible sides and angles.\n# Because of approximations in python the triangle will get distorted after some of the changes so this is not a\n# perfect model\n\n# 30P\n# - class constructor can receive 3 arguments for angles (with default value of 60) and 3 arguments for sides (with\n# default value of 1)\n# class variables for sides will be called A, B, C\n# class variables for angles will be called AB, BC, CA (indicating sides)\n\n# 30P\n# - class implements method to modify_angle:\n# - modify_angle method takes two arguments:\n# - \"angle\" and can be one of 3 string values 'AB', 'BC', 'CA'\n# - \"degrees\" that can be positive or negative and represents the amount by which the angle will be modified\n# If as a result of the change any of the angles will be outside interval (0, 180) then method should raise an exception\n# When an angle is modified you will need to recalculate the opposing side which can be done using the following\n# example: angle AB is changed then C = (A**2 + B**2 - 2*A*B*cos(AB))**(1/2)\n# Because angles in a triangle must sum up to 180 degrees unmodified angles need to be recalculated after we have\n# recalculated the opposite side using the following example:\n# angle AB is changed then BC = arccos((B**2+ C**2 - A**2) / (2*B*C)), CA = arccos((C**2+ A**2 - B**2) / (2*C*A)),\n\n\n# 30P\n# - class implements method to modify_side:\n# - modify_side method takes two arguments:\n# - \"side\" and can be one of 3 string values 'A', 'B', 'C'\n# - \"meters\" that can be positive or negative and represents the amount by which the side will be modified\n# If as a result of the change sum of the unmodified sides is less than or equal to the changed side then method should\n# throw an exception\n# If as a result of the change side will be less than or equal to 0 then method should raise a different exception\n# When a side is modified by some value all other sides need to be modified by the fraction of the change to maintain\n# the same triangle angles. 
For example if A increases by +1 then B = ((A+1)/A)*B and C = ((A+1)/A)*C\n\nfrom math import cos, acos, degrees, radians\n\n\nclass Triangle:\n    A = B = C = 1\n    AB = BC = CA = 60\n\n    def __init__(self, a=1, b=1, c=1, ab=60, bc=60, ca=60):\n        self.A = a\n        self.B = b\n        self.C = c\n        self.AB = ab\n        self.BC = bc\n        self.CA = ca\n\n    def modify_angle(self, angle, degrees_to_add):\n        # cos() expects radians, while the angles are stored in degrees\n        if angle == \"AB\":\n            self.AB += degrees_to_add\n            self.C = (self.A ** 2 + self.B ** 2 - 2 * self.A * self.B * cos(radians(self.AB))) ** (1 / 2)\n            self.BC = degrees(acos((self.B ** 2 + self.C ** 2 - self.A ** 2) / (2 * self.B * self.C)))\n            self.CA = degrees(acos((self.C ** 2 + self.A ** 2 - self.B ** 2) / (2 * self.C * self.A)))\n        elif angle == \"BC\":\n            self.BC += degrees_to_add\n            self.A = (self.B ** 2 + self.C ** 2 - 2 * self.B * self.C * cos(radians(self.BC))) ** (1 / 2)\n            self.AB = degrees(acos((self.A ** 2 + self.B ** 2 - self.C ** 2) / (2 * self.A * self.B)))\n            self.CA = degrees(acos((self.C ** 2 + self.A ** 2 - self.B ** 2) / (2 * self.C * self.A)))\n        elif angle == \"CA\":\n            self.CA += degrees_to_add\n            self.B = (self.C ** 2 + self.A ** 2 - 2 * self.C * self.A * cos(radians(self.CA))) ** (1 / 2)\n            self.AB = degrees(acos((self.A ** 2 + self.B ** 2 - self.C ** 2) / (2 * self.A * self.B)))\n            self.BC = degrees(acos((self.B ** 2 + self.C ** 2 - self.A ** 2) / (2 * self.B * self.C)))\n        if not (0 < self.AB < 180 and 0 < self.BC < 180 and 0 < self.CA < 180):\n            raise ValueError(\"Angle out of range\")\n\n    def modify_side(self, side, meters):\n        # the scale factor uses the side's value from before the change, per the example above\n        if side == \"A\":\n            scale = (self.A + meters) / self.A\n            self.A += meters\n            self.B = scale * self.B\n            self.C = scale * self.C\n            if self.B + self.C <= self.A:\n                raise AttributeError(\"Modified side A too small\")\n        elif side == \"B\":\n            scale = (self.B + meters) / self.B\n            self.B += meters\n            self.A = scale * self.A\n            self.C = scale * self.C\n            if self.A + self.C <= self.B:\n                raise AttributeError(\"Modified side B too small\")\n        elif side == \"C\":\n            scale = (self.C + meters) / self.C\n            self.C += meters\n            self.A = scale * self.A\n            self.B = scale * self.B\n            if self.A + self.B <= self.C:\n                raise AttributeError(\"Modified side C too small\")\n        if self.A <= 0 or self.B <= 0 or self.C <= 0:\n            raise ValueError(\"Side less than or equal to 0\")\n\n\n# 10P\n# Create an object from your class with default constructor values and modify angle AB by +30 degrees and side A by +1.5\ntriangle = Triangle()\ntriangle.modify_angle(\"AB\", 30)\nprint(\"Angles: \\n\", \"AB =\", triangle.AB, \"\\n\", \"BC =\", triangle.BC, \"\\n\", \"CA =\", triangle.CA)\ntriangle2 = Triangle()\ntriangle2.modify_side(\"A\", 1.5)\nprint(\"Sides:\\n\", \"A =\", triangle2.A, \"\\n\", \"B =\", triangle2.B, \"\\n\", \"C =\", triangle2.C)\n\n","sub_path":"modul5/homework5.py","file_name":"homework5.py","file_ext":"py","file_size_in_byte":5122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"477685792","text":"# Python bytecode 2.7 (decompiled from Python 2.7)\n# Embedded file name: scripts/client/gui/Scaleform/daapi/view/battle/event_mark1/page.py\nfrom debug_utils import LOG_DEBUG\nfrom gui.Scaleform.daapi.view.battle.classic import ClassicPage\nfrom gui.Scaleform.genConsts.BATTLE_VIEW_ALIASES import BATTLE_VIEW_ALIASES\nfrom gui.battle_control.battle_constants import BATTLE_CTRL_ID\nfrom gui.battle_control import g_sessionProvider\nfrom gui.Scaleform.daapi.view.battle.event_mark1.delivery_direction import DeliveryDirection\n_MARK1_COMPONENTS_TO_CTRLS = ((BATTLE_CTRL_ID.ARENA_PERIOD, 
(BATTLE_VIEW_ALIASES.BATTLE_TIMER,\n BATTLE_VIEW_ALIASES.PREBATTLE_TIMER,\n BATTLE_VIEW_ALIASES.PLAYERS_PANEL,\n BATTLE_VIEW_ALIASES.BATTLE_END_WARNING_PANEL)),\n (BATTLE_CTRL_ID.DEBUG, (BATTLE_VIEW_ALIASES.DEBUG_PANEL,)),\n (BATTLE_CTRL_ID.MARK1_BONUS, (BATTLE_VIEW_ALIASES.FLAG_NOTIFICATION,)),\n (BATTLE_CTRL_ID.MARK1_EVENT_NOTS, (BATTLE_VIEW_ALIASES.EVENT_NOTIFICATION_PANEL,)))\n\nclass EventMark1Page(ClassicPage):\n\n def __init__(self, components=_MARK1_COMPONENTS_TO_CTRLS, fullStatsAlias=BATTLE_VIEW_ALIASES.FULL_STATS):\n super(EventMark1Page, self).__init__(components=components, fullStatsAlias=fullStatsAlias)\n self.__markIDirection = None\n return\n\n def __del__(self):\n LOG_DEBUG('EventMark1Page is deleted')\n\n def _populate(self):\n super(EventMark1Page, self)._populate()\n self.__markIDirection = DeliveryDirection()\n vehicleCtrl = g_sessionProvider.shared.vehicleState\n if vehicleCtrl is not None:\n vehicleCtrl.onRespawnBaseMoving += self.__onRrespawnBaseMoving\n return\n\n def _dispose(self):\n vehicleCtrl = g_sessionProvider.shared.vehicleState\n if vehicleCtrl is not None:\n vehicleCtrl.onRespawnBaseMoving -= self.__onRrespawnBaseMoving\n if self.__markIDirection is not None:\n self.__markIDirection.clear()\n self.__markIDirection = None\n super(EventMark1Page, self)._dispose()\n return\n\n def __onRrespawnBaseMoving(self):\n self._toggleRadialMenu(False)\n","sub_path":"PythonFiles/WOT/res/scripts/client/gui/Scaleform/daapi/view/battle/event_mark1/page.py","file_name":"page.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"613524595","text":"import logging\n\nfrom .data_handler import ProteinDataHandler\nfrom .globals import DATASETS\n\nlogging.basicConfig(level=logging.INFO)\nLOGGER = logging.getLogger()\n\n\ndef main():\n for target in DATASETS['eval']:\n dh = ProteinDataHandler(target, structures_version=3)\n\n nas =(dh.target_pdb_cm==-1).sum()\n LOGGER.info(f'Nas: {nas}')\n\n\nif __name__ == '__main__':\n main()","sub_path":"archive/na_sanity_check.py","file_name":"na_sanity_check.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"347721705","text":"#!/usr/bin/python\n\nimport multiprocessing\nimport subprocess\nimport os\n\ndef do_args():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-n\", \"--network\", help=\"specify network range to sweep\", required=\"True\")\n return parser.parse_args()\n\ndef pinger( job_q, results_q ):\n DEVNULL = open(os.devnull,'w')\n while True:\n ip = job_q.get()\n if ip is None: break\n\n try:\n subprocess.check_call(['ping','-c1',ip],\n stdout=DEVNULL)\n results_q.put(ip)\n except:\n pass\n \ndef check_ip():\n import re\n import sys\n wb = {}\n params = re.match(r'(?P(\\d{1,3}\\.){3}).*', args.network)\n try:\n wb = params.groupdict()\n except:\n print('Failed to extract IP range. 
Please check.')\n sys.exit()\n return wb[\"range\"]\n\n\nif __name__ == '__main__':\n args = do_args()\n network_range = check_ip()\n pool_size = 255\n\n jobs = multiprocessing.Queue()\n results = multiprocessing.Queue()\n\n pool = [ multiprocessing.Process(target=pinger, args=(jobs,results))\n for i in range(pool_size) ]\n\n for p in pool:\n p.start()\n\n for i in range(1,255):\n jobs.put(\"%s{0}\".format(i) % network_range)\n\n for p in pool:\n jobs.put(None)\n\n for p in pool:\n p.join()\n\n while not results.empty():\n ip = results.get()\n print(ip)\n","sub_path":"pingsweep.py","file_name":"pingsweep.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"106483433","text":"\"\"\"\nTEST_WR.PY Unit tests for write_results()\n\"\"\"\nimport os\nimport sys\nimport tempfile\n\nimport numpy as np\nimport pytest\n\nfrom rcrbounds import write_results\n\n\n# Basic functionality\ndef test_wr_basic():\n \"\"\"write the specified array to the specified text file\"\"\"\n moment_vector = np.zeros(5)\n with tempfile.TemporaryDirectory() as tmp:\n outfile = os.path.join(tmp, 'pout.txt')\n write_results(moment_vector, outfile)\n\n\n# Exceptions to handle\ndef test_wr_readonly(read_only_file):\n \"\"\"warn and continue if file is read-only\"\"\"\n moment_vector = np.zeros(5)\n with pytest.warns(UserWarning, match=\"Cannot write\"):\n write_results(moment_vector, read_only_file)\n\n\ndef test_wr_badfolder():\n \"\"\"warn and continue if folder does not exist\"\"\"\n moment_vector = np.zeros(5)\n with pytest.warns(UserWarning, match=\"Cannot write\"):\n write_results(moment_vector, \"nonexistent-path-name/pout.txt\")\n\n\n@pytest.mark.skipif(sys.platform != 'win32', reason=\"Windows test\")\ndef test_wr_illegalname():\n \"\"\"warn and continue if file name is illegal\"\"\"\n moment_vector = np.zeros(5)\n with pytest.warns(UserWarning, match=\"Cannot write\"):\n write_results(moment_vector, \"?\")\n","sub_path":"python/testing/test_wr.py","file_name":"test_wr.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"526473413","text":"from django.conf.urls import url, include\nfrom django.contrib import admin\nfrom home import views as home_views\nfrom accounts import views as accounts_views\nfrom paypal.standard.ipn import urls as paypal_urls\nfrom paypal_store import views as paypal_views\nfrom products import views as product_views\nfrom magazines import views as magazines_views\nfrom threads import views as forum_views\nfrom polls import api_views\nfrom threads import api_views as thread_api_views\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^$', home_views.get_index, name='index'),\n url(r'^register/$', accounts_views.register, name='register'),\n url(r'^profile/$', accounts_views.profile, name='profile'),\n url(r'^login/$', accounts_views.login, name='login'),\n url(r'^logout/$', accounts_views.logout, name='logout'),\n url(r'^cancel_subscription/$', accounts_views.cancel_subscription, name='cancel_subscription'),\n #url(r'^subscriptions_webhook/$', accounts_views.subscriptions_webhook, name='subscriptions_webhook'),\n url(r'^a-very-hard-to-guess-url/', include(paypal_urls)),\n\n url(r'^paypal-return/$', paypal_views.paypal_return),\n url(r'^paypal-cancel/$', paypal_views.paypal_cancel),\n url(r'^products/$', product_views.all_products, name='products'),\n url(r'^magazines/$', 
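pingsweep.py above builds each target address with "%s{0}".format(i) % network_range, which only works because check_ip() first strips the host octet with a regex. The standard ipaddress module expresses the same sweep more directly; a minimal sketch assuming a CIDR-style argument such as 192.168.1.0/24:

import ipaddress
import subprocess

def sweep(cidr):
    # hosts() skips the network and broadcast addresses for us.
    for host in ipaddress.ip_network(cidr, strict=False).hosts():
        ip = str(host)
        # -c1: one probe; -W1: one-second timeout (Linux ping flags).
        alive = subprocess.call(
            ['ping', '-c1', '-W1', ip],
            stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) == 0
        if alive:
            yield ip

if __name__ == '__main__':
    for ip in sweep('192.168.1.0/24'):
        print(ip)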
magazines_views.all_magazines, name='magazines'),\n url(r'^forum/$', forum_views.forum, name='forum'),\n url(r'^threads/(?P\\d+)/$', forum_views.threads, name='threads'),\n url(r'^new_thread/(?P\\d+)/$', forum_views.new_thread, name='new_thread'),\n url(r'^thread/(?P\\d+)/$', forum_views.thread, name='thread'),\n url(r'^post/new/(?P\\d+)/$', forum_views.new_post, name='new_post'),\n url(r'^post/edit/(?P\\d+)/(?P\\d+)/$',forum_views.edit_post, name='edit_post'),\n url(r'^post/delete/(?P\\d+)/(?P\\d+)/$', forum_views.delete_post, name='delete_post'),\n url(r'^thread/vote/(?P\\d+)/(?P\\d+)/$', forum_views.thread_vote, name='cast_vote'),\n url(r'^threads/polls/$', api_views.PollViewSet.as_view(), name='threads_poll_api1'),\n url(r'^threads/polls/(?P[\\d]+)$', api_views.PollInstanceView.as_view(), name='poll-instance'),\n url(r'^threads/polls/vote/(?P\\d+)/$', api_views.VoteCreateView.as_view(), name='create_vote'),\n url(r'^threads/threads/$', api_views.ThreadViewSet.as_view(), name='threads_threads'),\n url(r'^threads/post/update/(?P[\\d+]+)/$', thread_api_views.PostUpdateView.as_view(),\n name=\"update-poll\"),\n url(r'^threads/post/delete/(?P[\\d]+)/$', thread_api_views.PostDeleteView.as_view(), name='delete-poll'),\n\n]\n","sub_path":"we_are_social/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"392455954","text":"\n\n#calss header\nclass _SARS():\n\tdef __init__(self,): \n\t\tself.name = \"SARS\"\n\t\tself.definitions = [u'abbreviation for Severe Acute Respiratory Syndrome: a serious infectious illness that causes difficulty in breathing and sometimes death']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_sars.py","file_name":"_sars.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"220940997","text":"import sqlite3\nfrom GannDAO import GannDAO\nganndao = GannDAO()\nconn = sqlite3.connect(\"stock.db\")\nentry = raw_input(\"Enter C/R/D -----> \")\n\nif entry == \"D\":\n symbol = raw_input(\"Enter the Symbol\")\n ganndao.delGann(conn, symbol)\nelif entry == \"C\":\n stype = raw_input(\"Enter the Type of Symbol\")\n symbol = raw_input(\"Enter the Symbol\")\n ganndao.insertGann(conn, stype, symbol)\nelif entry == \"R\":\n cursor = ganndao.selectGann(conn)\n for row in cursor:\n column = len(row)\n counter = 0\n while counter < column:\n print(row[counter])\n counter = counter + 1","sub_path":"gann.py","file_name":"gann.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"200392313","text":"import pymongo\nimport datetime\nimport numpy as np\nfrom gensim.models.doc2vec import Doc2Vec\nimport ipdb\n# import MeCab\n\nclass Sentence(dict):\n __client = pymongo.MongoClient()\n db = __client['language_analysis']\n collection_name = ''\n __getattr__ = dict.__getitem__\n __setattr__ = dict.__setitem__\n # DEFAULT_DOC2VEC_MODEL_NAME = \"jawiki_wakati_neo_200-210_17\"\n DEFAULT_DOC2VEC_MODEL_NAME = \"jawiki_wakati_neo_001-300_3\"\n # DEFAULT_DOC2VEC_MODEL_NAME = \"jawiki_line_200-300_5\"\n DEFAULT_COLLECTION_NAME = \"line_talk\"\n\n __structure__ = {\n '_id': int,\n # 'user_name': str,\n # 
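gann.py above routes every query through a GannDAO whose internals are not shown; if those methods interpolate the raw_input strings into SQL, typed symbols become injection vectors. A hedged sketch of the parameterized sqlite3 calls such a DAO might use (the gann table and its columns are assumptions, not the real schema):

import sqlite3

# In-memory database for the sketch; gann.py itself opens stock.db.
conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE IF NOT EXISTS gann (stype TEXT, symbol TEXT)')

def insert_gann(conn, stype, symbol):
    # '?' placeholders let sqlite3 quote the values safely.
    conn.execute('INSERT INTO gann (stype, symbol) VALUES (?, ?)',
                 (stype, symbol))
    conn.commit()

def del_gann(conn, symbol):
    conn.execute('DELETE FROM gann WHERE symbol = ?', (symbol,))
    conn.commit()

insert_gann(conn, 'equity', 'ACME')
print(conn.execute('SELECT * FROM gann').fetchall())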
'user_id': int,\n 'content': str,\n 'wakati_content': str,\n 'chasen_content': str,\n 'docvecs': dict\n }\n\n def __init__(self, initial_dict):\n for key, value_type in self.__structure__.items():\n self[key] = initial_dict.get(key)\n try:\n self[key] = value_type(self[key])\n except:\n pass\n\n # @overrides(dict)\n def update(self):\n return self.db[self.collection_name].update_one(\n {'_id': self._id},\n self\n )\n\n @classmethod\n def find(cls, *args, **kwargs):\n cls.db[cls.collection_name].find()\n talks = cls.db[cls.collection_name].find(*args, **kwargs)\n talk_list = []\n for i, talk in enumerate(talks):\n talk_list.append(cls(talk))\n return talk_list\n\n @classmethod\n def find_one(cls, filter, *args, **kwargs):\n talk = cls.db[cls.collection_name].find_one(filter, *args, **kwargs)\n if talk:\n return cls(talk)\n else:\n return cls({})\n\n @classmethod\n def update_one(cls, filter, update, upsert=False):\n return cls.db[cls.collection_name].update_one(filter, update, upsert)\n\n\n @classmethod\n def get_doc2vec_model(cls, doc2vec_model_name):\n doc2vec_model_name_base = '_'.join(doc2vec_model_name.split('_')[:-1])\n doc2vec_model_filename = 'models/{}/{}.model'.format(\n doc2vec_model_name_base, doc2vec_model_name)\n doc2vec_model = Doc2Vec.load(doc2vec_model_filename)\n return doc2vec_model\n\n @classmethod\n def infer_docvecs(cls, doc2vec_model_name=DEFAULT_DOC2VEC_MODEL_NAME,\n collection_name=DEFAULT_COLLECTION_NAME):\n cls.collection_name = collection_name\n doc2vec_model = cls.get_doc2vec_model(doc2vec_model_name)\n talks = cls.find({})\n for talk in talks:\n # print(talk.wakati_content)\n docvec = doc2vec_model.infer_vector(\n talk.wakati_content\n ).tolist()\n if talk.docvecs is None:\n talk.docvecs = {doc2vec_model_name: docvec}\n else:\n talk.docvecs[doc2vec_model_name] = docvec\n talk.update_one({\n '_id': talk._id,\n }, {'$set': {'docvecs': talk.docvecs}})\n # {doc2vec_model_name: docvec}\n\n @classmethod\n def most_similar_with_id(cls, id, topn=10,\n model_name=DEFAULT_DOC2VEC_MODEL_NAME,\n collection_name=DEFAULT_COLLECTION_NAME):\n cls.collection_name = collection_name\n base_talk = cls.find_one({'_id': id})\n # print(\"id: {}, content: {}\".format(base_talk._id, base_talk.content))\n if base_talk.docvecs:\n try:\n base_talk_docvec = np.array(base_talk.docvecs[model_name])\n except KeyError as e:\n raise FileExistsError(\"{} docvec is not exist. 
Please run Sentense.infer_docvecs for {}.\")\n cls.most_similar_with_docvec(\n base_talk_docvec, topn=topn,\n model_name=model_name, collection_name=collection_name)\n else:\n return None\n\n @classmethod\n def most_similar_with_doc(cls, document, topn=10,\n model_name=DEFAULT_DOC2VEC_MODEL_NAME,\n collection_name=DEFAULT_COLLECTION_NAME):\n print(document)\n wakati_tagger = MeCab.Tagger(\"-Owakati\")\n wakati_document = wakati_tagger.parse(str(document))\n doc2vec_model = cls.get_doc2vec_model(model_name)\n docvec = doc2vec_model.infer_vector(wakati_document)\n cls.most_similar_with_docvec(\n docvec, topn=topn,\n model_name=model_name, collection_name=collection_name)\n\n @classmethod\n def most_similar_with_docvec(cls, base_talk_docvec, topn=10,\n model_name=DEFAULT_DOC2VEC_MODEL_NAME,\n collection_name=DEFAULT_COLLECTION_NAME):\n cls.collection_name = collection_name\n doc2vec_model = cls.get_doc2vec_model(model_name)\n base_talk_docvec_length = np.linalg.norm(base_talk_docvec)\n talks = cls.find({})\n similarities = []\n ids = []\n for talk in talks:\n try:\n docvec = np.array(talk.docvecs[model_name])\n except KeyError as e:\n docvec = doc2vec_model.infer_vector(talk.wakati_content)\n talk.docvecs[model_name] = docvec\n talk.update()\n docvec_length = np.linalg.norm(docvec)\n similarity = np.dot(base_talk_docvec, docvec) / (\n base_talk_docvec_length * docvec_length)\n if similarity.any():\n similarities.append(similarity)\n ids.append(talk._id)\n most_similar_indexes = np.argsort(similarities)[::-1]\n for index in most_similar_indexes[:topn]:\n document = cls.find_one({'_id': ids[index]})\n reply_document = cls.find_one({'_id': ids[index] + 1})\n print(\"id: {}, similarity: {}, content: {}\".format(\n document._id, similarities[index], document.content,\n # np.linalg.norm(np.array(document.docvecs[model_name]))\n ))\n print(\"\\treply: {}\".format(reply_document['content']))\n\n\nif __name__ == '__main__':\n # Sentence.infer_docvecs()\n sentence = Sentence({})\n sentence.most_similar_with_id(1)\n # Sentence.most_similar_with_doc('あしたきますか')\n import ipdb\n ipdb.set_trace()","sub_path":"lang_analysis/doc2vec/sentence.py","file_name":"sentence.py","file_ext":"py","file_size_in_byte":6236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"196909414","text":"# coding: utf-8\n\"\"\"\nData sources for the model and output.\nCurrent entry point: \n model_spec, view_spec = get_specification()\n model_spec, view_spec = get_mock_specification(user_input)\n \n\"\"\"\nfrom pprint import pprint\nimport pandas as pd\nimport numpy as np\n\n\n###########################################################################\n## Sample (mock) proxies as func and constants - to use in this file\n###########################################################################\n\n \n\n# label, year, value\nDATA_PROXY = [ (\"GDP\", 2013, 66190.11992)\n , (\"GDP\", 2014, 71406.3992)\n , (\"GDP_IQ\", 2013, 101.3407976)\n , (\"GDP_IQ\", 2014, 100.6404858)\n , (\"GDP_IP\", 2013, 105.0467483)\n , (\"GDP_IP\", 2014, 107.1941886) ] \n \nDATA_PROXY_AS_DF = pd.DataFrame(\n { \"GDP\": [66190.11992, 71406.3992 ]\n , \"GDP_IQ\": [101.3407976, 100.6404858] \n , \"GDP_IP\": [105.0467483, 107.1941886]}\n , index = [2013, 2014])\n #[[\"GDP\", \"GDP_IQ\", \"GDP_IP\"]]\n \n# label, year, value\nCONTROLS_PROXY = [(\"GDP_IQ\", 2015, 95.0)\n , (\"GDP_IP\", 2015, 115.0)\n , (\"GDP_IQ\", 2016, 102.5)\n , (\"GDP_IP\", 2016, 113.0)\n , (\"is_forecast\", 2015, 1)\n , (\"is_forecast\", 
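Sentence.most_similar_with_docvec above recomputes a norm and a dot product per document inside a Python loop; stacking the stored vectors lets NumPy score every candidate with one matrix product. A minimal sketch of the same cosine ranking, independent of the Mongo wrapper:

import numpy as np

def rank_by_cosine(query_vec, doc_vecs, topn=10):
    # doc_vecs: (n_docs, dim) matrix of stored docvecs.
    q = np.asarray(query_vec, dtype=float)
    m = np.asarray(doc_vecs, dtype=float)
    sims = m @ q / (np.linalg.norm(m, axis=1) * np.linalg.norm(q))
    order = np.argsort(sims)[::-1][:topn]
    return [(int(i), float(sims[i])) for i in order]

docs = np.random.default_rng(0).normal(size=(5, 8))
print(rank_by_cosine(docs[2], docs, topn=3))  # doc 2 ranks itself first, sim 1.0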
2016, 1)\n ] \n \n# title, label, group, level, precision\n# ERROR: wont print cyrillic charactes, only whitespace.\nNAMES_CSV_PROXY = [(\"ВВП\", \"GDP\", \"Нацсчета\", 1, 0),\n (\"Индекс физ.объема ВВП\", \"GDP_IQ\", \"Нацсчета\", 2, 1),\n (\"Дефлятор ВВП\",\t \"GDP_IP\", \"Нацсчета\", 2, 1)]\n \nEQ_SAMPLE = [\"GDP(t) = GDP(t-1) * GDP_IP(t) / 100 * GDP_IQ(t) / 100\"]\n\n# change in test setting: one variable not in output \nROW_LABELS_IN_OUTPUT = [\"GDP\", \"GDP_IQ\", \"GDP_IP\"] # , \"is_forecast\"]\n\n##########################################################################\n## Sample (mock) proxies as func - to import outside this file \n###########################################################################\n\ndef _sample_for_xfill_dataframe_before_equations(): \n z = pd.DataFrame(\n { \"GDP\" : [66190.11992, 71406.3992, None, None]\n , \"GDP_IQ\": [101.3407976, 100.6404858, 95.0, 102.5] \n , \"GDP_IP\": [105.0467483, 107.1941886, 115.0, 113.0] \n , \"is_forecast\": [None, None, 1, 1]} \n , index = [2013, 2014, 2015, 2016] \n )\n \n # Test setting: dataframe before equations has less columns than union of controls and data\n return z[ROW_LABELS_IN_OUTPUT]\n\ndef _sample_for_xfill_array_after_equations():\n return np.array( \n [['', '2013', '2014', '2015', '2016']\n ,['GDP', 66190.11992, 71406.3992, '=C2*D3*D4/10000', '=D2*E3*E4/10000']\n ,['GDP_IQ', 101.3407976, 100.6404858, 95.0, 102.5]\n ,['GDP_IP', 105.0467483, 107.1941886, 115.0, 113.0]\n #,['is_forecast', \"\", \"\", 1, 1]\n ]\n , dtype=object) \n # WARNING: actual intention was '=C2*D3/100*D4/100', '=C2*D3/100*D4/100'\n \n###########################################################################\n## Entry points\n###########################################################################\n\ndef get_proxy_specification_dict():\n return {'data': convert_tuple_to_df(DATA_PROXY), \n 'controls': convert_tuple_to_df(CONTROLS_PROXY),\n 'equations': EQ_SAMPLE,\n 'format': ROW_LABELS_IN_OUTPUT }\n\n# WARNING: to de dereciated in favor of get_proxy_specification_dict()\ndef get_mock_specification():\n model_spec = [\n (\"Historic data as df\", convert_tuple_to_df(DATA_PROXY) ),\n (\"Names as dict\", {x[1]:x[0] for x in NAMES_CSV_PROXY} ),\n (\"Equations as list\", EQ_SAMPLE ),\n (\"Control parameters as df\", convert_tuple_to_df(CONTROLS_PROXY) )] \n \n # LATER: requires workaround\n view_spec = [\n ['Excel filename' , 'model.xls'],\n ['Sheet name' , 'model'],\n ['List of variables', ROW_LABELS_IN_OUTPUT] \n ]\n \n return model_spec, view_spec\n\n \ndef print_specification(specification): \n for spec in specification:\n print(\"\\n------ {}:\".format(spec[0]))\n pprint(spec[1])\n\n###########################################################################\n## General handling\n###########################################################################\n \ndef convert_tuple_to_df(tuple_):\n \"\"\"Returns a dataframe with years in rows and variables in columns. 
\n *lt* is a list of tuples like *data_proxy* and *controls_proxy*\"\"\" \n \n # Read dataframe\n df = pd.DataFrame(tuple_, columns=['prop', 'time', 'val'])\n # Pivot by time\n return df.pivot(index='time', columns='prop', values='val')\n \n###########################################################################\n## Historic data \n###########################################################################\n \ndef check_get_historic_data_as_dataframe():\n \"\"\"\n >>> check_get_historic_data_as_dataframe()\n True\n \"\"\"\n df1 = convert_tuple_to_df(DATA_PROXY)\n df2 = DATA_PROXY_AS_DF \n return df1.equals(df2)\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n \n # m, v = get_mock_specification()\n # print_specification(m) \n # print_specification(v) \n","sub_path":"issues/abandoned/orphan_files/data_source.py","file_name":"data_source.py","file_ext":"py","file_size_in_byte":5451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"530420959","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\n\nurlpatterns = patterns('',\n # Examples:\n url(r'^$', 'feedback.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n #url(r'^polls/', include('polls.urls', namespace=\"polls\")),\n url(r'^feedback/', include('feedback.urls', namespace=\"feedback\")),\n #url(r'^(?P\\w+)/$','feedback.views.index2', name='index2'),\n #url(r'^my/', 'feedback.views.my_view', name='my'),\n url(r'^login/', 'feedback.views.Login', name='login'),\n url(r'^getmail/', 'feedback.views.getmail', name='getmail'),\n url(r'^logout/', 'feedback.views.Logout', name='logout'),\n url(r'^register/', 'feedback.views.register', name='register'),\n url(r'^success/', 'feedback.views.success', name='success'),\n url(r'^unsuccess/', 'feedback.views.unsuccess', name='unsuccess'),\n #url(r'^analyse/', 'feedback.views.analyse', name='analyse'),\n url(r'^admin/', include(admin.site.urls)),\n)\n","sub_path":"coursefeedback/coursefeedback/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"125689261","text":"'''\r\n@created: 04-06-2015\r\n@author: WheelerM\r\n\r\nThe purpose of this library is hold all functions relating to file / error handling. 
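convert_tuple_to_df above leans on DataFrame.pivot to reshape the (label, year, value) tuples into the year-by-variable layout that DATA_PROXY_AS_DF spells out by hand; a quick standalone check of that reshaping:

import pandas as pd

rows = [("GDP", 2013, 66190.11992), ("GDP", 2014, 71406.3992),
        ("GDP_IQ", 2013, 101.3407976), ("GDP_IQ", 2014, 100.6404858)]

df = pd.DataFrame(rows, columns=['prop', 'time', 'val'])
wide = df.pivot(index='time', columns='prop', values='val')
print(wide)  # columns GDP and GDP_IQ, indexed by the years 2013 and 2014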
\r\n'''\r\nimport warnings as wn\r\n\r\ndef CardWarning(message, category, filename, lineno, file=None, line=None):\r\n return \"CARD: %s\" % (message)\r\n\r\ndef _make_gen(reader):\r\n b = reader(1024 * 1024)\r\n while b:\r\n yield b\r\n b = reader(1024*1024)\r\n\r\ndef returnLineCountFromFile(filepath):\r\n try:\r\n with open(filepath, 'rb') as fileinstance:\r\n f_gen = _make_gen(fileinstance.raw.read)\r\n return sum( buf.count(b'\\n') for buf in f_gen )\r\n except EnvironmentError:\r\n raise ValueError(' *** Cannot access the requested file: ' + filepath + \" ***\" )\r\n\r\ndef returnLineFromFile(filepath, linenumber = None, seperator = None):\r\n with open(filepath, \"r\") as fileinstance:\r\n try:\r\n if linenumber is None:\r\n return enumerate(fileinstance)\r\n else:\r\n if returnLineCountFromFile(filepath) < linenumber:\r\n wn.warn(\"linenumber is greater than the number of lines in the file returning Nonetype\\n\")\r\n return None\r\n else:\r\n for line, data in enumerate(fileinstance):\r\n if line == linenumber:\r\n if seperator is None:\r\n return data\r\n else:\r\n data = data.rstrip().split(seperator)\r\n return data\r\n except EnvironmentError:\r\n raise ValueError(' *** Cannot access the requested file: ' + filepath + \" ***\" )","sub_path":"PyTCG/CardSetup/FileHandling.py","file_name":"FileHandling.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"15979781","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Submitted by Harsh Srivastava\n# # Roll - 117CS0755\n\n# ## importing libraries\n\n# In[1]:\n\n\nimport random\n\n\n# ## fitness function\n# ## depends upon the number of collision in the horizontal and diagonal directions\n# ## maximum fitness in case of no collissions\n# ## maximum fitness = (n * (n - 1)) / 2 ,where n --> number of queens\n\n# In[2]:\n\n\ndef fitness(c):\n h_cols = sum([c.count(queen)-1 for queen in c])/2\n d_cols = 0\n\n n = len(c)\n left_diagonal = [0] * 2*n\n right_diagonal = [0] * 2*n\n for i in range(n):\n left_diagonal[i + c[i] - 1] += 1\n right_diagonal[len(c) - i + c[i] - 2] += 1\n\n d_cols = 0\n for i in range(2*n-1):\n counter = 0\n if left_diagonal[i] > 1:\n counter += left_diagonal[i]-1\n if right_diagonal[i] > 1:\n counter += right_diagonal[i]-1\n d_cols += counter / (n-abs(i-n+1))\n \n return int(maxFitness - (h_cols + d_cols)) \n\n\n# ## function for creating random chromosomes with binary values\n\n# In[3]:\n\n\ndef random_chromosome(size): #making random chromosomes \n return [ random.randint(1, size) for _ in range(size) ]\n\n\n# ## function for probability calculation\n# ## found by dividing fitness by the maximum fitness\n# ## so that all values are between 0 and 1\n\n# In[4]:\n\n\ndef probability(c, fitness):\n return fitness(c) / maxFitness\n\n\n# ## picking chromosomes for cross over\n\n# In[5]:\n\n\ndef random_pick(population, probabilities):\n popProbabilty = zip(population, probabilities)\n total = sum(w for c, w in popProbabilty)\n r = random.uniform(0, total)\n upto = 0\n for c, w in zip(population, probabilities):\n if upto + w >= r:\n return c\n upto += w\n assert False\n\n\n# ## doing cross_over between two chromosomes\n\n# In[6]:\n\n\ndef reproduce(x, y): \n c = random.randint(0, len(x) - 1)\n return x[0:c] + y[c:len(x)]\n\n\n# ## function for mutation\n# ## in this case we randomly chnage the bit value in the chromosome\n# ## has a very low probability\n\n# In[7]:\n\n\ndef mutate(x):\n c = random.randint(0, len(x) - 
1)\n m = random.randint(1, len(x))\n x[c] = m\n return x\n\n\n# ## GA algo implementation\n\n# In[8]:\n\n\ndef queen_da(population, fitness, maxFitness):\n pm = 0.03\n new_population = []\n max_fit_current = -1\n max_fit_chrom = None\n probabilities = [probability(n, fitness) for n in population]\n for i in range(len(population)):\n x = random_pick(population, probabilities) \n y = random_pick(population, probabilities) \n child = reproduce(x, y) \n if random.random() < pm:\n child = mutate(child)\n fit_child = fitness(child)\n if fit_child > max_fit_current:\n max_fit_chrom = child\n max_fit_current = fit_child\n new_population.append(child)\n if fitness(child) == maxFitness: break\n print_chromosome(max_fit_chrom)\n return new_population\n\n\n# ## printing a chromosome value\n\n# In[9]:\n\n\ndef print_chromosome(chrom):\n print(\"Chromosome = {}, Fitness = {}\"\n .format(str(chrom), fitness(chrom)))\n\n\n# ## taking number of queens\n\n# In[10]:\n\n\nnum_queens = 4\n\n\n# ## code block for number of queens input and simulation\n\n# In[11]:\n\n\nmaxFitness = (num_queens*(num_queens-1))/2 \npopulation = [random_chromosome(num_queens) for _ in range(100)]\n\ngeneration = 1\n\nwhile not maxFitness in [fitness(chrom) for chrom in population]:\n print(\"For Generation {}\".format(generation))\n population = queen_da(population, fitness, maxFitness)\n print()\n print(\"Maximum Fitness = {}\".format(max([fitness(n) for n in population])))\n generation += 1\nchrom_out = []\nprint(\"Generations needed = {}\".format(generation-1))\nfor chrom in population:\n if fitness(chrom) == maxFitness:\n print()\n print(\"Possible Solution: \")\n chrom_out = chrom\n print_chromosome(chrom)\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"ML Assign 8/ML Assign 8/N queens/n_queens.py","file_name":"n_queens.py","file_ext":"py","file_size_in_byte":3908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"53673828","text":"from flask import Flask,redirect, url_for, render_template,request, session, Blueprint\nfrom bs4 import BeautifulSoup as soup\nimport string\nimport requests\nimport wikipedia\nimport json\nfrom pprint import pprint\n\nfrom website import func\nimport PIL.Image\n\n\nfrom wikipedia.wikipedia import summary\n\n\n#from transformer into translate\n\nviews = Blueprint('views', __name__)\n#Global variables\n\ninfobox={}\nimages={}\ninfoSummary = {}\n\n\n\n\n#@views.route('/')\n@views.route(\"/\", methods=[\"POST\", \"GET\"])\ndef getInput():\n if request.method == 'POST':\n #Retrieve the seraching target\n aim = request.form['content']\n section = request.form[\"section\"]\n session[\"section\"] = section\n #Store it in session\n session[\"content\"] = aim\n return redirect(url_for(\"views.scrape\"))\n else:\n return render_template(\"search.html\")\n \n\n\n#Home page\n@views.route(\"/home\")\ndef displayHome():\n return render_template(\"home.html\")\n\n#Car service\n@views.route(\"/car\", methods=[\"POST\", \"GET\"])\ndef car():\n if request.method == 'POST':\n #Store car brand in the session\n session[\"car\"] = request.form[\"car\"]\n #Go to scrape_car\n return redirect(url_for(\"views.scrape_car\"))\n else:\n return render_template(\"car.html\")\n\n#Scraping car images\n@views.route(\"/scrape_car\", methods=[\"POST\", \"GET\"])\ndef scrape_car():\n cars = {}\n if request.method == \"POST\":\n pass\n else:\n if \"car\" in session:\n #Retrieve the car brand\n brand = session[\"car\"]\n else:\n return redirect(url_for(\"car\"))\n\n\n #Format the input\n brand = 
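For the n_queens fitness function above, a conflict-free board must score exactly maxFitness = n(n-1)/2. A compact standalone check that counts attacking pairs exactly (unlike the original, which scales its diagonal penalty by diagonal length) against a known 4-queens solution:

def attacking_pairs(board):
    # board[i] is the 1-based row of the queen in column i.
    n = len(board)
    pairs = 0
    for i in range(n):
        for j in range(i + 1, n):
            same_row = board[i] == board[j]
            same_diag = abs(board[i] - board[j]) == j - i
            if same_row or same_diag:
                pairs += 1
    return pairs

solution = [2, 4, 1, 3]          # a valid 4-queens placement
max_fitness = 4 * 3 // 2         # 6 non-attacking pairs possible
print(max_fitness - attacking_pairs(solution))  # 6 -> solved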
func.formatStr(brand)\n #Retrieve image tag from infobox html \n img = func.access_car_wiki(brand)\n\n brand = func.formatStr(brand)\n wikiURL = \"https://en.wikipedia.org/wiki/\"+brand\n data = requests.get(wikiURL)\n #Returns an array containing all the html code\n contents = soup(data.content, \"html.parser\")\n #Returns an array containing infobox html code\n info = contents(\"td\", {\"class\":\"infobox-image\"})[0]\n #print(info)\n img = info.find_all(\"img\")[0]\n \n\n cars[\"path\"] = \"C:\\OSU\\CS361\\WebScrapper\\car.json\"\n cars[\"img\"] = \"https:\"+img[\"src\"]\n cars[\"brand\"] = brand\n\n json_car = json.dumps(cars, indent=len(cars))\n with open(\"car.json\", \"w\") as f:\n f.write(json_car)\n\n\n cars = func.write_cars_json(brand, img)\n return render_template(\"scrape_car.html\", name=brand, img=cars[\"img\"])\n \n\n\n\n@views.route(\"/scrape\", methods=[\"POST\", \"GET\"])\ndef scrape():\n \n if request.method == 'POST':\n #Retrieve the language from the user's request\n language = request.form['language']\n #Store the language at the session temporarily\n session[\"language\"] = language\n info = session[\"summary\"]\n #Update the summary json file by adding language\n summary = func.update_summary(info, language)\n print(summary)\n return render_template(\"scrape.html\", part = session[\"section\"], summary=summary[\"context\"], content=infobox, language=language)\n\n else:\n content = \"\"\n if \"content\" in session and \"section\" in session:\n content = session[\"content\"]\n section = session[\"section\"]\n else:\n return redirect(url_for(\"getInput\"))\n \n #Get all the content from Wikipedia\n search_result = wikipedia.page(wikipedia.search(content)[0])\n #Retrieve the scraping summary\n infoSummary = func.write_summary_json(search_result.summary)\n #Retrieve the scraping images\n images = func.write_image_json(search_result.images)\n session[\"summary\"] = infoSummary\n\n return render_template(\"scrape.html\", part=section, summary=infoSummary[\"context\"], images=images[\"links\"])\n\n\n\n@views.route(\"/transform\", methods=[\"POST\", \"GET\"])\ndef transform():\n if request.method == \"POST\":\n if \"language\" in session:\n language = session[\"language\"]\n \n #Translate via my partner's service in the backend\n #Support other language\n with open(\"output.txt\", \"r\", encoding=\"utf8\") as f:\n #Retrieve scraping data(dictionary)\n translated_content = f.read()\n #Go to a separate web page to display translated content\n return render_template(\"transform.html\", language=language, content=translated_content)\n else:\n return redirect(url_for(\"views.scrape\"))\n\n\n@views.route(\"/carImage\", methods=[\"POST\", \"GET\"])\ndef showImage():\n if request.method == \"POST\":\n\n imgSrc = \"website/new_img.jpg\"\n carImage = PIL.Image.open(imgSrc)\n carImage.show()\n \n return render_template(\"carImage.html\")\n\n else:\n return redirect(url_for(\"views.scrape_car\"))\n\n\n\n","sub_path":"website/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"432930034","text":"# -*- coding: utf-8 -*-\nfrom Basic.Define import *\nfrom BasicModule import *\nfrom Basic.LogHandler import *\nfrom JackBasicStructLib.Model.Template.ModelTemplate import ModelTemplate\nfrom Evaluation.Accuracy import *\nfrom Evaluation.Loss import *\nimport math\n\n\nclass NLCANet(ModelTemplate):\n def __init__(self, args, training=True):\n self.__args = args\n 
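scrape_car above serializes with json.dumps(cars, indent=len(cars)) followed by a manual write; indent is a per-level space count, so tying it to the dict length gives drifting output, and json.dump does the write in one step. A minimal sketch (the values are placeholders):

import json

cars = {"brand": "Example", "img": "https://example.invalid/car.png"}

with open("car.json", "w", encoding="utf-8") as f:
    # indent is a number of spaces per nesting level, not the key count.
    json.dump(cars, f, indent=2)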
self.input_imgL_id = 0\n self.input_imgR_id = 1\n self.label_disp_id = 0\n self.output_coarse_img_id = 0\n self.output_refine_img_id = 1\n\n if training == True:\n self.height = args.corpedImgHeight\n self.width = args.corpedImgWidth\n else:\n self.height = args.padedImgHeight\n self.width = args.padedImgWidth\n\n def GenInputInterface(self):\n input = []\n\n args = self.__args\n imgL = tf.placeholder(tf.float32, shape=(\n args.batchSize * args.gpu, self.height, self.width, 3))\n input.append(imgL)\n\n imgR = tf.placeholder(tf.float32, shape=(\n args.batchSize * args.gpu, self.height, self.width, 3))\n input.append(imgR)\n\n return input\n\n def GenLabelInterface(self):\n label = []\n args = self.__args\n\n imgGround = tf.placeholder(tf.float32, shape=(\n args.batchSize * args.gpu, self.height, self.width))\n label.append(imgGround)\n\n return label\n\n def Optimizer(self, lr):\n return tf.train.AdamOptimizer(learning_rate=lr)\n\n def Accuary(self, output, label):\n acc = []\n\n coarse_acc = MatchingAcc(output[self.output_coarse_img_id], label[self.label_disp_id])\n refine_acc = MatchingAcc(output[self.output_refine_img_id], label[self.label_disp_id])\n acc.append(coarse_acc[1])\n acc.append(refine_acc[1])\n\n return acc\n\n def Loss(self, output, label):\n loss = []\n loss_0 = MAE_Loss(output[self.output_coarse_img_id], label[self.label_disp_id])\n loss_1 = MAE_Loss(output[self.output_refine_img_id], label[self.label_disp_id])\n total_loss = loss_0 + loss_1\n loss.append(total_loss)\n loss.append(loss_0)\n loss.append(loss_1)\n return loss\n\n # This is the Inference, and you must have it!\n def Inference(self, input, training=True):\n imgL, imgR = self.__GetVar(input)\n coarse_map, refine_map = self.__NetWork(imgL, imgR, self.height, self.width, training)\n return self.__GenRes(coarse_map, refine_map)\n\n def __NetWork(self, imgL, imgR, height, width, training=True):\n with tf.variable_scope(\"NLCANet\"):\n Info('├── Begin Build ExtractUnaryFeature')\n with tf.variable_scope(\"ExtractUnaryFeature\") as scope:\n imgL_feature = ExtractUnaryFeatureModule(imgL, training=training)\n scope.reuse_variables()\n imgR_feature = ExtractUnaryFeatureModule(imgR, training=training)\n Info('│ └── After ExtractUnaryFeature:' + str(imgL_feature.get_shape()))\n\n Info('├── Begin Build Cost Volume')\n cost_vol = BuildCostVolumeModule(imgL_feature, imgR_feature,\n IMG_DISPARITY, training=training)\n Info('│ └── After Cost Volume:' + str(cost_vol.get_shape()))\n\n Info('├── Begin Build 3DMatching')\n coarse_map = MatchingModule(cost_vol, training=training)\n Info('│ └── After 3DMatching:' + str(coarse_map.get_shape()))\n\n Info('└── Begin Build DispMapRefine')\n refine_map = DispRefinementModule(coarse_map, imgL,\n imgL_feature, training=training)\n Info(' └── After DispMapRefine:' + str(refine_map.get_shape()))\n\n return coarse_map, refine_map\n\n def __GetVar(self, input):\n return input[self.input_imgL_id], input[self.input_imgR_id]\n\n def __GenRes(self, coarse_map, refine_map):\n return [coarse_map, refine_map]\n","sub_path":"Source/ModelImplementation/NLCANet/Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":4041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"544423608","text":"__author__ = 'Joe Linn'\n\nimport unittest\nimport pylastica\nfrom tests.base import Base\n\n\nclass HttpTest(unittest.TestCase, Base):\n def get_config(self):\n return [\n [\n {'transport': 'Http', 'host': self._get_hosts()[0]['host']},\n 'GET'\n ],\n 
[\n {'transport': {'type': 'Http', 'postWithRequestBody': False}, 'host': self._get_hosts()[0]['host']},\n 'GET'\n ],\n [\n {'transport': {'type': 'Http', 'postWithRequestBody': True}, 'host': self._get_hosts()[0]['host']},\n 'POST'\n ]\n ]\n\n def test_dynamic_http_method_based_on_config_parameter(self):\n for test_data in self.get_config():\n http_method = test_data[1]\n config = test_data[0]\n client = pylastica.Client(**config)\n\n index = client.get_index('dynamic_http_method_test')\n index.create(options=True)\n\n doc_type = index.get_doc_type('test')\n doc_type.add_document(pylastica.Document(1, {'test': 'test'}))\n index.refresh()\n\n result_set = index.search('test')\n\n self.assertTrue('test' in result_set[0].data)\n index.delete()\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/transport/test_http.py","file_name":"test_http.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"337206462","text":"from typing import Dict\n\nfrom meiga import Result, Error, Success\nfrom meiga.assertions import assert_success, assert_failure\nfrom meiga.decorators import meiga\n\nfrom alice import Onboarding, Config\n\n\ndef test_should_return_an_error_when_the_api_key_is_not_configured():\n\n config = Config()\n onboarding = Onboarding.from_config(config)\n\n result = onboarding.create_user()\n\n assert_failure(result)\n\n\ndef test_should_do_complete_onboarding_process(\n given_valid_api_key,\n given_any_selfie_image_media_data,\n given_any_document_front_media_data,\n given_any_document_back_media_data,\n):\n @meiga\n def do_complete_onboarding() -> Result[dict, Error]:\n config = Config(api_key=given_valid_api_key)\n\n onboarding = Onboarding.from_config(config)\n\n user_id = onboarding.create_user().unwrap_or_return()\n onboarding.add_selfie(\n user_id=user_id, media_data=given_any_selfie_image_media_data\n ).unwrap_or_return()\n document_id = onboarding.create_document(\n user_id=user_id, type=\"idcard\", issuing_country=\"ESP\"\n ).unwrap_or_return()\n onboarding.add_document(\n user_id=user_id,\n document_id=document_id,\n media_data=given_any_document_front_media_data,\n side=\"front\",\n manual=True,\n ).unwrap_or_return()\n onboarding.add_document(\n user_id=user_id,\n document_id=document_id,\n media_data=given_any_document_back_media_data,\n side=\"back\",\n manual=True,\n ).handle()\n onboarding.document_properties(\n user_id=user_id, document_id=document_id\n ).unwrap_or_return()\n\n report = onboarding.create_report(user_id=user_id).unwrap_or_return()\n\n certificate_id = onboarding.create_certificate(\n user_id=user_id\n ).unwrap_or_return()\n\n _ = onboarding.retrieve_certificate(\n user_id=user_id, certificate_id=certificate_id\n ).unwrap_or_return()\n\n _ = onboarding.retrieve_certificates(user_id=user_id).unwrap_or_return()\n\n onboarding.delete_user(user_id).unwrap_or_return()\n\n return Success(report)\n\n result = do_complete_onboarding()\n\n assert_success(result, value_is_instance_of=Dict)\n","sub_path":"tests/test_integration_onboarding.py","file_name":"test_integration_onboarding.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"191822874","text":"# uncompyle6 version 3.6.7\n# Python bytecode 2.6 (62161)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: 
/home/uittenbroek/Projects/buildout-nuffic/src/collective.newrelic/collective/newrelic/patches/catalog_tool.py\n# Compiled at: 2013-12-24 05:41:42\nimport newrelic.agent, newrelic.api\nfrom Products.CMFPlone.CatalogTool import CatalogTool\nfrom collective.newrelic.utils import logger\nCatalogTool.original_cmfplone_catalogtool_searchResults = CatalogTool.searchResults\n\ndef newrelic_searchResults(self, REQUEST=None, **kw):\n trans = newrelic.agent.current_transaction()\n with newrelic.api.database_trace.DatabaseTrace(trans, str(kw), self):\n result = self.original_cmfplone_catalogtool_searchResults(REQUEST, **kw)\n return result\n\n\nCatalogTool.searchResults = newrelic_searchResults\nlogger.info('Patched Products.CMFPlone.CatalogTool:CatalogTool.searchResults with instrumentation')","sub_path":"pycfiles/collective.newsflash-1.0/catalog_tool.py","file_name":"catalog_tool.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"345494964","text":"import os\nimport time\nimport subprocess\nimport torch as t\n\nfrom jukebox.hparams import Hyperparams\nfrom jukebox.utils.torch_utils import empty_cache\nfrom jukebox.utils.audio_utils import save_wav, load_audio\nfrom jukebox.make_models import make_model\nfrom jukebox.align import get_alignment\nfrom jukebox.save_html import save_html\nfrom jukebox.utils.sample_utils import split_batch, get_starts\nfrom jukebox.utils.dist_utils import print_once\nimport fire\n\n\ndef sample_partial_window(zs, labels, sampling_kwargs, level, prior, tokens_to_sample, hps):\n \"\"\"Sample a partial window of length= prior.n_ctx:\n for start in get_starts(total_length, prior.n_ctx, hop_length):\n zs = sample_single_window(zs, labels, sampling_kwargs, level, prior, start, hps)\n else:\n zs = sample_partial_window(zs, labels, sampling_kwargs, level, prior, total_length, hps)\n return zs\n\n\ndef _sample(zs, labels, sampling_kwargs, priors, sample_levels, hps):\n \"\"\"Sample multiple levels\"\"\"\n alignments = None\n for level in reversed(sample_levels):\n prior = priors[level]\n prior.cuda()\n empty_cache()\n\n # Set correct total_length, hop_length, labels and sampling_kwargs for level\n assert hps.sample_length % prior.raw_to_tokens == 0, f\"Expected sample_length {hps.sample_length} to be multiple of {prior.raw_to_tokens}\"\n total_length = hps.sample_length//prior.raw_to_tokens\n hop_length = int(hps.hop_fraction[level]*prior.n_ctx)\n zs = sample_level(zs, labels[level], sampling_kwargs[level], level, prior, total_length, hop_length, hps)\n\n prior.cpu()\n empty_cache()\n\n # Decode sample\n x = priors[-1].decode(zs[level:], start_level=level, bs_chunks=zs[level].shape[0])\n logdir = f\"{hps.job_id}_{hps.name}/level_{level}\"\n if not os.path.exists(logdir):\n os.makedirs(logdir)\n t.save(dict(zs=zs, labels=labels, sampling_kwargs=sampling_kwargs, x=x), f\"{logdir}/data.pth.tar\")\n save_wav(logdir, x, hps.sr)\n if alignments is None and priors[-1].n_tokens > 0:\n alignments = get_alignment(x, zs, labels[-1], priors[-1], sampling_kwargs[-1]['fp16'], hps)\n save_html(logdir, x, zs, labels[-1], alignments, hps)\n return zs\n\n\ndef ancestral_sample(labels, sampling_kwargs, priors, hps):\n \"\"\"Generate ancestral samples given a list of artists and genres\"\"\"\n sample_levels = list(range(len(priors)))\n zs = [t.zeros(hps.n_samples,0,dtype=t.long, device='cuda') for _ in range(len(priors))]\n zs = _sample(zs, labels, sampling_kwargs, priors, sample_levels, hps)\n return 
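catalog_tool.py above shows the standard monkey-patch shape: stash the original method on the class, define a wrapper that delegates to it, then reassign the attribute. The same pattern in a dependency-free sketch (Target and its search method are stand-ins, not the Plone API):

import functools

class Target:
    def search(self, query):
        return ['result-for-%s' % query]

# Keep a handle to the original so the wrapper can delegate to it.
Target._original_search = Target.search

@functools.wraps(Target.search)
def instrumented_search(self, query):
    print('instrumenting search(%r)' % query)  # e.g. start a trace here
    return self._original_search(query)

Target.search = instrumented_search
print(Target().search('foo'))  # prints the trace line, then the results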
zs\n\n\ndef upsample(zs, labels, sampling_kwargs, priors, hps):\n \"\"\"Upsample given already generated upper-level codes\"\"\"\n sample_levels = list(range(len(priors) - 1))\n zs = _sample(zs, labels, sampling_kwargs, priors, sample_levels, hps)\n return zs\n\n\ndef primed_sample(x, labels, sampling_kwargs, priors, hps):\n \"\"\"Prompt the model with raw audio input (dimension: NTC) and generate continuations\"\"\"\n sample_levels = list(range(len(priors)))\n zs = priors[-1].encode(x, start_level=0, end_level=len(priors), bs_chunks=x.shape[0])\n zs = _sample(zs, labels, sampling_kwargs, priors, sample_levels, hps)\n return zs\n\n\ndef load_prompts(audio_files, duration, hps):\n \"\"\"Load `duration` seconds of the given audio files to use as prompts\"\"\"\n xs = []\n for audio_file in audio_files:\n x = load_audio(audio_file, sr=hps.sr, duration=duration, offset=0.0, mono=True)\n x = x.T # CT -> TC\n xs.append(x)\n while len(xs) < hps.n_samples:\n xs.extend(xs)\n xs = xs[:hps.n_samples]\n x = t.stack([t.from_numpy(x) for x in xs])\n x = x.to('cuda', non_blocking=True)\n return x\n\n\ndef save_samples(model, device, hps, sample_hps, metas: list):\n \"\"\"Generate and save samples, alignment, and webpage for visualization.\"\"\"\n print(hps)\n from jukebox.lyricdict import poems, gpt_2_lyrics\n vqvae, priors = make_model(model, device, hps)\n\n assert hps.sample_length//priors[-2].raw_to_tokens >= priors[-2].n_ctx, f\"Upsampling needs atleast one ctx in get_z_conds. Please choose a longer sample length\"\n assert isinstance(metas, list)\n total_length = hps.total_sample_length_in_seconds * hps.sr\n offset = 0\n while len(metas) < hps.n_samples:\n metas.extend(metas)\n metas = metas[:hps.n_samples]\n\n labels = [prior.labeller.get_batch_labels(metas, 'cuda') for prior in priors]\n for label in labels:\n assert label['y'].shape[0] == hps.n_samples\n\n lower_level_chunk_size = 32\n lower_level_max_batch_size = 16\n if model == '1b_lyrics':\n chunk_size = 32\n max_batch_size = 16\n else:\n chunk_size = 16\n max_batch_size = 3\n sampling_kwargs = [dict(temp=0.99, fp16=True, chunk_size=lower_level_chunk_size, max_batch_size=lower_level_max_batch_size),\n dict(temp=0.99, fp16=True, chunk_size=lower_level_chunk_size, max_batch_size=lower_level_max_batch_size),\n dict(temp=0.99, fp16=True, chunk_size=chunk_size, max_batch_size=max_batch_size)]\n\n if sample_hps.mode == 'ancestral':\n ancestral_sample(labels, sampling_kwargs, priors, hps)\n elif sample_hps.mode == 'primed':\n assert sample_hps.audio_file is not None\n audio_files = sample_hps.audio_file.split(',')\n top_raw_to_tokens = priors[-1].raw_to_tokens\n duration = (int(sample_hps.prompt_length_in_seconds * hps.sr) // top_raw_to_tokens) * top_raw_to_tokens\n x = load_prompts(audio_files, duration, hps)\n primed_sample(x, labels, sampling_kwargs, priors, hps)\n else:\n raise ValueError(f'Unknown sample mode {sample_hps.mode}.')\n\n\ndef run(mode='ancestral', audio_file=None, prompt_length_in_seconds=12.0, port=29500):\n from jukebox.utils.dist_utils import setup_dist_from_mpi\n from jukebox.utils import queue\n # setup distributed communications\n rank, local_rank, device = setup_dist_from_mpi(port=port)\n while True:\n # connect to db\n db, cur = queue.connectdb()\n offset = 0\n # get the next job\n job = queue.get_next_job(cur)\n if job:\n print(job)\n job_id = job['job_id']\n kw = dict()\n kw['sr'] = 44100\n kw['n_samples'] = 3\n kw['hop_fraction'] = (0.5, 0.5, 0.25)\n kw['model'] = '5b_lyrics'\n kw['levels'] = 3\n 
kw['sample_length_in_seconds'] = int(job['params']['length'])\n kw['total_sample_length_in_seconds'] = int(job['params']['length'])\n kw['n_samples'] = 15 if '5b_lyrics' == job['params']['model'] else 16\n kw['job_id'] = job_id\n kw['name'] = job['params']['name']\n hps = Hyperparams(kw)\n # artist, lyrics, genre\n metas = Hyperparams(dict(artist=job['params']['artist'],\n genre=job['params']['genre'],\n lyrics=job['params']['lyrics'],\n total_length=job['params']['length']*kw['sr'], # remove hardcoded sr\n offset=offset))\n print(hps)\n sample_hps = Hyperparams(dict(mode=mode,\n audio_file=audio_file,\n prompt_length_in_seconds=prompt_length_in_seconds))\n # Lock the job\n queue.lock(cur, job_id)\n # Start the job\n queue.update_status(cur, job_id, \"top_started\")\n # Log the URL\n curl = subprocess.Popen(os.path.expanduser('./get_ip.sh'), stdout=subprocess.PIPE)\n ip, _ = curl.communicate() # (ip, error)\n url = \"http://{}/jukebox/{}_{}/\".format(ip.decode().strip(), job_id, job['params']['name'])\n\n queue.log(cur,\n job_id,\n \"URL: http://{}/jukebox/{}_{}/\".format(ip.decode().strip(), job_id, job['params']['name']))\n # close db connection to avoid timeout error after sampling\n queue.closedb(db)\n # Run the full generating script here\n with t.no_grad():\n save_samples(job['params']['model'], device, hps, sample_hps, [metas])\n # FINISH\n # open fresh db connection\n db, cur = queue.connectdb()\n # update status\n queue.update_status(cur, job_id, \"upsampling_done\")\n queue.closedb(db)\n else:\n # pause the program for a minute and check back for new jobs\n print('Zzz...')\n time.sleep(60)\n # break the loop\n # break\n\nif __name__ == '__main__':\n fire.Fire(run)\n","sub_path":"jukebox/sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":11227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"465513928","text":"import numpy\nfrom sklearn.decomposition import PCA\nimport os\n\n\ndef DXSingleCalculation(data, label):\n listZero, listOne = [], []\n for index in range(len(data)):\n if label[index] == 0: listZero.append(data[index])\n if label[index] == 1: listOne.append(data[index])\n print(numpy.mean(listOne), numpy.mean(listZero))\n print(numpy.std(listOne), numpy.std(listZero))\n DXScore = (numpy.mean(listOne) - numpy.mean(listZero)) * (numpy.mean(listOne) - numpy.mean(listZero)) / \\\n (numpy.std(listOne) * numpy.std(listOne) + numpy.std(listZero) * numpy.std(listZero))\n return DXScore\n\n\ndef DXFeatureSelection(data, label, maxFeatures=1):\n totalScore = []\n for index in range(numpy.shape(data)[1]):\n print('\\rTreating %d/%d' % (index, numpy.shape(data)[1]), end='')\n score = DXSingleCalculation(data=data[:, index], label=label)\n print('\\n', score)\n totalScore.append(score)\n\n print('\\n')\n featuresSelected = []\n for _ in range(maxFeatures):\n featuresSelected.append(numpy.argmax(totalScore))\n totalScore[numpy.argmax(totalScore)] = 0\n print(featuresSelected)\n\n results = []\n for indexX in range(numpy.shape(data)[0]):\n current = []\n for indexY in featuresSelected:\n current.append(data[indexX][indexY])\n results.append(current)\n print(numpy.shape(results))\n return results\n\n\nif __name__ == '__main__':\n loadpath = 'D:/LIDC/LBP-Npy/R=3_P=24_Normalization/Part%d-Data.npy'\n labelpath = 'D:/LIDC/LBP-Npy/R=3_P=24_Normalization/Part%d-Label.npy'\n os.makedirs('D:/LIDC/LBP-Npy/R=3_P=24_DX/')\n savepath = 'D:/LIDC/LBP-Npy/R=3_P=24_DX/Part%d-Data.npy'\n\n totalData, totalLabel, totalThreshold = 
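sample_level in jukebox/sample.py above slides an n_ctx-token window forward by hop_length, with get_starts supplying the offsets. get_starts itself lives in jukebox.utils.sample_utils and is not shown, so the helper below is an assumed reading of it: step by hop_length and clamp the final window so it still ends exactly at total_length:

def window_starts(total_length, n_ctx, hop_length):
    # Assumed behaviour: advance by hop_length; pull the last start back
    # so its window covers total_length without running past the end.
    starts = []
    start = 0
    while start < total_length:
        if start + n_ctx >= total_length:
            starts.append(max(total_length - n_ctx, 0))
            break
        starts.append(start)
        start += hop_length
    return starts

print(window_starts(total_length=100, n_ctx=32, hop_length=16))
# [0, 16, 32, 48, 64, 68] -- the final window is [68, 100)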
[], [], []\n for index in range(5):\n data = numpy.load(loadpath % index)\n label = numpy.argmax(numpy.load(labelpath % index), axis=1)\n\n totalData.extend(data)\n totalLabel.extend(label)\n\n totalThreshold.append(numpy.shape(data)[0])\n\n totalData = numpy.array(totalData)\n totalLabel = numpy.array(totalLabel)\n print(numpy.shape(totalData), numpy.shape(totalLabel))\n\n result = DXFeatureSelection(data=totalData, label=totalLabel, maxFeatures=30)\n print(numpy.shape(result))\n\n startPosition = 0\n for index in range(5):\n print(numpy.shape(result[startPosition:startPosition + totalThreshold[index]]))\n numpy.save(savepath % index, result[startPosition:startPosition + totalThreshold[index]])\n startPosition += totalThreshold[index]\n","sub_path":"LIDC_Project/Pretreatment_Raw/Step3_DXResult.py","file_name":"Step3_DXResult.py","file_ext":"py","file_size_in_byte":2544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"398927701","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\n File Name: proxy_spider.py\n Date: 09/13/2017\n Author: hackrflov\n Email: hackrflov@gmail.com\n Python Version: 2.7\n\"\"\"\n\n\nimport re\nimport json\nimport time\nimport logging\nlog = logging.getLogger('scrapy.spider')\nfrom lxml import html\nfrom datetime import datetime, timedelta\n\nimport scrapy\nimport crawler.settings as st\nfrom pymongo import MongoClient\n\nclass ProxySpider(scrapy.Spider):\n\n name = 'proxy'\n\n def __init__(self, *args, **kwargs):\n super(ProxySpider, self).__init__(*args, **kwargs)\n self.connect()\n\n def connect(self):\n log.info('Connecting to MongoDB...')\n host = st.MONGO_HOST\n db = st.MONGO_DB\n usr = st.MONGO_USERNAME\n pwd = st.MONGO_PASSWORD\n if usr and pwd:\n uri = 'mongodb://{u}:{p}@{h}/{d}'.format(u=usr,p=pwd,h=host,d=db)\n else:\n uri = 'mongodb://{h}/{d}'.format(h=host,d=db)\n client = MongoClient(uri)\n self.clt = client[db][st.MONGO_COLLECTION]\n\n def start_requests(self):\n while True:\n log.info('Start to fetch proxy...')\n meta = {'download_timeout': st.CRAWL_TIMEOUT}\n last_dt = datetime.now()\n\n for url in st.PROXY_SITES_BY_REGX['urls']:\n yield scrapy.Request(url=url, meta=meta, dont_filter=True, callback=self.parse_regx)\n\n for site in st.PROXY_SITES_BY_XPATH:\n meta['ip_xpath'] = site['ip_xpath']\n meta['port_xpath'] = site['port_xpath']\n yield scrapy.Request(url=url, meta=meta, dont_filter=True, callback=self.parse_xpath)\n\n for site in st.PROXY_SITES_BY_TXT:\n meta['ip_key'] = site['ip_key']\n meta['port_key'] = site['port_key']\n yield scrapy.Request(url=url, meta=meta, dont_filter=True, callback=self.parse_txt)\n\n log.info('Fetching is finished, waiting for parsing...')\n time.sleep(10)\n\n log.info('Start to update proxy...')\n while True:\n cur_dt = datetime.now()\n if cur_dt - last_dt >= timedelta(seconds=st.FETCH_INTERVAL): # should restart fecthing now\n last_dt = cur_dt\n break\n else:\n log.info('Before refresh: having {} proxies'.format(self.clt.count()))\n docs = self.clt.find()\n for doc in docs:\n proxy = 'http://{}'.format(doc['ip_port'])\n meta = {'proxy': proxy, 'download_timeout': st.UPDATE_TIMEOUT, 'phase': 'update' }\n yield scrapy.Request(url=st.TEST_URL, meta=meta, dont_filter=True, callback=self.parse_test)\n log.debug('Testing [{}]...'.format(doc['ip_port']))\n\n # Deal with records after each round\n self.clt.update({'ace_times': {'$gt': st.MAX_ACE_TIMES } }, { '$set': { 'ace_times': 1, 'bad_times': 0 } }) # if reach record limit, reset it\n 
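DXSingleCalculation above scores one feature with the Fisher-style ratio (mean1 - mean0)^2 / (std1^2 + std0^2) between the two classes; the same criterion can be vectorized over every feature column at once:

import numpy as np

def dx_scores(data, labels):
    # data: (n_samples, n_features); labels: 0/1 per sample.
    data = np.asarray(data, dtype=float)
    labels = np.asarray(labels)
    pos, neg = data[labels == 1], data[labels == 0]
    num = (pos.mean(axis=0) - neg.mean(axis=0)) ** 2
    den = pos.std(axis=0) ** 2 + neg.std(axis=0) ** 2
    return num / den

rng = np.random.default_rng(0)
x = rng.normal(size=(100, 3))
x[:50, 0] += 3.0                  # only feature 0 separates the classes
y = np.array([1] * 50 + [0] * 50)
scores = dx_scores(x, y)
print(scores)                     # feature 0 scores far above the rest
print(int(np.argmax(scores)))     # -> 0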
self.clt.delete_many({'$where': \"this.ace_times < this.bad_times\" }) # remove terrible proxies\n\n log.info('All update requests have been send, waiting for parsing...')\n time.sleep(st.UPDATE_INTERVAL)\n\n def parse_regx(self, response):\n proxy_list = re.findall(st.PROXY_REGX, response.body)\n for ip_port in proxy_list:\n meta = {'proxy': 'http://{}'.format(ip_port), 'phase': 'fetch' }\n yield scrapy.Request(url=st.TEST_URL, meta=meta, dont_filter=True, callback=self.parse_test)\n log.debug('Testing [{}]...'.format(ip_port))\n\n def parse_xpath(self, response):\n r = html.fromstring(response.body)\n ip_list = r.xpath(response.meta['ip_xpath'])\n ip_list = [ip for ip in ip_list if re.match(r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}',ip)]\n port_list = r.xpath(response.meta['port_xpath'])\n for i in range(len(ip_list)):\n ip_port = ip_list[i] + \":\" + port_list[i]\n meta = {'proxy': 'http://{}'.format(ip_port), 'phase': 'fetch' }\n yield scrapy.Request(url=st.TEST_URL, meta=meta, dont_filter=True, callback=self.parse_test)\n log.debug('Testing [{}]...'.format(ip_port))\n\n def parse_txt(self, response):\n data = response.body.split('\\n')\n for msg in data[:-1]:\n msg = json.loads(msg)\n ip = msg[response.meta['ip_key']]\n port = msg[response.meta['port_key']]\n ip_port = '{ip}:{port}'.format(ip=ip, port=port)\n meta = {'proxy': 'http://{}'.format(ip_port), 'phase': 'fetch' }\n yield scrapy.Request(url=st.TEST_URL, meta=meta, dont_filter=True, callback=self.parse_test)\n log.debug('Testing [{}]...'.format(ip_port))\n\n def parse_test(self, response):\n ip_port = re.sub('http://', '', response.meta['proxy'])\n try:\n if 'exception' in response.meta:\n raise Exception(response.meta['exception'])\n else:\n data = json.loads(response.body)['data']['fid']\n seconds = response.request.meta['download_latency']\n self.clt.update_one({ 'ip_port': ip_port }, { '$min': { 'best': seconds }, '$inc': { 'ace_times' : 1 } }, upsert=True)\n log.info('{action} proxy {p}, used {s} seconds'.format(action=response.meta['phase'].capitalize(), p=ip_port, s=seconds))\n except Exception as e:\n if response.meta['phase'] == 'update':\n log.info('Update bad record: {p} details: {e}'.format(p=ip_port, e=e))\n self.clt.update_one({ 'ip_port': ip_port }, { '$inc': { 'bad_times' : 1 } }, upsert=True)\n else:\n log.info('Bad proxy: {p} details: {e}'.format(p=ip_port, e=e))\n\n","sub_path":"crawler/proxy_spider.py","file_name":"proxy_spider.py","file_ext":"py","file_size_in_byte":5861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"650924855","text":"# coding=utf-8\n# Copyright 2022 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Helper functions/classes for model definition.\"\"\"\n\nimport functools\nfrom typing import Any, Callable\n\nfrom flax import linen as nn\nimport jax\nfrom jax import lax\nfrom jax import random\nimport jax.numpy as jnp\n\n\nclass MLP(nn.Module):\n \"\"\"A simple MLP.\"\"\"\n net_depth: int = 8 # 
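parse_regx above depends on a PROXY_REGX pattern imported from the settings module and not shown here. The stand-in below is hypothetical, illustrating only the shape such a pattern needs in order to pull ip:port pairs out of a page body:

import re

# Hypothetical stand-in for the unshown settings.PROXY_REGX.
PROXY_REGX = r'\b(?:\d{1,3}\.){3}\d{1,3}:\d{2,5}\b'

body = 'alive: 10.0.0.5:8080, dead: 192.168.1.20:3128, noise: foo:bar'
print(re.findall(PROXY_REGX, body))  # ['10.0.0.5:8080', '192.168.1.20:3128']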
The depth of the first part of MLP.\n net_width: int = 256 # The width of the first part of MLP.\n net_activation: Callable[Ellipsis, Any] = nn.relu # The activation function.\n skip_layer: int = 4 # The layer to add skip layers to.\n num_rgb_channels: int = 3 # The number of RGB channels.\n num_sigma_channels: int = 1 # The number of sigma channels.\n\n @nn.compact\n def __call__(self, x):\n \"\"\"Evaluate the MLP.\n\n Args:\n x: jnp.ndarray(float32), [batch, num_samples, feature], points.\n\n Returns:\n raw_rgb: jnp.ndarray(float32), with a shape of\n [batch, num_samples, num_rgb_channels].\n raw_sigma: jnp.ndarray(float32), with a shape of\n [batch, num_samples, num_sigma_channels].\n \"\"\"\n feature_dim = x.shape[-1]\n num_samples = x.shape[1]\n x = x.reshape([-1, feature_dim])\n dense_layer = functools.partial(\n nn.Dense, kernel_init=jax.nn.initializers.glorot_uniform())\n inputs = x\n for i in range(self.net_depth):\n x = dense_layer(self.net_width)(x)\n x = self.net_activation(x)\n if i % self.skip_layer == 0 and i > 0:\n x = jnp.concatenate([x, inputs], axis=-1)\n raw_sigma = dense_layer(self.num_sigma_channels)(x).reshape(\n [-1, num_samples, self.num_sigma_channels])\n raw_rgb = dense_layer(self.num_rgb_channels)(x).reshape(\n [-1, num_samples, self.num_rgb_channels])\n return raw_rgb, raw_sigma\n\n\ndef cast_rays(z_vals, origins, directions):\n return origins[Ellipsis, None, :] + z_vals[Ellipsis, None] * directions[Ellipsis, None, :]\n\n\ndef sample_along_rays(key, origins, directions, num_samples, near, far,\n randomized, lindisp):\n \"\"\"Stratified sampling along the rays.\n\n Args:\n key: jnp.ndarray, random generator key.\n origins: jnp.ndarray(float32), [batch_size, 3], ray origins.\n directions: jnp.ndarray(float32), [batch_size, 3], ray directions.\n num_samples: int.\n near: float, near clip.\n far: float, far clip.\n randomized: bool, use randomized stratified sampling.\n lindisp: bool, sampling linearly in disparity rather than depth.\n\n Returns:\n z_vals: jnp.ndarray, [batch_size, num_samples], sampled z values.\n points: jnp.ndarray, [batch_size, num_samples, 3], sampled points.\n \"\"\"\n batch_size = origins.shape[0]\n\n t_vals = jnp.linspace(0., 1., num_samples)\n if lindisp:\n z_vals = 1. / (1. / near * (1. - t_vals) + 1. / far * t_vals)\n else:\n z_vals = near * (1. - t_vals) + far * t_vals\n\n if randomized:\n mids = .5 * (z_vals[Ellipsis, 1:] + z_vals[Ellipsis, :-1])\n upper = jnp.concatenate([mids, z_vals[Ellipsis, -1:]], -1)\n lower = jnp.concatenate([z_vals[Ellipsis, :1], mids], -1)\n t_rand = random.uniform(key, [batch_size, num_samples])\n z_vals = lower + (upper - lower) * t_rand\n else:\n # Broadcast z_vals to make the returned shape consistent.\n z_vals = jnp.broadcast_to(z_vals[None, Ellipsis], [batch_size, num_samples])\n\n coords = cast_rays(z_vals, origins, directions)\n return z_vals, coords\n\n\ndef posenc(x, min_deg, max_deg, legacy_posenc_order=False):\n \"\"\"Cat x with a positional encoding of x with scales 2^[min_deg, max_deg-1].\n\n Instead of computing [sin(x), cos(x)], we use the trig identity\n cos(x) = sin(x + pi/2) and do one vectorized call to sin([x, x+pi/2]).\n\n Args:\n x: jnp.ndarray, variables to be encoded. 
Note that x should be in [-pi, pi].\n min_deg: int, the minimum (inclusive) degree of the encoding.\n max_deg: int, the maximum (exclusive) degree of the encoding.\n legacy_posenc_order: bool, keep the same ordering as the original tf code.\n\n Returns:\n encoded: jnp.ndarray, encoded variables.\n \"\"\"\n if min_deg == max_deg:\n return x\n scales = jnp.array([2**i for i in range(min_deg, max_deg)])\n if legacy_posenc_order:\n xb = x[Ellipsis, None, :] * scales[:, None]\n four_feat = jnp.reshape(\n jnp.sin(jnp.stack([xb, xb + 0.5 * jnp.pi], -2)),\n list(x.shape[:-1]) + [-1])\n else:\n xb = jnp.reshape((x[Ellipsis, None, :] * scales[:, None]),\n list(x.shape[:-1]) + [-1])\n four_feat = jnp.sin(jnp.concatenate([xb, xb + 0.5 * jnp.pi], axis=-1))\n return jnp.concatenate([x] + [four_feat], axis=-1)\n\n\ndef volumetric_rendering(rgb, sigma, z_vals, dirs, white_bkgd):\n \"\"\"Volumetric Rendering Function.\n\n Args:\n rgb: jnp.ndarray(float32), color, [batch_size, num_samples, 3]\n sigma: jnp.ndarray(float32), density, [batch_size, num_samples, 1].\n z_vals: jnp.ndarray(float32), [batch_size, num_samples].\n dirs: jnp.ndarray(float32), [batch_size, 3].\n white_bkgd: bool.\n\n Returns:\n comp_rgb: jnp.ndarray(float32), [batch_size, 3].\n disp: jnp.ndarray(float32), [batch_size].\n acc: jnp.ndarray(float32), [batch_size].\n weights: jnp.ndarray(float32), [batch_size, num_samples]\n \"\"\"\n eps = 1e-10\n dists = jnp.concatenate([\n z_vals[Ellipsis, 1:] - z_vals[Ellipsis, :-1],\n jnp.broadcast_to(1e10, z_vals[Ellipsis, :1].shape)\n ], -1)\n dists = dists * jnp.linalg.norm(dirs[Ellipsis, None, :], axis=-1)\n # Note that we're quietly turning sigma from [..., 0] to [...].\n alpha = 1.0 - jnp.exp(-sigma[Ellipsis, 0] * dists)\n accum_prod = jnp.concatenate([\n jnp.ones_like(alpha[Ellipsis, :1], alpha.dtype),\n jnp.cumprod(1.0 - alpha[Ellipsis, :-1] + eps, axis=-1)\n ],\n axis=-1)\n weights = alpha * accum_prod\n\n comp_rgb = (weights[Ellipsis, None] * rgb).sum(axis=-2)\n depth = (weights * z_vals).sum(axis=-1)\n acc = weights.sum(axis=-1)\n # Equivalent to (but slightly more efficient and stable than):\n # disp = 1 / max(eps, where(acc > eps, depth / acc, 0))\n inv_eps = 1 / eps\n disp = acc / depth\n disp = jnp.where((disp > 0) & (disp < inv_eps) & (acc > eps), disp, inv_eps)\n if white_bkgd:\n comp_rgb = comp_rgb + (1. - acc[Ellipsis, None])\n return comp_rgb, disp, acc, weights\n\n\ndef piecewise_constant_pdf(key, bins, weights, num_samples, randomized):\n \"\"\"Piecewise-Constant PDF sampling.\n\n Args:\n key: jnp.ndarray(float32), [2,], random number generator.\n bins: jnp.ndarray(float32), [batch_size, num_bins + 1].\n weights: jnp.ndarray(float32), [batch_size, num_bins].\n num_samples: int, the number of samples.\n randomized: bool, use randomized samples.\n\n Returns:\n z_samples: jnp.ndarray(float32), [batch_size, num_samples].\n \"\"\"\n # Pad each weight vector (only if necessary) to bring its sum to `eps`. 
This\n # avoids NaNs when the input is zeros or small, but has no effect otherwise.\n eps = 1e-5\n weight_sum = jnp.sum(weights, axis=-1, keepdims=True)\n padding = jnp.maximum(0, eps - weight_sum)\n weights += padding / weights.shape[-1]\n weight_sum += padding\n\n # Compute the PDF and CDF for each weight vector, while ensuring that the CDF\n # starts with exactly 0 and ends with exactly 1.\n pdf = weights / weight_sum\n cdf = jnp.minimum(1, jnp.cumsum(pdf[Ellipsis, :-1], axis=-1))\n cdf = jnp.concatenate([\n jnp.zeros(list(cdf.shape[:-1]) + [1]), cdf,\n jnp.ones(list(cdf.shape[:-1]) + [1])\n ],\n axis=-1)\n\n # Draw uniform samples.\n if randomized:\n # Note that `u` is in [0, 1) --- it can be zero, but it can never be 1.\n u = random.uniform(key, list(cdf.shape[:-1]) + [num_samples])\n else:\n # Match the behavior of random.uniform() by spanning [0, 1-eps].\n u = jnp.linspace(0., 1. - jnp.finfo('float32').eps, num_samples)\n u = jnp.broadcast_to(u, list(cdf.shape[:-1]) + [num_samples])\n\n # Identify the location in `cdf` that corresponds to a random sample.\n # The final `True` index in `mask` will be the start of the sampled interval.\n mask = u[Ellipsis, None, :] >= cdf[Ellipsis, :, None]\n\n def find_interval(x):\n # Grab the value where `mask` switches from True to False, and vice versa.\n # This approach takes advantage of the fact that `x` is sorted.\n x0 = jnp.max(jnp.where(mask, x[Ellipsis, None], x[Ellipsis, :1, None]), -2)\n x1 = jnp.min(jnp.where(~mask, x[Ellipsis, None], x[Ellipsis, -1:, None]), -2)\n return x0, x1\n\n bins_g0, bins_g1 = find_interval(bins)\n cdf_g0, cdf_g1 = find_interval(cdf)\n\n t = jnp.clip(jnp.nan_to_num((u - cdf_g0) / (cdf_g1 - cdf_g0), 0), 0, 1)\n samples = bins_g0 + t * (bins_g1 - bins_g0)\n\n # Prevent gradient from backprop-ing through `samples`.\n return lax.stop_gradient(samples)\n\n\ndef sample_pdf(key, bins, weights, origins, directions, z_vals, num_samples,\n randomized):\n \"\"\"Hierarchical sampling.\n\n Args:\n key: jnp.ndarray(float32), [2,], random number generator.\n bins: jnp.ndarray(float32), [batch_size, num_bins + 1].\n weights: jnp.ndarray(float32), [batch_size, num_bins].\n origins: jnp.ndarray(float32), [batch_size, 3], ray origins.\n directions: jnp.ndarray(float32), [batch_size, 3], ray directions.\n z_vals: jnp.ndarray(float32), [batch_size, num_coarse_samples].\n num_samples: int, the number of samples.\n randomized: bool, use randomized samples.\n\n Returns:\n z_vals: jnp.ndarray(float32),\n [batch_size, num_coarse_samples + num_fine_samples].\n points: jnp.ndarray(float32),\n [batch_size, num_coarse_samples + num_fine_samples, 3].\n \"\"\"\n z_samples = piecewise_constant_pdf(key, bins, weights, num_samples,\n randomized)\n # Compute united z_vals and sample points\n z_vals = jnp.sort(jnp.concatenate([z_vals, z_samples], axis=-1), axis=-1)\n coords = cast_rays(z_vals, origins, directions)\n return z_vals, coords\n\n\ndef add_gaussian_noise(key, raw, noise_std, randomized):\n \"\"\"Adds gaussian noise to `raw`, which can used to regularize it.\n\n Args:\n key: jnp.ndarray(float32), [2,], random number generator.\n raw: jnp.ndarray(float32), arbitrary shape.\n noise_std: float, The standard deviation of the noise to be added.\n randomized: bool, add noise if randomized is True.\n\n Returns:\n raw + noise: jnp.ndarray(float32), with the same shape as `raw`.\n \"\"\"\n if (noise_std is not None) and randomized:\n return raw + random.normal(key, raw.shape, dtype=raw.dtype) * noise_std\n else:\n return 
raw\n","sub_path":"snerg/nerf/model_utils.py","file_name":"model_utils.py","file_ext":"py","file_size_in_byte":10948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"159680719","text":"# Import the libraries\nfrom matplotlib.pyplot import *\nfrom numpy import *\nfrom pylab import *\n\nK=2\nw0=5\n### The transfer function, here H(j.w)=1/(1+0.01.j.w-w²)\n##def H(w):\n##    xi=1.4\n##    return K/(1 + (2*xi*1j*w)/w0 -w**2/w0**2)\n\n### The transfer function, here H(j.w)=1/(1+0.01.j.w-w²)\n##def H2(w):\n##    xi=0.01\n##    return K/(1 + (2*xi*1j*w)/w0 -w**2/w0**2)\n\n### The transfer function, here H(j.w)=1/(1+0.01.j.w-w²)\n##def H3(w):\n##    xi=0.7\n##    return K/(1 + (2*xi*1j*w)/w0 -w**2/w0**2)\n\n\n\nK=0.97\ntau=0.004\n##def H(w):\n##    return K/(1 + tau*1j*w)\n##\n##def asympt(wlist):\n##    out=[]\n##    for w in wlist:\n##        if w<1/tau:\n##            out.append(K)\n##        else:\n##            out.append(K/(tau*1j*w))\n##    return out\n\n##tau1,tau2=0.4,15\n##def H(w):\n##    return K/((1 + tau1*1j*w)*(1 + tau2*1j*w))\n\n##def asympt(wlist):\n##    out=[]\n##    for w in wlist:\n##        if w<1/tau2:\n##            out.append(K)\n##        elif w>=1/tau2 and w<1/tau1:\n##            out.append(K/(tau2*1j*w))\n##        else:\n##            out.append(K/(tau1*1j*w*tau2*1j*w))\n##    return out\n\ndef H(w):\n    return 2.2/((1+1.34*1j*w)*(1+0.004*1j*w))\n\n##K=2\n##def H(w):\n##    return K*w/w\n\ndef trace(zoom,ordre,H):\n    # Evenly spaced base-10 exponents of the angular frequency, here from 10^-2 to 10^3\n    if ordre==1:\n        fig=figure('Diagrammes Bode')\n        w0=1/tau\n        puissance_w=arange(log10(w0)-5,log10(w0)+4,0.01)\n    else:\n        w0=100\n        if zoom==False:\n            fig=figure('Diagrammes Bode')\n            puissance_w=arange(log10(w0)-3,log10(w0)+3,0.01)\n        else:\n            fig=figure('Zoom Diagrammes Bode')\n            puissance_w=arange(log10(w0)-0.1,log10(w0)+0.1,0.01)\n    # The angular frequencies w\n    W=10**puissance_w\n    # The phase in degrees\n    phase = angle(H(W),'deg')\n    # The magnitude in dB\n    module = 20*log10(absolute(H(W)))\n    # Plot the Bode diagram\n    subplot(211) # Allows several plots in one figure (2 rows, 1 column, this is plot 1)\n    semilogx(W,module,10,1) # Semilog plot of the magnitude\n    axes = gca()\n    axes.set_xlim(0.01,10000)\n    axes.set_ylim(-80,25)\n    axes.set_ylabel('Gain (dB)')\n    axes.set_xscale('log')\n    major_ticks_y = np.arange(-80, 25, 20)\n    minor_ticks_y = np.arange(-80, 25, 10)\n    axes.set_yticks(major_ticks_y)\n    axes.set_yticks(minor_ticks_y, minor=True)\n    axes.grid(True,which='minor', alpha=0.2)\n    axes.grid(True,which='major', alpha=0.5)\n    subplot(212)\n    semilogx(W,phase,10,1) # Semilog plot of the phase\n    axes = gca()\n    axes.set_ylabel('Phase (deg)')\n    axes.set_xlabel('Pulsation $(rad.s^{-1})$')\n    axes.set_xlim(0.01,10000)\n    axes.grid(True,which='minor', alpha=0.2)\n    axes.grid(True,which='major', alpha=0.5)\n\nordre=1\n\nif ordre==1:\n    trace(False,1,H)\n#    trace(False,1,asympt)\nelse:\n    trace(False,2,H)\n    if xi<0.7:\n        trace(True,2,H)\n\nshow()\n","sub_path":"DS/2020-2021/DS02/img/DR29.py","file_name":"DR29.py","file_ext":"py","file_size_in_byte":2927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"469755939","text":"# -*- coding: utf-8 -*-\nimport pymongo\nimport logging\nfrom pymongo import errors\nfrom tools.configs import configs\nfrom tools.middlewares import typeassert\n\n\nclass MongodbUtils(object):\n    \"\"\"\n    Mongodb utility class\n    \"\"\"\n\n    conf = configs\n\n    @classmethod\n    def conn(cls):\n        \"\"\"\n        connect to mongodb\n        :return:\n        \"\"\"\n\n        db = None\n        try:\n            db = 
pymongo.MongoClient(cls.conf[\"ORIGINAL_DATA_URI\"], appname='european')[cls.conf[\"ORIGINAL_DATA_DB\"]]\n            # logging.warning(cls.conf[\"CR_DATA_URI\"])\n        except pymongo.errors.OperationFailure as ex:\n            logging.error('database connection refused. reason: {0}.'.format(ex))\n        finally:\n            return db\n\n    @classmethod\n    @typeassert(table=str, records=list, primary=tuple, depulicate=bool)\n    def insert_records(cls, table, records, primary=(), depulicate=False):\n        \"\"\"\n        Store the records into Mongodb\n        :param table:\n        :param records:\n        :param primary: primary keys\n        :param depulicate: whether to deduplicate on the primary keys\n        :return:\n        \"\"\"\n\n        db = cls.conn()\n        if not db:\n            return\n\n        try:\n            if depulicate:\n                for index, record in enumerate(records):\n                    find_record = cls.find_record(table, record, primary)\n                    if find_record:\n                        cls.update_record(table, record, {'_id': find_record['_id']})\n                    else:\n                        db[table].insert(record)\n            else:\n                for index, record in enumerate(records):\n                    db[table].insert(record)\n        except errors.PyMongoError as ex:\n            logging.error(\"The table {0} insert&update failed: {1}.\".format(table, ex))\n\n    @classmethod\n    @typeassert(table=str, record=dict, primary=tuple)\n    def find_record(cls, table, record, primary=()):\n        \"\"\"\n        find single record in mongodb\n        :param table:\n        :param record:\n        :param primary:\n        :return:\n        \"\"\"\n\n        db = cls.conn()\n        if not db:\n            return\n\n        condition = {}\n        for key in primary:\n            condition[key] = record[key]\n        find_record = db[table].find(condition)\n\n        result = None\n        if find_record.count():\n            result = find_record[0]\n        return result\n\n    @classmethod\n    @typeassert(table=str, record=dict, condition=dict)\n    def update_record(cls, table, record, condition={'_id': None}):\n        \"\"\"\n        update single record in mongodb\n        :param table:\n        :param record:\n        :param condition:\n        :return:\n        \"\"\"\n\n        db = cls.conn()\n        if not db:\n            return\n\n        condition_value = {}\n        for key, value in record.iteritems():\n            condition_value[key] = value\n        try:\n            db[table].update(condition, {\"$set\": condition_value}, False, True)\n        except errors.PyMongoError as ex:\n            logging.error(\"The table {0} update failed: {1}.\".format(table, ex))\n\n    @classmethod\n    @typeassert(table=str, record=dict)\n    def delete_record(cls, table, record):\n        \"\"\"\n        delete single record in mongodb\n        :param table:\n        :param record:\n        :return:\n        \"\"\"\n\n        db = cls.conn()\n        if not db:\n            return\n        try:\n            db[table].remove({\"_id\": record[\"_id\"]}, multi=False)\n        except errors.PyMongoError as ex:\n            logging.error(\"The table {0} delete failed: {1}.\".format(table, ex))\n\n    @classmethod\n    @typeassert(table=str, condition=dict)\n    def get_records(cls, table, condition):\n        \"\"\"\n        find records in table\n        :param table:\n        :param condition:\n        :return:\n        \"\"\"\n\n        db = cls.conn()\n        if not db:\n            return\n\n        results_list = list()\n        try:\n            results = db[table].find(condition)\n            if results.count():\n                for index, row in enumerate(results):\n                    results_list.append(row)\n            else:\n                logging.warning('{0}, no data found.'.format(table))\n        except errors.PyMongoError as ex:\n            msg = \"The table {0} find failed: {1}.\".format(table, ex)\n            logging.error(msg)\n        return results_list\n\n    @classmethod\n    @typeassert(table=str, condition=dict, sorted_by=str)\n    def get_records_order(cls, table, condition, sorted_by):\n        \"\"\"\n        find records in table, ordered by sorted_by\n        :param table:\n        :param condition:\n        :param sorted_by:\n        :return:\n        \"\"\"\n\n        db = cls.conn()\n        if not db:\n            return\n\n        results_list = list()\n        try:\n            results = db[table].find(condition).sort(sorted_by, pymongo.ASCENDING)\n            if results.count():\n                for index, row in enumerate(results):\n                    results_list.append(row)\n            else:\n                logging.warning('{0}, no data found.'.format(table))\n        except errors.PyMongoError as ex:\n            logging.error(\"The table {0} find failed: {1}.\".format(table, ex))\n        return results_list\n\n    @classmethod\n    @typeassert(table=str, key=str)\n    def get_distinct_key(cls, table, key):\n        \"\"\"\n        get distinct key in mongodb\n        :param table:\n        :param key:\n        :return:\n        \"\"\"\n\n        db = cls.conn()\n        if not db:\n            return\n\n        results_list = list()\n        try:\n            results = db[table].distinct(key)\n            if results:\n                for index, row in enumerate(results):\n                    results_list.append(row)\n            else:\n                logging.warning('{0}, key: {1}, no data found.'.format(table, key))\n        except errors.PyMongoError as ex:\n            logging.error(\"The table {0} find failed: {1}.\".format(table, ex))\n        return results_list\n","sub_path":"python2/tools/mongodbutils.py","file_name":"mongodbutils.py","file_ext":"py","file_size_in_byte":5803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"456792253","text":"# -*- coding: utf-8 -*-\nfrom __future__ import division\n\n\"\"\" \nCreates a ResNeXt Model as defined in:\n\nXie, S., Girshick, R., Dollár, P., Tu, Z., & He, K. (2016). \nAggregated residual transformations for deep neural networks. \narXiv preprint arXiv:1611.05431.\n\n\"\"\"\n\n__author__ = \"Pau Rodríguez López, ISELAB, CVC-UAB\"\n__email__ = \"pau.rodri1@gmail.com\"\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import init\n\n\nclass ResNeXtBottleneck(nn.Module):\n    \"\"\"\n    RexNeXt bottleneck type C (https://github.com/facebookresearch/ResNeXt/blob/master/models/resnext.lua)\n    \"\"\"\n\n    def __init__(self, in_channels, out_channels, stride, groups, base_width, widen_factor):\n        \"\"\" Constructor\n\n        Args:\n            in_channels: input channel dimensionality\n            out_channels: output channel dimensionality\n            stride: conv stride. 
Replaces pooling layer.\n groups: num of convolution groups.\n base_width: base number of channels in each group.\n widen_factor: factor to reduce the input dimensionality before convolution.\n \"\"\"\n super(ResNeXtBottleneck, self).__init__()\n width_ratio = out_channels / (widen_factor * 64.)\n D = groups * int(base_width * width_ratio)\n self.conv_reduce = nn.Conv2d(in_channels, D, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn_reduce = nn.BatchNorm2d(D)\n self.conv_conv = nn.Conv2d(D, D, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False)\n self.bn = nn.BatchNorm2d(D)\n self.conv_expand = nn.Conv2d(D, out_channels, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn_expand = nn.BatchNorm2d(out_channels)\n\n self.shortcut = nn.Sequential()\n if in_channels != out_channels:\n self.shortcut.add_module('shortcut_conv',\n nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, padding=0,\n bias=False))\n self.shortcut.add_module('shortcut_bn', nn.BatchNorm2d(out_channels))\n\n def forward(self, x):\n bottleneck = self.conv_reduce(x)\n bottleneck = F.relu(self.bn_reduce(bottleneck))\n bottleneck = self.conv_conv(bottleneck)\n bottleneck = F.relu(self.bn(bottleneck))\n bottleneck = self.conv_expand(bottleneck)\n bottleneck = self.bn_expand(bottleneck)\n residual = self.shortcut(x)\n return F.relu(residual + bottleneck)\n\n\nclass CifarResNeXt(nn.Module):\n \"\"\"\n ResNext optimized for the Cifar dataset, as specified in\n https://arxiv.org/pdf/1611.05431.pdf\n \"\"\"\n\n def __init__(self, groups, base_width, depth=29, nlabels=10, widen_factor=4):\n \"\"\" Constructor\n\n Args:\n groups: number of convolution groups.\n depth: number of layers.\n nlabels: number of classes\n base_width: base number of channels in each group.\n widen_factor: factor to adjust the channel dimensionality\n \"\"\"\n super(CifarResNeXt, self).__init__()\n self.groups = groups\n self.depth = depth\n self.block_depth = (self.depth - 2) // 9\n self.base_width = base_width\n self.widen_factor = widen_factor\n self.nlabels = nlabels\n self.output_size = 64\n self.stages = [64, 64 * self.widen_factor, 128 * self.widen_factor, 256 * self.widen_factor]\n\n self.conv_1_3x3 = nn.Conv2d(3, 64, 3, 1, 1, bias=False)\n self.bn_1 = nn.BatchNorm2d(64)\n self.stage_1 = self.block('stage_1', self.stages[0], self.stages[1], 1)\n self.stage_2 = self.block('stage_2', self.stages[1], self.stages[2], 2)\n self.stage_3 = self.block('stage_3', self.stages[2], self.stages[3], 2)\n self.classifier = nn.Linear(self.stages[3], nlabels)\n init.kaiming_normal(self.classifier.weight)\n\n for key in self.state_dict():\n if key.split('.')[-1] == 'weight':\n if 'conv' in key:\n init.kaiming_normal(self.state_dict()[key], mode='fan_out')\n if 'bn' in key:\n self.state_dict()[key][...] = 1\n elif key.split('.')[-1] == 'bias':\n self.state_dict()[key][...] 
= 0\n\n    def block(self, name, in_channels, out_channels, pool_stride=2):\n        \"\"\" Stack n bottleneck modules where n is inferred from the depth of the network.\n\n        Args:\n            name: string name of the current block.\n            in_channels: number of input channels\n            out_channels: number of output channels\n            pool_stride: factor to reduce the spatial dimensionality in the first bottleneck of the block.\n\n        Returns: a Module consisting of n sequential bottlenecks.\n\n        \"\"\"\n        block = nn.Sequential()\n        for bottleneck in range(self.block_depth):\n            name_ = '%s_bottleneck_%d' % (name, bottleneck)\n            if bottleneck == 0:\n                block.add_module(name_, ResNeXtBottleneck(in_channels, out_channels, pool_stride, self.groups,\n                                                          self.base_width, self.widen_factor))\n            else:\n                block.add_module(name_,\n                                 ResNeXtBottleneck(out_channels, out_channels, 1, self.groups, self.base_width,\n                                                   self.widen_factor))\n        return block\n\n    def forward(self, input):\n        x = input['img']\n        x = self.conv_1_3x3(x)\n        x = F.relu(self.bn_1(x))\n        x = self.stage_1(x)\n        x = self.stage_2(x)\n        x = self.stage_3(x)\n        x = F.avg_pool2d(x, 8, 1)\n        x = x.view(-1, self.stages[3])\n        return self.classifier(x)\n\ndef CifarResNeXt29(model_TAG):\n    model_TAG_list = model_TAG.split('_')\n    widen_factor = int(model_TAG_list[3])\n    groups = int(model_TAG_list[4])\n    base_width = int(model_TAG_list[5])\n\n    model = CifarResNeXt(groups=groups, base_width=base_width, widen_factor=widen_factor)\n    return model\n\n","sub_path":"src/models/resnext.py","file_name":"resnext.py","file_ext":"py","file_size_in_byte":6044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"563579402","text":"import tkinter\nimport tkinter.font\n\n\ndef display(ev):\n    label1.config(text=entry1.get())\n\n\ntop = tkinter.Tk()\nmyFont1 = tkinter.font.Font(family='Lucida Sans',\n                            size=30)\nlabel1 = tkinter.Label(top, text='Display something',\n                       font=myFont1)\nentry1 = tkinter.Entry(top, font=myFont1)\nentry1.insert(0, 'input something here...')\nbutton1 = tkinter.Button(top, text='submit',\n                         font=myFont1)\nbutton1.bind('<Button-1>', display)\nentry1.bind('<Return>', display)\nlabel1.pack()\nentry1.pack()\nbutton1.pack()\ntop.minsize(850, 500)\ntop.maxsize(850, 500)\ntop.mainloop()","sub_path":"demo57_1.py","file_name":"demo57_1.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"380682620","text":"# TO-DO: complete the helper function below to merge 2 sorted arrays\ndef merge(arrA, arrB):\n    elements = len(arrA) + len(arrB)\n    merged_arr = [0] * elements\n\n    # Your code here\n    # index for arrA\n    arrA_index = 0\n    # index for arrB\n    arrB_index = 0\n    # loop through merged_arr\n    for i in range(elements):\n        print(merged_arr)\n        # check if both arrays still have elements left\n        if arrA_index < len(arrA) and arrB_index < len(arrB):\n            # check which value is smaller\n            if arrA[arrA_index] <= arrB[arrB_index]:\n                # set smaller value in sorted list and increment count for appropriate list\n                merged_arr[i] = arrA[arrA_index]\n                arrA_index += 1\n            else:\n                merged_arr[i] = arrB[arrB_index]\n                arrB_index += 1\n        # check for potentially one array to have all elements added already\n        elif arrA_index < len(arrA) and arrB_index == len(arrB):\n            merged_arr[i] = arrA[arrA_index]\n            arrA_index += 1\n        else:\n            merged_arr[i] = arrB[arrB_index]\n            arrB_index += 1\n    # return sorted list\n    return merged_arr\n\nfoo = [2, 5, 8, 9, 10]\nfoobar = [1, 3, 4, 6, 7]\nx = merge(foo, foobar)\nprint(x)\nfoo = [7, 87]\nfoobar = [12, 90]\nx = merge(foo, 
foobar)\nprint(x)\n# TO-DO: implement the Merge Sort function below recursively\ndef merge_sort(arr):\n    # Your code here\n    # if input list is greater than 1 then take length divided by 2 rounded\n    if len(arr) > 1:\n        # half index for the array\n        half = len(arr) // 2\n        # split list into left half (start to half)\n        left = arr[: half]\n        # split list into right half (half to end)\n        right = arr[half:]\n        # halve until left is down to 1 item in the left list\n        left = merge_sort(left)\n        # halve until right is down to 1 item in the right list\n        right = merge_sort(right)\n        # return sorted list\n        return merge(left, right)\n\n    # if input list has 1 element then it's already sorted, YEET!\n    return arr\n\n\nfoo = [9, 5, 8, 2, 10, 3, 7, 1, 4, 6]\nsorted_list = merge_sort(foo)\nprint(sorted_list)\n# STRETCH: implement the recursive logic for merge sort in a way that doesn't \n# utilize any extra memory\n# In other words, your implementation should not allocate any additional lists \n# or data structures; it can only re-use the memory it was given as input\n# def merge_in_place(arr, start, mid, end):\n#     # Your code here\n\n\n# def merge_sort_in_place(arr, l, r):\n#     # Your code here\n\n","sub_path":"src/sorting/sorting.py","file_name":"sorting.py","file_ext":"py","file_size_in_byte":2525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"596064161","text":"from cloud.aws import *\nfrom cloud.response import Response\nimport base64\n\n# Define the input output format of the function.\n# This information is used when creating the *SDK*.\ninfo = {\n    'input_format': {\n        'session_id': 'str',\n        'file_path': 'str',\n        'index': 'int',\n    },\n    'output_format': {\n        'base64': 'bin',\n        'index': 'int',\n        'size': 'int',\n        'success': 'bool'\n    }\n}\n\n\ndef do(data, boto3):\n    body = {}\n    params = data['params']\n    app_id = data['app_id']\n    user = data['user']\n\n    user_id = user.get('id', None)\n\n    def has_permission(_item):\n        read_groups = _item['read_groups']\n        if 'owner' in read_groups:\n            owner_id = _item['owner']\n            if owner_id == user_id:\n                return True\n        user_group = user['group']\n        return user_group in read_groups\n\n    file_path = params.get('file_path')\n    index = params.get('index')\n\n    table_name = 'storage-{}'.format(app_id)\n    bucket_name = 'storage-{}'.format(app_id)\n\n    s3 = S3(boto3)\n    dynamo = DynamoDB(boto3)\n    item = dynamo.get_item(table_name, file_path).get('Item')\n    if item:\n        if has_permission(item):\n            if item['type'] == 'split_file':\n                file_key = item['file_key']\n                file_bin = s3.download_file_bin(bucket_name, file_key)\n                file_b64 = base64.b64encode(file_bin).decode('utf-8')\n                body['success'] = True\n                body['base64'] = file_b64\n                body['index'] = item['index']\n                body['size'] = item['size']\n                return Response(body)\n            else:\n                body['success'] = False\n                body['message'] = 'file_path is not a split_file'\n                return Response(body)\n        else:\n            body['success'] = False\n            body['message'] = 'permission denied'\n            return Response(body)\n    else:\n        body['success'] = False\n        body['message'] = 'file_path: {} does not exist'.format(file_path)\n        return Response(body)\n\n","sub_path":"aws_interface/cloud/storage/download_split_b64.py","file_name":"download_split_b64.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"584914163","text":"# Register (sign up with a phone number; the password is hashed with md5)\n# Log in (after logging in, show the latest notice)\n# Buy a membership\n# Browse videos (i.e. print all videos in a loop)\n# Download free videos (non-members must sit through a 30s ad before downloading; members download without waiting)\n# Download paid videos (non-members pay 10 yuan per download, members pay 5 yuan)\n# View watch history (i.e. the videos you have downloaded)\n# View notices (including past notices)\nfrom db import models\nimport os\n\n\ndef buy_member(user_dic):\n    user = models.User.select_one(id=user_dic['user_id'])\n    user.is_vip = 1\n    user.update()\n    back_dic = {'flag': True, 'msg': 'buy success'}\n    return back_dic\n\n\ndef get_movie_list(user_dic):\n    movie_list = models.Movie.select_all()\n    back_movie_list = []\n    if movie_list:  # not empty: keep querying; empty: return False right away\n        for movie in movie_list:\n            if not movie.is_delete:\n                # build a list: ['movie name', 'paid/free', 'movie id']\n                if user_dic['movie_type'] == 'all':\n                    # all movies\n                    back_movie_list.append([movie.name, '免费' if movie.is_free else '收费', movie.id])\n                elif user_dic['movie_type'] == 'free':\n                    # free movies\n                    if movie.is_free:  # only free ones go into the list\n                        back_movie_list.append([movie.name, '免费', movie.id])\n                else:\n                    # paid movies\n                    if not movie.is_free:  # only paid ones go into the list\n                        back_movie_list.append([movie.name, '收费', movie.id])\n\n        if back_movie_list:\n            return {'flag': True, 'movie_list': back_movie_list}\n        else:\n            return {'flag': False, 'msg': '暂无可查看影片'}\n    else:\n        return {'flag': False, 'msg': '暂无影片'}\n\n\ndef download_movie(user_dic):\n    movie = models.Movie.select_one(id=user_dic['movie_id'])\n    if not movie:  # movie does not exist: return False\n        back_dic = {'flag': False, 'msg': '该电影不存在'}\n        return back_dic\n    user = models.User.select_one(id=user_dic['user_id'])\n    send_back_dic = {'flag': True}\n    if user_dic['movie_type'] == 'free':  # free movie: non-members must wait; paid movie: download right away without waiting\n        if user.is_vip:\n            send_back_dic['wait_time'] = 0\n        else:\n            send_back_dic['wait_time'] = 30\n\n    send_back_dic['filename'] = movie.name\n    send_back_dic['filesize'] = os.path.getsize(movie.path)\n    send_back_dic['path'] = movie.path\n    # save the download record to the record table\n    down_record = models.DownloadRecord(user_id=user_dic['user_id'], movie_id=movie.id)\n    down_record.save()\n    return send_back_dic\n\n\ndef check_notice(user_dic):\n    # reuse the count-based query interface; passing None means query everything\n    return check_notice_by_count(count=None)\n\n\ndef check_notice_by_count(count=None):\n    # count is None: query all notices; count is 1: query a single one\n    notice_list = models.Notice.select_all()\n    back_notice_list = []\n    if notice_list:  # not empty: keep querying; empty: return False right away\n        if not count:\n            for notice in notice_list:\n                back_notice_list.append({notice.name: notice.content})\n        else:  # query a single one\n            back_notice_list.append({notice_list[0].name: notice_list[0].content})\n        return {'flag': True, 'notice_list': back_notice_list}\n    else:\n        return {'flag': False, 'msg': '暂无公告'}\n\n\ndef check_download_record(user_dic):\n    '''\n    View download records:\n    first look up every download record for user_id in the DownloadRecord table,\n    then use the movie id in each record to fetch the movie from the movie table, take its name, and return it\n    :param user_dic:\n    :return:\n    '''\n    download_record = models.DownloadRecord.select_all(user_id=user_dic['user_id'])\n    if not download_record:\n        back_dic = {'flag': False, 'msg': '暂无观影记录'}\n        return back_dic\n    else:\n        download_list = []\n        for record in download_record:\n            movie = models.Movie.select_one(id=record.movie_id)\n            download_list.append(movie.name)\n        back_dic = {'flag': True, 'msg': 'buy success', 'download_list': download_list}\n        return back_dic\n","sub_path":"interface/user_interface.py","file_name":"user_interface.py","file_ext":"py","file_size_in_byte":4315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"573589229","text":"import setuptools\nimport os\n\nfrom orchestrator.common import setup as common_setup\n\nrequires = common_setup.parse_requirements()\ndepend_links = common_setup.parse_dependency_links()\nproject = 'orchestrator-facade'\n\n\nsetuptools.setup(\n    name=project,\n    version=\"1.0.0\",\n    description='The facade of M&O projects',\n    author='EMC Labs China',\n    author_email='Layne.Peng@emc.com',\n    
url='http://dcade.lss.emc.com/',\n    classifiers=[\n        'Environment :: OpenStack',\n        'Intended Audience :: Information Technology',\n        'Intended Audience :: System Administrators',\n        'License :: OSI Approved :: Apache Software License',\n        'Operating System :: POSIX :: Linux',\n        'Programming Language :: Python',\n        'Programming Language :: Python :: 2',\n        'Programming Language :: Python :: 2.7',\n    ],\n    packages=setuptools.find_packages(exclude=['bin', 'smoketests']),\n    install_requires=requires,\n    dependency_links=depend_links,\n    include_package_data=True,\n    setup_requires=['setuptools_git>=0.4'],\n    scripts=['bin/orchestrator-facade'],\n    data_files = [(['/etc/orchestrator-facade'][os.sep == '\\\\'],['etc/orchestrator-facade/orchestrator.conf.sample', 'etc/orchestrator-facade/logging.conf.sample']),\n                  (['/opt/orchestrator/orchestrator/static'][os.sep == '\\\\'], map(lambda x: 'orchestrator/static/'+x, os.listdir('orchestrator/static')))]\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"438171424","text":"# -*- coding: utf-8 -*-\n\"\"\"\nAbsorbance\n==========\n\nAn absorbance plot.\n\"\"\"\n\nimport WrightTools as wt\nfrom WrightTools import datasets\n\np = datasets.JASCO.PbSe_batch_1\ndata = wt.data.from_JASCO(p)\n\ndata.convert('wn', verbose=False)\ndata = data.split('wm', 10000)[0]\ndata = data.split('wm', 6000)[1]\n\nartist = wt.artists.Absorbance(data)\nd = artist.plot(n_smooth=50)\n","sub_path":"examples/absorbance.py","file_name":"absorbance.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"315013776","text":"hrs = input(\"Enter Hours:\")\nhr = float(hrs)\nrate=input(\"Enter Rate per hour:\")\nrph = float(rate)\npay=0\n\n# pay the base rate for the first 40 hours and 1.5x the rate for overtime\nif hr==40:\n    pay=40*rph\nelif hr>40:\n    extra=hr-40\n    pay= rph*1.5*extra+(40*rph)\nelse:\n    pay=hr*rph\nprint(pay)\n","sub_path":"Assignment1/ifelse.py","file_name":"ifelse.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"168008546","text":"import numpy as np\nimport torch\nimport gym\nimport argparse\nimport os\nfrom baselines import bench\nimport sys\nimport time\n\nimport utils\nimport TD3\nimport EmbeddedTD3\nimport RandomPolicy\nimport OurDDPG\nimport DDPG\nfrom DummyDecoder import DummyDecoder\nfrom RandomPolicy import RandomPolicy, ConstantPolicy\nfrom RandomEmbeddedPolicy import RandomEmbeddedPolicy\n\nimport sys\n# so it can find the action decoder class and LinearPointMass\n# sys.path.insert(0, '../action-embedding')\nfrom pointmass import point_mass\n\nimport reacher_family\n\ndef render_policy(policy, filename, render_mode='rgb_array', eval_episodes=5):\n    frames = []\n    avg_reward = 0.\n    for episode in range(eval_episodes):\n        obs = env.reset()\n        policy.reset()\n        frames.append(env.render(mode=render_mode))\n        done = False\n        while not done:\n            if any([isinstance(policy, EmbeddedTD3.EmbeddedTD3),\n                    isinstance(policy, RandomEmbeddedPolicy)]):\n                action, _, _ = policy.select_action(np.array(obs))\n            else:\n                action = policy.select_action(np.array(obs))\n            obs, reward, done, _ = env.step(action)\n            avg_reward += reward\n            frame = env.render(mode=render_mode)\n            # frame[:, :, 1] = (frame[:, :, 1].astype(float) + reward * 100).clip(0, 255)\n\n            frames.append(frame)\n            if render_mode == 'human':\n                time.sleep(0.05)\n\n    avg_reward /= eval_episodes\n    
print(\"---------------------------------------\")\n print(\"Evaluation over %d episodes: %f\" % (eval_episodes, avg_reward))\n print(\"---------------------------------------\")\n\n utils.save_gif('{}.mp4'.format(filename),\n [torch.tensor(frame.copy()).float()/255 for frame in frames],\n color_last=True)\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--name\", default=None) # Job name\n parser.add_argument(\"--policy_name\", default=\"TD3\") # Policy name\n parser.add_argument(\"--env_name\", default=\"HalfCheetah-v1\") # OpenAI gym environment name\n parser.add_argument(\"--seed\", default=0, type=int) # Sets Gym, PyTorch and Numpy seeds\n\n parser.add_argument(\"--decoder\", default=None, type=str) # Name of saved decoder\n parser.add_argument(\"--dummy_decoder\", action=\"store_true\") # use a dummy decoder that repeats actions\n parser.add_argument('--dummy_traj_len', type=int, default=1) # traj_len of dummy decoder\n parser.add_argument('--human', action=\"store_true\") # render interactively\n args = parser.parse_args()\n\n if args.env_name.startswith('dm'):\n import dm_control2gym\n _, domain, task = args.env_name.split('.')\n env = dm_control2gym.make(domain_name=domain, task_name=task)\n env_max_steps = 1000\n else:\n env = gym.make(args.env_name)\n env_max_steps = env._max_episode_steps\n\n env.seed(args.seed)\n torch.manual_seed(args.seed)\n np.random.seed(args.seed)\n\n state_dim = env.observation_space.shape[0]\n action_dim = env.action_space.shape[0]\n max_action = float(env.action_space.high[0])\n\n if args.policy_name == 'TD3':\n policy = TD3.load('policy', 'results/{}'.format(args.name))\n elif args.policy_name == 'EmbeddedTD3':\n policy = EmbeddedTD3.load('policy', 'results/{}'.format(args.name))\n elif args.policy_name == 'random':\n if args.decoder:\n decoder = load_decoder(args.env_name, args.decoder)\n policy = RandomEmbeddedPolicy(1, decoder, 4)\n elif args.dummy_decoder:\n decoder = DummyDecoder(action_dim, args.dummy_traj_len, env.action_space)\n policy = RandomEmbeddedPolicy(1, decoder, 1)\n else:\n policy = RandomPolicy(env.action_space)\n elif args.policy_name == 'constant':\n policy = ConstantPolicy(env.action_space)\n else:\n assert False\n\n\n render_mode = 'human' if args.human else 'rgb_array'\n render_policy(policy, \"{}_{}\".format(args.env_name, args.name), render_mode)\n","sub_path":"render_policy.py","file_name":"render_policy.py","file_ext":"py","file_size_in_byte":4158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"448777583","text":"'''\n449. Serialize and Deserialize BST\nDescription Submission Solutions\nTotal Accepted: 8321\nTotal Submissions: 19979\nDifficulty: Medium\nContributors: ben65\nSerialization is the process of converting a data structure or object into a sequence of bits so that it can be stored in a file or memory buffer, or transmitted across a network connection link to be reconstructed later in the same or another computer environment.\n\nDesign an algorithm to serialize and deserialize a binary search tree. There is no restriction on how your serialization/deserialization algorithm should work. You just need to ensure that a binary search tree can be serialized to a string and this string can be deserialized to the original tree structure.\n\nThe encoded string should be as compact as possible.\n\nNote: Do not use class member/global/static variables to store states. 
Your serialize and deserialize algorithms should be stateless.\n'''\n# Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\nclass Codec:\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string.\n :type root: TreeNode\n :rtype: str\n \"\"\"\n treelist = {}\n def preorder(root, index):\n if root:\n treelist[index] = root.val\n preorder(root.left, 2*index)\n preorder(root.right, 2*index+1)\n preorder(root, 1)\n res = ''\n for i in treelist:\n res += 'i'+str(i)+'v'+str(treelist[i])\n return res\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree.\n :type data: str\n :rtype: TreeNode\n \"\"\"\n if data == '':\n return None\n treelist = data[1:].split('i')\n treedict = {}\n def parse(inf):\n if inf[1] == 'n':\n node = None\n else:\n node = TreeNode(int(inf[1]))\n treedict[int(inf[0])] = node\n return node\n for i in treelist:\n nodeinf = i.split('v')\n parse(nodeinf)\n for i in treedict:\n if treedict[i]:\n if treedict.has_key(2*i):\n treedict[i].left = treedict[2*i]\n if treedict.has_key(2*i+1):\n treedict[i].right = treedict[2*i+1]\n return treedict[1]\n# Your Codec object will be instantiated and called as such:\n# codec = Codec()\n# codec.deserialize(codec.serialize(root))","sub_path":"en/serialize-and-deserialize-bst.py","file_name":"serialize-and-deserialize-bst.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"647273232","text":"import os\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport gym\nimport time\nfrom spinup.algos.sac_pytorch.core import MLP, FlattenMLP, MLPGaussianPolicy\nfrom spinup.utils.logx import EpochLogger\n\nfrom sklearn.cluster import KMeans\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\nclass ReplayBuffer:\n \"\"\"\n A simple FIFO experience replay buffer for SAC agents.\n \"\"\"\n\n def __init__(self, obs_dim, act_dim, size):\n self.obs1_buf = np.zeros([size, obs_dim], dtype=np.float32)\n self.obs2_buf = np.zeros([size, obs_dim], dtype=np.float32)\n self.acts_buf = np.zeros([size, act_dim], dtype=np.float32)\n self.rews_buf = np.zeros(size, dtype=np.float32)\n self.done_buf = np.zeros(size, dtype=np.float32)\n self.ptr, self.size, self.max_size = 0, 0, size\n\n def store(self, obs, act, rew, next_obs, done):\n self.obs1_buf[self.ptr] = obs\n self.obs2_buf[self.ptr] = next_obs\n self.acts_buf[self.ptr] = act\n self.rews_buf[self.ptr] = rew\n self.done_buf[self.ptr] = done\n self.ptr = (self.ptr+1) % self.max_size\n self.size = min(self.size+1, self.max_size)\n\n def sample_batch(self, batch_size=32):\n idxs = np.random.randint(0, self.size, size=batch_size)\n return dict(obs1=torch.Tensor(self.obs1_buf[idxs]).to(device),\n obs2=torch.Tensor(self.obs2_buf[idxs]).to(device),\n acts=torch.Tensor(self.acts_buf[idxs]).to(device),\n rews=torch.Tensor(self.rews_buf[idxs]).to(device),\n done=torch.Tensor(self.done_buf[idxs]).to(device))\n\ndef elbow(X):\n Nc = range(2, 15)\n kmeans = [KMeans(n_clusters=i) for i in Nc]\n score = np.array([kmeans[i].fit(X).score(X) for i in range(len(kmeans))])\n diff = [score[i] - (score[-1] - score[0]) * i / (len(Nc) - 1) - score[0] for i in range(len(Nc))]\n return Nc[np.argmax(diff)]\n\n\"\"\"\n\nSoft Actor-Critic\n\n(With slight variations that bring it closer to TD3)\n\n\"\"\"\ndef sac(env_fn, ac_kwargs=dict(), seed=0, 
steps_per_epoch=1000, epochs=200, replay_size=int(1e6), \n gamma=0.99, polyak=0.995, lr=1e-3, alpha=0.2, batch_size=100, start_steps=10000, \n max_ep_len=1000, logger_kwargs=dict(), save_path=dict(), save_freq=1):\n torch.manual_seed(seed)\n np.random.seed(seed)\n\n env, test_env = env_fn(), env_fn()\n obs_dim = env.observation_space.shape[0]\n act_dim = env.action_space.shape[0]\n\n # Action limit for clamping: critically, assumes all dimensions share the same bound!\n act_limit = env.action_space.high[0]\n\n # Share information about action space with policy architecture\n ac_kwargs['action_space'] = env.action_space\n\n # Main outputs from computation graph\n policy = MLPGaussianPolicy(obs_dim, act_dim, **ac_kwargs).to(device)\n qf1 = FlattenMLP(obs_dim + act_dim).to(device)\n qf2 = FlattenMLP(obs_dim + act_dim).to(device)\n vf = MLP(obs_dim).to(device)\n \n # Target value network\n vf_targ = MLP(obs_dim).to(device)\n\n # # Experience buffer\n replay_buffer = ReplayBuffer(obs_dim=obs_dim, act_dim=act_dim, size=replay_size)\n\n # Create optimizers\n pi_optimizer = optim.Adam(policy.parameters(), lr=lr)\n qf1_optimizer = optim.Adam(qf1.parameters(), lr=lr)\n qf2_optimizer = optim.Adam(qf2.parameters(), lr=lr)\n vf_optimizer = optim.Adam(vf.parameters(), lr=lr)\n\n # Initializing targets to match main variables\n vf_targ.load_state_dict(vf.state_dict())\n\n def get_action(o, deterministic=False):\n mu, pi, _ = policy(o)\n a = mu if deterministic else pi\n return a.detach().cpu().numpy()[0]\n \n TestEpRet, TestEpLen = [], []\n \n def test_agent(n=10):\n global mu, pi, q1, q2\n for j in range(n):\n o, r, d, ep_ret, ep_len = test_env.reset(), 0, False, 0, 0\n while not(d or (ep_len == max_ep_len)):\n # Take deterministic actions at test time \n a = get_action(torch.Tensor(o).unsqueeze(0).to(device), True)\n o, r, d, _ = test_env.step(a)\n ep_ret += r\n ep_len += 1\n TestEpRet.append(ep_ret)\n TestEpLen.append(ep_len)\n return TestEpRet, TestEpLen\n\n start_time = time.time()\n o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0\n total_steps = steps_per_epoch * epochs\n EpRet, EpLen, LogPi, LossPi = [], [], [], []\n\n # Main loop: collect experience in env and update/log each epoch\n for t in range(total_steps):\n\n \"\"\"\n Until start_steps have elapsed, randomly sample actions\n from a uniform distribution for better exploration. Afterwards, \n use the learned policy. 
\n \"\"\"\n if t > start_steps:\n a = get_action(torch.Tensor(o).unsqueeze(0).to(device))\n else:\n a = env.action_space.sample()\n\n # Step the env\n o2, r, d, _ = env.step(a)\n ep_ret += r\n ep_len += 1\n\n # Ignore the \"done\" signal if it comes from hitting the time\n # horizon (that is, when it's an artificial terminal signal\n # that isn't based on the agent's state)\n d = False if ep_len==max_ep_len else d\n\n # Store experience to replay buffer\n replay_buffer.store(o, a, r, o2, d)\n\n # Super critical, easy to overlook step: make sure to update \n # most recent observation!\n o = o2\n\n if d or (ep_len == max_ep_len):\n \"\"\"\n Perform all SAC updates at the end of the trajectory.\n This is a slight difference from the SAC specified in the\n original paper.\n \"\"\"\n for j in range(ep_len):\n batch = replay_buffer.sample_batch(batch_size)\n obs1 = batch['obs1']\n obs2 = batch['obs2']\n acts = batch['acts']\n rews = batch['rews']\n done = batch['done']\n\n # Prediction logp_pi, Q1, Q2, V, V‾ \n _, _, logp_pi = policy(obs1)\n q1 = qf1(obs1, acts).squeeze(1)\n q2 = qf2(obs1, acts).squeeze(1)\n v = vf(obs1).squeeze(1)\n v_targ = vf_targ(obs2).squeeze(1)\n\n # Min Double-Q:\n min_q = torch.min(q1, q2).to(device)\n\n # Targets for Q and V regression\n q_backup = rews + gamma*(1-done)*v_targ\n v_backup = min_q - alpha * logp_pi\n\n # Soft actor-critic losses\n qf1_loss = F.mse_loss(q1, q_backup.detach())\n qf2_loss = F.mse_loss(q2, q_backup.detach())\n vf_loss = F.mse_loss(v, v_backup.detach())\n pi_loss = (alpha * logp_pi - q1).mean()\n \n # Q functions train op\n qf1_optimizer.zero_grad()\n qf1_loss.backward(retain_graph=True)\n qf1_optimizer.step()\n\n qf2_optimizer.zero_grad()\n qf2_loss.backward()\n qf2_optimizer.step()\n \n # V function train op\n vf_optimizer.zero_grad()\n vf_loss.backward()\n vf_optimizer.step()\n\n # Policy train op \n pi_optimizer.zero_grad()\n pi_loss.backward()\n pi_optimizer.step()\n \n # Polyak averaging for target variables\n for targ_param, param in zip(vf_targ.parameters(), vf.parameters()):\n targ_param.data.copy_(polyak*targ_param.data + (1-polyak)*param.data)\n \n LogPi.append(logp_pi.mean())\n LossPi.append(pi_loss)\n\n EpRet.append(ep_ret)\n EpLen.append(ep_len)\n o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0\n\n\n # End of epoch wrap-up\n if t > 0 and t % steps_per_epoch == 0:\n epoch = t // steps_per_epoch\n\n # Save model\n if (epoch % save_freq == 0) or (epoch == epochs-1):\n if not os.path.isdir(args.save_path):\n os.makedirs(args.save_path)\n\n ckpt_path = args.save_path + 'model.pth.tar'\n torch.save(policy.state_dict(), ckpt_path)\n \n # Test the performance of the deterministic version of the agent.\n TestEpRet, TestEpLen = test_agent()\n print(\"TestEpRet\", TestEpRet)\n\n # Log info about epoch\n print('---------------------------------------')\n print('Epoch', epoch)\n print('EpRet', np.mean(EpRet))\n print('EpLen', np.mean(EpLen))\n print('TestEpRet', np.mean(TestEpRet))\n print('TestEpLen', np.mean(TestEpLen))\n print('TotalEnvInteracts', t)\n print('LogPi', torch.Tensor(LogPi).mean())\n print('LossPi', torch.Tensor(LossPi).mean())\n print('Time', time.time()-start_time)\n print('---------------------------------------')\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--env', type=str, default='HalfCheetah-v2')\n parser.add_argument('--save_path', type=str, default='./save_model/')\n parser.add_argument('--hid', type=int, default=[400, 300])\n 
parser.add_argument('--gamma', type=int, default=0.99)\n parser.add_argument('--seed', '-s', type=int, default=0)\n parser.add_argument('--epochs', type=int, default=200)\n parser.add_argument('--exp_name', type=str, default='sac')\n args = parser.parse_args()\n\n from spinup.utils.run_utils import setup_logger_kwargs\n logger_kwargs = setup_logger_kwargs(args.exp_name, args.seed)\n\n sac(lambda : gym.make(args.env), gamma=args.gamma, seed=args.seed, \n epochs=args.epochs, logger_kwargs=logger_kwargs, save_path=args.save_path)\n","sub_path":"spinup/algos/sac_pytorch/sac.py","file_name":"sac.py","file_ext":"py","file_size_in_byte":9954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"18297188","text":"import tensorflow as tf\nfrom ceiling_segmentation.UNET.VGG16.Encoder import Encoder\nfrom ceiling_segmentation.UNET.VGG16.Decoder import Decoder\n\n\nclass EncoderDecoder(tf.keras.Model):\n def __init__(self, num_classes, batch_norm=False):\n \"\"\"\n\n :param num_classes: num of classed we would like to segment (e.g. 2 for free vs occluded)\n :param batch_norm: run with or without batch normalization\n \"\"\"\n super(EncoderDecoder, self).__init__()\n self.encoder = Encoder(batch_norm) # create an instance of Encoder\n self.decoder = Decoder(batch_norm) # create an instance of Decoder\n self.middle_conv = tf.keras.layers.Conv2D(1024, 3, strides=1, padding=\"same\", activation=\"relu\") # parameters are based on VGG16 architecture\n self.last_conv = tf.keras.layers.Conv2D(num_classes, 1, strides=1, padding=\"same\", activation=\"softmax\")\n\n def call(self, inputs, training=False):\n # pass the input image to the encoder and get the output of each vgg blk\n blk_1_out, blk_2_out, blk_3_out, blk_4_out, blk_5_out, x = self.encoder(inputs, training)\n x = self.middle_conv(x)\n x = self.decoder(x, blk_1_out, blk_2_out, blk_3_out, blk_4_out, blk_5_out, training)\n\n output = self.last_conv(x)\n return output\n","sub_path":"ceiling_segmentation/ceiling_segmentation/UNET/VGG16/EncoderDecoder.py","file_name":"EncoderDecoder.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"42064274","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nN4_train = np.array((0.18, 0.182, 0.201, 0.242, 0.261))\r\nN4_test = np.array((0.095, 0.155, 0.183, 0.214, 0.241))\r\n\r\nResNet_train = np.array((0.901, 0.905, 0.897, 0.859, 0.825))\r\nResNet_test = np.array((0.128, 0.184, 0.266, 0.296, 0.345))\r\n\r\nResNetSlw_train = np.array((0.923, 0.879, 0.873, 0.856, 0.839))\r\nResNetSlw_test = np.array((0.128, 0.188, 0.253, 0.292, 0.331))\r\n\r\nVGG_train = np.array((0.567, 0.585, 0.451, 0.454, 0.286))\r\nVGG_test = np.array((0.133, 0.203, 0.254, 0.279, 0.281))\r\n\r\nindex = np.array((50, 100, 200, 300, 500))\r\n\r\nf1 = plt.figure(1)\r\nplt.ylim(0, 1)\r\nplt.plot(index, N4_train, label='CNN train')\r\nplt.plot(index, ResNet_train, label='ResNet train')\r\nplt.plot(index, ResNetSlw_train, label='ResNetShallow train')\r\nplt.plot(index, VGG_train, label='VGG train')\r\nplt.legend()\r\nf2 = plt.figure(2)\r\nplt.ylim(0, 0.4)\r\nplt.plot(index, N4_test, label='CNN test')\r\nplt.plot(index, ResNet_test, label='ResNet test')\r\nplt.plot(index, ResNetSlw_test, label='ResNetShallow test')\r\nplt.plot(index, VGG_test, label='VGG test')\r\nplt.legend()\r\nf3 = plt.figure(3)\r\nplt.plot(index, N4_train / N4_test, label='CNN train / test')\r\nplt.plot(index, ResNet_train 
/ ResNet_test, label='ResNet train / test')\r\nplt.plot(index, ResNetSlw_train / ResNetSlw_test, label='ResNetShallow train / test')\r\nplt.plot(index, VGG_train / VGG_test, label='VGG train / test')\r\nplt.legend()","sub_path":"plot_stat.py","file_name":"plot_stat.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"419261223","text":"from flask import Flask, abort\nimport audio\nfrom video import VideoPlayer\nimport json\nimport time\n\nimport random\n\napp = Flask(__name__)\n\nvplayer = VideoPlayer()\n\nwith open(\"conf.json\") as fo:\n    conf = json.load(fo)\n    sounds = conf[\"sounds\"]\n    playlists = conf[\"playlists\"]\n    videos = conf[\"videos\"]\n    print(conf)\n\n\n@app.route('/', methods=['GET'])\ndef empty():\n    return(\"this is the soundplayer app\")\n\n@app.route('/play/<sound_id>', methods=['POST'])\ndef play_sound(sound_id):\n    if (sound_id not in sounds):\n        abort(404)\n    try:\n        audio.play_sound(sounds[sound_id])\n    except Exception as e:\n        print (e)\n    return 'playing ' + sound_id\n\n@app.route('/play/random', methods=['POST'])\ndef play_random():\n    audio.play_sound(random.choice(sounds.values()))\n    return 'playing random song'\n\n@app.route('/playlist/<listname>', methods=['POST'])\ndef playlist(listname):\n    if (listname not in playlists):\n        abort(404)\n    audio.playlist(playlists[listname])\n    return 'playing playlist ' + listname\n\n@app.route('/video/<video_id>', methods=['POST'])\ndef play_video(video_id):\n    if (video_id not in videos):\n        abort(404)\n    vplayer.play(videos[video_id])\n    return 'playing video ' + video_id\n\n@app.route('/videoget/<video_id>', methods=['GET'])\ndef play_video_get(video_id):\n    print(video_id, videos)\n    if (video_id not in videos):\n        abort(404)\n    vplayer.play(videos[video_id])\n    return(\"Playing video \"+video_id)\n\n\n@app.route('/stop', methods=['POST'])\ndef stop():\n    stop_sound()\n    stop_video()\n    # a Flask view must return a response; without this the route errors out\n    return 'stopped'\n\ndef stop_sound():\n    audio.stop()\n\ndef stop_video():\n    vplayer.stop()\n\n\n\n\nif __name__ == \"__main__\":\n    app.run('0.0.0.0', port=8001)\n","sub_path":"player/build/lib/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"644257352","text":"\"\"\"Convert Epubs from the Masaha Hurra library to OpenITI mARkdown.\n\nThe converter has two main functions:\n* convert_file: convert a single epub file.\n* convert_files_in_folder: convert all epub files in a given folder\n\nUsage examples:\n    >>> folder = \"test/masaha/epub/\"\n    >>> meta_fp = \"test/masaha/all_books/meta/all_metadata.json\"\n    >>> from epub_converter_masaha import convert_file, convert_files_in_folder\n    >>> src_fp = folder+\"000008.epub\"\n    >>> convert_file(src_fp, meta_fp, dest_fp=folder+\"converted/Masaha000008\")\n    >>> convert_files_in_folder(folder, meta_fp, dest_folder=folder+\"converted\")\n    Converting all files in folder test/masaha/epub with extensions ['epub']\n\nBoth functions use the MasahaEpubConverter class to do the heavy lifting.\nThe MasahaEpubConverter is a subclass of the GenericEpubConverter,\nwhich in turn is a subclass of the GenericConverter\nfrom the generic_converter module:\n\nGenericConverter\n    \\_ GenericEpubConverter\n        \\_ MasahaEpubConverter\n\nMethods of both classes:\n\n(methods of GenericConverter are inherited by GenericEpubConverter;\nmethods of GenericConverter with the same name\nin GenericEpubConverter are overwritten by the latter)\n\n=========================== ========================= 
=======================\ngeneric_converter epub_converter_generic epub_converter_masaha \n=========================== ========================= =======================\n__init__ __init__ __init__ \nconvert_files_in_folder (inherited) (inherited)\nconvert file (inherited) (inherited)\nmake_dest_fp (inherited - generic!) (inherited - generic!)\nget_metadata (inherited - generic!) get_metadata\nget_data get_data (inherited)\npre_process (inherited) (inherited)\nadd_page_numbers (inherited - generic!) (inherited - generic!)\nadd_structural_annotations (inherited - generic!) (inherited - generic!) \nremove_notes remove_notes (inherited)\nreflow (inherited) (inherited)\nadd_milestones (inherited) (inherited)\npost_process (inherited - generic!) post_process\ncompose (inherited) (inherited)\nsave_file (inherited) (inherited)\n convert_html2md convert_html2md\n inspect_epub (inherited)\n sort_html_files_by_toc sort_html_files_by_toc\n add_unique_tags (inherited)\n=========================== ========================= =======================\n\n\nExamples:\n >>> from epub_converter_masaha import MasahaEpubConverter\n >>> from helper.yml2json import yml2json\n >>> folder = \"test/\"\n >>> fn = \"26362727.epub\"\n >>> hc = MasahaEpubConverter(dest_folder=\"test/converted\")\n >>> hc.VERBOSE = False\n >>> meta_fp = \"masaha/all_books/meta/all_metadata.json\"\n >>> hc.metadata_file = meta_fp\n >>> hc.convert_file(folder+fn)\n\n #>>> hc.convert_files_in_folder(folder)\n\n\"\"\"\n\nimport os\nimport json\nimport shutil\nimport re\n\nif __name__ == '__main__':\n from os import sys, path\n root_folder = path.dirname(path.dirname(path.abspath(__file__)))\n root_folder = path.dirname(path.dirname(root_folder))\n sys.path.append(root_folder)\n\nfrom openiti.new_books.convert.epub_converter_generic import GenericEpubConverter\nfrom openiti.new_books.convert.helper import html2md_masaha\nfrom openiti.new_books.convert.helper.yml2json import yml2json\n\n\ndef convert_file(fp, meta_fp, dest_fp=None, verbose=False, overwrite=False):\n \"\"\"Convert one file to OpenITI format.\n\n Args:\n fp (str): path to the file that must be converted.\n meta_fp (str): path to the yml file containing the Masaha metadata\n dest_fp (str): path to the converted file.\n\n Returns:\n None\n \"\"\"\n conv = MasahaEpubConverter(overwrite=overwrite)\n conv.VERBOSE = verbose\n with open(meta_fp, mode=\"r\", encoding=\"utf-8\") as file:\n d = json.load(file)\n conv.metadata_dic = {int(item[\"book_id\"]): item for item in d}\n conv.metadata_file = meta_fp\n conv.convert_file(fp, dest_fp=dest_fp)\n\n##def convert_multifile_text(folder, meta_fp, dest_folder, verbose=False):\n## for i, fn in enumerate(os.listdir(folder)):\n## if i == 0:\n## dest_fp = os.path.join(dest_folder, os.path.splitext(fn)[0])\n## \n \n\ndef convert_files_in_folder(src_folder, meta_fp, dest_folder=None, verbose=False,\n extensions=[\"epub\"], exclude_extensions=[\"yml\"],\n fn_regex=None, overwrite=False):\n \"\"\"Convert all files in a folder to OpenITI format.\\\n Use the `extensions` and `exclude_extensions` lists to filter\\\n the files to be converted.\n\n Args:\n src_folder (str): path to the folder that contains\n the files that must be converted.\n meta_fp (str): path to the yml file containing the Masaha metadata\n dest_folder (str): path to the folder where converted files\n will be stored.\n extensions (list): list of extensions; if this list is not empty,\n only files with an extension in the list should be converted.\n exclude_extensions (list): list of 
extensions;\n if this list is not empty,\n only files whose extension is not in the list will be converted.\n fn_regex (str): regular expression defining the filename pattern\n e.g., \"-(ara|per)\\d\". If `fn_regex` is defined,\n only files whose filename matches the pattern will be converted.\n\n Returns:\n None\n \"\"\"\n msg = \"Converting all files in folder {} with extensions {}\"\n print(msg.format(src_folder, extensions))\n conv = MasahaEpubConverter(overwrite=overwrite)\n conv.VERBOSE = verbose\n with open(meta_fp, mode=\"r\", encoding=\"utf-8\") as file:\n d = json.load(file)\n conv.metadata_dic = {int(item[\"book_id\"]): item for item in d}\n conv.metadata_file = meta_fp\n conv.convert_files_in_folder(src_folder, dest_folder=dest_folder,\n extensions=extensions,\n exclude_extensions=exclude_extensions,\n fn_regex=fn_regex)\n\n\n################################################################################\n\n\n\n\nclass MasahaEpubConverter(GenericEpubConverter):\n def __init__(self, dest_folder=None, overwrite=True):\n super().__init__(dest_folder=dest_folder, overwrite=overwrite)\n self.toc_fn = \"content.opf\"\n self.metadata_file = None\n\n\n def convert_files_in_folder(self, source_folder, dest_folder=None,\n extensions=[], exclude_extensions=[],\n fn_regex=None):\n \"\"\"Convert all files in a folder to OpenITI format.\\\n Use the `extensions` and `exclude_extensions` lists to filter\\\n the files to be converted.\n\n Args:\n source_folder (str): path to the folder that contains\n the files that must be converted.\n extensions (list): list of extensions; if this list is not empty,\n only files with an extension in the list should be converted.\n exclude_extensions (list): list of extensions;\n if this list is not empty,\n only files whose extension is not in the list will be converted.\n fn_regex (str): regular expression defining the filename pattern\n e.g., \"-(ara|per)\\d\". 
If `fn_regex` is defined,\n only files whose filename matches the pattern will be converted.\n\n Returns:\n None\n \"\"\"\n failed = []\n if dest_folder:\n self.dest_folder = dest_folder\n fp_list = self.filter_files_in_folder(source_folder, extensions,\n exclude_extensions, fn_regex)\n for fp in fp_list:\n print(fp)\n try:\n self.convert_file(fp)\n except Exception as e:\n print(\"ERROR:\", e)\n failed.append((fp, e))\n \n\n # deal with multivolume texts that are in separate folders:\n multivol_folders = [f for f in os.listdir(source_folder) if f.startswith(\"multivol\")]\n multivol_folders = [os.path.join(source_folder, f) for f in multivol_folders]\n \n for folder in multivol_folders:\n print(folder)\n try:\n first_fn = sorted(os.listdir(folder))[0]\n except Exception as e:\n print(\"folder does not contain files:\", e)\n failed.append((folder, e))\n continue\n outfn = re.sub(\"\\.epub\", \"Vols.automARkdown\", first_fn)\n outfp = os.path.join(dest_folder, outfn)\n if os.path.exists(outfp):\n print(outfp, \"already exists\")\n continue\n temp_folder = os.path.join(dest_folder, \"temp\")\n if os.path.exists(temp_folder):\n shutil.rmtree(temp_folder)\n os.makedirs(temp_folder)\n self.convert_files_in_folder(folder, dest_folder=temp_folder,\n extensions=extensions,\n exclude_extensions=exclude_extensions,\n fn_regex=fn_regex)\n # combine all volumes into one folder:\n combined = []\n endnotes = []\n for i, fn in enumerate(sorted(os.listdir(temp_folder))):\n fp = os.path.join(temp_folder, fn)\n if i == 0:\n outfn = re.sub(\"\\.\", \"Vols.\", fn) \n outfp = os.path.join(dest_folder, outfn)\n with open(fp, mode=\"r\", encoding=\"utf-8\") as file:\n text = file.read()\n if i != 0:\n text = re.split(\"#META#Header#End#?\", text)[-1]\n page = \"PageV{:02d}P{:03d}\"\n text = re.sub(\"PageV\\d+P(\\d+)\", r\"PageV{:02d}P\\1\".format(i+1), text)\n if re.findall(\"### \\|EDITOR\\|[ \\r\\n]+ENDNOTES:?\", text):\n text, notes = re.split(\"### \\|EDITOR\\|[ \\r\\n]+ENDNOTES:?\", text)\n endnotes.append(notes)\n combined.append(text)\n with open(outfp, mode=\"w\", encoding=\"utf-8\") as file:\n text = \"\\n\\n\".join(combined)\n endnotes = \"\\n\\n\".join(endnotes)\n file.write(text + \"\\n\\n### |EDITOR\\|\\n\\nENDNOTES:\\n\\n\" + endnotes)\n print(\"Converting all files done\")\n if failed:\n print(\"These files failed to convert:\")\n for fp, e in failed:\n print(fp, e)\n \n \n\n def sort_html_files_by_toc(self, zp, toc_fp, html_files):\n \"\"\"Gets the table of contents from the Epub file.\n\n Args:\n zp: zipfile object\n toc_fp (str): filepath to the table of contents of the epub.\n html_files(list): an unordered list of the html files in the epub.\n\n Returns:\n toc (list): a list of filepaths to the html files\n in the epub file, in the order specified by the\n table of contents\n \"\"\"\n html_files_dict = {os.path.split(fp)[-1] : fp for fp in html_files}\n toc_data = zp.read(toc_fp)\n toc_data = codecs.decode(toc_data, \"utf-8\")\n soup = BeautifulSoup(toc_data)\n toc_ol = soup.find(\"spine\")\n toc = []\n for item in toc_ol.find_all(\"itemref\"):\n fn = os.path.split(item.get(\"idref\"))[-1]\n if fn in html_files_dict:\n toc.append(html_files_dict[fn])\n return toc\n\n def convert_html2md(self, html):\n \"\"\"Use custom html to mARKdown function for Masaha epubs.\"\"\"\n text = html2md_masaha.markdownify(html)\n return text\n\n def get_metadata(self, metadata_fp):\n \"\"\"Custom method to get the metadata of the Masaha epub file.\"\"\"\n source_fp = self.source_fp\n bookID = 
os.path.split(source_fp)[1]\n bookID = int(os.path.splitext(bookID)[0])\n meta_dic = self.metadata_dic[bookID]\n meta = [\"#META# {}: {}\".format(k,v) for k,v in sorted(meta_dic.items())]\n return self.magic_value + \"\\n\".join(meta) + self.header_splitter\n\n def post_process(self, text):\n \"\"\"Custom post-processing for masaha texts\"\"\"\n # put page number at the bottom of the page:\n text = re.sub(\"(PageV\\d+P\\d+)(.+?)(?=Page|\\Z)\", r\"\\2\\n\\n\\1\\n\\n\", text, flags=re.DOTALL)\n processed = super().post_process(text)\n return processed\n \n\n\nif __name__== \"__main__\":\n #import doctest\n #doctest.testmod()\n #input(\"Testing finished. Continue?\")\n\n # identify the location of the yml file containing the metadata:\n meta_fp = r\"test\\masaha\\meta\\all_metadata.json\"\n src_folder = \"test/masaha/epub\"\n convert_files_in_folder(src_folder, meta_fp, dest_folder=\"test/converted\", verbose=False)\n## hc.metadata = yml2json(meta_fp, container={})\n \n## fp = r\"test\\26362727.epub\"\n## hc.convert_file(fp)\n## print(\"converted Masaha epub\", fp)\n##\n## hc.convert_files_in_folder(\"test/masaha\")\n## print(\"converted all epub files in folder\", \"test/masaha\")\n\n","sub_path":"build/lib/openiti/new_books/convert/epub_converter_masaha.py","file_name":"epub_converter_masaha.py","file_ext":"py","file_size_in_byte":13599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"99434983","text":"from django.http.response import JsonResponse\nfrom django.views.generic.base import View, TemplateView\nfrom django.views.decorators.csrf import csrf_exempt\n\n\nfrom PIL import Image, ImageFilter, ImageEnhance\nfrom tesserocr import PyTessBaseAPI\nfrom models import SWTScrubber\n\nclass OcrFormView(TemplateView):\n template_name = 'documents/ocr_form.html'\nocr_form_view = OcrFormView.as_view()\n\n\nclass OcrView(View):\n def post(self, request, *args, **kwargs):\n with PyTessBaseAPI() as api:\n with Image.open(request.FILES['image']) as image:\n new_image = image.convert('1')\n enh = ImageEnhance.Contrast(image)\n enh_image = enh.enhance(1.3)\n filtered_image = image.filter(ImageFilter.CONTOUR)\n sharpened_image = image.filter(ImageFilter.SHARPEN)\n api.SetImage(image)\n utf8_text = api.GetUTF8Text()\n new_image.save('new.png', 'PNG')\n return JsonResponse({'utf8_text': utf8_text})\nocr_view = csrf_exempt(OcrView.as_view())\n","sub_path":"ocr_with_django/documents/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"204445541","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the plusMinus function below.\ndef plusMinus(arr):\n positive = 0 \n negative = 0\n zeros = 0\n for i in range(len(arr)):\n if (arr[i]>0):\n positive+=1\n elif (arr[i]<0):\n negative+=1\n elif (arr[i]==0):\n zeros+=1\n\n pos = positive/(len(arr))\n neg = negative/(len(arr))\n zero = zeros/(len(arr))\n\n print(pos)\n print(neg)\n print(zero)\nif __name__ == '__main__':\n n = int(input())\n\n arr = list(map(int, input().rstrip().split()))\n\n plusMinus(arr)\n","sub_path":"plusminus.py","file_name":"plusminus.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"499431966","text":"import json\nimport os\nimport socket\n\n\nclass Consumer:\n def __init__(self, port, download_dir, address='localhost', 
multicast_group_addr='224.3.29.71',\n                 multicast_group_port=10000,\n                 timeout=5):\n        self.port = port\n        self.address = address\n        self.multicast_group_addr = multicast_group_addr\n        self.multicast_group_port = multicast_group_port\n        self.timeout = timeout\n        self.download_dir = download_dir.rstrip(\" /\")\n\n    def start(self):\n        print('Starting a consumer on (%s, %d)' % ('localhost', self.port))\n        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as receiverSock:\n            receiverSock.bind((self.address, self.port))\n            receiverSock.listen(1)\n            receiverSock.settimeout(self.timeout)\n\n            with socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) as sock:\n                sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)\n\n                while True:\n                    fileName = input('Enter the filename to download: ')\n                    if not fileName:\n                        print(\"File name must not be empty\")\n                        continue\n\n                    print('Asking the producers for a file: %s' % fileName)\n\n                    request = json.dumps({'fileName': fileName, 'address': self.address, 'port': str(self.port)})\n                    requestBytes = bytes(request, 'UTF-8')\n                    sock.sendto(requestBytes, (self.multicast_group_addr, self.multicast_group_port))\n\n                    try:\n                        conn, addr = receiverSock.accept()\n                    except socket.timeout:\n                        print('No file after %d seconds' % self.timeout)\n                        continue\n                    except OSError as why:\n                        print('Unable to accept a connection: ' + str(why))\n                        continue\n\n                    with conn:\n                        print('Producer from %s seems to have a file %s' % (addr, fileName))\n\n                        try:\n                            self.__save_content_to_file(conn, fileName)\n                        except OSError as why:\n                            print('Unable to save the file %s: %s' % (fileName, str(why)))\n\n    def __save_content_to_file(self, sock, file_name):\n        tmp_download_dir = self.download_dir + '/' + 'tempDownloads'\n        os.makedirs(tmp_download_dir, exist_ok=True)\n        tmp_file = tmp_download_dir + '/' + file_name\n        file = self.download_dir + '/' + file_name\n\n        if not os.path.exists(file):\n            with open(tmp_file, 'wb') as out:\n                while True:\n                    data = sock.recv(1024)\n                    if not data: break\n                    out.write(data)\n\n            os.rename(tmp_file, file)\n            print('%s is saved to %s' % (file_name, file))\n        else:\n            print('%s is already present' % file)\n","sub_path":"computer-networks/hw1-producer-consumer/ctd/old.version/consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":2989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"102545113","text":"from datetime import datetime\nfrom dateutil import tz\n\ndef utc_to_local(utc_string):\n    from_zone = tz.tzutc()\n    to_zone = tz.tzlocal()\n\n    # utc = datetime.utcnow()\n    utc = datetime.strptime(utc_string, '%Y-%m-%d %H:%M:%S')\n\n    # Tell the datetime object that it's in UTC time zone since \n    # datetime objects are 'naive' by default\n    utc = utc.replace(tzinfo=from_zone)\n\n    # return local time\n    return utc.astimezone(to_zone)\n","sub_path":"src/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"627797869","text":"from conn.connection import connect\n\n\ndef get_notices() -> dict:\n    try:\n        db = connect()\n        cursor = db.cursor()\n        sql = \"select id, title, content, unix_timestamp(create_time) as create_time \" \\\n              \"from notices\"\n        cursor.execute(sql)\n        res = cursor.fetchall()\n        return res\n    except:\n        return 
{}\n","sub_path":"models/notice.py","file_name":"notice.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"373361714","text":"# -*- coding: utf-8 -*-\nimport logging\nfrom datetime import datetime\nfrom dateutil.relativedelta import relativedelta\nfrom operator import itemgetter\nimport time\n\nimport openerp\nfrom openerp import SUPERUSER_ID, api\nfrom openerp import tools\nfrom openerp.osv import fields, osv, expression\nfrom openerp.tools.translate import _\nfrom openerp.tools.float_utils import float_round as round\nfrom openerp.tools.safe_eval import safe_eval as eval\n\nimport openerp.addons.decimal_precision as dp\n\n_logger = logging.getLogger(__name__)\n\n\n\nclass account_move(osv.osv):\n _inherit = \"account.move\"\n \n def button_validate(self, cursor, user, ids, context=None):\n \n \n for move in self.browse(cursor, user, ids, context=context):\n # check that all accounts have the same topmost ancestor\n top_common = None\n for line in move.line_id:\n \n analytic_account_id = line and line.analytic_account_id and line.analytic_account_id.id \n if analytic_account_id:\n analytic_state = line and line.analytic_account_id and line.analytic_account_id.state\n analytic_name = line and line.analytic_account_id and line.analytic_account_id.name\n if analytic_state == 'close':\n raise osv.except_osv(_('Warning!'), _('You Cannot Post an Accounting Entry on A Closed Project/Analtyic Account %s'%analytic_name))\n \n account = line.account_id\n top_account = account\n while top_account.parent_id:\n top_account = top_account.parent_id\n if not top_common:\n top_common = top_account\n elif top_account.id != top_common.id:\n raise osv.except_osv(_('Error!'),\n _('You cannot validate this journal entry because account \"%s\" does not belong to chart of accounts \"%s\".') % (account.name, top_common.name))\n return self.post(cursor, user, ids, context=context)\n \n\n\n\nclass account_account(osv.osv):\n \n _inherit = \"account.account\"\n def _check_moves(self, cr, uid, ids, method, context=None):\n line_obj = self.pool.get('account.move.line')\n account_ids = self.search(cr, uid, [('id', 'child_of', ids)], context=context)\n\n if line_obj.search(cr, uid, [('account_id', 'in', account_ids)], context=context):\n if method == 'write':\n pass\n# raise osv.except_osv(_('Error!'), _('You cannot deactivate an account that contains journal items.'))\n elif method == 'unlink':\n raise osv.except_osv(_('Error!'), _('You cannot remove an account that contains journal items.'))\n #Checking whether the account is set as a property to any Partner or not\n values = ['account.account,%s' % (account_id,) for account_id in ids]\n partner_prop_acc = self.pool.get('ir.property').search(cr, uid, [('value_reference','in', values)], context=context)\n if partner_prop_acc:\n return True\n# raise osv.except_osv(_('Warning!'), _('You cannot remove/deactivate an account which is set on a customer or supplier.'))\n return True\n def _check_allow_code_change(self, cr, uid, ids, context=None):\n line_obj = self.pool.get('account.move.line')\n \n for account in self.browse(cr, uid, ids, context=context):\n if account.note == '#re':\n return True\n account_ids = self.search(cr, uid, [('id', 'child_of', [account.id])], context=context)\n if line_obj.search(cr, uid, [('account_id', 'in', account_ids)], context=context):\n raise osv.except_osv(_('Warning !'), _(\"You cannot change the code of account which contains journal 
items!\"))\n return True","sub_path":"beta_invoice/account/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":3897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"102230957","text":"# -*- coding:utf-8 -*-\n# @Author : 'longguangbin'\n# @Contact : lgb453476610@163.com\n# @Date : 2019/2/3\n\"\"\" \nUsage Of '152_max_product.py' : \n\"\"\"\n\n\nclass Solution(object):\n def maxProduct(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n # 28 ms - 99.66%\n # 计算从左到右的相乘的最大值,和计算从右到左的最大值;再将两组最大值相比\n B = nums[::-1]\n for i in range(1, len(nums)):\n nums[i] *= nums[i - 1] or 1\n B[i] *= B[i - 1] or 1\n return max(max(nums), max(B))\n\n\ndef get_test_instance(example=1):\n nums = [2, 3, -2, 4]\n if example == 1:\n pass\n if example == 2:\n nums = [-2, 0, -1]\n return nums\n\n\ndef main():\n nums = get_test_instance(example=1)\n # nums = get_test_instance(example=1)\n res = Solution().maxProduct(nums)\n print(res)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"leetcode/dynamic_programming/152_max_product.py","file_name":"152_max_product.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"263278543","text":"import os\nimport asyncio\nimport numpy as np\nimport time\n\nimport pandas as pd\nfrom matplotlib import pyplot as plt\n\nfrom lsst.ts import salobj\nfrom lsst.ts.idl.enums import MTHexapod\n\n\n\nasync def readingTemperatures(hexa):\n dir(temp)\n [getattr(temp,'temperatureC%02d'%i) for i in range(1,8+1)]\n end = Time(datetime.now(), scale='tai')\n start = end - timedelta(seconds=1000)\n df = await client.select_time_series('lsst.sal.ESS.temperature8Ch', '*', start, end, csc_index)\n fig, ax = plt.subplots(1,1, figsize=(15,4))\n for i in range(1,8+1):\n plt.plot(getattr(df, 'temperatureC%02d'%i))\n plt.grid()\n \nasync def prepareEnvironmentforHexa(hexa):\n #Start the telemetry \n #mount telemetry:\n mount_angle = await mount.tel_elevation.next(flush=False, timeout=10.)\n print(\"mount elevation angle\", mount_angle.actualPosition)\n elev = mount_angle.actualPosition \n \nasync def printHexaPosition(hexa):\n pos = await hexa.tel_application.next(flush=True, timeout=10.)\n print(\"Current Hexapod position\")\n print(\" \".join(f\"{p:10.2f}\" for p in pos.position[:3]), end = ' ') \n print(\" \".join(f\"{p:10.6f}\" for p in pos.position[3:]) )\n \nasync def printHexaUncompensatedAndCompensated(hexa):\n posU = await hexa.evt_uncompensatedPosition.aget(timeout=10.)\n print('Uncompensated position')\n print(\" \".join(f\"{p:10.2f}\" for p in [getattr(posU, i) for i in 'xyz']), end = ' ')\n print(\" \".join(f\"{p:10.6f}\" for p in [getattr(posU, i) for i in 'uvw']),' ',\n pd.to_datetime(posU.private_sndStamp, unit='s')) \n posC = await hexa.evt_compensatedPosition.aget(timeout=10.)\n print('Compensated position = (uncompensated + LUT)')\n print(\" \".join(f\"{p:10.2f}\" for p in [getattr(posC, i) for i in 'xyz']), end = ' ')\n print(\" \".join(f\"{p:10.6f}\" for p in [getattr(posC, i) for i in 'uvw']),' ',\n pd.to_datetime(posC.private_sndStamp, unit='s'))\n \n\n \nasync def printHexaUncompensatedAndCompensated(hexa):\n posU = await hexa.evt_uncompensatedPosition.aget(timeout=10.)\n print('Uncompensated position')\n print(\" \".join(f\"{p:10.2f}\" for p in [getattr(posU, i) for i in 'xyz']), end = ' ')\n print(\" \".join(f\"{p:10.6f}\" for p in [getattr(posU, i) for i in 'uvw']),' ',\n 
pd.to_datetime(posU.private_sndStamp, unit='s')) \n    posC = await hexa.evt_compensatedPosition.aget(timeout=10.)\n    print('Compensated position = (uncompensated + LUT)')\n    print(\" \".join(f\"{p:10.2f}\" for p in [getattr(posC, i) for i in 'xyz']), end = ' ')\n    print(\" \".join(f\"{p:10.6f}\" for p in [getattr(posC, i) for i in 'uvw']),' ',\n          pd.to_datetime(posC.private_sndStamp, unit='s'))\n    \nasync def moveHexaTo0(hexa, actual_z = 0):\n    ### command it to collimated position (based on LUT)\n    \n    need_to_move = False\n    try:\n        posU = await hexa.evt_uncompensatedPosition.aget(timeout=10.)\n        if abs(max([getattr(posU, i) for i in 'xyzuvw']))<1e-8:\n            print('hexapod already at LUT position')\n        else:\n            need_to_move = True\n    except asyncio.exceptions.TimeoutError:\n        need_to_move = True\n    if need_to_move:\n        hexa.evt_inPosition.flush()\n        #according to XML, units are micron and degree\n        await hexa.cmd_move.set_start(x=0,y=0,z=actual_z, u=0,v=0,w=0,sync=True)\n        while True:\n            state = await hexa.evt_inPosition.next(flush=False, timeout=10)\n            print(\"hexa in position?\",state.inPosition, pd.to_datetime(state.private_sndStamp, unit='s'))\n            if state.inPosition:\n                break\n    await printHexaPosition(hexa)\n    \nasync def readyHexaForAOS(hexa):\n    settings = await hexa.evt_settingsApplied.aget(timeout = 10.)\n    hasSettings = 0\n    if hasattr(settings, 'settingsVersion'):\n        print('settingsVersion = ', settings.settingsVersion, pd.to_datetime(settings.private_sndStamp, unit='s'))\n        hasSettings = 1\n    if (not hasSettings) or (not settings.settingsVersion[:12] == 'default.yaml'):\n        print('YOU NEED TO SEND THIS HEXAPOD TO STANDBY, THEN LOAD THE PROPER CONFIG')\n    else:\n        hexaConfig = await hexa.evt_configuration.aget(timeout=10.)\n        print(\"pivot at (%.0f, %.0f, %.0f) microns \"%(hexaConfig.pivotX, hexaConfig.pivotY, hexaConfig.pivotZ))\n        print(\"maxXY = \", hexaConfig.maxXY, \"microns, maxZ= \", hexaConfig.maxZ, \" microns\")\n        print(\"maxUV = \", hexaConfig.maxUV, \"deg, maxW= \", hexaConfig.maxW, \" deg\")\n\n    lutMode = await hexa.evt_compensationMode.aget(timeout=10)\n    if not lutMode.enabled:\n        hexa.evt_compensationMode.flush()\n        await hexa.cmd_setCompensationMode.set_start(enable=1, timeout=10)\n        lutMode = await hexa.evt_compensationMode.next(flush=False, timeout=10)\n    print(\"compensation mode enabled?\",lutMode.enabled, pd.to_datetime(lutMode.private_sndStamp, unit='s'))\n    await moveHexaTo0(hexa, actual_z = 100)\n    await moveHexaTo0(hexa)\n    await printHexaUncompensatedAndCompensated(hexa)\n    print(\"Does the hexapod have enough inputs to do LUT compensation? (If the below times out, we do not.)\")\n    #Note: the target events are what the hexa CSC checks; if one is missing, the entire LUT will not be applied\n    #it also needs to see an uncompensatedPosition (a move would trigger that) in order to move to the compensatedPosition\n    a = await hexa.evt_compensationOffset.aget(timeout=10.)\n    print('mount elevation = ', a.elevation)\n    print('mount azimuth = ', a.azimuth)\n    print('rotator angle = ', a.rotation)\n    print('? 
temperature = ', a.temperature)\n    print('x,y,z,u,v,w = ', a.x, a.y, a.z, a.u, a.v, a.w, pd.to_datetime(a.private_sndStamp, unit='s'))","sub_path":"procedures/hexaTools.py","file_name":"hexaTools.py","file_ext":"py","file_size_in_byte":5743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"204404743","text":"import turtle\nimport time\nimport random\n\nt = turtle.Turtle()\nt.pensize(3)\nt.speed(8)\n\n\ndef draw(ile, bok, kat):\n    for i in range(ile):\n        t.fd(bok)\n        t.right(kat)\n\n\ndef star(bok):\n    draw(5, bok, 144)\n\n\nstar(150)\n\ntime.sleep(3)\n\n","sub_path":"Python/turtle/star.py","file_name":"star.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"513668170","text":"from sqlalchemy import and_, func\n\nfrom ...shared.utils.restApi import RestResource\nfrom ...shared.utils.api_utils import build_req_parser\nfrom ..models.security_results import SecurityResultsDAST\nfrom ..models.security_reports import SecurityReport\n\n\nclass TestStatusUpdater(RestResource):\n    _put_rules = (\n        dict(name=\"test_status\", type=dict, location=\"json\"),\n    )\n\n    def __init__(self):\n        super().__init__()\n        self.__init_req_parsers()\n\n    def __init_req_parsers(self):\n        self._parser_put = build_req_parser(rules=self._put_rules)\n\n    def put(self, project_id: int, test_id: int):\n        args = self._parser_put.parse_args(strict=False)\n        test_status = args.get(\"test_status\")\n\n        if not test_status:\n            return {\"message\": \"Not enough parameters\"}, 400\n\n        if isinstance(test_id, int):\n            _filter = and_(\n                SecurityResultsDAST.project_id == project_id, SecurityResultsDAST.id == test_id\n            )\n        else:\n            _filter = and_(\n                SecurityResultsDAST.project_id == project_id, SecurityResultsDAST.test_uid == test_id\n            )\n        test = SecurityResultsDAST.query.filter(_filter).first()\n        test.set_test_status(test_status)\n\n        if test_status[\"status\"].lower().startswith(\"finished\"):\n            if isinstance(test_id, int):\n                _filter = and_(\n                    SecurityReport.project_id == project_id, SecurityReport.id == test_id\n                )\n            else:\n                _filter = and_(\n                    SecurityReport.project_id == project_id, SecurityReport.test_uid == test_id\n                )\n            counted_severity = SecurityReport.query.with_entities(\n                SecurityReport.severity,\n                func.count(SecurityReport.severity)\n            ).filter(_filter).group_by(SecurityReport.severity).all()\n\n            counted_statuses = SecurityReport.query.with_entities(\n                SecurityReport.status,\n                func.count(SecurityReport.status)\n            ).filter(_filter).group_by(SecurityReport.status).all()\n\n            for severity in counted_severity:\n                setattr(test, severity[0].lower(), severity[1])\n\n            for status in counted_statuses:\n                setattr(test, status[0].lower().replace(\" \", \"_\"), status[1])\n            test.commit()\n\n        return {\"message\": f\"Status for test_id={test_id} of project_id: {project_id} updated\"}, 200\n","sub_path":"api/update_test_status.py","file_name":"update_test_status.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"244630987","text":"import re\r\nimport pandas as pd\r\nimport glob\r\nimport csv\r\n\r\nused= ['[\\u0041-\\u005A]', '[\\u0061-\\u007A]', '[\\u00C0-\\u01FF]', '[\\u0400-\\u04FF]',\r\n       '[\\u3300-\\uA07F]', '[\\u0020]']\r\n\r\n# joins a list into a string\r\ndef jointhem(turp):\r\n    over = ' '\r\n    return (over.join(turp))\r\n\r\n# gets a list as an input and returns a list with the most common 
whatever\r\n# in that list\r\ndef mostcommon(listing):\r\n temp_dict = {}\r\n \r\n for chara in listing:\r\n if chara in temp_dict:\r\n temp_dict[chara]-=-1\r\n else:\r\n temp_dict[chara] = 1\r\n \r\n common_whatever = []\r\n try:\r\n del temp_dict[' ']\r\n except:\r\n pass\r\n search_value = max(temp_dict.values())\r\n \r\n for name, value in temp_dict.items():\r\n if value == search_value:\r\n common_whatever.append(name)\r\n \r\n return common_whatever\r\n \r\nname_of_file = []\r\n#puts txts into a list\r\nfor files in glob.glob('text\\\\*.txt'):\r\n name_of_file.append(files)\r\n \r\n \r\noutput = pd.DataFrame(columns=['Name', 'Length_of_book', 'Length_of_book_cleaned',\r\n 'Average_length_of_words', 'Most_common_character',\r\n 'Most_common_word'])\r\n\r\noutput = output.append({'Name': 'Name',\r\n 'Length_of_book': 'Length_of_book',\r\n 'Length_of_book_cleaned': 'Length_of_book_cleaned',\r\n 'Average_length_of_words': 'Average_length_of_words',\r\n 'Most_common_character': 'Most_common_character',\r\n 'Most_common_word': 'Most_common_word',\r\n }, ignore_index = True)\r\n\r\n\r\n#opens file and splits it into words\r\nfor file in name_of_file:\r\n split_by_function = []\r\n temp = []\r\n endlist = []\r\n with open(file, encoding=\"utf-8-sig\") as book:\r\n \r\n textdata = book.read()\r\n textdata = textdata.lower()\r\n \r\n split_by_function = textdata.splitlines(keepends=True)\r\n \r\n #splits words into letters\r\n for item in split_by_function:\r\n word_split = list(item)\r\n \r\n #checks for non-letter characters\r\n for x in word_split:\r\n for i in used:\r\n if bool(re.search(i, x)):\r\n temp.append(x)\r\n \r\n #joins them back up in a list\r\n endlist.append(''.join(temp))\r\n temp.clear()\r\n word_split.clear()\r\n #https://www.utf8-chartable.de/unicode-utf8-table.pl?start=128&number=128&names=-&utf8=string-literal\r\n\r\n #takes out null\r\n for stuff in endlist:\r\n if stuff == '':\r\n endlist.remove(stuff)\r\n else:\r\n stuff.replace(' ', '')\r\n \r\n \r\n #joins the list back into a string\r\n final = jointhem(endlist)\r\n \r\n # gets most common letter(s)\r\n common_char = mostcommon(final)\r\n\r\n # average word length\r\n finalsplit = final.split()\r\n common_word = mostcommon(finalsplit)\r\n average_word = sum(len(word) for word in finalsplit) / len(finalsplit)\r\n\r\n # write to panda file\r\n file = file.split('\\\\')\r\n file = file[1]\r\n file = file[:-4]\r\n if file != 'chinese':\r\n output = output.append({'Name': file,\r\n 'Length_of_book': len(textdata),\r\n 'Length_of_book_cleaned': len(final),\r\n 'Average_length_of_words': average_word,\r\n 'Most_common_character': common_char[0],\r\n 'Most_common_word': common_word[0],\r\n }, ignore_index = True)\r\n \r\n else:\r\n output = output.append({'Name': file,\r\n 'Length_of_book': len(textdata),\r\n 'Length_of_book_cleaned': len(final),\r\n 'Average_length_of_words': None,\r\n 'Most_common_character': common_char[0],\r\n 'Most_common_word': None\r\n }, ignore_index = True)\r\n \r\n\r\n# write to csv file\r\nwith open('ending.csv', encoding=\"utf-8-sig\", mode='w+', newline='') as f:\r\n writer = csv.writer(f)\r\n for i, row in output.iterrows():\r\n writer.writerow(row)\r\n\r\nprint('DONE')","sub_path":"ALT2 DATA EXTRACTOR.py","file_name":"ALT2 DATA EXTRACTOR.py","file_ext":"py","file_size_in_byte":4396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"457721541","text":"while True:\n try:\n start = int(input('START: '))\n stop = int(input('STOP: '))\n 
divisor = int(input('DIVISOR: '))\n        break\n    except ValueError:\n        print('Please enter just numbers!')\n\nif not divisor:\n    print('Cannot divide by zero!')\nelse:\n    answer = []\n    for i in range(start, stop+1):\n        if i % divisor == 0:\n            answer.append(i)\n    print(f'Numbers in range({start},{stop}) divisible by {divisor}:\\n{answer}')\n\n","sub_path":"divisor.py","file_name":"divisor.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"378423131","text":"import numpy as np\nfrom math import exp, log\nimport matplotlib.pyplot as plt\n\nn = 200\nd = 4\nx = 3 * (np.random.rand(n, d) - 0.5)\ny = 2 * x[:, 1] - 1 * x[:, 2] + 0.5\ny = y + 0.5 * np.random.rand(n)\ny = 2 * y - 1\n\n\ndef J(w, x, y):\n    sum_val = 0\n    for u, v in zip(x, y):\n        sum_val += log(1 + exp(-v * np.dot(w, u)))\n    return sum_val/n + lam * np.dot(w, w)\n\n\ndef grad_J(w, x, y):\n    sum_val = 0\n    for u, v in zip(x, y):\n        sum_val += (-v * u) / (1 + exp(v * np.dot(w, u)))\n    return sum_val/n + 2 * lam * w\n\n\ndef hess_J(w, x, y):\n    H = np.eye(w.shape[0])\n    for u, v in zip(x, y):\n        H += (v ** 2 * exp(-v * np.dot(w, u)) * np.dot(u.reshape(-1, 1), u.reshape(1, -1))) / ((1 + exp(-v * np.dot(w, u)))**2)\n    assert H.shape == (d, d), \"hessian shape error\"\n    return H/n + 2 * lam * np.eye(w.shape[0])\n\n\nw_s = np.ones(d)*0\nw_n = np.ones(d)*0\nnum_round = 1000\nlam = 0.00001\n# alpha is the upper bound of the Lipschitz constant\nalpha, _ = np.linalg.eig(np.dot(np.transpose(x), x)/n + 2 * lam * np.eye(d))\nalpha = max(alpha)\nJ_steep = []\nJ_newton = []\nw_steep = []\nw_newton = []\n\nt = 0\nfor i in range(num_round):\n    # steepest gradient method\n    J_s_1 = J(w_s, x, y)\n    w_steep.append(w_s)\n    J_steep.append(J_s_1)\n    d_s = -grad_J(w_s, x, y)\n    w_s += alpha * d_s\n\n    # newton method\n    J_n_1 = J(w_n, x, y)\n    w_newton.append(w_n)\n    J_newton.append(J_n_1)\n    H = hess_J(w_n, x, y)\n    d_n = -np.dot(np.linalg.pinv(H), grad_J(w_n, x, y))\n    w_n += alpha * d_n\n\nfig, ax = plt.subplots()\nax.set_yscale(\"log\")\nax.plot([x for x in range(len(J_steep))], [x - J_steep[-1] for x in J_steep], color=\"b\", label=\"Steepest Gradient Descent\")\nax.plot([x for x in range(len(J_newton))], [x - J_newton[-1] for x in J_newton], color=\"r\", label=\"Newton method\")\nplt.xlabel(\"Iteration\")\nplt.ylabel(\"J_t - J_opt\")\nplt.legend()\nplt.show()\n","sub_path":"Problem1/binary.py","file_name":"binary.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"213679122","text":"import google.auth\nfrom google.cloud import kms_v1\nfrom google.api_core.client_options import ClientOptions\n\ncredentials, _ = google.auth.default()\n\ncert = b\"\"\"\"\"\"\n\nkey = b\"\"\"\"\"\"\n\nproject = \"sijunliu-dca-test\"\n\ndef my_cert_source():\n    return cert, key\n\ndef run_sample(client_cert_source):\n    options = ClientOptions(client_cert_source=client_cert_source)\n\n    client = kms_v1.KeyManagementServiceClient(client_options=options)\n    parent = f\"projects/{project}/locations/global\"\n    res = client.list_key_rings(request={\"parent\": parent})\n    print(res)\n\n\nrun_sample(my_cert_source)","sub_path":"sample_raw_key.py","file_name":"sample_raw_key.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"49546587","text":"'''\nhttps://www.youtube.com/watch?v=6eqC1WTlIqc\nhttps://www.mediafire.com/folder/g6hqvd74uzymc/Tutorial_5_-_MultiThreading\n\nHow to create a lock:\n\nt = threading.Lock() creates new lock\n\nt.aquire() acquires the lock, if another thread has already acquired the lock the thread will wait\n when this line is called until the lock is released\nt.release() releases the lock so that other threads or methods can use it\n'''\nimport threading\nimport time\n\ntLock = threading.Lock()#creating the lock\n\ndef timer(name, delay, repeat):\n print(\"Timer: \" + name + \" Started\")\n # once .acquire() is called this thread will own the tLock and no other thread that wants to use\n # tLock can until it is released\n tLock.acquire() \n print(name + \" Has Acquired the lock\")\n while repeat > 0:\n time.sleep(delay)\n print(name + \": \" + str(time.ctime(time.time())))\n repeat -= 1\n print(name + \" is releasing the lock\")\n # once .release() is called any other methods that want to acquire tLock can\n tLock.release()\n print(\"Timer: \" + name + \" Completed\")\n\ndef Main():\n t1 = threading.Thread(target=timer, args=(\"Timer1\", 1, 5))\n t2 = threading.Thread(target=timer, args=(\"Timer2\", 2, 5))\n t1.start()\n t2.start()\n \n print(\"Main complete\")\n \nMain()","sub_path":"PythonLearn/Draps_tutorial/adv_5/LockingThreadDemo3.py","file_name":"LockingThreadDemo3.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"146196464","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"Tests for the Windows recycler parsers.\"\"\"\n\nimport unittest\n\nfrom plaso.formatters import recycler # pylint: disable=unused-import\nfrom plaso.lib import eventdata\nfrom plaso.lib import timelib\nfrom plaso.parsers import recycler\n\nfrom tests import test_lib as shared_test_lib\nfrom tests.parsers import test_lib\n\n\nclass WinRecycleBinParserTest(test_lib.ParserTestCase):\n \"\"\"Tests for the Windows Recycle Bin parser.\"\"\"\n\n @shared_test_lib.skipUnlessHasTestFile([u'$II3DF3L.zip'])\n def testParseVista(self):\n \"\"\"Tests the Parse function on a Windows Vista RecycleBin file.\"\"\"\n parser_object = recycler.WinRecycleBinParser()\n storage_writer = self._ParseFile([u'$II3DF3L.zip'], parser_object)\n\n self.assertEqual(len(storage_writer.events), 1)\n\n event = storage_writer.events[0]\n\n expected_filename = (\n u'C:\\\\Users\\\\nfury\\\\Documents\\\\Alloy Research\\\\StarFury.zip')\n self.assertEqual(event.original_filename, expected_filename)\n\n expected_timestamp = timelib.Timestamp.CopyFromString(\n u'2012-03-12 20:49:58.633')\n self.assertEqual(event.timestamp, expected_timestamp)\n self.assertEqual(event.file_size, 724919)\n\n expected_message = u'{0:s} (from drive: UNKNOWN)'.format(expected_filename)\n expected_message_short = u'Deleted file: {0:s}'.format(expected_filename)\n self._TestGetMessageStrings(event, expected_message, expected_message_short)\n\n @shared_test_lib.skipUnlessHasTestFile([u'$I103S5F.jpg'])\n def testParseWindows10(self):\n \"\"\"Tests the Parse function on a Windows 10 RecycleBin file.\"\"\"\n parser_object = recycler.WinRecycleBinParser()\n storage_writer = self._ParseFile([u'$I103S5F.jpg'], parser_object)\n\n self.assertEqual(len(storage_writer.events), 1)\n\n event = storage_writer.events[0]\n\n expected_filename = (\n u'C:\\\\Users\\\\random\\\\Downloads\\\\bunnies.jpg')\n self.assertEqual(event.original_filename, expected_filename)\n\n 
expected_timestamp = timelib.Timestamp.CopyFromString(\n u'2016-06-29 21:37:45.618')\n self.assertEqual(event.timestamp, expected_timestamp)\n self.assertEqual(event.file_size, 222255)\n\n expected_message = u'{0:s} (from drive: UNKNOWN)'.format(expected_filename)\n expected_message_short = u'Deleted file: {0:s}'.format(expected_filename)\n self._TestGetMessageStrings(event, expected_message, expected_message_short)\n\n\nclass WinRecyclerInfo2ParserTest(test_lib.ParserTestCase):\n \"\"\"Tests for the Windows Recycler INFO2 parser.\"\"\"\n\n @shared_test_lib.skipUnlessHasTestFile([u'INFO2'])\n def testParse(self):\n \"\"\"Reads an INFO2 file and run a few tests.\"\"\"\n parser_object = recycler.WinRecyclerInfo2Parser()\n storage_writer = self._ParseFile([u'INFO2'], parser_object)\n\n self.assertEqual(len(storage_writer.events), 4)\n\n event = storage_writer.events[0]\n\n expected_timestamp = timelib.Timestamp.CopyFromString(\n u'2004-08-25 16:18:25.237')\n self.assertEqual(event.timestamp, expected_timestamp)\n self.assertEqual(\n event.timestamp_desc, eventdata.EventTimestamp.DELETED_TIME)\n\n self.assertEqual(event.record_index, 1)\n\n expected_filename = (\n u'C:\\\\Documents and Settings\\\\Mr. Evil\\\\Desktop\\\\lalsetup250.exe')\n self.assertEqual(event.original_filename, expected_filename)\n\n event = storage_writer.events[1]\n\n expected_message = (\n u'DC2 -> C:\\\\Documents and Settings\\\\Mr. Evil\\\\Desktop'\n u'\\\\netstumblerinstaller_0_4_0.exe (from drive: C)')\n expected_message_short = (\n u'Deleted file: C:\\\\Documents and Settings\\\\Mr. Evil\\\\Desktop'\n u'\\\\netstumblerinstaller...')\n\n self._TestGetMessageStrings(event, expected_message, expected_message_short)\n\n event = storage_writer.events[2]\n\n self._TestGetSourceStrings(event, u'Recycle Bin', u'RECBIN')\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/parsers/recycler.py","file_name":"recycler.py","file_ext":"py","file_size_in_byte":3869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"393282305","text":"# -*- coding: utf-8 -*-\n__author__ = \"Sergey Karakovskiy, sergey at idsia fullstop ch\"\n__date__ = \"$${date} ${time}$\"\n\nimport sys\nimport os\n\nfrom forwardjumpingagent import ForwardJumpingAgent\nimport numpy as np\n\nfrom evaluationinfo import EvaluationInfo\n\nfrom PyJavaInit import amiCoSimulator\n\nif __name__ == \"__main__\":\n\tlibamico, reset, getEntireObservation, performAction, getEvaluationInfo, getObservationDetails, options = amiCoSimulator()\n\t\n\tagent = ForwardJumpingAgent()\n\n\toptions = \"\"\n\tif len(sys.argv) > 1:\n\t\toptions = sys.argv[1]\n\n\tif options.startswith('\"') and options.endswith('\"'):\n\t\toptions = options[1:-1]\n\n\tk = 1\n\tseed = 0\n\tprint(\"Py: ======Evaluation STARTED======\")\n\ttotalIterations = 0\n\tfor i in range(k, k+10000):\n\t\toptions1 = options + \" -ls \" + str(seed)\n\t\tprint(\"options: \", options1)\n\t\treset(options1.encode('utf-8'))\n\t\tobsDetails = getObservationDetails()\n\t\tagent.setObservationDetails(obsDetails[0], obsDetails[1], obsDetails[2], obsDetails[3])\n\t\twhile (not libamico.isLevelFinished()):\n\t\t\ttotalIterations +=1 \n\t\t\tlibamico.tick();\n\t\t\tobs = getEntireObservation(1, 0)\n\n\t\t\tagent.integrateObservation(obs[0], obs[1], obs[2], obs[3], obs[4]);\n\t\t\taction = agent.getAction()\n\t\t\t#print(\"action: \", action)\n\t\t\tperformAction(action);\n\t\tprint(\"Py: TOTAL ITERATIONS: \", totalIterations)\n\t\t#evaluationInfo = 
getEvaluationInfo()\t\n\t\t#print(\"evaluationInfo = \\n\", EvaluationInfo(evaluationInfo))\n\t\tseed += 1\n\n\n","sub_path":"mario-ai-master/src/main/bin/AmiCoBuild/JavaPy/DemoForwardJumpingAgent.py","file_name":"DemoForwardJumpingAgent.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"366985271","text":"import os\n\nfrom flask import render_template, request, send_from_directory\nfrom app import app\nfrom form import LoginForm\n\n\n@app.route('/')\n@app.route('/index')\ndef hello_world():\n user = {'name': \"Hank\"}\n files = list_download('./')\n return render_template('index.html', title=\"Home Page\", user=user, files=files)\n\n\n\n@app.route(\"/login\")\ndef login():\n form = LoginForm()\n return render_template('login.html', form=form)\n\n\ndef list_download(path):\n files = []\n for file in os.listdir(path):\n if os.path.isfile(os.path.join(path, file)) is True:\n files.append(file)\n return files\n\n\n","sub_path":"routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"356268655","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/peng3dnet/util.py\n# Compiled at: 2017-06-26 13:11:20\n# Size of source mod 2**32: 3054 bytes\nfrom . import errors\n\ndef parse_address(addr, default_port=8080):\n r\"\"\"\n Parses an IP Address into a tuple of ``(addr,port)``\\ .\n \n If the address does not contain an explicitly specified port, the value given with ``default_port`` is used.\n \n Note that currently only IPv4 addresses are supported, but IPv6 support may be added in the future.\n If an IPv6 Address is detected, a :py:exc:`~peng3dnet.errors.UnsupportedAddressError` will be raised.\n \n Additionally, the port returned is checked for plausibility, e.g. 
an integer in range 0-65535.\n If the port is invalid in any way, a :py:exc:`~peng3dnet.errors.InvalidPortError` will be raised.\n \"\"\"\n addr = str(addr)\n addrs = addr.split(':')\n if len(addrs) == 1:\n addr = addrs[0]\n port = default_port\n else:\n if len(addrs) == 2:\n addr = addrs[0]\n try:\n port = int(addrs[1])\n except Exception:\n raise errors.InvalidPortError('Port %s is not an integer' % addrs[1])\n\n else:\n raise errors.UnsupportedAddressError('Address appears to be an IPv6 address, currently not supported')\n if not isinstance(port, int):\n raise errors.InvalidPortError('Port must be an integer')\n else:\n if port < 0:\n raise errors.InvalidPortError('Port may not be less than zero')\n else:\n if port > 65535:\n raise errors.InvalidPortError('Port may not be higher than 65535')\n return (\n addr, port)\n\n\ndef normalize_addr_socketstyle(addr, default_port=8080):\n \"\"\"\n Normalizes the given address to a 2-tuple as accepted by the :py:mod:`socket` module.\n \n Currently accepts a 2-tuple and IPv4 addresses in string format.\n \n If the address does not contain a port, the ``default_port`` will be used.\n \n Note that this function will pass through any exceptions raised by parsing functions it calls.\n \"\"\"\n if len(addr) == 2:\n return addr\n return parse_address(addr, default_port)\n\n\ndef normalize_addr_formatted(addr):\n r\"\"\"\n Normalizes the given address to a string like ``127.0.0.1``\\ .\n \n This method is currently not implemented.\n \"\"\"\n raise NotImplementedError('not yet implemented')","sub_path":"pycfiles/peng3dnet-0.1.3a0-py3.7/util.cpython-37.py","file_name":"util.cpython-37.py","file_ext":"py","file_size_in_byte":2554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"397829335","text":"# 456. 132 Pattern\n# Given a sequence of n integers a1, a2, ..., an, a 132 pattern is a subsequence ai, aj, ak such that i < j < k and ai < ak < aj. 
Design an algorithm that takes a list of n numbers as input and checks whether there is a 132 pattern in the list.\n# Note: n will be less than 15,000.\n# Example 1:\n# Input: [1, 2, 3, 4]\n# Output: False\n# Explanation: There is no 132 pattern in the sequence.\n# Example 2:\n# Input: [3, 1, 4, 2]\n# Output: True\n# Explanation: There is a 132 pattern in the sequence: [1, 4, 2].\n# Example 3:\n# Input: [-1, 3, 2, 0]\n# Output: True\n# Explanation: There are three 132 patterns in the sequence: [-1, 3, 2], [-1, 3, 0] and [-1, 2, 0].\n# https://leetcode.com/problems/132-pattern/discuss/94089/Java-solutions-from-O(n3)-to-O(n)-for-%22132%22-pattern-(updated-with-one-pass-slution)\ndef pat132_on3(ls):\n if not ls or len(ls)<=3:\n return False\n l = len(ls)\n for i in range(l):\n for j in range(i+1,l):\n if ls[j]>ls[i]:\n for k in range(j+1,l):\n if ls[i]> {} {}\".format(count_i, self.folder_no, folder_i, model_i))\r\n # generate reg file path\r\n csv_reg_path = os.path.join(\r\n self.root_path, \"{}_{}_reg.csv\".format(folder_i, model_i, ),\r\n )\r\n xs = self.model_xs[model_i]\r\n # the number of x\r\n x_no = len(xs)\r\n # y train and predict dataframe\r\n temp2[\"y\"] = np.log10(temp2[\"Axial Resilient Modulus\"]*1000)\r\n y_train = temp2[\"y\"].iloc[self.rows_for_train]\r\n y_predict = temp2[\"y\"].iloc[self.rows_for_predict]\r\n # x train and predict empty dataframes\r\n x_train = pd.DataFrame([])\r\n x_predict = pd.DataFrame([])\r\n # get x dataframes\r\n for x_i in range(x_no):\r\n temp2[\"x{}\".format(x_i)] = np.log10(temp2[xs[x_i]])\r\n x_train = pd.concat(\r\n [x_train, temp2[\"x{}\".format(x_i)].iloc[self.rows_for_train]], \r\n ignore_index=True, axis=1, \r\n )\r\n x_predict = pd.concat(\r\n [x_predict, temp2[\"x{}\".format(x_i)].iloc[self.rows_for_predict]], \r\n ignore_index=True, axis=1, \r\n )\r\n # drop rows\r\n x_train = x_train.drop(self.rows2drop).to_numpy()\r\n y_train = y_train.drop(self.rows2drop).to_numpy()\r\n # do regression\r\n self.regression(x_train, y_train, x_predict, y_predict, )\r\n # append result\r\n self.result_sum[\"Folder\"].append(folder_i)\r\n self.result_sum[\"Model\"].append(model_i)\r\n for item_i in self.regr_result:\r\n self.result_sum[item_i].append(self.regr_result[item_i])\r\n # output data used for regression \r\n self.writecsv(\r\n pd.DataFrame(temp2), \r\n os.path.join(self.root_path, csv_reg_path, )\r\n )\r\n except:\r\n # print hint\r\n print(\"!!! 
Regressing: {}\".format(folder_i, ))\r\n            # log duration\r\n            self.log[\"Regression\"].append(self.duration(self.start_time))\r\n            count_i += 1\r\n        self.writecsv(\r\n            pd.DataFrame(self.result_sum), \r\n            os.path.join(self.root_path, \"RLTT_reg_result.csv\", )\r\n        )\r\n        # write log\r\n        self.writecsv(\r\n            pd.DataFrame(self.log), \r\n            os.path.join(self.root_path, \"last_5_seq_regr_log.csv\", )\r\n        )\r\n\r\n\r\n# generate file names\r\n# file path\r\n# Linux path = \"/mnt/c/Users/Chuanjun Lau/Documents/RLTT_Data/Mr\"\r\n# Windows path = r\"D:\\TestData\\RLTT\\Mr\"\r\nroot_path = r\"/mnt/c/Users/Chuanjun Lau/Documents/RLTT_Data/Mr\"\r\n# get all files\r\nfor i, j, k in os.walk(root_path):\r\n    if len(j) != 0:\r\n        temp0 = k\r\n# get all \"*mr.csv\" files\r\nfile_names = []\r\nfor file_i in temp0:\r\n    temp = file_i.split(\"_\")\r\n    if temp[-1].split(\".\")[0] == \"mr\":\r\n        file_names.append(file_i.split(\"_mr\")[0])\r\n\r\n# Set particular folder name to analysis\r\n# file_names = [\"RLTT_C16_95_1_3_mr.csv\"]\r\n\r\ndemo = DoRegression(root_path, file_names, )\r\ndemo.main()\r\n","sub_path":"Mr/RLTT_Regression_last_5_cycles.py","file_name":"RLTT_Regression_last_5_cycles.py","file_ext":"py","file_size_in_byte":10066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"453234056","text":"# Name : Gobang game \n# Author: simon \n# e-mail: 2441873702@qq.com\n# Date : 2020.05.27 19:02\n# version: v2\n# To-do : implement win/loss detection\n# bug 1 : a stone can still be drawn when the mouse clicks outside the board canvas\n# bug 2 : a new stone could cover a previously drawn position -- fixed\n\n\nimport pygame,sys\nimport pygame.freetype\n\npygame.init()\n\nfps = 300 # fps setting\nfclock = pygame.time.Clock()\n\n# default str value\nsize = width, height = 800, 600\nborder = 50 \nwlc_str = \"Welcome to gobang game!\"\n\n# default color\nbg_color = (128,138,135)#pygame.Color(\"white\")\nline_color = pygame.Color(\"black\")\n# chess color\nWHITE = 255,255,255\nBLACK = 0,0,0\nfont_color = 0,0,0\n\n# pygame Surface\nscreen = pygame.display.set_mode(size, pygame.RESIZABLE)\nbackground = pygame.Surface(screen.get_size())\n\ncaption = \"Gobang Game\"\npygame.display.set_caption(caption)\n\ndef draw_font(background, string='Hello pygame!',font_size=20, positon=(0,0)):\n\t# font_type = pygame.freetype.Font('C://Windows//Fonts//msyh.ttc', 1)\n\tfont_type = pygame.freetype.Font('./consola.ttf', 1)\t\n\tfont_rect = font_type.render_to(background, positon, string, fgcolor=font_color, size=font_size)\n\tscreen.blit(background, (0, 0))\n\n\ndef draw_chessboard_rect(background, rect_point, border):\n\tx_num = int((width - 1.5 * border) / border)\n\ty_num = int((height - 1.5 * border) / border)\n\tfor num_w in range(x_num):\n\t\tfor num_h in range(y_num):\n\t\t\trect_point.append([num_w*border + 50, num_h*border + 50])\n\tfor item in rect_point:\n\t\ts_rect = item[0], item[1], border, border\n\t\tpygame.draw.rect(background, line_color, s_rect, 1)\n\treturn rect_point\n\n\ndef success(positon):\n\tfor item in positon:\n\t\t# row\n\t\tif [item[0]+1,item[1]] in positon:\n\t\t\tif [item[0]+2,item[1]] in positon:\n\t\t\t\tif [item[0]+3,item[1]] in positon:\n\t\t\t\t\tif ([item[0]+4,item[1]] in positon):\n\t\t\t\t\t\t# print(\"success!\")\n\t\t\t\t\t\treturn True\n\t\t# column\n\t\telif [item[0],item[1]+1] in positon:\n\t\t\tif [item[0],item[1]+2] in positon:\n\t\t\t\tif [item[0],item[1]+3] in positon:\n\t\t\t\t\tif [item[0],item[1]+4] in positon:\n\t\t\t\t\t\t# print(\"success!\")\n\t\t\t\t\t\treturn True\n\t\t# diagonal\n\t\telif [item[0]+1,item[1]+1] in positon:\n\t\t\tif [item[0]+2,item[1]+2] 
in positon:\n\t\t\t\tif [item[0]+3,item[1]+3] in positon:\n\t\t\t\t\tif [item[0]+4,item[1]+4] in positon:\n\t\t\t\t\t\t# print(\"success!\")\n\t\t\t\t\t\treturn True\n\n\ndef success_judge(chess_dict):\n\tblack_pos = []\n\twhite_pos = []\n\t# print(chess_dict)\n\t# {'10,4': 2, '10,5': 1, '6,4': 2, '6,5': 1, '8,7': 2, '5,7': 1, '6,9': 2, '9,4': 1, '9,6': 2, '10,8': 1}\n\tfor item in chess_dict:\n\t\tx = item.split(\",\", 1)\n\t\tif chess_dict[item] == 1:\n\t\t\twhite_pos.append([int(x[0]),int(x[1])])\n\t\telse:\n\t\t\tblack_pos.append([int(x[0]),int(x[1])])\n\n\tif success(white_pos):\n\t\tprint(\"white success!\")\n\telif success(black_pos):\n\t\tprint(\"black success!\")\n\n\ndef game_over(delay_time):\n\timport time\n\ttime.sleep(delay_time)\n\tprint(\"game over!\")\n\n# put chess down \ndef chess_down(background, position, color):\n\tpygame.draw.circle(background, color, position, 20, 0)\n\n\n\n\nmouse_pos = []\nblack_position = []\nwhite_position = []\nwhile True:\n\t# event manage\n\tfor event in pygame.event.get():\n\t\t# quit\n\t\tif event.type == pygame.QUIT:\n\t\t\tgame_over(0.1)\n\t\t\tsys.exit()\n\t\t# window resize\n\t\telif event.type == pygame.VIDEORESIZE:\n\t\t\tsize = width, height = event.size[0], event.size[1]\n\t\t\tscreen = pygame.display.set_mode(size, pygame.RESIZABLE)\n\t\t\tbackground = pygame.Surface(screen.get_size())\n\t\telif event.type == pygame.MOUSEBUTTONDOWN:\n\t\t\tmouse_pos.append([event.pos[0],event.pos[1]])\t# .pos --> tuple = (x_pos,y_pos)\n\n\n\trect_point = []\n\tbackground.fill(bg_color)\n\tdraw_chessboard_rect(background, rect_point, border)\n\tdraw_font(background, string=wlc_str)\n\n\tchess_dict = {}\n\tcount, black_num, white_num = 0, 0, 0\n\tfor position in mouse_pos:\n\t\t# position calculate:\n\t\tposition[0] = round(position[0] / 50) * 50\n\t\tposition[1] = round(position[1] / 50) * 50\n\n\t\tkey = str(position[0]//50)+\",\"+str(position[1]//50)\n\t\t# flags \n\t\t# 0 -- no\n\t\t# 1 -- white\n\t\t# 2 -- black\n\n\t\tif key in chess_dict:\n\t\t\t# cannot put down the chess\n\t\t\tprint(\"can't put chess here!\")\n\t\telse:\n\t\t\t# flags = 0\n\t\t\tif count % 2 == 0:\n\t\t\t\tchess_color = BLACK\n\t\t\t\tflags = 2\n\t\t\t\t# black_position.append([position[0]//50, position[1]//50])\n\t\t\t\t# print(len(black_position))\n\t\t\t\t# print(black_position)\n\n\t\t\telse:\n\t\t\t\tchess_color = WHITE\n\t\t\t\tflags = 1\n\t\t\t\t# pygame.draw.circle(background, WHITE, position, 20, 0)\n\t\t\t\t# white_position.append([position[0]//50, position[1]//50])\n\n\t\t\tcount = count + 1\n\t\t\t# normalization\n\t\t\tnew_dict = {key : flags}\n\t\t\tchess_dict.update(new_dict)\n\t\t\tchess_down(background, position, chess_color)\n\n\tsuccess_judge(chess_dict)\n\t\"\"\"\n\t# judge success or not \n\tif success_judge(black_position):\n\t\tprint(\"black wins!\")\n\telif success_judge(white_position):\n\t\tprint(\"white wins!\")\n\t\"\"\"\n\n\tscreen.blit(background, (0, 0))\n\tfclock.tick(fps)\t# fps each second\n\tpygame.display.update()\n","sub_path":"gobang/Gobang_v1.2.py","file_name":"Gobang_v1.2.py","file_ext":"py","file_size_in_byte":4829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"142629402","text":"import keras\nfrom keras.applications.vgg16 import VGG16, preprocess_input\nfrom keras.preprocessing.image import load_img,img_to_array\nfrom keras.models import Model\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nclass vizdnn:\n\n    def __init__(self, model_arch, layer_name, image_name):\n        \"\"\"\n        
model_arch: Model architecture \n layer_name: Neural Network layer of interest\n image_name: Test image name\n \"\"\"\n self.model = model_arch\n self.layer_name = layer_name\n self.image_name = image_name\n\n def preprocess_image(self):\n input_width = self.model.input_shape[1]\n input_height = self.model.input_shape[2]\n image = load_img(self.image_name, target_size=(input_width , input_height))\n image = img_to_array(image)\n image = np.expand_dims(image, axis=0)\n image = preprocess_input(image) \n return image \n\n\n def get_layer(self):\n model_layers_dic = { layer.name: layer for layer in self.model.layers}\n trimmed_model = Model(inputs=self.model.inputs, outputs= model_layers_dic[self.layer_name].output)\n feature_maps = trimmed_model.predict(self.preprocess_image())\n return feature_maps\n\n def viz_feature_map(self , feature_map):\n plt.figure(figsize = (25 , 25))\n square = int(np.sqrt(feature_map.shape[-1]))\n index = 1\n for _ in range(square):\n for _ in range(square):\n ax = plt.subplot(square, square, index)\n ax.set_xticks([])\n ax.set_yticks([])\n plt.imshow(feature_map[0, :, :, index-1] , cmap= 'YlOrRd')\n index += 1\n return plt.show()\n","sub_path":"build/lib/vizdnn/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"567228550","text":"import cProfile\n\nfrom files import *\nfrom objects import *\nfrom video import *\n\ndef showImage(image, windowName):\n\tcv2.namedWindow(windowName)\n\tcv2.imshow(windowName, image)\n\ndef test():\n\tdataset_root_folder = '../aff-wild2'\n\tfiles = getFiles(dataset_root_folder)\n\n\taff_wild_videos = Objects(files, ['mp4', 'avi'])\n\taff_wild_videos_iter = iter(aff_wild_videos)\n\n\tpairs = []\n\tobjects = []\n\tfor object in aff_wild_videos_iter:\n\t\tannotations = getAnnotations(object, files)\n\t\tpairs.append({\n\t\t\t'object': object,\n\t\t\t'annotation': annotation\n\t\t})\n\t\tif len(pairs) % 10000 == 0:\n\t\t\tprint(len(pairs))\n\t\t# objects.append(object)\n\t\t# data = readObject(object)\n\t\t# showImage(data, 'data')\n\t\t# cv2.waitKey(0)\n\tprint(len(objects))\n\n# test()\n# cProfile.run('test()')\n\nimport cv2\nimport numpy as np\nfrom scipy.spatial import distance\n\ndef resize(image, scale = 0.5):\n\twidth = int(image.shape[1] * scale)\n\theight = int(image.shape[0] * scale)\n\treturn cv2.resize(image, (width, height))\n\nimport math\n\ndef getHog(image):\n\tcells_by_side = 16\n\t# print('image size:', image.shape)\n\tcell_size = (math.floor(image.shape[0] / cells_by_side), math.floor(image.shape[1] / cells_by_side)) # h x w in pixels\n\tblock_size = (1, 1) # h x w in cells\n\tnbins = 9 # number of orientation bins\n\n\t# winSize is the size of the image cropped to an multiple of the cell size\n\t# cell_size is the size of the cells of the img patch over which to calculate the histograms\n\t# block_size is the number of cells which fit in the patch\n\t_winSize=(cells_by_side * cell_size[1], cells_by_side * cell_size[0])\n\timage = image[0:_winSize[1], 0:_winSize[0]]\n\t# print('new image size:', image.shape, _winSize[0], _winSize[1])\n\t# print('_winSize:', _winSize, _winSize[0] / cell_size[1], _winSize[1] / cell_size[0])\n\t_blockSize=(block_size[1] * cell_size[1],\n\t\t\t\tblock_size[0] * cell_size[0])\n\t_blockStride=(cell_size[1], cell_size[0])\n\t_cellSize=(cell_size[1], cell_size[0])\n\t# print('_cell_size:', _cellSize)\n\t_nbins=nbins\n\thog = cv2.HOGDescriptor(_winSize=_winSize,\n\t 
_blockSize=_blockSize,\n\t _blockStride=_blockStride,\n\t _cellSize=_cellSize,\n\t _nbins=_nbins)\n\tresult = hog.compute(image).T[0]\n\t# print('hog size:', result.shape)\n\treturn result\n\ndef getHogMatrix(images):\n\treturn np.array([getHog(image) for image in images])\n\nvideos = map(getVideo, ['../aff-wild2/expr/videos/validation_set/118-30-640x480.mp4'])\nframes = map(getCurrentFrame, videos)\nhogMatrix = getHogMatrix(frames)\nfor v in hogMatrix:\n\tprint(v.shape)\n\tprint(v[:18])\nexit()\n\nimport cupy as cp\nimport time\n\ndef timeit(method):\n def timed(*args, **kw):\n ts = time.time()\n result = method(*args, **kw)\n te = time.time()\n if 'log_time' in kw:\n name = kw.get('log_name', method.__name__.upper())\n kw['log_time'][name] = int((te - ts) * 1000)\n else:\n print('%r %2.2f ms' % (method.__name__, (te - ts) * 1000))\n return result\n return timed\n\nfrom numba import jit\n\ndef distance_cosine(a, b, lib):\n\tnumerator = lib.dot(a, b)\n\ta_norm = lib.sqrt(lib.sum(a ** 2))\n\tb_norm = lib.sqrt(lib.sum(b ** 2))\n\tdenominator = a_norm * b_norm\n\tresult = 1 - numerator / denominator\n\treturn result\n\n@jit(nopython=True)\ndef cosine_similarity_numba(u:np.ndarray, v:np.ndarray):\n assert(u.shape[0] == v.shape[0])\n uv = 0\n uu = 0\n vv = 0\n for i in range(u.shape[0]):\n uv += u[i]*v[i]\n uu += u[i]*u[i]\n vv += v[i]*v[i]\n cos_theta = 1\n if uu!=0 and vv!=0:\n cos_theta = uv/np.sqrt(uu*vv)\n return cos_theta\n\ndef distance_cosine_np(a, b):\n\tnumerator = np.dot(a, b)\n\ta_norm = np.sqrt(np.sum(a ** 2))\n\tb_norm = np.sqrt(np.sum(b ** 2))\n\tdenominator = a_norm * b_norm\n\tresult = 1 - numerator / denominator\n\treturn result\n\n@timeit\ndef distance_cosine_for(A, B, f, lib):\n\tfor i in range(A.shape[0]):\n\t\ta = A[i]\n\t\tb = B[i]\n\t\tf(a, b)\n\ndef generate(lib, size):\n\treturn lib.array([lib.random.uniform(size = 2048) for i in range(size)]), lib.array([lib.random.uniform(size = 2048) for i in range(size)])\n\nwith cp.cuda.Device(0):\n\tfor lib in ['np', 'cp']:\n\t\tprint(lib)\n\t\tprint('generating...')\n\t\tA, B = generate(lib, 512)\n\t\tprint('calculating...')\n\t\tdistance_cosine_for(A, B, distance_cosine, lib)\n# A, B = generate(np, 512)\n# distance_cosine_for(A, B, distance_cosine_np, 'np')\nexit()\n\n# optimized, complexity is n * (n-1) / 2\ndef getSimilaritiesMatrix(matrix):\n\tnumberOfVectors = matrix.shape[0]\n\tresult = np.zeros([numberOfVectors, numberOfVectors])\n\tfor i in range(numberOfVectors):\n\t\tfor j in range(i + 1, numberOfVectors):\n\t\t\tvector1 = matrix[i]\n\t\t\tvector2 = matrix[j]\n\t\t\t# print(vector1.shape, vector1)\n\t\t\t# print(vector2.shape, vector2)\n\t\t\t# exit()\n\t\t\td = distance.cosine(vector1, vector2)\n\t\t\tresult[i][j] = d\n\t\t\tresult[j][i] = d\n\treturn 1 - result\n\ndef getMetrics(similarities_matrix):\n\treturn np.array([np.mean(similarities_matrix)])\n\ndataset_root_folder = '../aff-wild2'\nfiles = getFiles(dataset_root_folder)\nprint('loaded files')\n\naff_wild_videos = Objects(files, ['mp4', 'avi'])\naff_wild_videos_iter = iter(aff_wild_videos)\nall_objects = [object for object in aff_wild_videos_iter]\nprint('loaded objects')\n\ndef getHogSimilaritiesMatrix(images):\n\treturn getSimilaritiesMatrix(getHogMatrix(images))\n\ndef getMetricsDeltas(images1, images2):\n\tmetrics1 = getMetrics(getHogSimilaritiesMatrix(images1))\n\tmetrics2 = getMetrics(getHogSimilaritiesMatrix(images2))\n\treturn metrics2 / metrics1\n\nimport random\n\ndef experiment(attempts = 1):\n\tresults = np.empty([attempts])\n\tfor i in 
range(attempts):\n\t\tobjects = random.sample(all_objects, 100)\n\t\t# print(objects)\n\t\timages = list(map(readObject, objects))\n\t\tprint('images:', [t for t in map(type, images)])\n\t\tresized_images = list(map(resize, images))\n\t\tresults[i] = getMetricsDeltas(images, resized_images)\n\treturn results\n\nimport os\n\ndef experiment1():\n\tprocessed = 0\n\tpaths = {}\n\tshapes = {}\n\tfor object in all_objects:\n\t\tpath = os.path.normcase(object['dir'] + '/' + object['name'] + '.' + object['extension'])\n\t\tprocessed += 1\n\t\tif processed % 100000 == 0:\n\t\t\tprint('processed', processed, 'images')\n\t\tif path in paths:\n\t\t\tcontinue\n\t\tpaths[path] = True\n\t\timage = readObject(object)\n\t\tshape = str(image.shape)\n\t\tif shape in shapes:\n\t\t\tcontinue\n\t\tshapes[shape] = True\n\t\thog = getHog(image)\n\t\tif hog.shape[0] != 2304:\n\t\t\tprint(hog.shape, object)\n\ncProfile.run('experiment()')","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"446297406","text":"\"\"\"python filter to format json.\n\nborrowed from python module json.tool\"\"\"\nimport argparse\nimport collections\nimport json\nimport sys\nimport io\n\n\ndef main():\n    prog = 'python -m json.tool'\n    description = ('A simple command line interface for json module ' 'to validate and pretty-print JSON objects.')\n    parser = argparse.ArgumentParser(prog=prog, description=description)\n    parser.add_argument('infile', nargs='?', type=argparse.FileType(), help='a JSON file to be validated or pretty-printed')\n    parser.add_argument('outfile', nargs='?', type=argparse.FileType('w'), help='write the output of infile to outfile')\n    parser.add_argument('--sort-keys', action='store_true', default=False, help='sort the output of dictionaries alphabetically by key')\n    parser.add_argument('--decode-unicode', action='store_true', default=False, help='decode the encoded unicode data')\n    options = parser.parse_args()\n\n    infile = options.infile or io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8')\n    outfile = options.outfile or sys.stdout\n    sort_keys = options.sort_keys\n    decode_unicode = not options.decode_unicode\n    with infile:\n        try:\n            if sort_keys:\n                obj = json.load(infile)\n            else:\n                obj = json.load(infile, object_pairs_hook=collections.OrderedDict)\n        except ValueError as e:\n            raise SystemExit(e)\n    with outfile:\n        json.dump(obj, outfile, sort_keys=sort_keys, indent=4, ensure_ascii=decode_unicode)\n        outfile.write('\\n')\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"python/format_json.py","file_name":"format_json.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"335207707","text":"#Fade Khalifah Rosyad\n#Python script built to send POST requests and receive GET responses for Genesis IVA\n\n#Two request imports are used here: requests from Python and request from Flask\n#They differ in how they are used, which is why requests is needed in the helper functions and request in the routes\nimport requests, json\nfrom flask import Flask, render_template, request, json\nfrom datetime import datetime\n\ndef get_timestamp():\n    return datetime.now().strftime((\"%d-%m-%Y %H:%M:%S\"))\n\nKVStoreEndpoint = 'http://127.0.0.1:5000/api/v1/kvstore' #Endpoint URL for the Genesis IVA KVStore section\nServicesEndpoint = 'http://127.0.0.1:5000/api/v1/services' #Endpoint URL for the Genesis IVA Services section\n
headers = {'Content-type': 'application/json'} # Header accepted by Genesis IVA\n\n#Function to receive data from Genesis IVA with the GET method (Services)\ndef gettingdatas():\n    terimas = requests.get(ServicesEndpoint, headers=headers)\n    return terimas\n\n#Function to send data to Genesis IVA with the POST method (Services)\ndef postingdatas(service):\n    data = {'service_name': service}\n    kirims = requests.post(ServicesEndpoint, data=json.dumps(data), headers=headers)\n    return kirims\n\n##Function to receive data from Genesis IVA with the GET method (KeyValue)\n#def gettingdata():\n#    terima = requests.get(KVStoreEndpoint, headers=headers)\n#    return terima\n\n#Function to send data to Genesis IVA with the POST method (KeyValue)\ndef postingdata(key, value):\n    data = {'key': key, 'value': value}\n    kirim = requests.post(KVStoreEndpoint, data=json.dumps(data), headers=headers)\n    return kirim\n\napp = Flask(__name__)\n\n@app.route('/ivaconfig', methods = ['POST', 'GET'])\ndef ivaconfig():\n    if request.method == 'POST': \n        #iterate to send many service entries at once\n        i = 0\n        while i < 100:\n            service = request.form['service' + str(i)]\n            kirimins = postingdatas(service)\n            i += 1\n        timestamp = get_timestamp()\n        return render_template('home.html', service = service, timestamp = timestamp)\n\n    return render_template('home.html')\n    \n@app.route('/keyvalue', methods = ['POST', 'GET'])\ndef keyvalue():\n    #if request.method == 'GET':\n    terima = requests.get(KVStoreEndpoint, headers=headers) # connect to the Genesis API and fetch its data\n    batas = json.loads(terima.text) #extract the result of that GET as JSON\n    j = 0\n    panjang = len(batas)#determine the length of the whole JSON array received\n    listkey = [] #list to hold the keys\n    listvalue = [] #list to hold the values\n    while j <= (panjang-1):\n        tampil = json.loads(terima.text)[j] #show the j-th entry of the received array \n        keygen = tampil['key']\n        valuegen = tampil['value']\n        listkey.append(keygen)\n        listvalue.append(valuegen)\n        j+=1\n\n    #return render_template('home2.html')\n    if request.method == 'POST': \n        #iterate to send many key/value pairs at once\n        i = 0\n        while i < 1: #Limit: currently only a single key/value pair is sent per submit\n            key = request.form.get('key'+ str(i))\n            value = request.form['value'+ str(i)]\n            kirimin = postingdata(key, value)\n            i += 1\n        #timestamp = get_timestamp()\n        return render_template('home2.html', listkey=listkey, listvalue=listvalue)\n\n    return render_template('home2.html', listkey=listkey, listvalue=listvalue)\n\nif __name__ == '__main__':\n    app.run(debug = True, port= 2000)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"14996075","text":"#!/usr/bin/env python3 -B\nimport unittest\n\nfrom cromulent import vocab\n\nfrom tests import TestSalesPipelineOutput, classified_identifier_sets\n\nvocab.add_attribute_assignment_check()\n\nclass PIRModelingTest_AR154(TestSalesPipelineOutput):\n    def get_services_override(self):\n        return {\n            'problematic_records': {\n                \"lots\": [\n                    [[\"F-A791\", \"0069\", \"1784-06-21\"], \"multiple values may have been used in present_loc_inst: F-A791 0069 (1784-06-21)\"]\n                ]\n            }\n        }\n\n    def test_modeling_ar154(self):\n        '''\n        AR-154: Update URI for problematic records\n        '''\n        output = self.run_pipeline('ar154')\n        activities = 
output['model-sale-activity']\n \n auction = activities['tag:getty.edu,2019:digital:pipeline:REPLACE-WITH-UUID:sales#AUCTION,F-A791,0069,1784-06-21']\n refs = auction.get('referred_to_by', [])\n problems = [r for r in refs if r.get('classified_as', [{}])[0].get('_label') == 'Problematic Record']\n self.assertEqual(len(problems), 1)\n problem = problems[0]\n cl = problem['classified_as'][0]\n self.assertEqual(cl['id'], 'https://data.getty.edu/local/thesaurus/problematic-record')\n\n\nif __name__ == '__main__':\n unittest.main()\n\n\n\n\n","sub_path":"tests/test_sales_issue_ar154.py","file_name":"test_sales_issue_ar154.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"518090362","text":"\"\"\"OCSS URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url,include\nfrom django.contrib import admin\nfrom onlinecourse import views\n\nurlpatterns = [\n url(r'^$',views.login),\n url(r'^bbs/',include('bbs.urls')),\n url(r'^register/',views.register),\n url(r'^home/',views.home),\n url(r'^test/',include('onlinecourse.urls')),\n url(r'^test01/',views.test01),\n url(r'^admin/', admin.site.urls),\n url(r'^index/',views.index),\n url(r'^stu_add/',views.stu_add),\n url(r'^tea_add/',views.tea_add),\n url(r'^cou_add/',views.cou_add),\n url(r'^stu_modify/',views.stu_modify),\n url(r'^tea_modify/',views.tea_modify),\n url(r'^cou_modify/',views.cou_modify),\n url(r'^help/',views.help),\n\n # url(r'^test/',views.test),\n url(r'^.*$',views.error),]\n","sub_path":"OCSS/OCSS/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"63801512","text":"import random\n\nuserFile = input(\"Which quiz would you like to take: \")\nquiz = open(userFile, \"r\")\nquestions = []\nfor question in quiz:\n questions.append(question)\nscore = 0\nwhile True:\n nextQuestion = random.choice(questions)\n nextQuestion = nextQuestion.split(\", \")\n answer = input(nextQuestion[0])\n if answer == nextQuestion[1][:-1]:\n score = score + 1\n print(\"Great\")\n else:\n print(\"Better luck next time\")\n print(\"Score: \" + str(score))\n","sub_path":"quizPrograms/quizProgram.py","file_name":"quizProgram.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"447051899","text":"from django.conf.urls import url\n\nfrom . 
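# For context on the quiz program above (a hedged sketch, not part of that file):
# it expects a plain-text quiz file with one "question, answer" pair per line, and
# the [:-1] slice strips the answer's trailing newline. A hypothetical quiz file:
#
#     What is the capital of France, Paris
#     2 + 2, 4
#
# The final line also needs a trailing newline, or [:-1] clips the answer's last character.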
import views\n\napp_name = 'users'\n\nurlpatterns = [\n\n url(r'^register/$',views.user_register,name=\"user_register\"),\n url(r'^login/$',views.user_login,name=\"user_login\"),\n url(r'^logout/$',views.user_logout,name=\"user_logout\"),\n\n\n url(r'^address_add/$',views.address_add,name=\"address_add\"),\n url(r'^address_list/$',views.address_list,name=\"address_list\"),\n\n\n]","sub_path":"shoppintest/users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"407879052","text":"from flask import request\nfrom flask_restplus import Resource\nfrom app.main.util.decorator import auth_token_required\nfrom app.main.services.auth_service import AuthService\nfrom app.main.util.dto import AuthDto\n\napi = AuthDto.api\nprovide_auth_token_dto = AuthDto.provide_auth_token\n\n@api.route(\"/login\")\nclass Login(Resource):\n @api.doc(\"log in a user\")\n @api.expect(provide_auth_token_dto, validate=True)\n def post(self):\n post_data = request.json\n\n if post_data:\n return AuthService.provide_auth_token(post_data)\n\n@api.route(\"/logout\")\nclass Logout(Resource):\n @auth_token_required\n @api.doc(\"log out a user\")\n def post(self):\n authorization_header = request.headers.get(\"Authorization\")\n\n return AuthService.dispose_auth_token(authorization_header.split(\" \")[1])\n\n","sub_path":"app/main/controller/auth_controller.py","file_name":"auth_controller.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"642344219","text":"import time\nimport json\nimport requests\nfrom six import moves\n\n\nclass Singularity:\n def __init__(self, config):\n self.config = config\n\n def create_request_body(self):\n request_body = {\n \"id\": self.config['container_name'],\n \"owners\": [self.config.get('singularity_email', '')],\n \"rackSensitive\": False,\n \"loadBalanced\": False,\n \"skipHealthchecks\": True,\n \"requestType\": \"SERVICE\",\n \"requiredSlaveAttributes\": self.config.get('host_attributes', {})\n }\n\n if self.config['slave_placement']:\n request_body['slavePlacement'] = self.config.get('slave_placement', '')\n\n if self.config[\"cron_schedule\"]:\n request_body['schedule'] = self.config.get('cron_schedule', '')\n request_body['scheduleType'] = 'CRON'\n request_body['requestType'] = 'SCHEDULED'\n\n return request_body\n\n def create_deploy_body(self):\n deploy_body = {\n \"requestId\": self.config['container_name'],\n \"unpauseOnSuccessfulDeploy\": True,\n \"message\": \"Initiated by {}\".format(self.config.get('singularity_email', '')),\n \"deploy\": {\n \"requestId\": self.config['container_name'],\n \"id\": \"\",\n \"command\": self.config.get('entrypoint', None), # set command equal to entrypoint\n \"arguments\": self.config.get(\"arguments\", []),\n \"containerInfo\": {\n \"type\": \"DOCKER\",\n \"volumes\": self.config.get('volumes', []),\n \"docker\": {\n \"forcePullImage\": self.config.get(\"force_pull_image\", False),\n \"privileged\": self.config.get(\"privileged\", False),\n \"network\": self.config[\"network_mode\"],\n \"portMappings\": self.config[\"port_mappings\"],\n \"image\": self.config[\"image\"],\n \"parameters\": self.config.get(\"docker_params\", {})\n }\n },\n \"hostname\": self.config[\"container_name\"],\n \"env\": self.config.get('environment', {}),\n \"resources\": {\n \"cpus\": self.config.get('cpus', ''),\n \"memoryMb\": 
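# A descriptive note on the Singularity resources block being assembled here (a
# hedged reading of the code): cpus is a fractional core count, memoryMb and
# diskMb are megabytes, and the `or 1` fallback below guarantees numPorts is at
# least 1 whenever num_ports is missing or empty in the compose config.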
self.config.get('memory', ''),\n                    \"diskMb\": self.config.get('disk', ''),\n                    \"numPorts\": self.config.get('num_ports', '') or 1\n                },\n                \"skipHealthchecksOnDeploy\": True\n            }\n        }\n        return deploy_body\n\n    def deploy(self):\n        endpoint = self.config.get('singularity_endpoint', '')\n        container_name = self.config[\"container_name\"]\n        deploy_id = str(int(time.time()))\n\n        yn = moves.input(\"Are you sure, you want to deploy '{}' Singularity (y/n)? \".format(container_name))\n        yn = yn.lower()\n        if yn not in ['yes', 'y']:\n            return False\n\n        print(\"Creating deploy request for '{}'\".format(container_name))\n        request_body = self.create_request_body()\n        print(json.dumps(request_body, indent=4))\n\n        resp = requests.post(endpoint + '/requests', data=json.dumps(request_body),\n                             headers={'Content-Type': 'application/json'})\n\n        if resp and resp.status_code == 200:\n            status_code = 400\n            print(\"Deploying '{}'..\".format(deploy_id))\n            deploy_body = self.create_deploy_body()\n            deploy_body['deploy']['id'] = deploy_id\n            print(json.dumps(deploy_body, indent=4))\n            while status_code != 200:\n                time.sleep(2)\n                resp = requests.post(endpoint + '/deploys', data=json.dumps(deploy_body),\n                                     headers={'Content-Type': 'application/json'})\n                status_code = resp.status_code\n\n            print(\"Deployed '{}' successfully.\".format(deploy_id))\n            print(json.dumps(resp.json(), indent=4))\n","sub_path":"compose_paas/platform/singularity.py","file_name":"singularity.py","file_ext":"py","file_size_in_byte":4021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"182686339","text":"# encoding: utf-8\n\"\"\" memcache\n\nhttps://pymemcache.readthedocs.io/en/latest/getting_started.html\n\n\"\"\"\n\nfrom pymemcache.client.base import Client\n\nclient = Client(('localhost', 11211))\nclient.set('some_key', 'some_value')\nresult = client.get('some_key')\n\n\n","sub_path":"memcache/01_quick_start.py","file_name":"01_quick_start.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"85887847","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[10]:\n\n\nimport pandas as pd\n\nyear = pd.Period('2016')\nyear\nmonth = pd.Period('2016-1',freq='M')\nmonth\n\n\n# In[2]:\n\n\ndir(year)\n\n\n# In[7]:\n\n\nyear.start_time\nyear.end_time\n\n\n# In[11]:\n\n\nmonth + 1\n\n\n# In[12]:\n\n\nyear + 1\n\n\n# In[13]:\n\n\nhour =pd.Period('2016-1-31 22:00:00',freq='H')\nhour\n\n\n# In[15]:\n\n\nhour - 1\nhour + 1\n\n\n# In[21]:\n\n\nquarter = pd.Period('2017Q1')\nquarter1 = pd.Period('2017Q1',freq = 'Q-JAN') #For a fiscal year that starts in a different month\n\nquarter1\n\n\n# In[22]:\n\n\nquarter + 1\nquarter1.start_time\n\n\n# In[25]:\n\n\nquarter.asfreq('M',how='end')\n\n\n# In[30]:\n\n\nidx = pd.period_range('2011','2017',freq='Q-Jan')\nidx\n\n\n# In[32]:\n\n\nidx[0].start_time\n\n\n# In[34]:\n\n\nidx1 = pd.period_range('2011',periods=5,freq='Q-Jan')\nidx1\n\n\n# In[42]:\n\n\nimport pandas as pd\n\ndf = pd.read_csv('E:\\python-ethans\\Pandas-Practise\\wallmart.csv')\ndf\n\n\n# In[56]:\n\n\ndf = df.reset_index(drop=True)\ndf\n\n\n# In[50]:\n\n\ndf.index\n","sub_path":"Pandas - Period and PeriodIndex.py","file_name":"Pandas - Period and PeriodIndex.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"434866747","text":"def collatz(n):\r\n    
result=0\r\n while n>1:\r\n if n%2==0:\r\n result= n/2\r\n else:\r\n result=3*n+1\r\n print(int(result))\r\n n=result\r\n if result==1:\r\n break\r\nn=int(input())\r\ncollatz(n)\r\n","sub_path":"collatz_seq.py","file_name":"collatz_seq.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"631124168","text":"from chainermn.communicators import _memory_utility\nfrom chainermn.communicators import mpi_communicator_base\n\n\nclass DummyCommunicator(mpi_communicator_base.MpiCommunicatorBase):\n\n \"\"\"Dummy communicator that does not communicate at all.\n\n This class is intended to measure the overhead of packing and unpacking.\n This class does not pass the tests.\n \"\"\"\n\n def __init__(self, mpi_comm):\n super(DummyCommunicator, self).__init__(mpi_comm)\n\n self.gpu_buffer_a = _memory_utility.DeviceMemory()\n\n def multi_node_mean_grad(self, model, zero_fill=False):\n params = _memory_utility.extract_params_set_grad(model, zero_fill)\n itemsize = 4\n n_elems_total = _memory_utility.count_grad_elements(params,\n zero_fill)\n n_bytes_total = n_elems_total * itemsize\n self.gpu_buffer_a.assign(n_bytes_total)\n\n _memory_utility.pack_params(\n params, 'grad', self.gpu_buffer_a, zero_fill)\n\n _memory_utility.unpack_params(\n params, 'grad', self.gpu_buffer_a, zero_fill)\n","sub_path":"chainermn/communicators/dummy_communicator.py","file_name":"dummy_communicator.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"633460444","text":"from django.urls import path, include\nfrom rest_framework.routers import DefaultRouter\n\nfrom profileApi import views\n\nrouter = DefaultRouter()\n# router.register('hello-viewset', views.HelloViewSets, basename='hello-viewset')\nrouter.register('profile', views.UserProfileViewSet)\nrouter.register('feed', views.UserProfileFeedViewSet)\n\nurlpatterns = [\n # path('hello-view/', views.HelloApiView.as_view(), name=\"hello-view\"),\n # path('hello-view/', HelloApiView.as_view(), name=\"hello-view\"),\n # path('', include(router.urls), name=\"hello-viewset-uri\"),\n path('', include(router.urls)),\n path('login/', views.UserLoginApiViews.as_view(), )\n\n]\n","sub_path":"profileApi/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"28848415","text":"from django.urls import path\nfrom . 
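# A note on the DefaultRouter registrations in profileApi/urls.py above (standard
# Django REST framework behaviour): each register() call auto-generates the usual
# viewset routes, e.g. router.register('profile', views.UserProfileViewSet)
# exposes list/create at /profile/ and retrieve/update/destroy at /profile/{pk}/,
# and DefaultRouter (unlike SimpleRouter) also adds a browsable API root view.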
import views\n\nurlpatterns = [\n path('', views.speakers_home, name='speakers_home'),\n path('/speakers.html', views.nominate_others),\n path('/form2.html', views.nominate_yourself),\n path('/blog.html', views.blogs),\n path('/about_us.html', views.about_us),\n path('/contact.html', views.contact),\n\n \n path('/speakers.html/form2.html', views.nominate_yourself),\n path('/speakers.html/speakers.html', views.nominate_others),\n path('/speakers.html/blog.html', views.blogs),\n path('/speakers.html/about_us.html', views.about_us),\n path('/contact.html', views.contact),\n\n \n path('/Alana_Golmei', views.speakerDesc1, name='speaker_description1'),\n path('/Anamika_Barua', views.speakerDesc2, name='speaker_description2'),\n path('/Arup_Kumar_Dutta', views.speakerDesc3, name='speaker_description3'),\n path('/Binita_Jain', views.speakerDesc4, name='speaker_description4'),\n path('/Milin_Dutta', views.speakerDesc5, name='speaker_description5'),\n path('/Zoma_Sailo', views.speakerDesc6, name='speaker_description6'),\n path('/Pragnya_Ramjee', views.speakerDesc7, name='speaker_description7'),\n\n path('/Seema_Biswas', views.speakerDesc8, name='speaker_description8'),\n path('/Uddhab_Bharali', views.speakerDesc9, name='speaker_description9'),\n path('/Sankara_Subramaniam', views.speakerDesc10, name='speaker_description10'),\n path('/Hasina_Kharbhih', views.speakerDesc11, name='speaker_description11'),\n path('/Sonjoy_Hazarika', views.speakerDesc12, name='speaker_description12'),\n path('/Ravindranath_Ravi', views.speakerDesc13, name='speaker_description13'),\n\n path('/Aashish_Chandratreya', views.speakerDesc14, name='speaker_description14'),\n path('/Aditya_Gupta', views.speakerDesc15, name='speaker_description15'),\n path('/Bhagvan_Kommadi', views.speakerDesc16, name='speaker_description16'),\n path('/Bidisha_Som', views.speakerDesc17, name='speaker_description17'),\n path('/Nisha_Bora', views.speakerDesc18, name='speaker_description18'),\n path('/Prabhagaran', views.speakerDesc19, name='speaker_description19'),\n path('/Rudy_Wallang', views.speakerDesc20, name='speaker_description20'),\n path('/Seema_Gupta', views.speakerDesc21, name='speaker_description21'),\n path('/Shiva_Sah', views.speakerDesc22, name='speaker_description22'),\n \n path('/speakers.html/Alana_Golmei', views.speakerDesc1, name='speaker_description1'),\n path('/speakers.html/Anamika_Barua', views.speakerDesc2, name='speaker_description2'),\n path('/speakers.html/Arup_Kumar_Dutta', views.speakerDesc3, name='speaker_description3'),\n path('/speakers.html/Binita_Jain', views.speakerDesc4, name='speaker_description4'),\n path('/speakers.html/Milin_Dutta', views.speakerDesc5, name='speaker_description5'),\n path('/speakers.html/Zoma_Sailo', views.speakerDesc6, name='speaker_description6'),\n path('/speakers.html/Pragnya_Ramjee', views.speakerDesc7, name='speaker_description7'),\n\n path('/speakers.html/Seema_Biswas', views.speakerDesc8, name='speaker_description8'),\n path('/speakers.html/Uddhab_Bharali', views.speakerDesc9, name='speaker_description9'),\n path('/speakers.html/Sankara_Subramaniam', views.speakerDesc10, name='speaker_description10'),\n path('/speakers.html/Hasina_Kharbhih', views.speakerDesc11, name='speaker_description11'),\n path('/speakers.html/Sonjoy_Hazarika', views.speakerDesc12, name='speaker_description12'),\n path('/speakers.html/Ravindranath_Ravi', views.speakerDesc13, name='speaker_description13'),\n\n path('/speakers.html/Aashish_Chandratreya', views.speakerDesc14, name='speaker_description14'),\n 
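    # A hedged refactoring sketch (not original code): the hard-coded per-person
    # routes in this list could collapse into one dynamic route backed by a
    # hypothetical slug-based view, e.g.
    #     path('/speakers.html/<slug:name>', views.speaker_detail),
    # where speaker_detail(request, name) looks the speaker up by slug.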
path('/speakers.html/Aditya_Gupta', views.speakerDesc15, name='speaker_description15'),\n path('/speakers.html/Bhagvan_Kommadi', views.speakerDesc16, name='speaker_description16'),\n path('/speakers.html/Bidisha_Som', views.speakerDesc17, name='speaker_description17'),\n path('/speakers.html/Nisha_Bora', views.speakerDesc18, name='speaker_description18'),\n path('/speakers.html/Prabhagaran', views.speakerDesc19, name='speaker_description19'),\n path('/speakers.html/Rudy_Wallang', views.speakerDesc20, name='speaker_description20'),\n path('/speakers.html/Seema_Gupta', views.speakerDesc21, name='speaker_description21'),\n path('/speakers.html/Shiva_Sah', views.speakerDesc22, name='speaker_description22'),\n\n path('/about_us.html/Jaikishan_Mansukhani', views.memberDesc1, name='member_description1'),\n path('/about_us.html/Anvita_Kodru', views.memberDesc2, name='member_description2'),\n path('/about_us.html/Sreesiddesh_Bhavanasi', views.memberDesc3, name='member_description3'),\n path('/about_us.html/Shivangi_Kumar', views.memberDesc4, name='member_description4'),\n path('/about_us.html/Vishwaprasanna_Hariharan', views.memberDesc5, name='member_description5'),\n path('/about_us.html/Samarth_Saraswat', views.memberDesc6, name='member_description6'),\n path('/about_us.html/Aarya_Shrivastava', views.memberDesc7, name='member_description7'),\n path('/about_us.html/Amey_Varhade', views.memberDesc8, name='member_description8'),\n path('/about_us.html/Anindya_Rajan', views.memberDesc9, name='member_description9'),\n path('/about_us.html/Anisha_Khati', views.memberDesc10, name='member_description10'),\n path('/about_us.html/Ankit_Raj', views.memberDesc11, name='member_description11'),\n path('/about_us.html/Anushka_Anand', views.memberDesc12, name='member_description12'),\n path('/about_us.html/Anushka_Srivastava', views.memberDesc13, name='member_description13'),\n path('/about_us.html/Arpita_Mohapatra', views.memberDesc14, name='member_description14'),\n path('/about_us.html/Ayush_Srivastava', views.memberDesc15, name='member_description15'),\n path('/about_us.html/Digisha_Verma', views.memberDesc16, name='member_description16'),\n path('/about_us.html/Emily_Huiling', views.memberDesc17, name='member_description17'),\n path('/about_us.html/Gourav_Kumar', views.memberDesc18, name='member_description18'),\n path('/about_us.html/Govind_Singh', views.memberDesc19, name='member_description19'),\n path('/about_us.html/Jaideep_Buksagarmath', views.memberDesc20, name='member_description20'),\n path('/about_us.html/Janhavi_Lande', views.memberDesc21, name='member_description21'),\n path('/about_us.html/Lalika_Laya_K', views.memberDesc22, name='member_description22'),\n path('/about_us.html/Miloni_Patel', views.memberDesc23, name='member_description23'),\n path('/about_us.html/Monalisha_Majumder', views.memberDesc24, name='member_description24'),\n path('/about_us.html/Niladri_Sarkar', views.memberDesc25, name='member_description25'),\n path('/about_us.html/Nishant', views.memberDesc26, name='member_description26'),\n path('/about_us.html/Nishtha_Sharma', views.memberDesc27, name='member_description27'),\n path('/about_us.html/Pragnya_Ramjee', views.memberDesc28, name='member_description28'),\n path('/about_us.html/Pratyanshu_Raj_Singh', views.memberDesc29, name='member_description29'),\n path('/about_us.html/Ritik_Singh', views.memberDesc30, name='member_description30'),\n path('/about_us.html/Sai_Sreyas_Ray', views.memberDesc31, name='member_description31'),\n path('/about_us.html/Shatakshi_Kaushal', 
views.memberDesc32, name='member_description32'),\n path('/about_us.html/Shiva_Sah', views.memberDesc33, name='member_description33'),\n path('/about_us.html/Sudarshan_Birla', views.memberDesc34, name='member_description34'),\n path('/about_us.html/Titiksha', views.memberDesc35, name='member_description35'),\n\n\n # path('add_speaker', views.addSpeaker),\n]\n","sub_path":"Tedwebsite/speakers/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":7664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"185920595","text":"# Guess Game\nimport random\n\nhighest = 1000\nanswer = random.randint(1, highest)\niGuess = 0\n\nprint(\"Please guess a number between 1 and {}: \".format(highest))\nguess = int(input())\n\nif guess != answer:\n while guess != answer:\n if iGuess == 10:\n print(\"Exceeded number of guesses\")\n break\n\n if guess == 0:\n break\n elif guess < answer:\n print(\"Please guess higher. Enter 0 to quit\")\n iGuess += 1\n elif guess > answer:\n print(\"Please guess lower. Enter 0 to quit\")\n iGuess += 1\n elif guess == answer:\n print(\"Well done, you guessed it\")\n break\n\n guess = int(input())\nelse:\n print(\"You got it first time\")\n","sub_path":"Challenge_Program_Flow_Control/Challenge_While_Loop.py","file_name":"Challenge_While_Loop.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"622961567","text":"# This is the first draft of the GUI - working, but with some obvious flaws in functionality\n\nfrom rubik.cube import Cube\nfrom tkinter import *\nfrom tkinter import messagebox\n\nfrom Solver import SolveCube\nfrom rubik_solver import utils\n\n# import RPi.GPIO as GPIO\nfrom datetime import *\nimport time\n\nroot = Tk() # Draws the window\nedit_mode = False\ncurrent_color = \" \"\n\n# These next lines define size, position and title of tkinter window\nroot.geometry('1400x740+10+10') # width x height + xpos + ypos\nroot.title(\"Cube Solver\")\n\n# Frame and its contents\nmainFrame = Frame(root, width=200, height=200)\nmainFrame.grid(row=0, column=0, padx=10, pady=2)\n\ncubeCanvas = Canvas(mainFrame, width=1000, height=720, bg='black')\ncubeCanvas.grid(row=0, column=0, padx=0, pady=2)\n# WOWOOBBWWRGRYBBORRBYGRGBYWRGBOBYWOWGRGOWYOYYYYWGORGGRB\n# OOOOOOOOOGGGWWWBBBYYYGGGWWWBBBYYYGGGWWWBBBYYYRRRRRRRRR\ncube_layout = \"OOOOOOOOOGGGWWWBBBYYYGGGWWWBBBYYYGGGWWWBBBYYYRRRRRRRRR\"\nc = Cube(cube_layout)\n\n\ndef makeMove(move):\n print(\"Making move\")\n c.sequence(move)\n updateCubeColours()\n\n# R U Fi dI bI r uI b d\n\ndef performAlgorithm(algo):\n for move in algo.split():\n try:\n c.sequence(move)\n print(c)\n cubeCanvas.after(100, updateCubeColours())\n cubeCanvas.update_idletasks()\n except:\n print(\"Cannot Perform Move '\" + move + \"'\")\n\n\n# Converts Cubie To Colour for Digital Cube\ndef colourFromLetter(value):\n try:\n colors = {'G': 'green', 'R': 'red', 'B': 'blue', 'O': 'orange', 'W': 'White', 'Y': 'Yellow'}\n print(colors[value])\n return colors[value]\n except:\n messagebox.showinfo(\"Say Hello\", \"Hello World\")\n return None\n\n# Updates cube colours from c\ndef updateCubeColours():\n # Green Face (Layer 1)\n cubeCanvas.itemconfigure(green_00, fill=colourFromLetter(c.get_piece(-1, 1, -1).colors[0]))\n cubeCanvas.itemconfigure(green_01, fill=colourFromLetter(c.get_piece(-1, 1, 0).colors[0]))\n cubeCanvas.itemconfigure(green_02, fill=colourFromLetter(c.get_piece(-1, 1, 1).colors[0]))\n # Green Face 
(Layer 2)\n cubeCanvas.itemconfigure(green_10, fill=colourFromLetter(c.get_piece(-1, 0, -1).colors[0]))\n cubeCanvas.itemconfigure(green_11, fill=colourFromLetter(c.get_piece(-1, 0, 0).colors[0]))\n cubeCanvas.itemconfigure(green_12, fill=colourFromLetter(c.get_piece(-1, 0, 1).colors[0]))\n # Green Face (Layer 3)\n cubeCanvas.itemconfigure(green_20, fill=colourFromLetter(c.get_piece(-1, -1, -1).colors[0]))\n cubeCanvas.itemconfigure(green_21, fill=colourFromLetter(c.get_piece(-1, -1, 0).colors[0]))\n cubeCanvas.itemconfigure(green_22, fill=colourFromLetter(c.get_piece(-1, -1, 1).colors[0]))\n\n # White Face (Layer 1)\n cubeCanvas.itemconfigure(white_00, fill=colourFromLetter(c.get_piece(-1, 1, 1).colors[2]))\n cubeCanvas.itemconfigure(white_01, fill=colourFromLetter(c.get_piece(0, 1, 1).colors[2]))\n cubeCanvas.itemconfigure(white_02, fill=colourFromLetter(c.get_piece(1, 1, 1).colors[2]))\n # White Face (Layer 2)\n cubeCanvas.itemconfigure(white_10, fill=colourFromLetter(c.get_piece(-1, 0, 1).colors[2]))\n cubeCanvas.itemconfigure(white_11, fill=colourFromLetter(c.get_piece(0, 0, 1).colors[2]))\n cubeCanvas.itemconfigure(white_12, fill=colourFromLetter(c.get_piece(1, 0, 1).colors[2]))\n # White Face (Layer 3)\n cubeCanvas.itemconfigure(white_20, fill=colourFromLetter(c.get_piece(-1, -1, 1).colors[2]))\n cubeCanvas.itemconfigure(white_21, fill=colourFromLetter(c.get_piece(0, -1, 1).colors[2]))\n cubeCanvas.itemconfigure(white_22, fill=colourFromLetter(c.get_piece(1, -1, 1).colors[2]))\n\n # Orange Face (Layer 1)\n cubeCanvas.itemconfigure(orange_00, fill=colourFromLetter(c.get_piece(-1, 1, -1).colors[1]))\n cubeCanvas.itemconfigure(orange_01, fill=colourFromLetter(c.get_piece(0, 1, -1).colors[1]))\n cubeCanvas.itemconfigure(orange_02, fill=colourFromLetter(c.get_piece(1, 1, -1).colors[1]))\n # Orange Face (Layer 2)\n cubeCanvas.itemconfigure(orange_10, fill=colourFromLetter(c.get_piece(-1, 1, 0).colors[1]))\n cubeCanvas.itemconfigure(orange_11, fill=colourFromLetter(c.get_piece(0, 1, 0).colors[1]))\n cubeCanvas.itemconfigure(orange_12, fill=colourFromLetter(c.get_piece(1, 1, 0).colors[1]))\n # Orange Face (Layer 3)\n cubeCanvas.itemconfigure(orange_20, fill=colourFromLetter(c.get_piece(-1, 1, 1).colors[1]))\n cubeCanvas.itemconfigure(orange_21, fill=colourFromLetter(c.get_piece(0, 1, 1).colors[1]))\n cubeCanvas.itemconfigure(orange_22, fill=colourFromLetter(c.get_piece(1, 1, 1).colors[1]))\n\n # Red Face (Layer 1)\n cubeCanvas.itemconfigure(red_00, fill=colourFromLetter(c.get_piece(-1, -1, 1).colors[1]))\n cubeCanvas.itemconfigure(red_01, fill=colourFromLetter(c.get_piece(0, -1, 1).colors[1]))\n cubeCanvas.itemconfigure(red_02, fill=colourFromLetter(c.get_piece(1, -1, 1).colors[1]))\n # Red Face (Layer 2)\n cubeCanvas.itemconfigure(red_10, fill=colourFromLetter(c.get_piece(-1, -1, 0).colors[1]))\n cubeCanvas.itemconfigure(red_11, fill=colourFromLetter(c.get_piece(0, -1, 0).colors[1]))\n cubeCanvas.itemconfigure(red_12, fill=colourFromLetter(c.get_piece(1, -1, 0).colors[1]))\n # Red Face (Layer 3)\n cubeCanvas.itemconfigure(red_20, fill=colourFromLetter(c.get_piece(-1, -1, -1).colors[1]))\n cubeCanvas.itemconfigure(red_21, fill=colourFromLetter(c.get_piece(0, -1, -1).colors[1]))\n cubeCanvas.itemconfigure(red_22, fill=colourFromLetter(c.get_piece(1, -1, -1).colors[1]))\n\n # Blue Face (Layer 1)\n cubeCanvas.itemconfigure(blue_00, fill=colourFromLetter(c.get_piece(1, 1, 1).colors[0]))\n cubeCanvas.itemconfigure(blue_01, fill=colourFromLetter(c.get_piece(1, 1, 0).colors[0]))\n 
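# A hedged reading of the coordinate convention used throughout this mapping:
# get_piece(x, y, z) takes coordinates in {-1, 0, 1} (x: left/right, y: down/up,
# z: back/front), and .colors is indexed by axis -- [0] is the sticker facing
# along x, [1] along y, [2] along z. So c.get_piece(1, 1, 1).colors[0] reads the
# right-facing sticker of the up-front-right corner piece.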
cubeCanvas.itemconfigure(blue_02, fill=colourFromLetter(c.get_piece(1, 1, -1).colors[0]))\n # Blue Face (Layer 2)\n cubeCanvas.itemconfigure(blue_10, fill=colourFromLetter(c.get_piece(1, 0, 1).colors[0]))\n cubeCanvas.itemconfigure(blue_11, fill=colourFromLetter(c.get_piece(1, 0, 0).colors[0]))\n cubeCanvas.itemconfigure(blue_12, fill=colourFromLetter(c.get_piece(1, 0, -1).colors[0]))\n # Blue Face (Layer 3)\n cubeCanvas.itemconfigure(blue_20, fill=colourFromLetter(c.get_piece(1, -1, 1).colors[0]))\n cubeCanvas.itemconfigure(blue_21, fill=colourFromLetter(c.get_piece(1, -1, 0).colors[0]))\n cubeCanvas.itemconfigure(blue_22, fill=colourFromLetter(c.get_piece(1, -1, -1).colors[0]))\n\n # Yellow Face (Layer 1)\n cubeCanvas.itemconfigure(yellow_00, fill=colourFromLetter(c.get_piece(1, 1, -1).colors[2]))\n cubeCanvas.itemconfigure(yellow_01, fill=colourFromLetter(c.get_piece(0, 1, -1).colors[2]))\n cubeCanvas.itemconfigure(yellow_02, fill=colourFromLetter(c.get_piece(-1, 1, -1).colors[2]))\n # Yellow Face (Layer 2)\n cubeCanvas.itemconfigure(yellow_10, fill=colourFromLetter(c.get_piece(1, 0, -1).colors[2]))\n cubeCanvas.itemconfigure(yellow_11, fill=colourFromLetter(c.get_piece(0, 0, -1).colors[2]))\n cubeCanvas.itemconfigure(yellow_12, fill=colourFromLetter(c.get_piece(-1, 0, -1).colors[2]))\n # Yellow Face (Layer 3)\n cubeCanvas.itemconfigure(yellow_20, fill=colourFromLetter(c.get_piece(1, -1, -1).colors[2]))\n cubeCanvas.itemconfigure(yellow_21, fill=colourFromLetter(c.get_piece(0, -1, -1).colors[2]))\n cubeCanvas.itemconfigure(yellow_22, fill=colourFromLetter(c.get_piece(-1, -1, -1).colors[2]))\n\n\ndef editMode(edit_mode_label, edit_cube_button):\n global edit_mode, cube_layout\n cubeCanvas.update_idletasks()\n edit_mode = not edit_mode\n if edit_mode:\n print(\"Entering Edit Mode\")\n edit_cube_button.config(text=\"Leave Edit Mode\")\n edit_mode_label.place(x=30, y=20)\n cubeCanvas.itemconfigure(white_color, state='normal')\n cubeCanvas.itemconfigure(red_color, state='normal')\n cubeCanvas.itemconfigure(blue_color, state='normal')\n cubeCanvas.itemconfigure(green_color, state='normal')\n cubeCanvas.itemconfigure(orange_color, state='normal')\n cubeCanvas.itemconfigure(yellow_color, state='normal')\n cubeCanvas.update_idletasks()\n print(flattenCube())\n else:\n print(\"Leaving Edit mode\")\n edit_cube_button.config(text=\"Enter Edit Mode\")\n edit_mode_label.place(x=-40, y=-40)\n cubeCanvas.itemconfigure(white_color, state='hidden')\n cubeCanvas.itemconfigure(red_color, state='hidden')\n cubeCanvas.itemconfigure(blue_color, state='hidden')\n cubeCanvas.itemconfigure(green_color, state='hidden')\n cubeCanvas.itemconfigure(orange_color, state='hidden')\n cubeCanvas.itemconfigure(yellow_color, state='hidden')\n print(flattenCube())\n return not edit_mode\n\n\ndef editColor(event):\n global current_color, edit_mode\n if edit_mode:\n if current_color == \" \":\n current_color = \"white\"\n\n # First Stage Decide What Colour Has Been Selected...\n options = {55: \"white\", 56: \"red\", 57: \"blue\", 58: \"orange\", 59: \"green\", 60: \"yellow\"}\n if event.widget.find_withtag(\"current\")[0] in options:\n current_color = options.get(event.widget.find_withtag(\"current\")[0])\n print(\"Current Color = \" + str(current_color))\n # Default To White...\n else:\n # # Check the value is not in dict\n print(\"You clicked \" + str(event))\n print(\"Current Color = \" + str(current_color))\n current = event.widget.find_withtag(\"current\")[0]\n event.widget.itemconfig(current, fill=current_color)\n 
else:\n print(\"Need to be in edit mode\")\n\n\n# Converts Rectangle To Color Letter (\"green_00\" = \"g\") For use in cube...\ndef convertToLetter(tag):\n color = cubeCanvas.itemcget(tag, \"fill\")\n options = {\"red\": \"R\", \"White\": \"W\", \"Yellow\": \"Y\", \"orange\": \"O\", \"blue\": \"B\", \"green\": \"G\"}\n return str(options.get(color))\n\n\n# Takes the values from each index and returns a string...\n# This will then be used with the solver.\ndef flattenCube():\n cube_string = \"\"\n # First Layer\n\n # Orange green red and blue working\n # Yellow and White not working?\n\n # print(convertToLetter(green_00))\n # print(convertToLetter(orange_00))\n # print(convertToLett(yellow_00))\n\n\n cube_string += str(convertToLetter(orange_00) + convertToLetter(orange_01)+ convertToLetter(orange_02)\n + convertToLetter(orange_10)+ convertToLetter(orange_11)+ convertToLetter(orange_12) + convertToLetter(orange_20)\n + convertToLetter(orange_21) + convertToLetter(orange_22))\n\n # Second Layer\n cube_string += str(convertToLetter(green_00) + convertToLetter(green_01) + convertToLetter(green_02)\n + convertToLetter(white_00) + convertToLetter(white_01) + convertToLetter(white_02) + convertToLetter(blue_00)\n + convertToLetter(blue_01) + convertToLetter(blue_02) + convertToLetter(yellow_00) + convertToLetter(yellow_01)\n + convertToLetter(yellow_02))\n\n # Third Layer\n cube_string += str(convertToLetter(green_10) + convertToLetter(green_11) + convertToLetter(green_12)\n + convertToLetter(white_10) + convertToLetter(white_11) + convertToLetter(white_12) + convertToLetter(blue_10)\n + convertToLetter(blue_11) + convertToLetter(blue_12) + convertToLetter(yellow_10) + convertToLetter(yellow_11)\n + convertToLetter(yellow_12))\n\n # Fourth Layer\n cube_string += str(convertToLetter(green_20) + convertToLetter(green_21) + convertToLetter(green_22)\n + convertToLetter(white_20) + convertToLetter(white_21) + convertToLetter(white_22) + convertToLetter(blue_20)\n + convertToLetter(blue_21) + convertToLetter(blue_22) + convertToLetter(yellow_20) + convertToLetter(yellow_21)\n + convertToLetter(yellow_22))\n\n # Final Layer\n cube_string += str(convertToLetter(red_00) + convertToLetter(red_01) + convertToLetter(red_02) + convertToLetter(red_10)\n + convertToLetter(red_11) + convertToLetter(red_12) + convertToLetter(red_20) + convertToLetter(\n red_21) + convertToLetter(red_22))\n\n return cube_string\n\n# Green Face (Layer 1)\ngreen_00 = cubeCanvas.create_rectangle(20, 240, 90, 310, width=0, fill='green', tag=\"green_00\")\ncubeCanvas.tag_bind(\"green_00\", \"\", editColor)\ngreen_01 = cubeCanvas.create_rectangle(100, 240, 170, 310, width=0, fill='green', tag=\"green_01\")\ncubeCanvas.tag_bind(\"green_01\", \"\", editColor)\ngreen_02 = cubeCanvas.create_rectangle(180, 240, 250, 310, width=0, fill='green', tag=\"green_02\")\ncubeCanvas.tag_bind(\"green_02\", \"\", editColor)\n\n# Green Face (Layer 2)\ngreen_10 = cubeCanvas.create_rectangle(20, 320, 90, 390, width=0, fill='green', tag=\"green_10\")\ncubeCanvas.tag_bind(\"green_10\", \"\", editColor)\ngreen_11 = cubeCanvas.create_rectangle(100, 320, 170, 390, width=0, fill='green', tag=\"green_11\")\ncubeCanvas.tag_bind(\"green_11\", \"\", editColor)\ngreen_12 = cubeCanvas.create_rectangle(180, 320, 250, 390, width=0, fill='green', tag=\"green_12\")\ncubeCanvas.tag_bind(\"green_12\", \"\", editColor)\n# Green Face (Layer 3)\ngreen_20 = cubeCanvas.create_rectangle(20, 400, 90, 470, width=0, fill='green', tag=\"green_20\")\ncubeCanvas.tag_bind(\"green_20\", 
\"\", editColor)\ngreen_21 = cubeCanvas.create_rectangle(100, 400, 170, 470, width=0, fill='green', tag=\"green_21\")\ncubeCanvas.tag_bind(\"green_21\", \"\", editColor)\ngreen_22 = cubeCanvas.create_rectangle(180, 400, 250, 470, width=0, fill='green', tag=\"green_22\")\ncubeCanvas.tag_bind(\"green_22\", \"\", editColor)\n\n# White Face (Layer 1)\nwhite_00 = cubeCanvas.create_rectangle(260, 240, 330, 310, width=0, fill='white', tag=\"white_00\")\ncubeCanvas.tag_bind(\"white_00\", \"\", editColor)\nwhite_01 = cubeCanvas.create_rectangle(340, 240, 410, 310, width=0, fill='white', tag=\"white_01\")\ncubeCanvas.tag_bind(\"white_01\", \"\", editColor)\nwhite_02 = cubeCanvas.create_rectangle(420, 240, 490, 310, width=0, fill='white', tag=\"white_02\")\ncubeCanvas.tag_bind(\"white_02\", \"\", editColor)\n\n# White Face (Layer 2)\nwhite_10 = cubeCanvas.create_rectangle(260, 320, 330, 390, width=0, fill='white', tag=\"white_10\")\ncubeCanvas.tag_bind(\"white_10\", \"\", editColor)\nwhite_11 = cubeCanvas.create_rectangle(340, 320, 410, 390, width=0, fill='white', tag=\"white_11\")\ncubeCanvas.tag_bind(\"white_11\", \"\", editColor)\nwhite_12 = cubeCanvas.create_rectangle(420, 320, 490, 390, width=0, fill='white', tag=\"white_12\")\ncubeCanvas.tag_bind(\"white_12\", \"\", editColor)\n\n# White Face (Layer 3)\nwhite_20 = cubeCanvas.create_rectangle(260, 400, 330, 470, width=0, fill='white', tag=\"white_20\")\ncubeCanvas.tag_bind(\"white_20\", \"\", editColor)\nwhite_21 = cubeCanvas.create_rectangle(340, 400, 410, 470, width=0, fill='white', tag=\"white_21\")\ncubeCanvas.tag_bind(\"white_21\", \"\", editColor)\nwhite_22 = cubeCanvas.create_rectangle(420, 400, 490, 470, width=0, fill='white', tag=\"white_22\")\ncubeCanvas.tag_bind(\"white_22\", \"\", editColor)\n\n# Orange Face (Layer 1)\norange_00 = cubeCanvas.create_rectangle(260, 0, 330, 70, width=0, fill='orange', tag=\"orange_00\")\ncubeCanvas.tag_bind(\"orange_00\", \"\", editColor)\norange_01 = cubeCanvas.create_rectangle(340, 0, 410, 70, width=0, fill='orange', tag=\"orange_01\")\ncubeCanvas.tag_bind(\"orange_01\", \"\", editColor)\norange_02 = cubeCanvas.create_rectangle(420, 0, 490, 70, width=0, fill='orange', tag=\"orange_02\")\ncubeCanvas.tag_bind(\"orange_02\", \"\", editColor)\n\n# Orange Face (Layer 2)\norange_10 = cubeCanvas.create_rectangle(260, 80, 330, 150, width=0, fill='orange', tag=\"orange_10\")\ncubeCanvas.tag_bind(\"orange_10\", \"\", editColor)\norange_11 = cubeCanvas.create_rectangle(340, 80, 410, 150, width=0, fill='orange', tag=\"orange_11\")\ncubeCanvas.tag_bind(\"orange_11\", \"\", editColor)\norange_12 = cubeCanvas.create_rectangle(420, 80, 490, 150, width=0, fill='orange', tag=\"orange_12\")\ncubeCanvas.tag_bind(\"orange_12\", \"\", editColor)\n\n# Orange Face (Layer 3)\norange_20 = cubeCanvas.create_rectangle(260, 160, 330, 230, width=0, fill='orange', tag=\"orange_20\")\ncubeCanvas.tag_bind(\"orange_20\", \"\", editColor)\norange_21 = cubeCanvas.create_rectangle(340, 160, 410, 230, width=0, fill='orange', tag=\"orange_21\")\ncubeCanvas.tag_bind(\"orange_21\", \"\", editColor)\norange_22 = cubeCanvas.create_rectangle(420, 160, 490, 230, width=0, fill='orange', tag=\"orange_22\")\ncubeCanvas.tag_bind(\"orange_22\", \"\", editColor)\n\n# Blue Face (Layer 1)\nblue_00 = cubeCanvas.create_rectangle(500, 240, 570, 310, width=0, fill='blue', tag=\"blue_00\")\ncubeCanvas.tag_bind(\"blue_00\", \"\", editColor)\nblue_01 = cubeCanvas.create_rectangle(580, 240, 650, 310, width=0, fill='blue', 
tag=\"blue_01\")\ncubeCanvas.tag_bind(\"blue_01\", \"\", editColor)\nblue_02 = cubeCanvas.create_rectangle(660, 240, 730, 310, width=0, fill='blue', tag=\"blue_02\")\ncubeCanvas.tag_bind(\"blue_02\", \"\", editColor)\n\n# Blue Face (Layer 2)\nblue_10 = cubeCanvas.create_rectangle(500, 320, 570, 390, width=0, fill='blue', tag=\"blue_10\")\ncubeCanvas.tag_bind(\"blue_10\", \"\", editColor)\nblue_11 = cubeCanvas.create_rectangle(580, 320, 650, 390, width=0, fill='blue', tag=\"blue_11\")\ncubeCanvas.tag_bind(\"blue_11\", \"\", editColor)\nblue_12 = cubeCanvas.create_rectangle(660, 320, 730, 390, width=0, fill='blue', tag=\"blue_21\")\ncubeCanvas.tag_bind(\"blue_12\", \"\", editColor)\n\n# Blue Face (Layer 2)\nblue_20 = cubeCanvas.create_rectangle(500, 400, 570, 470, width=0, fill='blue', tag=\"blue_20\")\ncubeCanvas.tag_bind(\"blue_20\", \"\", editColor)\nblue_21 = cubeCanvas.create_rectangle(580, 400, 650, 470, width=0, fill='blue', tag=\"blue_21\")\ncubeCanvas.tag_bind(\"blue_21\", \"\", editColor)\nblue_22 = cubeCanvas.create_rectangle(660, 400, 730, 470, width=0, fill='blue', tag=\"blue_22\")\ncubeCanvas.tag_bind(\"blue_22\", \"\", editColor)\n\n# Yellow Face (Layer 1)\nyellow_00 = cubeCanvas.create_rectangle(740, 240, 810, 310, width=0, fill='yellow', tag=\"yellow_00\")\ncubeCanvas.tag_bind(\"yellow_00\", \"\", editColor)\nyellow_01 = cubeCanvas.create_rectangle(820, 240, 890, 310, width=0, fill='yellow', tag=\"yellow_01\")\ncubeCanvas.tag_bind(\"yellow_01\", \"\", editColor)\nyellow_02 = cubeCanvas.create_rectangle(900, 240, 970, 310, width=0, fill='yellow', tag=\"yellow_02\")\ncubeCanvas.tag_bind(\"yellow_02\", \"\", editColor)\n\n# Yellow Face (Layer 2)\nyellow_10 = cubeCanvas.create_rectangle(740, 320, 810, 390, width=0, fill='yellow', tag=\"yellow_10\")\ncubeCanvas.tag_bind(\"yellow_10\", \"\", editColor)\nyellow_11 = cubeCanvas.create_rectangle(820, 320, 890, 390, width=0, fill='yellow', tag=\"yellow_11\")\ncubeCanvas.tag_bind(\"yellow_11\", \"\", editColor)\nyellow_12 = cubeCanvas.create_rectangle(900, 320, 970, 390, width=0, fill='yellow', tag=\"yellow_21\")\ncubeCanvas.tag_bind(\"yellow_12\", \"\", editColor)\n\n# Yellow Face (Layer 3)\nyellow_20 = cubeCanvas.create_rectangle(740, 400, 810, 470, width=0, fill='yellow', tag=\"yellow_20\")\ncubeCanvas.tag_bind(\"yellow_20\", \"\", editColor)\nyellow_21 = cubeCanvas.create_rectangle(820, 400, 890, 470, width=0, fill='yellow', tag=\"yellow_21\")\ncubeCanvas.tag_bind(\"yellow_21\", \"\", editColor)\nyellow_22 = cubeCanvas.create_rectangle(900, 400, 970, 470, width=0, fill='yellow', tag=\"yellow_22\")\ncubeCanvas.tag_bind(\"yellow_22\", \"\", editColor)\n\n# Red Face (Layer 1)\nred_00 = cubeCanvas.create_rectangle(260, 480, 330, 550, width=0, fill='red', tag=\"red_00\")\ncubeCanvas.tag_bind(\"red_00\", \"\", editColor)\nred_01 = cubeCanvas.create_rectangle(340, 480, 410, 550, width=0, fill='red', tag=\"red_01\")\ncubeCanvas.tag_bind(\"red_01\", \"\", editColor)\nred_02 = cubeCanvas.create_rectangle(420, 480, 490, 550, width=0, fill='red', tag=\"red_02\")\ncubeCanvas.tag_bind(\"red_02\", \"\", editColor)\n\n# Red Face (Layer 2)\nred_10 = cubeCanvas.create_rectangle(260, 560, 330, 630, width=0, fill='red', tag=\"red_10\")\ncubeCanvas.tag_bind(\"red_10\", \"\", editColor)\nred_11 = cubeCanvas.create_rectangle(340, 560, 410, 630, width=0, fill='red', tag=\"red_11\")\ncubeCanvas.tag_bind(\"red_11\", \"\", editColor)\nred_12 = cubeCanvas.create_rectangle(420, 560, 490, 630, width=0, fill='red', 
tag=\"red_12\")\ncubeCanvas.tag_bind(\"red_12\", \"\", editColor)\n\n# Red Face (Layer 3)\nred_20 = cubeCanvas.create_rectangle(260, 640, 330, 710, width=0, fill='red', tag=\"red_20\")\ncubeCanvas.tag_bind(\"red_20\", \"\", editColor)\nred_21 = cubeCanvas.create_rectangle(340, 640, 410, 710, width=0, fill='red', tag=\"red_21\")\ncubeCanvas.tag_bind(\"red_21\", \"\", editColor)\nred_22 = cubeCanvas.create_rectangle(420, 640, 490, 710, width=0, fill='red', tag=\"red_21\")\ncubeCanvas.tag_bind(\"red_22\", \"\", editColor)\n\n\n\ndef solveCube():\n print(flattenCube())\n cube = Cube(flattenCube())\n print(cube)\n if cube.is_solved():\n print(\"cube = \" + flattenCube())\n messagebox.showinfo(\"Cube Solved\", \"This cube is already solved!\")\n else:\n solver = SolveCube(cube)\n algorithm = solver.solveCube()\n print(algorithm)\n performAlgorithm(algorithm)\n\nsolveButton = Button(root, text=\"Solve Cube\", command=lambda: solveCube())\nsolveButton.config(font=(\"Arial\", 20))\nsolveButton.place(x=1150, y=600)\n\n# Label For Main Title\ntitle = Label(root, text=\"Rubiks Cube Solver\")\ntitle.config(font=(\"Courier\", 30))\ntitle.place(x=1045, y=30)\n\n# Label For Subtitle\nsubTitle = Label(root, text=\"Project By Ryan Jewsbury\")\nsubTitle.config(font=(\"Arial\", 20))\nsubTitle.place(x=1090, y=60)\n\n# More Information Button\nmoreInfo = Button(root, text=\"More Info\", command=lambda: print(\"more info pressed\"))\nmoreInfo.config(font=(\"Arial\", 15))\nmoreInfo.place(x=1100, y=100)\n\n# Settings Button\nsettingsButton = Button(root, text=\"Settings\", command=lambda: print(\"settings pressed\"))\nsettingsButton.config(font=(\"Arial\", 15))\nsettingsButton.place(x=1230, y=100)\n\n# Scan Your Own Cube Label\nscan_title = Label(root, text=\"Scan Your Own Cube\")\nscan_title.config(font=(\"Arial\", 20))\nscan_title.place(x=1110, y=150)\n\n# Scan Cube Button\nscanCube_button = Button(root, text=\"Scan Cube\", command=lambda: print(\"Scan Cube Pressed\"))\nscanCube_button.config(font=(\"Arial\", 15))\nscanCube_button.place(x=1160, y=180)\n\n# Perform Move\nmove_title = Label(root, text=\"Make Your Own Move\")\nmove_title.config(font=(\"Arial\", 20))\nmove_title.place(x=1110, y=230)\n\n# Move Input\nmoveInput = Entry(root)\nmoveInput.place(x=1125, y=260)\ninputButton = Button(root, text=\"Make Move\", command=lambda: performAlgorithm(moveInput.get()))\ninputButton.place(x=1160, y=290)\n\n# Edit Mode Label\nedit_mode_label = Label(root, text=\"Edit Mode Activated\")\nedit_mode_label.config(font=(\"Arial\", 20), fg=\"white\", bg=\"black\")\n\n# Edit Digital Cube\nedit_cube_title = Label(root, text=\"Edit This Cube\")\nedit_cube_title.config(font=(\"Arial\", 20))\nedit_cube_title.place(x=1142, y=340)\nedit_cube_button = Button(root, text=\"Enter Edit Mode\", command=lambda: editMode(edit_mode_label, edit_cube_button))\nedit_cube_button.place(x=1150, y=380)\n\n# Edit Mode Cubes\nwhite_color = cubeCanvas.create_rectangle(600, 600, 650, 650, width=0, fill='white', state=\"hidden\", tag=\"edit_white\")\nred_color = cubeCanvas.create_rectangle(660, 600, 710, 650, width=0, fill='red', state=\"hidden\", tag=\"edit_red\")\nblue_color = cubeCanvas.create_rectangle(720, 600, 770, 650, width=0, fill='blue', state=\"hidden\", tag=\"edit_blue\")\norange_color = cubeCanvas.create_rectangle(780, 600, 830, 650, width=0, fill='orange', state=\"hidden\",\n tag=\"edit_orange\")\ngreen_color = cubeCanvas.create_rectangle(840, 600, 890, 650, width=0, fill='green', state=\"hidden\", tag=\"edit_green\")\nyellow_color = 
cubeCanvas.create_rectangle(900, 600, 950, 650, width=0, fill='yellow', state=\"hidden\",\n tag=\"edit_yellow\")\ncubeCanvas.tag_bind(\"edit_white\", \"\", editColor)\ncubeCanvas.tag_bind(\"edit_red\", \"\", editColor)\ncubeCanvas.tag_bind(\"edit_blue\", \"\", editColor)\ncubeCanvas.tag_bind(\"edit_orange\", \"\", editColor)\ncubeCanvas.tag_bind(\"edit_green\", \"\", editColor)\ncubeCanvas.tag_bind(\"edit_yellow\", \"\", editColor)\n\n# print(flattenCube())\n\nroot.resizable(False, False)\nroot.mainloop()\n","sub_path":"Development/gui_test.py","file_name":"gui_test.py","file_ext":"py","file_size_in_byte":24270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"236487118","text":"\"\"\"Rm nms with multi coords\"\"\"\nimport argparse, pandas, csv\n\ndef get_singles(bed_in):\n df = pandas.read_csv(args.bed_in, sep='\\t', usecols=['nm', 'bin'])\n c = {0:'size'}\n g = df.groupby(['nm', 'bin']).size().reset_index().rename(index=str, columns=c)\n singles = set([x + ':' + str(y) for x,y in g[g['size']==1][['nm', 'bin']].values])\n return singles\n\ndef main(args):\n singles = get_singles(args.bed_in)\n with open(args.bed_in) as f, open(args.bed_out, 'w') as fout:\n reader = csv.DictReader(f, delimiter='\\t')\n fields = reader.fieldnames\n print('\\t'.join(fields), file=fout)\n for row in reader:\n key = row['nm'] + ':' + row['bin']\n if key in singles:\n print('\\t'.join([row[x] for x in fields]), file=fout)\n \nif __name__ == \"__main__\":\n desc = 'Rms with mulit entries'\n parser = argparse.ArgumentParser(description=desc)\n argLs = ('bed_in', 'bed_out', )\n for param in argLs:\n parser.add_argument(param)\n args = parser.parse_args()\n main(args)\n","sub_path":"code/scripts/fix_fly_gene_regions.py","file_name":"fix_fly_gene_regions.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"375883568","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# ##\n# @brief [py example simple] motion basic test for doosan robot\n# @author Kab Kyoum Kim (kabkyoum.kim@doosan.com) \n# TEST 2019-12-09\nimport rospy\nimport os\nimport threading, time\nimport sys\nsys.dont_write_bytecode = True\nsys.path.append( os.path.abspath(os.path.join(os.path.dirname(__file__),\"../../../../common/imp\")) ) # get import path : DSR_ROBOT.py \n\n# for single robot \nROBOT_ID = \"dsr01\"\nROBOT_MODEL = \"a0509\"\nimport DR_init\nDR_init.__dsr__id = ROBOT_ID\nDR_init.__dsr__model = ROBOT_MODEL\nfrom DSR_ROBOT import *\n\ndef shutdown():\n print(\"shutdown time!\")\n print(\"shutdown time!\")\n print(\"shutdown time!\")\n\n pub_stop.publish(stop_mode=STOP_TYPE_QUICK)\n return 0\n\ndef msgRobotState_cb(msg):\n msgRobotState_cb.count += 1\n\n if (0==(msgRobotState_cb.count % 100)): \n rospy.loginfo(\"________ ROBOT STATUS ________\")\n print(\" robot_state : %d\" % (msg.robot_state))\n print(\" robot_state_str : %s\" % (msg.robot_state_str))\n print(\" actual_mode : %d\" % (msg.actual_mode))\n print(\" actual_space : %d\" % (msg.actual_space))\n print(\" current_posj : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f\" % (msg.current_posj[0],msg.current_posj[1],msg.current_posj[2],msg.current_posj[3],msg.current_posj[4],msg.current_posj[5]))\n print(\" current_velj : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f\" % (msg.current_velj[0],msg.current_velj[1],msg.current_velj[2],msg.current_velj[3],msg.current_velj[4],msg.current_velj[5]))\n print(\" joint_abs : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f\" % 
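# A generalizable takeaway from fix_fly_gene_regions.py above: groupby(...).size()
# is the idiomatic pandas way to keep rows whose key occurs exactly once. A
# minimal self-contained sketch of the same technique (made-up data):
#
#     import pandas as pd
#     df = pd.DataFrame({'nm': ['a', 'a', 'b'], 'bin': [1, 1, 2]})
#     counts = df.groupby(['nm', 'bin']).size().reset_index(name='n')
#     singles = counts[counts['n'] == 1]    # keeps only the ('b', 2) pair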
(msg.joint_abs[0],msg.joint_abs[1],msg.joint_abs[2],msg.joint_abs[3],msg.joint_abs[4],msg.joint_abs[5]))\n print(\" joint_err : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f\" % (msg.joint_err[0],msg.joint_err[1],msg.joint_err[2],msg.joint_err[3],msg.joint_err[4],msg.joint_err[5]))\n print(\" target_posj : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f\" % (msg.target_posj[0],msg.target_posj[1],msg.target_posj[2],msg.target_posj[3],msg.target_posj[4],msg.target_posj[5]))\n print(\" target_velj : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f\" % (msg.target_velj[0],msg.target_velj[1],msg.target_velj[2],msg.target_velj[3],msg.target_velj[4],msg.target_velj[5])) \n print(\" current_posx : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f\" % (msg.current_posx[0],msg.current_posx[1],msg.current_posx[2],msg.current_posx[3],msg.current_posx[4],msg.current_posx[5]))\n print(\" current_velx : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f\" % (msg.current_velx[0],msg.current_velx[1],msg.current_velx[2],msg.current_velx[3],msg.current_velx[4],msg.current_velx[5]))\n print(\" task_err : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f\" % (msg.task_err[0],msg.task_err[1],msg.task_err[2],msg.task_err[3],msg.task_err[4],msg.task_err[5]))\n print(\" solution_space : %d\" % (msg.solution_space))\n sys.stdout.write(\" rotation_matrix : \")\n for i in range(0 , 3):\n sys.stdout.write( \"dim : [%d]\"% i)\n sys.stdout.write(\" [ \")\n for j in range(0 , 3):\n sys.stdout.write(\"%d \" % msg.rotation_matrix[i].data[j])\n sys.stdout.write(\"] \")\n print ##end line\n print(\" dynamic_tor : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f\" % (msg.dynamic_tor[0],msg.dynamic_tor[1],msg.dynamic_tor[2],msg.dynamic_tor[3],msg.dynamic_tor[4],msg.dynamic_tor[5]))\n print(\" actual_jts : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f\" % (msg.actual_jts[0],msg.actual_jts[1],msg.actual_jts[2],msg.actual_jts[3],msg.actual_jts[4],msg.actual_jts[5]))\n print(\" actual_ejt : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f\" % (msg.actual_ejt[0],msg.actual_ejt[1],msg.actual_ejt[2],msg.actual_ejt[3],msg.actual_ejt[4],msg.actual_ejt[5]))\n print(\" actual_ett : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f\" % (msg.actual_ett[0],msg.actual_ett[1],msg.actual_ett[2],msg.actual_ett[3],msg.actual_ett[4],msg.actual_ett[5]))\n print(\" sync_time : %7.3f\" % (msg.sync_time))\n print(\" actual_bk : %d %d %d %d %d %d\" % (msg.actual_bk[0],msg.actual_bk[1],msg.actual_bk[2],msg.actual_bk[3],msg.actual_bk[4],msg.actual_bk[5]))\n print(\" actual_bt : %d %d %d %d %d \" % (msg.actual_bt[0],msg.actual_bt[1],msg.actual_bt[2],msg.actual_bt[3],msg.actual_bt[4]))\n print(\" actual_mc : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f\" % (msg.actual_mc[0],msg.actual_mc[1],msg.actual_mc[2],msg.actual_mc[3],msg.actual_mc[4],msg.actual_mc[5]))\n print(\" actual_mt : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f\" % (msg.actual_mt[0],msg.actual_mt[1],msg.actual_mt[2],msg.actual_mt[3],msg.actual_mt[4],msg.actual_mt[5]))\n\n #print digital i/o\n sys.stdout.write(\" ctrlbox_digital_input : \")\n for i in range(0 , 16):\n sys.stdout.write(\"%d \" % msg.ctrlbox_digital_input[i])\n print ##end line\n sys.stdout.write(\" ctrlbox_digital_output: \")\n for i in range(0 , 16):\n sys.stdout.write(\"%d \" % msg.ctrlbox_digital_output[i])\n print\n sys.stdout.write(\" flange_digital_input : \")\n for i in range(0 , 6):\n sys.stdout.write(\"%d \" % msg.flange_digital_input[i])\n print\n sys.stdout.write(\" flange_digital_output : \")\n for i in range(0 , 6):\n sys.stdout.write(\"%d \" % msg.flange_digital_output[i])\n print\n #print modbus i/o\n sys.stdout.write(\" modbus_state : \" )\n if 
len(msg.modbus_state) > 0:\n for i in range(0 , len(msg.modbus_state)):\n sys.stdout.write(\"[\" + msg.modbus_state[i].modbus_symbol)\n sys.stdout.write(\", %d] \" % msg.modbus_state[i].modbus_value)\n print()\n\n print(\" access_control : %d\" % (msg.access_control))\n print(\" homming_completed : %d\" % (msg.homming_completed))\n print(\" tp_initialized : %d\" % (msg.tp_initialized))\n print(\" mastering_need : %d\" % (msg.mastering_need))\n print(\" drl_stopped : %d\" % (msg.drl_stopped))\n print(\" disconnected : %d\" % (msg.disconnected))\nmsgRobotState_cb.count = 0\n\ndef thread_subscriber():\n rospy.Subscriber('/'+ROBOT_ID +ROBOT_MODEL+'/state', RobotState, msgRobotState_cb)\n rospy.spin()\n #rospy.spinner(2) \n \nif __name__ == \"__main__\":\n rospy.init_node('single_robot_simple_py')\n rospy.on_shutdown(shutdown)\n set_robot_mode = rospy.ServiceProxy('/'+ROBOT_ID +ROBOT_MODEL+'/system/set_robot_mode', SetRobotMode)\n t1 = threading.Thread(target=thread_subscriber)\n t1.daemon = True \n t1.start()\n\n pub_stop = rospy.Publisher('/'+ROBOT_ID +ROBOT_MODEL+'/stop', RobotStop, queue_size=10) \n\n set_robot_mode(ROBOT_MODE_AUTONOMOUS)\n\n set_velx(30,20) # set global task speed: 30(mm/sec), 20(deg/sec)\n set_accx(60,40) # set global task accel: 60(mm/sec2), 40(deg/sec2)\n\n velx=[50, 50]\n accx=[100, 100]\n\n p1= posj(0,0,0,0,0,0) #joint\n p2= posj(0.0, 0.0, 90.0, 0.0, 90.0, 0.0) #joint\n\n x1= posx(367, 37.6, 540.5, 45, 180, 45) #task\n x2= posx(367, 10, 540.5, 62.0, 180, 62.0) #task\n\n c1 = posx(367, 40, 540.5, 12, 180, 12)\n c2 = posx(367, 10, 490, 12, 180, 12)\n\n\n q0 = posj(0,0,90,0,90,0)\n q1 = posj(10,0,90,10,90,0)\n q2 = posj(0,10,90,10,90,0)\n q3 = posj(0,10,80,0,90,0)\n q4 = posj(0,0,100,0,90,0)\n q5 = posj(20,0,90,20,90,0)\n qlist = [q0, q1, q2, q3, q4, q5]\n\n x1 = posx(330, 185, 497, 110, 160, 90)\n x2 = posx(360, 185, 497, 110, 160, 90)\n x3 = posx(310, 185, 497, 110, 160, 90)\n x4 = posx(310, 185, 397, 110, 160, 90)\n x5 = posx(310, 185, 397, 110, 160, 90)\n xlist = [x1, x2, x3, x4, x5, x5]\n\n\n X1 = posx(370, 670, 650, 0, 180, 0)\n X1a = posx(370, 670, 400, 0, 180, 0)\n X1a2= posx(370, 545, 400, 0, 180, 0)\n X1b = posx(370, 595, 400, 0, 180, 0)\n X1b2= posx(370, 670, 400, 0, 180, 0)\n X1c = posx(370, 420, 150, 0, 180, 0)\n X1c2= posx(370, 545, 150, 0, 180, 0)\n X1d = posx(370, 670, 275, 0, 180, 0)\n X1d2= posx(370, 795, 150, 0, 180, 0)\n\n\n seg11 = posb(DR_LINE, X1, radius=20)\n seg12 = posb(DR_CIRCLE, X1a, X1a2, radius=21)\n seg14 = posb(DR_LINE, X1b2, radius=20)\n seg15 = posb(DR_CIRCLE, X1c, X1c2, radius=22)\n seg16 = posb(DR_CIRCLE, X1d, X1d2, radius=23)\n b_list1 = [seg11, seg12, seg14, seg15, seg16] \n\n while not rospy.is_shutdown():\n movej(p2, vel=60, acc=30)\n movejx(x1, vel=60, acc=60, sol=2)\n movel(x2, velx, accx)\n movec(c1, c2, velx, accx)\n movesj(qlist, vel=100, acc=100)\n movesx(xlist, vel=100, acc=100)\n movej(p2, vel=60, acc=30)\n move_spiral(rev=1.00, rmax=20.00, lmax=20.00, time=5.00, axis=DR_AXIS_Z, ref=DR_TOOL)\n move_periodic(amp=[10.00, 0.00, 20.00, 0.00, 0.50, 0.00], period=[1.00, 0.00, 1.50, 0.00, 0.00, 0.00], atime=0.50, repeat=3, ref=DR_BASE)\n\n print('good bye!')\n","sub_path":"dsr_example/py/scripts/simple/single_robot_simple_a0509.py","file_name":"single_robot_simple_a0509.py","file_ext":"py","file_size_in_byte":9126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"650615738","text":"from django.utils.translation import ugettext_lazy as _\nfrom django.core.urlresolvers import 
reverse\nfrom django import forms\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, LayoutObject, Div, Submit, HTML, Button, Row, Field, ButtonHolder, Fieldset\n\n\nclass ContactoForm(forms.Form):\n required_css_class = 'required'\n\n nombre = forms.CharField(max_length=255, label=_(\"nombre_completo\"), help_text=\"\", required=True)\n email = forms.CharField(max_length=155, label=_(\"email\"), help_text=\"\", required=True)\n telefono = forms.IntegerField(label=_(\"Numero Telefonico\"), help_text=\"\", required=True, widget=forms.TextInput())\n mensaje = forms.CharField(max_length=255, label=_(\"mensaje\"), help_text=\"\", required=True)\n\n\n def __init__(self, *args, **kwargs):\n super(ContactoForm, self).__init__(*args, **kwargs)\n\n cancel_url = reverse('home')\n\n self.helper = FormHelper()\n self.helper.form_method = 'POST'\n self.helper.form_class = 'form-horizontal'\n self.helper.label_class = 'col-sm-4'\n self.helper.field_class = 'col-sm-6'\n self.helper.attrs = {\"onsubmit\": \"return validarForm();\"}\n\n self.helper.layout = Layout(\n Div(\n\n Fieldset(\n '',\n 'nombre',\n 'email',\n 'telefono',\n 'mensaje'\n ),\n\n ))","sub_path":"afiansa_django/applications/website/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"615545792","text":"# -*- coding: utf-8 -*-\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtCore import pyqtSignal, QObject\nfrom design.ui_post_controls import Ui_post_controls\n\nclass signals(QObject):\n\tsubscribe = pyqtSignal(bool) # True for subscript, False for unsubscribe\n\treport = pyqtSignal()\n\tmute = pyqtSignal()\n\thide = pyqtSignal()\n\tdelete = pyqtSignal()\n\nclass post_controls(QtWidgets.QWidget):\n\tdef __init__(self, own_post=False, is_subscribed=False, parent=None):\n\t\tQtWidgets.QWidget.__init__(self, parent=parent)\n\t\tself.ui = Ui_post_controls()\n\t\tself.ui.setupUi(self)\n\t\t\n\t\tself.signals = signals()\n\t\t\n\t\tself.is_subscribed = is_subscribed\n\t\tself.subscriped_icon = QtGui.QIcon(QtGui.QPixmap(\":/icons/icons/notify_post_active.svg\"))\n\t\tself.unsubscriped_icon = QtGui.QIcon(QtGui.QPixmap(\":/icons/icons/notify_post_normal.svg\"))\n\t\t\n\t\tif own_post:\n\t\t\tself.ui.report.hide()\t\t\t\t\t\t\t\t\t\t# hide report button\n\t\t\tself.ui.subscribe.hide()\t\t\t\t\t\t\t\t\t# hide subscribe button\n\t\t\tself.ui.mute.hide()\t\t\t\t\t\t\t\t\t\t\t# hide mute button\n\t\t\tself.ui.delete_.clicked.connect( self.delete )\n\t\telse:\n\t\t\tif self.is_subscribed:\n\t\t\t\tself.ui.subscribe.setIcon( self.subscriped_icon )\n\t\t\telse:\n\t\t\t\tself.ui.subscribe.setIcon( self.unsubscriped_icon )\n\t\t\t\n\t\t\tself.ui.delete_.clicked.connect( self.hide_post )\n\t\t\tself.ui.delete_.setToolTip(\"Click here to hide this post.\")\n\t\t\t\n\t\t\tself.ui.report.clicked.connect( self.report )\n\t\t\tself.ui.report.setToolTip(\"Click here to report this post.\")\n\t\t\t\n\t\t\tself.ui.subscribe.clicked.connect( self.subscribe )\t\t\t# toggle\n\t\t\t\n\t\t\tself.ui.mute.clicked.connect( self.mute )\n\t\t\tself.ui.mute.setToolTip(\"Click here to block author of this post.\")\n\n\tdef confirm(self, title, quesion):\n\t\tanswer = QtWidgets.QMessageBox.question(self, title, quesion, QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)\n\t\tif answer == QtWidgets.QMessageBox.Yes:\n\t\t\treturn True\n\t\treturn False\n\n\tdef hide_post(self):\t\t\t\t\t\t\t\t\t\t\t\t# used if not 
own_post\n\t\tif self.confirm(\"Question\", \"Are you sure you want to hide this post?\"):\n\t\t\tself.ui.delete_.clicked.disconnect()\n\t\t\tself.signals.hide.emit()\n\n\tdef delete(self):\t\t\t\t\t\t\t\t\t\t\t\t\t# used if own_post\n\t\tif self.confirm(\"Question\", \"Are you sure you want to delete this post?\"):\n\t\t\tself.ui.delete_.clicked.disconnect()\n\t\t\tself.signals.delete.emit()\n\n\tdef mute(self):\n\t\tif self.confirm(\"Question\", \"Are you sure you want block this author?\"):\n\t\t\tself.ui.mute.clicked.disconnect()\n\t\t\tself.signals.mute.emit()\n\n\tdef subscribe(self):\n\t\tif self.is_subscribed:\n\t\t\tself.signals.subscribe.emit(False)\n\t\telse:\n\t\t\tself.signals.subscribe.emit(True)\n\n\tdef report(self):\n\t\tself.ui.report.clicked.disconnect()\n\n\t\"\"\" These functions will be called after remote action succesfull\"\"\"\n\tdef unsubscribed(self):\n\t\tself.is_subscribed = False\n\t\tself.ui.subscribe.setIcon( self.unsubscriped_icon )\n\t\tself.ui.subscribe.setToolTip(\"Click to subscript to this post.\")\n\n\tdef subscribed(self):\n\t\tself.is_subscribed = True\n\t\tself.ui.subscribe.setIcon( self.subscriped_icon )\n\t\tself.ui.subscribe.setToolTip(\"Click to unsubscript to this post.\")\n\n\tdef reported(self):\n\t\tself.ui.report.hide()\n\t\t#TODO turn icon red.\n\n\t\"\"\" guess these functions can be deleted since to post will be\n\tdeleted from stream \"\"\"\n\tdef muted(self):\n\t\tpass\n\n\tdef deleted(self):\n\t\tpass\n\n\tdef hided(self):\n\t\tpass\n","sub_path":"widgets/post_controls.py","file_name":"post_controls.py","file_ext":"py","file_size_in_byte":3268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"129174154","text":"# -*- coding: utf-8 -*-\nimport re\nfrom io import BytesIO\nimport base64\nimport time\nfrom dateutil.relativedelta import relativedelta\nfrom datetime import datetime, timedelta, date\nimport xlwt\nfrom xlwt import *\nfrom odoo import fields, api, models\nfrom odoo.tools.translate import _\nfrom odoo.tools.misc import formatLang\nfrom odoo.exceptions import UserError\n\nclass account_report_general_ledger(models.TransientModel):\n\n _inherit=\"account.report.general.ledger\"\n\n xls_theme_id = fields.Many2one('color.xls.theme','XLS Theme')\n\n @api.multi\n def print_ledgerreport_xls(self):\n current_obj = self\n\n data = {}\n init_balance = self.initial_balance\n sortby = self.sortby\n display_account = self.display_account\n target_move = self.target_move\n\n codes = []\n\n if self.journal_ids:\n codes = [journal.code for journal in self.env['account.journal'].search([('id', 'in', self.journal_ids.ids)])]\n\n if self.account_ids:\n accounts = self.env['account.account'].browse(self.account_ids.ids)\n else:\n accounts = self.env['account.account'].search([])\n\n data['form'] = self.read(['date_from', 'date_to', 'journal_ids','target_move'])[0]\n if self.initial_balance and not self.date_from:\n raise UserError(_(\"You must define a Start Date\"))\n used_context = self._build_contexts(data)\n data['form']['used_context'] = dict(used_context, lang=self.env.context.get('lang', 'en_US'))\n accounts_res = self.env['report.account.report_generalledger'].with_context(data['form'].get('used_context',{}))._get_account_move_entry(accounts, init_balance, sortby, display_account)\n target_move = dict(self.env['account.report.general.ledger'].fields_get(allfields=['target_move'])['target_move']['selection'])[current_obj.target_move]\n sortby = 
dict(self.env['account.report.general.ledger'].fields_get(allfields=['sortby'])['sortby']['selection'])[current_obj.sortby]\n display_account = dict(self.env['account.report.general.ledger'].fields_get(allfields=['display_account'])['display_account']['selection'])[current_obj.display_account]\n\n fp = BytesIO()\n wb = xlwt.Workbook(encoding='utf-8')\n\n header_style = xlwt.XFStyle()\n font = xlwt.Font()\n pattern = xlwt.Pattern()\n pattern.pattern = xlwt.Pattern.SOLID_PATTERN\n bg_color = current_obj.xls_theme_id.bg_color or 'black'\n pattern.pattern_fore_colour = xlwt.Style.colour_map[bg_color]\n font.height = int(current_obj.xls_theme_id.font_size)\n font.bold = current_obj.xls_theme_id.font_bold\n font.italic = current_obj.xls_theme_id.font_italic\n font_color = current_obj.xls_theme_id.font_color or 'white'\n font.colour_index = xlwt.Style.colour_map[font_color]\n header_style.pattern = pattern\n header_style.font = font\n al3 = Alignment()\n al3.horz = current_obj.xls_theme_id.header_alignment or 0x02\n header_style.alignment = al3\n\n\n column_header_style = xlwt.XFStyle()\n font = xlwt.Font()\n pattern = xlwt.Pattern()\n pattern.pattern = xlwt.Pattern.SOLID_PATTERN\n bg_color = current_obj.xls_theme_id.column_bg_color or 'red'\n pattern.pattern_fore_colour = xlwt.Style.colour_map[bg_color]\n font.height = int(current_obj.xls_theme_id.column_font_size)\n font.bold = current_obj.xls_theme_id.column_font_bold\n font.italic = current_obj.xls_theme_id.column_font_italic\n font_color = current_obj.xls_theme_id.column_font_color or 'white'\n font.colour_index = xlwt.Style.colour_map[font_color]\n column_header_style.pattern = pattern\n column_header_style.font = font\n al3 = Alignment()\n al3.horz = current_obj.xls_theme_id.column_header_alignment\n column_header_style.alignment = al3\n\n\n body_header_style = xlwt.XFStyle()\n font = xlwt.Font()\n pattern = xlwt.Pattern()\n pattern.pattern = xlwt.Pattern.SOLID_PATTERN\n bg_color = current_obj.xls_theme_id.body_bg_color or 'gold'\n pattern.pattern_fore_colour = xlwt.Style.colour_map[bg_color]\n font.height = int(current_obj.xls_theme_id.body_font_size)\n font.bold = current_obj.xls_theme_id.body_font_bold\n font.italic = current_obj.xls_theme_id.body_font_italic\n font_color = current_obj.xls_theme_id.body_font_color or 'white'\n font.colour_index = xlwt.Style.colour_map[font_color]\n body_header_style.pattern = pattern\n body_header_style.font = font\n al3 = Alignment()\n al3.horz = current_obj.xls_theme_id.body_header_alignment\n body_header_style.alignment = al3\n\n\n final_arr_data = {}\n filename = 'General-Ledger-Report.xls'\n ledger_obj = self.pool.get(\"account.report.general.ledger\")\n worksheet = wb.add_sheet(\"GENERAL-LEDGER\" + \".xls\")\n header = current_obj.company_id.name+':'+'General Ledger'\n worksheet.write_merge(0, 0, 0, 8, header, header_style)\n journal_names = []\n journal_string = ''\n for journal_name in self.env['account.journal'].browse(data['form']['journal_ids']):\n journal_names.append(journal_name.code)\n journal_string += journal_name.code + ','\n\n\n header_header_list = [\"Journals:\", \"Display Account:\", \"Sorted By:\", \"Target Moves:\"]\n header_data_list = [journal_string, display_account, sortby, target_move]\n\n header_data = dict(zip(header_header_list, header_data_list))\n row = col = 1\n for key in header_header_list:\n worksheet.write(row, col, key, column_header_style)\n row+=1\n worksheet.write(row, col, header_data[key], body_header_style)\n #if key == 'Filter By:' and header_data[key] in 
['Filtered by date', 'Filtered by period']:\n # per_row = row+1\n # for per in period:\n # worksheet.write(per_row, col, per, body_header_style)\n # per_row+=1\n row-=1\n col+=1\n # sending row cursor after 3 new rows\n row +=6\n col = 1\n\n body_header_list = [\"DATE\", \"JRNL\", \"Partner\", \"Ref\", \"Move\", \"Entry Label\", \"Debit\", \"Credit\", \"Balance\"]\n for header in body_header_list:\n worksheet.write(row, col, header, column_header_style)\n col+=1\n\n row+=1\n col=1\n\n tot_currency = 0.0\n company_name = self.company_id.name\n\n for i in range(1,15):\n column = worksheet.col(i)\n column.width = 225 * 30\n body_body_list = ['ldate', 'lcode', 'partner_name', 'lref', 'move_name', 'lname', 'debit', 'credit', 'balance']\n\n for account in accounts_res:\n\n col = 1\n row+=1\n worksheet.write(row, col, account['code'], body_header_style)\n col+=1\n worksheet.write(row, col, account['name'], body_header_style)\n col+=5\n worksheet.write(row, col, formatLang(self.env, account['debit'], currency_obj=current_obj.company_id.currency_id), body_header_style)\n col+=1\n worksheet.write(row, col, formatLang(self.env, account['credit'], currency_obj=current_obj.company_id.currency_id), body_header_style)\n col+=1\n worksheet.write(row, col, formatLang(self.env, account['balance'], currency_obj=current_obj.company_id.currency_id), body_header_style)\n\n for line in account['move_lines']:\n col =1\n row+=1\n for item in body_body_list:\n if item == 'debit':\n line[item] = formatLang(self.env, line[item], currency_obj=current_obj.company_id.currency_id)\n elif item == 'credit':\n line[item] = formatLang(self.env, line[item], currency_obj=current_obj.company_id.currency_id)\n elif item == 'balance':\n line[item] = formatLang(self.env, line[item], currency_obj=current_obj.company_id.currency_id)\n\n worksheet.write(row, col, line[item], body_header_style)\n\n col += 1\n wb.save(fp)\n out = base64.encodestring(fp.getvalue())\n final_arr_data = {}\n final_arr_data['file_stream'] = out\n final_arr_data['name'] = filename\n\n create_id = self.env['account.report.view'].create(final_arr_data)\n return {\n 'nodestroy': True,\n 'res_id': create_id.id,\n 'name': filename,\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'account.report.view',\n 'view_id': False,\n 'type': 'ir.actions.act_window',\n }\n","sub_path":"accounting_xls_reports/report/account_report_general_ledgerxls.py","file_name":"account_report_general_ledgerxls.py","file_ext":"py","file_size_in_byte":8904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"589094488","text":"\"\"\"\nModule to run tests on PypeItPar classes\n\"\"\"\nimport os\nimport numpy\n\nimport pytest\n\nfrom astropy.io import fits\n\nfrom pypeit.images import buildimage\nfrom pypeit import edgetrace, slittrace, specobjs\nfrom pypeit.spectrographs.keck_deimos import KeckDEIMOSSpectrograph\nfrom pypeit.spectrographs.util import load_spectrograph\nfrom pypeit.tests.tstutils import dev_suite_required, cooked_required\n\n\ndef data_path(filename):\n data_dir = os.path.join(os.path.dirname(__file__), 'files')\n return os.path.join(data_dir, filename)\n\ndef deimos_flat_files():\n return [os.path.join(os.getenv('PYPEIT_DEV'), 'RAW_DATA', 'keck_deimos', '830G_M_8500', ifile)\n for ifile in ['DE.20100913.57161.fits.gz', 'DE.20100913.57006.fits.gz']]\n\n\n@cooked_required\ndef test_assign_maskinfo():\n\n # Spectrograph\n keck_deimos = KeckDEIMOSSpectrograph()\n par = keck_deimos.default_pypeit_par()\n # working 
only on detector 3\n det = 3\n\n # Built trace image\n traceImage = buildimage.buildimage_fromlist(keck_deimos, det, par['calibrations']['traceframe'],\n deimos_flat_files())\n msbpm = keck_deimos.bpm(traceImage.files[0], det)\n\n # load specific config parameters\n par = keck_deimos.config_specific_par(traceImage.files[0])\n trace_par = par['calibrations']['slitedges']\n\n # Run edge trace\n edges = edgetrace.EdgeTraceSet(traceImage, keck_deimos, trace_par, bpm=msbpm, auto=True,\n debug=False, show_stages=False,qa_path=None)\n\n slits = edges.get_slits()\n\n # Test that the maskfile is saved properly\n hdul = fits.open(slits.maskfile)\n det_par = keck_deimos.get_detector_par(hdul, det=det)\n\n specobjs_file = os.path.join(os.getenv('PYPEIT_DEV'), 'Cooked', 'Science',\n 'spec1d_DE.20100913.22358-CFHQS1_DEIMOS_2010Sep13T061231.334.fits')\n # specobjs_file = os.path.join(os.getenv('PYPEIT_DEV'), 'REDUX_OUT', 'keck_deimos',\n # '830G_M_8500', 'Science',\n # 'spec1d_DE.20100913.22358-CFHQS1_DEIMOS_2010Sep13T061231.334.fits')\n sobjs = specobjs.SpecObjs.from_fitsfile(specobjs_file)\n # Init at null\n for sobj in sobjs:\n sobj.MASKDEF_OBJNAME = None\n sobj.RA = None\n sobj.DEC = None\n\n # Run me\n slits.assign_maskinfo(sobjs, det_par['platescale'])\n\n # Test\n assert sobjs[sobjs.SLITID == 496].MASKDEF_OBJNAME == 'ero89', 'Wrong MASKDEF_OBJNAME'\n assert sobjs[sobjs.SLITID == 496].RA == 352.27471667, 'Wrong object RA'\n assert sobjs[sobjs.SLITID == 496].DEC == -3.09223056, 'Wrong object DEC'\n\n # Write sobjs\n sobjs.write_to_fits({}, data_path('tst_sobjs.fits'))\n os.remove(data_path('tst_sobjs.fits'))\n\n\n@dev_suite_required\ndef test_deimosslitmask():\n f = os.path.join(os.environ['PYPEIT_DEV'], 'RAW_DATA', 'keck_deimos', '830G_M_8500',\n 'DE.20100913.22358.fits.gz')\n spec = KeckDEIMOSSpectrograph()\n spec.get_slitmask(f)\n assert spec.slitmask.nslits == 106, 'Incorrect number of slits read!'\n\n","sub_path":"pypeit/tests/test_slitmask.py","file_name":"test_slitmask.py","file_ext":"py","file_size_in_byte":3098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"535310026","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('homepage', '0002_notice_notice_content'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='News',\n fields=[\n ('news_id', models.IntegerField(serialize=False, auto_created=True, primary_key=True)),\n ('news_title', models.CharField(max_length=100)),\n ('news_author', models.CharField(max_length=30)),\n ('news_content', models.TextField()),\n ('news_created', models.DateField(auto_now_add=True)),\n ],\n ),\n migrations.CreateModel(\n name='Qna',\n fields=[\n ('qna_id', models.IntegerField(serialize=False, auto_created=True, primary_key=True)),\n ('qna_title', models.CharField(max_length=100)),\n ('qna_author', models.CharField(max_length=30)),\n ('qna_password', models.CharField(max_length=30)),\n ('qna_content', models.TextField()),\n ('qna_created', models.DateField(auto_now_add=True)),\n ],\n ),\n migrations.AlterField(\n model_name='notice',\n name='notice_id',\n field=models.IntegerField(serialize=False, auto_created=True, primary_key=True),\n ),\n 
]\n","sub_path":"homepage/migrations/0003_auto_20151112_1802.py","file_name":"0003_auto_20151112_1802.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"504530461","text":"import cv2\nimport judger_hand\nimport numpy as np\nimport skimage.io\n\n\ndef nice_contour(contour, height=140):\n min_y = min([c[0][1] for c in contour])\n new_contour = [c for c in contour if c[0][1] < min_y + height]\n return np.array(new_contour)\n\n\ndef find_hand_by_color(img, thre_area=2000, color_range=[[0, 87, 134], [255, 137, 150]], height=140):\n img = np.array(img)\n # Constants for finding range of skin color in YCrCb\n min_YCrCb = np.array(color_range[0], np.uint8)\n max_YCrCb = np.array(color_range[1], np.uint8)\n # Find region with skin tone in YCrCb image\n imageYCrCb = cv2.cvtColor(img, cv2.COLOR_BGR2YCR_CB)\n skinRegion = cv2.inRange(imageYCrCb, min_YCrCb, max_YCrCb)\n\n # Do contour detection on skin region\n _, contours, _ = cv2.findContours(\n skinRegion, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n candidate_box = []\n for i, c in enumerate(contours):\n area = cv2.contourArea(c)\n if area > thre_area:\n contours[i] = nice_contour(contours[i], height)\n x, y, w, h = cv2.boundingRect(contours[i])\n cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)\n cv2.drawContours(img, contours, i, (0, 0, 255), 2)\n candidate_box.append((w * h, [x, y, w, h]))\n\n candidate_box = sorted(candidate_box, key=lambda x: -x[0])[:2]\n bbox = {'bbox': {}}\n for _, box in candidate_box:\n coor = [box[0], box[1], box[0] + box[2], box[1] + box[3]]\n if box[0] + box[2] > 0.6 * img.shape[1]:\n bbox['bbox']['R'] = coor\n else:\n bbox['bbox']['L'] = coor\n print(img.shape)\n print(candidate_box)\n print(bbox)\n return bbox, img\n\n\ntest_files = judger_hand.get_file_names()\noutput_f = judger_hand.get_output_file_object()\nbbox_ = {'bbox': {}}\nfor fname in test_files:\n img = skimage.io.imread(fname)\n bbox, img = find_hand_by_color(img)\n if not bbox['bbox']:\n bbox = bbox_\n for hand, box in bbox['bbox'].items():\n hand = 0 if hand == 'L' else 1\n out = '%s %d %d %d %d %d 1.0 \\n' % (\n fname, box[0], box[1], box[2], box[3], hand)\n print(out)\n output_f.write(out.encode())\n bbox_ = bbox\njudger_hand.judge()\n\n\"\"\" For demo use\n# Camera\ncamera = cv2.VideoCapture(0)\n\nwhile(1):\n # Capture frame from camera\n ret, frame = camera.read()\n frame = cv2.bilateralFilter(frame,5,50,100)\n bbox, frame = find_hand_by_color(frame, color_range=[[0, 140, 80], [255,180,128]], height=300)\n \n cv2.imshow('Hand Detection',frame)\n interrupt=cv2.waitKey(10)\n\"\"\"\n","sub_path":"hand_skin_detector/hand_skin_detector.py","file_name":"hand_skin_detector.py","file_ext":"py","file_size_in_byte":2583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"207768751","text":"# Extract data pieces from one web page.\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\n\r\nurl = \"https://boston.craigslist.org/search/sof\"\r\n\r\nresponse = requests.get(url)\r\n\r\ndata = response.text\r\n\r\nsoup = BeautifulSoup(data,'html.parser')\r\n\r\njobs = soup.find_all('div',{'class':'result-info'})\r\n\r\n\r\nfor job in jobs:\r\n title = job.find('a',{'class':'result-title'}).text\r\n location_tag = job.find('span',{'class':'result-hood'})\r\n # added [2:-1] to remove the brackets, some job positions do not have location -> added loop\r\n location = location_tag.text[2:-1] if location_tag else 
\"N/A\"\r\n dates = job.find('time',{'class':'result-date'}).text\r\n link = job.find('a',{'class':'result-title'}).get('href')\r\n print(\"Job Title:\", title, \"\\nLocation:\", location, \"\\nDates:\", dates, \"\\nLink:\", link, \"\\n--\")","sub_path":"craiglist/job_details_wrapper.py","file_name":"job_details_wrapper.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"544010276","text":"w=lambda a,k: 1 if (a != k) else 0\ndef addGF2Vectors(acc=[], *a):\n '''\n The Galois field GF(2) has 2 elements, 0 and 1 (binary). \n Addition works like exclusive-or.\n The lists of ints inside the tuple/*a are assumed to be of the same length\n @acc -> list of ints\n @*a -> tuple of lists of ints\n @return -> list of ints\n '''\n if not acc:\n acc = [0 for _ in range(len(a[0]))]\n if not a:\n return acc\n else:\n acc = [w(acc[j], a[0][j]) for j in range(len(acc))]\n return addGF2Vectors(acc, *a[1:])\n","sub_path":"addGF2Vectors.py","file_name":"addGF2Vectors.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"462830189","text":"import tkinter as tk\r\n\r\njanela = tk.Tk()\r\n\r\nlb1 = tk.Label(janela, text=\"Teste\", bg=\"gray\")\r\nlb1.pack(side=tk.LEFT, fill = tk.Y) # side=BOTTOM TOP LEFT RIGHT\r\n\r\njanela.geometry(\"400x300+200+200\")\r\n\r\njanela.mainloop()","sub_path":"Pack.py","file_name":"Pack.py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"491503428","text":"import cv2\nimport numpy as np\nimport serial\nimport time\n\ndef find_rect_of_target_color(image):\n hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV_FULL)\n h = hsv[:, :, 0]\n s = hsv[:, :, 1]\n mask = np.zeros(h.shape, dtype=np.uint8)\n #mask[((h < 150) & (h > 90)) & (s > 128)] = 180\n mask[((h < 150) & (h > 90)) & (s > 128)] = 180\n contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n rects = []\n for contour in contours:\n approx = cv2.convexHull(contour)\n rect = cv2.boundingRect(approx)\n rects.append(np.array(rect))\n return rects\n\nif __name__ == \"__main__\":\n tm = cv2.TickMeter()\n tm.start()\n count = 0\n max_count = 10\n fps = 0\n ser = serial.Serial('COM6', 115200, timeout=0.1)\n time.sleep(1)\n \n capture = cv2.VideoCapture(0)\n #capture.set(cv2.CAP_PROP_FRAME_WIDTH, 1280) # set the camera image width to 1280\n #capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 720) # set the camera image height to 720\n\n while 1:\n _, frame = capture.read()\n if count == max_count:\n tm.stop()\n fps = max_count / tm.getTimeSec()\n tm.reset()\n tm.start()\n count = 0\n cv2.putText(frame, 'FPS: {:.2f}'.format(fps),\n (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), thickness=2)\n rects = find_rect_of_target_color(frame)\n if len(rects) > 0:\n rect = max(rects, key=(lambda x: x[2] * x[3]))\n print(rect[2], rect[3])\n cv2.rectangle(frame, tuple(rect[0:2]), tuple(rect[0:2] + rect[2:4]), (255, 0, 0), thickness=2)\n if((rect[2]>=12) and (rect[3]>=11)):\n print(\"detected\")\n ser.write(b'a')\n count += 1\n cv2.imshow('LED detection', frame)\n k = cv2.waitKey(1)\n if k == ord('q'):\n break\n capture.release()\n cv2.destroyAllWindows()\n ser.close()\n","sub_path":"opencv/detect.py","file_name":"detect.py","file_ext":"py","file_size_in_byte":1953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"321478208","text":"# 
coding:utf-8\n\"\"\"\ncreate on Nov 5, 2019 By Wayne Yu\nFun: Data-crawling program for the relative-value evaluation model of backbone ISPs in countries worldwide; this program mainly fetches, organizes, and computes information-source data.\n\n1) Crawl the Alexa site for each country's top 30 information-source website addresses;\n2) Take these 30 source website addresses and query the HE site for the AS numbers they are attached to;\n3) For each AS number, use CAIDA's global AS-level BGP interconnection data to find the AS numbers it peers with;\n4) Finally, compute the number of information sources inside each country's backbone ISP AS networks.\n\nCountries to query: United States (US), Japan (JP), India (IN), France (FR), Singapore (SG), Australia (AU), Hong Kong (HK), and China (CN)\n\n\"\"\"\nfrom selenium import webdriver\nimport time\nfrom bs4 import BeautifulSoup\nimport csv\nimport re\n\n\ndef write_to_csv(res_list, des_path):\n \"\"\"\n Write the given list to a csv file at the specified path\n :param res_list:\n :param des_path:\n :return None:\n \"\"\"\n print(\"write file <%s>...\" % des_path)\n csv_file = open(des_path, 'w', newline='', encoding='utf-8')\n try:\n writer = csv.writer(csv_file)\n for i in res_list:\n writer.writerow(i)\n except Exception as e:\n print(e)\n finally:\n csv_file.close()\n print('write finish!')\n\n\ndef obtain_top_sites_by_country(page_url):\n \"\"\"\n From each country's Alexa Top Sites page, get the country's top 30 information sources\n :param page_url:\n :return top_sites:\n \"\"\"\n top_sites = []\n # fetch the page\n driver.get(page_url)\n time.sleep(1) # delayed load; wait for the page content to finish loading\n # get the page's html\n page_html = driver.page_source\n bs_obj = BeautifulSoup(page_html, \"html.parser\")\n # print(bs_obj)\n tr_list = bs_obj.findAll(\"div\", {\"class\": \"tr site-listing\"})\n for tr_item in tr_list:\n # print(tr_item.find(\"a\").get_text().lower())\n url_item = tr_item.find(\"a\").get_text().lower()\n top_sites.append(url_item)\n return top_sites[0:50]\n\n\ndef obtain_asn_by_site(page_url):\n \"\"\"\n Using each website's domain name, query the HE site for the AS number of its access ISP\n :param page_url:\n :return access_asn:\n \"\"\"\n access_asn = []\n # fetch the page\n driver.get(page_url)\n time.sleep(3) # delayed load; wait for the page content to finish loading\n # get the page's html\n page_html = driver.page_source\n bs_obj = BeautifulSoup(page_html, \"html.parser\")\n # print(bs_obj)\n ip_info = bs_obj.find(\"div\", {\"id\": \"ipinfo\"})\n ip_info_string = str(ip_info)\n pattern = re.compile(r'>AS\\d+<') # use a regular expression to find AS numbers in the page's ip_info\n re_return = pattern.findall(ip_info_string)\n for item in re_return:\n item = item[1:-1]\n if item not in access_asn:\n access_asn.append(item)\n return access_asn\n\n\nif __name__ == \"__main__\":\n # countries = [\"US\", \"JP\", \"IN\", \"FR\", \"SG\", \"AU\", \"HK\", \"CN\"]\n countries = [\"FR\"]\n # web_url = \"https://www.alexa.com/topsites/countries/US\" # entry point of the crawler\n time_start = time.time()\n # launch the browser\n driver = webdriver.Firefox()\n try:\n countries_top_sites_with_as = []\n for countries_item in countries:\n web_url = \"https://www.alexa.com/topsites/countries/\" + countries_item\n countries_top_sites = obtain_top_sites_by_country(web_url)\n print(\"countries top sites:\", countries_top_sites)\n print(\"sleep 10 seconds......\")\n time.sleep(10)\n temp_list = []\n cnt_rank = 1\n for sites_url in countries_top_sites:\n temp_list.append(cnt_rank)\n temp_list.append(sites_url)\n request_url = \"https://bgp.he.net/dns/\" + sites_url + \"#_ipinfo\"\n print(\"request_url:\", request_url)\n site_access_asn = obtain_asn_by_site(request_url)\n print(\"site access asn:\", site_access_asn)\n for site_access_asn_item in site_access_asn:\n temp_list.append(site_access_asn_item)\n countries_top_sites_with_as.append(temp_list)\n temp_list = []\n cnt_rank += 1\n print(countries_top_sites_with_as)\n country_string = web_url.split(\"/\")[-1]\n save_path = \"./data/top_sites_with_as_(\" + country_string + \").csv\"\n write_to_csv(countries_top_sites_with_as, save_path)\n countries_top_sites_with_as = []\n except Exception as e:\n print(e)\n # close the browser\n driver.quit()\n time_end = 
time.time()\n print(\"=>Scripts Finish, Time Consuming:\", (time_end - time_start), \"S\")\n","sub_path":"017RelativeValueOfISP/crawler_info_for_ISP_NoCut.py","file_name":"crawler_info_for_ISP_NoCut.py","file_ext":"py","file_size_in_byte":4715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"115232321","text":"#\n\n\"\"\"Module for calculating the areas of geometric shapes\"\"\"\n\ndef calculate_square_area(x):\n \"\"\"computes the area of a square from its side length\"\"\"\n return x**2\n\ndef calculate_rechtangle_area(a, b):\n \"\"\"computes the area of a rectangle from its side lengths\"\"\"\n return a*b\na = 666\n\n__all__ = [ # list of functions imported by the import * command\n \"calculate_square_area\",\n \"calculate_rechtangle_area\"\n ]\n\nif __name__ == \"__main__\":\n print(\"test\")\n","sub_path":"lectures/lesson05/square_shapes.py","file_name":"square_shapes.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"422078883","text":"import torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\nimport argparse\nimport dataset\nfrom models import tj\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--batch_size\", type = int, default = 4)\nparser.add_argument(\"--n_epochs\", type = int, default = 100)\nparser.add_argument(\"--lr\", type = float, default = 2e-3)\nparser.add_argument(\"--init_weights\", type = bool, default = False)\nparser.add_argument(\"--GPU\", type = bool, default = False)\nargs = parser.parse_args()\nprint(args)\n\n#for reproducibility\ntorch.manual_seed(0)\n\n#load dataset\nprint('loading dataset...')\ntransform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=(0.5,0.5,0.5), std=(0.5,0.5,0.5))])\ntrain_data = dataset.tj(transform = transform, mode = 'train')\nprint('number of data points:', len(train_data))\ntrainloader = DataLoader(dataset = train_data, batch_size = args.batch_size, shuffle = True)\nprint('dataset loading finished')\nprint('data length: ', len(trainloader))\n\n#create model\ntj = tj()\nif args.init_weights:\n tj.init_weights()\n\noptimizer = torch.optim.Adam(tj.parameters(), lr = args.lr, betas = [0.5, 0.999])\ncurrent_epoch = 1\n\nif args.GPU and torch.cuda.is_available():\n print('using GPU...')\n tj = tj.cuda()\n\n#loss function settings\ncriterionL1 = nn.L1Loss()\ncriterionBCE = nn.BCELoss()\n\nif current_epoch >= args.n_epochs:\n raise Exception('training already finished!')\nelse:\n print('start training!')\n\n#start training\nfor epoch in range(current_epoch, args.n_epochs+1):\n print('current_epoch:', epoch)\n for i, (img, obj, hw, cp) in enumerate(trainloader, 1):\n '''print('obj before:', obj)\n print('obj after:', obj > 0)'''\n obj = (obj > 0).float()\n mask = obj.unsqueeze(2).expand_as(hw)\n if args.GPU and torch.cuda.is_available():\n img = img.cuda()\n obj = obj.cuda()\n hw = hw.cuda()\n cp = cp.cuda()\n mask = mask.cuda()\n\n #train tjCNN\n pred_cp, pred_hw, pred_obj = tj(img)\n #print('pred_obj:', pred_obj)\n cp_loss = criterionL1(pred_cp * mask, cp * mask)\n hw_loss = criterionL1(pred_hw * mask, hw * mask)\n obj_loss = criterionBCE(pred_obj, obj)\n loss = (0.1*cp_loss + 0.1*hw_loss + obj_loss)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if i%10 == 0:\n print('loss:', loss.mean())\n\n\n torch.save(tj.state_dict(), './checkpoints/tj_{}.pth'.format(epoch))\n print('model at {}th epoch is 
saved'.format(epoch))\n\n\n","sub_path":"traffic_jam_detection/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"567822158","text":"import pickle as pk\nimport numpy as np\n\n# ref: tensorflow.examples.tutorials.mnist.input_data\n\n# helper function for loading data\ndef load_data(name):\n print('Loading data... ', end='')\n if name == 'euc-kr':\n file_path = 'dataset/euc-kr.pkl'\n else:\n print('No such Data!: %s' % name)\n raise FileNotFoundError\n\n with open(file_path, 'rb') as f:\n data = pk.load(f)\n # train = DataSet(data['train_images'], data['train_labels'])\n # test = DataSet(data['test_images'], data['test_labels'])\n train = DataSet(data['images'], data['labels_cho'])\n test = None\n print('done!')\n return Datasets(train=train, test=test)\n\n\nclass DataSet:\n\n def __init__(self, images, labels):\n self.images = images\n self.labels = labels\n assert images.shape[0] == labels.shape[0]\n self.num_examples = images.shape[0]\n self._index_in_epoch = 0\n self._epochs_completed = 0\n\n def next_batch(self, batch_size, shuffle=True):\n \"\"\"Return the next `batch_size` examples from this data set.\"\"\"\n assert 0 <= batch_size <= self.num_examples\n\n start = self._index_in_epoch\n # Shuffle for the first epoch\n if self._epochs_completed == 0 and start == 0 and shuffle:\n perm0 = np.arange(self.num_examples)\n np.random.shuffle(perm0)\n self.images = self.images[perm0]\n self.labels = self.labels[perm0]\n # Go to the next epoch\n if start + batch_size > self.num_examples:\n # Finished epoch\n self._epochs_completed += 1\n # Get the rest examples in this epoch\n rest_num_examples = self.num_examples - start\n images_rest_part = self.images[start:self.num_examples]\n labels_rest_part = self.labels[start:self.num_examples]\n # Shuffle the data\n if shuffle:\n perm = np.arange(self.num_examples)\n np.random.shuffle(perm)\n self.images = self.images[perm]\n self.labels = self.labels[perm]\n # Start next epoch\n start = 0\n self._index_in_epoch = batch_size - rest_num_examples\n end = self._index_in_epoch\n images_new_part = self.images[start:end]\n labels_new_part = self.labels[start:end]\n return np.concatenate((images_rest_part, images_new_part), axis=0), np.concatenate(\n (labels_rest_part, labels_new_part), axis=0)\n else:\n self._index_in_epoch += batch_size\n end = self._index_in_epoch\n return self.images[start:end], self.labels[start:end]\n\n\nclass Datasets:\n\n def __init__(self, train, test):\n self.train = train\n self.test = test\n # assert train.images.shape[1] == test.images.shape[1]\n # assert train.labels.shape[1] == test.labels.shape[1]\n # self.image_size = train.images.shape[1]\n # self.label_size = train.labels.shape[1]","sub_path":"load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":3023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"89907456","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport matplotlib.pyplot as plt \nfrom torch.utils.data import sampler\nimport torch.utils.data as data_utils\nfrom torch.utils.data.sampler import SubsetRandomSampler\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data import TensorDataset\nfrom progress.bar import Bar\nfrom torchvision import datasets, transforms\nfrom torch.autograd import Variable\nfrom synthetic_data_generation import 
initialize_synthetic_sampler, sample_data_from_sampler\nfrom sup_functions import test_model, test_model_on_gen, weights_init\nfrom models import Net, autoencoder\n#from models import autoencoder2 as autoencoder\n\nimport os\n\nroot = '~/workspace/Projects/Journal_paper/'\ndataset = 'LSUN'\nprint('Loading data')\nopts = {\n 'batch_size': 1000,\n 'mode': 'multi-class',\n 'dataset': 'LSUN',\n 'test_every': 1,\n 'learning_rate': 0.001,\n 'number_of_epochs': 100,\n 'dim': 2048,\n 'nb_classes': 30,\n 'code_size': 32,\n 'betta': 0.2,\n 'add_noise': True,\n 'cuda_device': 0,\n }\n \ntorch.cuda.set_device(opts['cuda_device'])\ncode_size = opts['code_size']\nnb_classes = opts['nb_classes']\ntrainset, testset = {}, {}\n\ntrainset_ = torch.load(root + 'datasets/' + dataset + '_features/trainset.pth')\ntestset_ = torch.load(root + 'datasets/' + dataset + '_features/testset.pth')\ntrainset = data_utils.TensorDataset(trainset_[0], trainset_[1])\ntestset = data_utils.TensorDataset(testset_[0], testset_[1])\n\ntrain_loader = data_utils.DataLoader(trainset, batch_size=opts['batch_size'], shuffle = True)\ntest_loader = data_utils.DataLoader(testset, batch_size=opts['batch_size'], shuffle = False)\n\nautoencoder_model = autoencoder(code_size).cuda()\n#autoencoder_model.apply(weights_init)\nclassifier_model = torch.load(root+'batch_training/results/LSUN/models/LSUN_classifier_original.pth')\n\ncriterion_AE = nn.MSELoss().cuda()\ncriterion_classif = nn.MSELoss().cuda()\n#optimizer = torch.optim.SGD(autoencoder_model.parameters(), lr=opts['learning_rate'], momentum=0.99)\noptimizer_main = torch.optim.Adam(autoencoder_model.parameters(), lr=opts['learning_rate'], betas=(0.9, 0.999),\n weight_decay=1e-5)\n\naccuracies = []\nbest_acc = 0\nacc = test_model(classifier_model, test_loader)\nprint('Accuracy of pretrained model on the original testset: ' + str(acc))\nfor epoch in range(opts['number_of_epochs']):\n bar = Bar('Training: ', max=int(opts['nb_classes']*100000/opts['batch_size']))\n for idx, (train_X, train_Y) in enumerate(train_loader):\n bar.next()\n inputs = train_X.cuda()\n labels = train_Y.cuda()\n optimizer_main.zero_grad()\n #optimizer_class.zero_grad()\n #\n# img = Variable(inputs).cuda()\n # ===================forward=====================\n outputs = autoencoder_model(inputs)\n \n orig_classes = classifier_model(inputs)\n classification_reconstructed = classifier_model(outputs)\n \n loss_classif = criterion_classif(classification_reconstructed, orig_classes)\n loss_AE = criterion_AE(outputs, inputs)\n #\n #loss_classif.backward(retain_graph=True)\n #\n loss = opts['betta']*loss_classif + loss_AE\n # ===================backward====================\n loss.backward()\n #optimizer_class.step()\n optimizer_main.step()\n \n if idx%100==0:\n #plt.plot(range(2048), inputs[0].cpu().detach().numpy(), label='in')\n #plt.plot(range(2048), outputs[0].cpu().detach().numpy(), label='out')\n #plt.legend()\n #plt.savefig('imgs/epoch_'+str(epoch)+'_idx_'+str(idx)+'.png')\n #plt.close()\n print('epoch [{}/{}], loss:{:.4f}'\n .format(epoch+1, opts['number_of_epochs'], loss.item()))\n # ===================log========================\n bar.finish()\n print('epoch [{}/{}], loss:{:.4f}'\n .format(epoch+1, opts['number_of_epochs'], loss.item()))\n if epoch % opts['test_every'] == 0:\n autoencoder_model.eval()\n acc = test_model_on_gen(classifier_model, autoencoder_model, test_loader)\n accuracies.append(acc)\n torch.save(accuracies, 'results/representivity_LSUN_' + str(opts['code_size']) + '_code_size_' + 
str(opts['nb_classes']) +'_classes.pth')\n if acc>best_acc:\n best_acc=acc\n torch.save(autoencoder_model.state_dict(), 'models/LSUN_' +str(opts['code_size']) + '_code_size_' + str(opts['nb_classes']) +'_classes.pth')\n autoencoder_model.train()\n print('Accuracy on reconstructed testset: ' + str(acc))\n\n#torch.save(model.state_dict(), './conv_autoencoder_LSUN.pth')\n","sub_path":"gen_model_training/LSUN/autoencoders_LSUN.py","file_name":"autoencoders_LSUN.py","file_ext":"py","file_size_in_byte":4499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"614104986","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\n\r\nfig = plt.figure(figsize =(8, 3))\r\nax1 = plt.subplot(231, projection=\"3d\")\r\nax2 = plt.subplot(232, projection=\"3d\")\r\nax3 = plt.subplot(233, projection=\"3d\")\r\nax4 = plt.subplot(234, projection=\"3d\")\r\nax5 = plt.subplot(235, projection=\"3d\")\r\nax6 = plt.subplot(236, projection=\"3d\")\r\n\r\n_x = np.arange(4)\r\n_y = np.arange(5)\r\n_xx, _yy = np.meshgrid(_x, _y)\r\nx, y = _xx.ravel(), _yy.ravel()\r\ntop = x + y\r\nbottom = np.zeros_like(top)\r\nwidth = depth = 1\r\nax1.bar3d(x, y, bottom, width, depth, top, shade=True)\r\nax2.bar3d(x, y, bottom, width, depth, top, shade=True, color=\"m\")\r\nax3.bar3d(x, y, bottom, width, depth, top, shade=True, color=\"c\")\r\nax4.bar3d(x, y, bottom, width, depth, top, shade=True, color=\"r\")\r\nax5.bar3d(x, y, bottom, width, depth, top, shade=True, color=\"g\")\r\nax6.bar3d(x, y, bottom, width, depth, top, shade=True, color=\"b\")\r\nplt.show()\r\n","sub_path":"cw11/cw11_z4.py","file_name":"cw11_z4.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"289378187","text":"import sqlite3\n\nconnector = sqlite3.connect(\"sqlite_test.db\")\n\n# sql = \"insert into test_table values('1', 'python')\"\n# connector.execute(sql)\n# sql = \"insert into test_table values('2', 'パイソン')\"\n# connector.execute(sql)\n# sql = \"insert into test_table values('3', 'ぱいそん')\"\n# connector.execute(sql)\ncursor = connector.cursor()\ncursor.execute(\"select * from test_table order by id\")\n\nresult = cursor.fetchall()\nfor row in result:\n print(\"code -- \" + str(row[0]))\n print(\"name -- \" + str(row[1]))\n\nconnector.commit()\nconnector.close()\n","sub_path":"LibraryTest/test3.py","file_name":"test3.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"386959121","text":"# ../entities/attributes.py\n\n# =============================================================================\n# >> IMPORTS\n# =============================================================================\n# Site-Package Imports\n# ConfigObj\nfrom configobj import ConfigObj\n# Sys\nimport sys\n\n# Source.Python Imports\nfrom core import GAME_NAME\nfrom paths import SP_DATA_PATH\n# Entities\nfrom entities import EntitiesLogger\n\n\n# =============================================================================\n# >> ALL DECLARATION\n# =============================================================================\n__all__ = []\n\n\n# =============================================================================\n# >> GLOBAL VARIABLES\n# =============================================================================\n# Get the sp.entities.attributes logger\nEntitiesAttributesLogger = 
EntitiesLogger.attributes\n\n\n# =============================================================================\n# >> CLASSES\n# =============================================================================\nclass EntityAttributes(dict):\n '''Base Attribute class used to interact with\n entity's based off of ini data files.'''\n\n '''Each class that inherits from EntityAttributes\n must have the following attributes:\n type - used to know which directory within data to get values\n unrepr - used to know what to have ConfigObj unrepr set to\n instance - used to know which class to use to create the objects\n '''\n\n def __missing__(self, entity):\n '''Called the first time an entity is added to the dictionary'''\n\n # Get all attributes for the given entity\n values = self[entity] = self._retrieve_attributes(entity)\n\n # Return the attributes and their values\n return values\n\n def get_game_attributes(self, args):\n '''Returns all attributes for the given entities'''\n\n # Create an empty dictionary\n values = dict()\n\n # Loop through all given entities\n for arg in args:\n\n # Add the entities to the dictionary\n values.update(self[arg])\n\n # Return all attributes for the given entities\n return values\n\n def _retrieve_attributes(self, entity):\n '''Retrieves all attributes for the given entity'''\n\n # Create an empty dictionary\n game_attributes = dict()\n\n # Get the path to the entity's attributes\n inifile = SP_DATA_PATH.joinpath(self.type, entity, GAME_NAME + '.ini')\n\n # Does the file exist?\n if not inifile.isfile():\n\n # Return the empty dictionary\n return game_attributes\n\n # Get the file's contents\n ini = ConfigObj(inifile, unrepr=self.unrepr)\n\n # Loop through all items in the file\n for key in ini:\n\n # Use try/except in case an error occurs\n try:\n\n # Get the object for the current key\n value = self.instance(ini[key])\n\n # Was an error encountered?\n except:\n\n # Get the exception\n exctype, value, trace_back = sys.exc_info()\n\n # Log the error as a warning\n EntitiesAttributesLogger.log_warning(\n 'Unable to add attribute \"{0}\"'.format(key) +\n 'of type \"{0}\" to entity type '.format(self.type) +\n '\"{0}\" due to the following:'.format(entity) +\n '\\n\\t{0}'.format(value))\n\n # Was no error encountered?\n else:\n\n # Add the item to the dictionary\n game_attributes[key] = value\n\n # Return the dictionary\n return game_attributes\n","sub_path":"addons/source-python/packages/source-python/entities/attributes.py","file_name":"attributes.py","file_ext":"py","file_size_in_byte":3750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"221489148","text":"import cv2 as cv\n\ncascadeLoc = 'cascades/data/haarcascade_frontalface_default.xml'\nface_cascade = cv.CascadeClassifier(cascadeLoc)\n\n\ndef detect_faces(color_img, only_largest):\n \"\"\"\n takes in a color image read in using cv2 and returns a list of cropped\n color images of the detected faces. 
If only_largest is true, then only\n the largest detected face will be returned.\n \"\"\"\n\n gray = cv.cvtColor(color_img, cv.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=2)\n cropped_list = []\n\n i = -1\n largest_index = -1\n largest_size = -1\n\n for (x, y, w, h) in faces:\n i += 1\n size = w * h\n if size > largest_size:\n largest_index = i\n largest_size = size\n\n cropped_color = color_img[y:y+h, x:x+w]\n cropped_list.append(cropped_color)\n\n if only_largest and largest_size > -1:\n cropped_list = [cropped_list[largest_index]]\n\n return cropped_list\n","sub_path":"detect.py","file_name":"detect.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"471411372","text":"#!/usr/bin/env python3\nimport subprocess\nimport sys\nimport signal\nimport sys\nimport os, shutil\nimport xml.etree.ElementTree as ET\n\nimport time\nimport requests\nfrom xml.etree.ElementTree import Comment\n\nfrom requests import ConnectionError\nfrom selenium.webdriver.common.by import By\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\nfrom smoketest.mylib.utils import Utils\n\n\ndef ensure_path_exists(path):\n import errno\n try:\n os.makedirs(path)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise\n\n\nlogs_dir = os.path.join(Utils.log_dir(), 'logs')\nsub_path = requests.get('http://localhost:3000/next').content.decode('utf-8');\npath_to_dir = os.path.join(os.getcwd(), 'logs', *sub_path.split('/'))\nensure_path_exists(path_to_dir)\n# except ConnectionError:\n# print \"Need to start webserver. Run ./startup.sh from smoketest dir\", os.getcwd()\n\n\ndef signal_handler(sig, frame):\n print('You pressed Ctrl+C!')\n [p.kill() for p in opens]\n sys.exit(0)\n\n\nsignal.signal(signal.SIGINT, signal_handler)\n\nopens = []\nroot = ET.Element(\"ipAddresses\")\nroot.append(Comment('Auto Generated in multi-run.py'))\n\nrun_dates_path = os.path.join(Utils.log_dir(), 'logs', \"runInfo.txt\")\n\n\ndef run_some(browser, start, end):\n from sys import platform as platform\n\n if platform == \"win32\":\n path_to_python = \"c:\\\\Python27\\\\python.exe\"\n elif platform == \"linux\":\n path_to_python = sys.executable\n else:\n path_to_python = \"/cygdrive/c/cygwin64/bin/python\"\n\n for i in range(start, end):\n time.sleep(2)\n opens.append(subprocess.Popen([path_to_python, \"./runAll.py\", sys.argv[i], browser, path_to_dir]))\n\n [p.wait() for p in opens]\n\nif not os.path.exists(run_dates_path):\n\n run_dates_file = open(run_dates_path, \"a+\")\n\n for date in Utils.get_dirs(logs_dir):\n for run in Utils.get_dirs(os.path.join(Utils.log_dir(), 'logs', date)):\n print('date', date, 'list', os.listdir(os.path.join(Utils.log_dir(), 'logs', date)))\n run_dates_file.write(date + '/' + run + '\\n')\n\nelse:\n run_dates_file = open(run_dates_path, \"a+\")\n run_dates_file.write(sub_path + '\\n')\n\nrun_dates_file.close()\n\nfor i in range(3, len(sys.argv)):\n # path = os.path.join(path_to_dir, Utils.format_ip_address(sys.argv[i]))\n path = os.path.normcase(os.path.join(\"logs\", sub_path, Utils.format_ip_address(sys.argv[i])))\n field = ET.SubElement(root, \"ipAddress\", location=path).text = sys.argv[i]\n\ntree = ET.ElementTree(root)\ntree.write(os.path.join(path_to_dir, 'ip-addresses.xml'))\n\nargStart = 1;\nbrowser = 'chrome'\n\nfor i in range (1, len(sys.argv)):\n if (sys.argv[i] == '-browser'):\n argStart = i + 2\n browser = 
sys.argv[i+1]\n\nstep = 3\nfor i in range(argStart, len(sys.argv), step):\n run_some(browser, i, min(i + step, len(sys.argv)))\n\nUtils.print_tree(path_to_dir)\n\nres = {}\n\n\ndef get_test_run_info(date):\n\n total_error_count = 0\n total_test_count = 0\n\n for run in Utils.get_dirs(os.path.join(logs_dir, date)):\n for ip_in_runs in Utils.get_dirs(os.path.join(logs_dir, date, run)):\n for xml in os.listdir(os.path.join(logs_dir, date, run, ip_in_runs)):\n if xml.startswith('testresult'):\n fname = os.path.join(logs_dir, date, run, ip_in_runs, xml)\n tree = ET.parse(fname)\n\n test_count = tree.find('totalTestCount').get('totalTestCount')\n error_count = tree.find('errorCount').get('errorCount')\n\n total_test_count += int(test_count)\n total_error_count += int(error_count)\n\n return {'date': date, 'total_test_count': total_test_count, 'total_error_count': total_error_count}\n\n\ndef make_test_summary_xml(date):\n\n summary_data = get_test_run_info(date)\n\n root_ele = ET.Element('results')\n root_ele.append(Comment('Auto Generated by make_test_summary_xml() multi-run.py'))\n\n summary_ele = ET.SubElement(root_ele, 'summary')\n summary_ele.set('date', summary_data['date'])\n summary_ele.set('totalTestCount', str(summary_data['total_test_count']))\n summary_ele.set('totalErrorCount', str(summary_data['total_error_count']))\n\n tree = ET.ElementTree(root_ele)\n\n path = os.path.join(logs_dir, date, 'testsummary.xml')\n tree.write(path)\n\n\nfor log_date in Utils.get_dirs(logs_dir):\n make_test_summary_xml(log_date)\n","sub_path":"smoketest/multi-run.py","file_name":"multi-run.py","file_ext":"py","file_size_in_byte":4447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"408090316","text":"#!/usr/bin/env python2\n\n\"\"\" \nReducer\n\"\"\"\n\nimport sys\nimport zipimport\n\n\ntry:\n importer = zipimport.zipimporter('nltk.mod')\n nltk = importer.load_module(\"nltk\")\n nltk.data.path += [\"./nltkData/\"]\nexcept zipimport.ZipImportError:\n import nltk\n\nsys.path.append(\"..\")\nimport TextUtils as tu\n\nfrom nltk.tokenize.punkt import PunktSentenceTokenizer, PunktParameters\nfrom nltk.tokenize import sent_tokenize\n\nfrom cPickle import load\n\n# punkt sentence tokenizer setup\npunkt_param = PunktParameters()\npunkt_param.abbrev_types = set(['dr', 'vs', 'mr', 'mrs', 'prof', 'inc'])\nsentence_splitter = PunktSentenceTokenizer(punkt_param)\n\n# load trained POS tagger from disk\ninput = open(\"t2.pkl\", \"rb\")\ntagger = load(input)\ninput.close()\n\ncurrent_word = None\ncurrent_count = 0\nlength = None\n\nwords = dict()\n\nimport re\ndef normalize_pos(complex):\n # nouns start with A, N, DT, F, P, WP\n #if re.match(\"^([ANFP]|WP|DT)\", complex):\n # return \"NOUN\"\n \n\n return complex\n # verbs: B, D\n\n# treat each line as a filename\nfor line in sys.stdin:\n # read file and split into sentences\n num, filename = line.split('\\t')\n file = open(filename.strip())\n raw = file.read()\n file.close()\n raw = raw.decode('utf8').lower()\n sents = sent_tokenize(raw)\n\n # pos-tag each word in the sentence\n for sent in sents:\n text = nltk.word_tokenize(sent)\n text = tu.filter_non_alpha_words(text)\n tagged_sent = tagger.tag(text)\n\n # then add it to our table of words and counts\n for tag in tagged_sent:\n pos = normalize_pos(tag[1])\n key = \"{0}\\{1}\".format(tag[0].encode('utf8'), pos)\n\n #words[key] = words.setdefault(key, default=0) + 1\n if key in words:\n words[key] += 1\n else:\n words[key] = 1\n\nfrom operator import 
itemgetter\nsorted_words = sorted(words.items(), key=itemgetter(1), reverse=True)\nfor w in sorted_words:\n print(\"{0}\\t{1}\".format(*w))\n","sub_path":"U3/reducer.py","file_name":"reducer.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"538280846","text":"#!/usr/bin/env python\n\nimport json\nimport logging\nimport re\nfrom numpy import asarray, rollaxis\n\nfrom pyclowder.utils import CheckMessage\nfrom pyclowder.datasets import download_metadata, get_info, upload_metadata\nfrom terrautils.extractors import TerrarefExtractor, is_latest_file, load_json_file, \\\n build_metadata, build_dataset_hierarchy\nfrom terrautils.betydb import add_arguments, get_sites, get_sites_by_latlon, submit_traits, \\\n get_site_boundaries\nfrom terrautils.geostreams import create_datapoint_with_dependencies\nfrom terrautils.gdal import clip_raster, centroid_from_geojson\nfrom terrautils.metadata import get_extractor_metadata, get_terraref_metadata\n\nimport canopyCover as ccCore\n\n\nlogging.basicConfig(format='%(asctime)s %(message)s')\n\ndef add_local_arguments(parser):\n # add any additional arguments to parser\n add_arguments(parser)\n\nclass CanopyCoverHeight(TerrarefExtractor):\n def __init__(self):\n super(CanopyCoverHeight, self).__init__()\n\n add_local_arguments(self.parser)\n\n # parse command line and load default logging configuration\n self.setup(sensor='stereoTop_canopyCover')\n\n # assign other argumentse\n self.bety_url = self.args.bety_url\n self.bety_key = self.args.bety_key\n\n def check_message(self, connector, host, secret_key, resource, parameters):\n if resource['name'].find('fullfield') > -1 and re.match(\"^.*\\d+_rgb_.*thumb.tif\", resource['name']):\n # Check metadata to verify we have what we need\n md = download_metadata(connector, host, secret_key, resource['parent']['id'])\n if get_extractor_metadata(md, self.extractor_info['name']) and not self.overwrite:\n logging.info(\"skipping dataset %s; metadata indicates it was already processed\" % resource['id'])\n return CheckMessage.ignore\n return CheckMessage.download\n\n return CheckMessage.ignore\n\n def process_message(self, connector, host, secret_key, resource, parameters):\n self.start_message()\n\n tmp_csv = \"canopycovertraits.csv\"\n csv_file = open(tmp_csv, 'w')\n (fields, traits) = ccCore.get_traits_table()\n csv_file.write(','.join(map(str, fields)) + '\\n')\n\n # Get full list of experiment plots using date as filter\n logging.info(connector)\n logging.info(host)\n logging.info(secret_key)\n logging.info(resource)\n ds_info = get_info(connector, host, secret_key, resource['parent']['id'])\n timestamp = ds_info['name'].split(\" - \")[1]\n all_plots = get_site_boundaries(timestamp, city='Maricopa')\n\n successful_plots = 0\n for plotname in all_plots:\n bounds = all_plots[plotname]\n\n # Use GeoJSON string to clip full field to this plot\n try:\n (pxarray, geotrans) = clip_raster(resource['local_paths'][0], bounds)\n if len(pxarray.shape) < 3:\n logging.error(\"unexpected array shape for %s (%s)\" % (plotname, pxarray.shape))\n continue\n ccVal = ccCore.gen_cc_for_img(rollaxis(pxarray,0,3), 5)\n ccVal *= 100.0 # Make 0-100 instead of 0-1\n successful_plots += 1\n if successful_plots % 10 == 0:\n logging.info(\"processed %s/%s plots successfully\" % (successful_plots, len(all_plots)))\n except:\n logging.error(\"error generating cc for %s\" % plotname)\n continue\n\n traits['canopy_cover'] = str(ccVal)\n traits['site'] = 
plotname\n traits['local_datetime'] = timestamp+\"T12:00:00\"\n trait_list = ccCore.generate_traits_list(traits)\n\n csv_file.write(','.join(map(str, trait_list)) + '\\n')\n\n # Prepare and submit datapoint\n centroid_lonlat = json.loads(centroid_from_geojson(bounds))[\"coordinates\"]\n time_fmt = timestamp+\"T12:00:00-07:00\"\n dpmetadata = {\n \"source\": host + (\"\" if host.endswith(\"/\") else \"/\") + \"files/\" + resource['id'],\n \"canopy_cover\": ccVal\n }\n create_datapoint_with_dependencies(connector, host, secret_key, \"Canopy Cover\",\n (centroid_lonlat[1], centroid_lonlat[0]), time_fmt, time_fmt,\n dpmetadata, timestamp)\n\n # submit CSV to BETY\n csv_file.close()\n submit_traits(tmp_csv, betykey=self.bety_key)\n\n # Add metadata to original dataset indicating this was run\n ext_meta = build_metadata(host, self.extractor_info, resource['parent']['id'], {\n \"plots_processed\": successful_plots,\n \"plots_skipped\": len(all_plots)-successful_plots,\n \"betydb_link\": \"https://terraref.ncsa.illinois.edu/bety/api/beta/variables?name=canopy_cover\"\n }, 'dataset')\n upload_metadata(connector, host, secret_key, resource['parent']['id'], ext_meta)\n\n self.end_message()\n\nif __name__ == \"__main__\":\n extractor = CanopyCoverHeight()\n extractor.start()\n","sub_path":"canopycover/terra_canopycover.py","file_name":"terra_canopycover.py","file_ext":"py","file_size_in_byte":5131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"150839731","text":"#!/usr/bin/python3\n\n'''\n If you're using this script from a certain directory for the very first time,\n make sure you choose 1 in main menu and create local database, holding City info.\n After that you may use any options as you intend.\n If you've already installed it, by executing install.py, then you're good to go.\n'''\n\ntry:\n from colorama import Fore, init as color_init\n from colorama.initialise import reset_all\n from os import environ\n from os.path import join\n from install import __is_init_setup_done__\n import re\n from sys import platform\n from subprocess import run\n from city_info import fetch as fetch_city\n from records import fetch_city_name_id, store_city_name_id\n from weather import fetch as fetch_weather\nexcept ImportError as e:\n print('[!Module Unavailable : {}'.format(str(e)))\n exit(1)\n\n\ndef __is_os_supported__():\n regex = re.compile(r'^(linux)$', flags=re.I)\n if(regex.match(platform)):\n return True\n return False\n\n\ndef __fetch_a_certain_city__(db_name):\n tmp = input('[?]Search by\\n\\t1. CityName ( finds all possible matches )\\n\\t2. CityID\\n>> ')\n resp = {}\n try:\n tmp = int(tmp)\n except ValueError as e:\n resp = {'error': str(e)}\n return resp\n if(tmp not in range(1, 3)):\n resp = {'error': 'bad input'}\n return resp\n if(tmp == 1):\n city_name = input('[?]Get me CityName >> ')\n if(not city_name):\n resp = {'error': 'bad input'}\n return resp\n resp = fetch_city_name_id(city_name=city_name, db_name=db_name)\n if(not resp or resp.get('status')):\n resp = {'error': 'found no record'}\n else:\n city_id = input('[?]Get me CityID >> ')\n if(not city_id):\n resp = {'error': 'bad input'}\n return resp\n resp = fetch_city_name_id(city_id=city_id, db_name=db_name)\n if(not resp or resp.get('status')):\n resp = {'error': 'found no record'}\n return resp\n\n\ndef __get_menu__():\n ch = input('[+]Main Menu:\\n\\t1. Fetch City Names\\n\\t2. Fetch a certain City\\n\\t3. 
Fetch Weather data of a City\\n[?]Choose one >> ')\n try:\n ch = int(ch)\n except ValueError as e:\n print('[!]Error : {}'.format(str(e)))\n ch = -1\n return ch\n if(ch not in range(1, 4)):\n print('[!]Bad input')\n ch = -1\n return ch\n\n\ndef app(db_name='imd_city_db'):\n run('clear')\n print('[+]City Weather ::\\n\\n***Choose 1 from below list for first time use***\\n')\n ch = __get_menu__()\n if(ch == -1):\n return\n if(ch == 1):\n resp = fetch_city()\n if(not resp.get('error')):\n print('[+]Status after storing record : {}'.format(store_city_name_id(resp, db_name=db_name)))\n print('\\n')\n for i, j in resp.items():\n print('\\t{}\\n'.format(i))\n for k in j:\n for l, m in k.items(): \n print('\\t\\t\\'{}\\' | {}'.format(l, m))\n print('\\n')\n else:\n print('[!]{} -> {}\\n'.format('Error', resp.get('error', ':/')))\n resp = fetch_city_name_id(db_name=db_name)\n for i, j in resp.items():\n print('\\t\\t{}\\t---\\t{}'.format(i, j))\n elif(ch == 2):\n resp = __fetch_a_certain_city__(db_name)\n print('\\n')\n for i, j in resp.items():\n print('\\t{}\\t---\\t{}'.format(i, j))\n print('\\n')\n else:\n resp = __fetch_a_certain_city__(db_name)\n print('\\n')\n if(resp.get('error')):\n print('{}\\n'.format(resp))\n else:\n if(len(resp.keys()) > 1):\n print('[+]Possible Matches found ...\\n')\n for i, j in enumerate(resp.keys()):\n print('\\t{} -> {}'.format(i+1, resp.get(j)))\n tmp = input('\\n[?]Choose one from above >> ')\n try:\n tmp = int(tmp)\n except ValueError as e:\n print('[!]Error : {}'.format(str(e)))\n return\n if(tmp not in range(1, len(resp.keys())+1)):\n print('[!]Bad input')\n return\n resp = {list(resp.keys())[tmp-1]: resp.get(list(resp.keys())[tmp-1])}\n else:\n print('[+]Match found :\\n\\t{}\\n'.format(resp))\n print('[+]Fetching data ...\\n')\n city_id = list(resp.keys())[0]\n weather = fetch_weather(city_id, db_name=db_name)\n if(weather.get('error')):\n print('{}\\n'.format(weather))\n return\n print('[+]Weather Data :\\n')\n pref_it = 'http://city.imd.gov.in/citywx/'\n color_init()\n for i, j in weather.get(city_id).items():\n if(i == 'past_24_hours_weather'):\n print('\\t{}{}{} :\\n'.format(Fore.GREEN, ' '.join([x.capitalize() for x in i.split('_')]), Fore.RESET))\n for k, l in j.items():\n if(k.startswith('Departure from Normal(oC)')):\n k = 'Departure from Normal(oC)'\n print('\\t\\t{:<90} --- {}{}{}'.format(k, Fore.RED, l, Fore.RESET))\n print('\\n')\n elif(i == '7_days_forecast'):\n print('\\t{}{}{} :\\n'.format(Fore.GREEN, ' '.join([x.capitalize() for x in i.split('_')]), Fore.RESET))\n for k in j:\n k[3] = Fore.MAGENTA+pref_it+k[3]+Fore.RESET\n print('\\t\\t{} | {} | {} | {}'.format(*k))\n print('\\n')\n else:\n print('\\t{}{}{}\\t---\\t{}\\n'.format(Fore.GREEN, ' '.join([x.capitalize() for x in i.split('_')]), Fore.RESET, Fore.MAGENTA+pref_it+j+Fore.RESET))\n reset_all()\n print('[+]End\\n')\n return\n\n\nif __name__ == '__main__':\n try:\n if(not __is_os_supported__()):\n print('[!]You need to be on Linux to run this program :)\\n')\n exit(0)\n if(__is_init_setup_done__()):\n app(db_name=join(environ.get('HOME'), '.imd_weather', 'imd_city_db'))\n else:\n app()\n except KeyboardInterrupt:\n print('\\n[!]Terminated')\n finally:\n exit(0)\n","sub_path":"imd_weather_app.py","file_name":"imd_weather_app.py","file_ext":"py","file_size_in_byte":6327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"249528386","text":"import io\nfrom difflib import unified_diff\n\nfrom .exceptions import InvalidTargetFile\nfrom 
..logging import get_logger\n\nfrom typing import TYPE_CHECKING\nfrom ..config import Configuration\nfrom ..versioning import Version\n\n\nlogger = get_logger()\n\n\nclass FileUpdater:\n def __init__(self, config: Configuration, current_version: Version, new_version: Version):\n self.config = config\n self.paths = config.files()\n self.current_version = current_version\n self.new_version = new_version\n self.context = {\n \"current_version\": current_version.serialize(),\n \"new_version\": new_version.serialize(),\n }\n\n def _validate(self):\n \"\"\"\n Checks that all files listed in the config have matching text to replace\n \"\"\"\n for path in self.paths:\n options = self.config.get_file_section(path)\n serialized_version = options[\"search\"].format(**self.context)\n if not self._contains(path):\n raise InvalidTargetFile(\n f\"Did not find '{self.current_version}' or '{serialized_version}' in file {path}\"\n )\n return True\n\n def _contains(self, path):\n try:\n with io.open(path, 'rb') as f:\n options = self.config.get_file_section(path)\n serialized_version = options[\"search\"].format(**self.context)\n search_lines = serialized_version.splitlines()\n lookbehind = []\n\n for lineno, line in enumerate(f.readlines()):\n lookbehind.append(line.decode('utf-8').rstrip(\"\\n\"))\n\n if len(lookbehind) > len(search_lines):\n lookbehind = lookbehind[1:]\n\n if (search_lines[0] in lookbehind[0] and\n search_lines[-1] in lookbehind[-1] and\n search_lines[1:-1] == lookbehind[1:-1]):\n logger.info(\"Found '{}' in {} at line {}: {}\".format(\n serialized_version, path, lineno - (len(lookbehind) - 1), line.decode('utf-8').rstrip()))\n return True\n return False\n except FileNotFoundError:\n raise InvalidTargetFile(f\"file listed in config not found: '{path}'\")\n\n def _replace(self, path, dry_run=False):\n with io.open(path, 'rb') as f:\n file_content_before = f.read().decode('utf-8')\n\n options = self.config.get_file_section(path)\n search_for = options[\"search\"].format(**self.context)\n replace_with = options[\"replace\"].format(**self.context)\n\n file_content_after = file_content_before.replace(search_for, replace_with)\n\n if file_content_before == file_content_after:\n # TODO expose this to be configurable\n file_content_after = file_content_before.replace(\n self.current_version.original,\n replace_with,\n )\n\n if file_content_before != file_content_after:\n logger.info(\"{} file {}:\".format(\n \"Would change\" if dry_run else \"Changing\",\n path,\n ))\n logger.info(\"\\n\".join(list(unified_diff(\n file_content_before.splitlines(),\n file_content_after.splitlines(),\n lineterm=\"\",\n fromfile=\"a/\"+path,\n tofile=\"b/\"+path\n ))))\n else:\n logger.info(\"{} file {}\".format(\n \"Would not change\" if dry_run else \"Not changing\",\n path,\n ))\n if not dry_run:\n with io.open(path, 'wb') as f:\n f.write(file_content_after.encode('utf-8'))\n\n def replace(self, dry_run=False):\n if self._validate():\n for path in self.paths:\n self._replace(path, dry_run)\n\n def __str__(self):\n return self.paths\n\n def __repr__(self):\n return ''.format(self.paths)\n","sub_path":"bumpv/client/files/updater.py","file_name":"updater.py","file_ext":"py","file_size_in_byte":4018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"230441201","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\ndef rewrite_client_str_to_client_model(apps, schema_editor):\n Client = 
apps.get_model(\"entries\", \"Client\")\n Project = apps.get_model(\"entries\", \"Project\")\n \n # For each project, get the text version of the client name, and\n # see if there's already a corresponding Client. If not, create it.\n # Then set the client property to be the new or existing Client.\n for project in Project.objects.all():\n c, created = Client.objects.get_or_create(name=project.client_as_str)\n project.client = c\n project.save()\n\ndef rewrite_client_model_to_client_str(apps, schema_editor):\n Client = apps.get_model(\"entries\", \"Client\")\n Project = apps.get_model(\"entries\", \"Project\")\n \n # For each project, set the text version of the client name to\n # the name from the Client instance.\n for project in Project.objects.all():\n project.client_as_str = project.client.name\n # Must have this or delete of clients cascades to products\n project.client = None\n project.save()\n\n # Now delete all clients. \n Client.objects.all().delete()\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('entries', '0002_auto_20150723_0819'),\n ]\n\n # Switch from a charfield-based client, to a model-based one.\n operations = [\n migrations.AlterField(\n model_name='project',\n name='client',\n field=models.CharField(max_length=200, null=True),\n ),\n # Move the existing client field to the side\n migrations.RenameField(\n model_name='project',\n old_name='client',\n new_name='client_as_str',\n ),\n # Create the client model\n migrations.CreateModel(\n name='Client',\n fields=[\n ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),\n ('name', models.CharField(max_length=200)),\n ],\n ),\n # Create a client field in the project. Note it allows null, as\n # a way of avoiding to specify what the default value is.\n migrations.AddField(\n model_name='project',\n name='client',\n field=models.ForeignKey(null=True, to='entries.Client'),\n ),\n # Do the conversion from text to model instance.\n migrations.RunPython(\n rewrite_client_str_to_client_model,\n rewrite_client_model_to_client_str,\n ),\n # Now we can disallow null.\n migrations.AlterField(\n model_name='project',\n name='client',\n field=models.ForeignKey(to='entries.Client'),\n ),\n # And remove the text version of the client\n migrations.RemoveField(\n model_name='project',\n name='client_as_str',\n ),\n ]\n","sub_path":"timetracker/entries/migrations/0003_move_project_client_to_model.py","file_name":"0003_move_project_client_to_model.py","file_ext":"py","file_size_in_byte":2963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"608467728","text":"#encoding:UTF-8\n#Autor: José Antonio Gómez\n#El usuario escribe la opción que quiere ejecutar en el menú, ya sea contar insectos, encontrar el mayor de una lista de números o salir.\n\n#cuenta insectos\ndef contarInsectos():\n cInsectos=0\n cDias=0\n while cInsectos <= 29:\n insectos = int(input(\"Teclea el número de insectos recolectados hoy: \"))\n cInsectos += insectos\n cDias += 1\n print(\"\\nDespués de %d día(s), llevas %d insectos.\" % (cDias, cInsectos))\n\n if cInsectos == 30:\n print(\"Te hace falta recolectar 0 insectos.\")\n print(\"\\n¡Felicidades, has llegado a la meta!\\n\")\n\n else:\n extra = cInsectos - 30\n print(\"Te has pasado por %d insectos.\" % extra)\n print(\"\\n¡Felicidades, has llegado a la meta!\\n\")\n\n#calcula el mayor con la lista que recibe como parámetro\ndef encontrarMayor(valores):\n return max(valores)\n\n\ndef main():\n\n 
menu=True\n print(\"Homework 6\")\n print(\"-------\")\n print(\"Welcome to the program\\n\")\n print(\"\"\"1. Collect insects\n2. Find the largest\n3. Quit\"\"\")\n seleccion=int(input(\"Enter an option: \"))\n\n #menu\n while menu==True:\n #Option not in the menu\n if seleccion == 0 or seleccion >= 4:\n print(\"\\nThat option is not in the menu. Try again.\\n\")\n print(\"\"\"1. Collect insects\n2. Find the largest\n3. Quit\"\"\")\n seleccion = int(input(\"Enter an option: \"))\n\n #Negative option\n if seleccion <= -1:\n print(\"\\nError. Enter positive numbers. Try again.\\n\")\n print(\"\"\"1. Collect insects\n2. Find the largest\n3. Quit\"\"\")\n seleccion = int(input(\"Enter an option: \"))\n\n #count-insects option\n if seleccion==1:\n print(\"\\nYou selected 'Count insects'. The goal is 30 insects! Good luck!\\n\")\n contarInsectos()\n print(\"\"\"1. Collect insects\n2. Find the largest\n3. Quit\"\"\")\n seleccion = int(input(\"Enter an option: \"))\n\n #find-the-largest option\n if seleccion==2:\n print(\"\\nYou selected 'Find the largest'\\n\")\n valor=int(input(\"Enter a number [-1 to quit]: \"))\n\n\n if valor != -1:\n listaValores=[valor]\n while valor!=-1:\n valor = int(input(\"Enter a number [-1 to quit]: \"))\n if valor != -1:\n listaValores.append(valor)\n else:\n print(\"\\nThe largest is:\",(encontrarMayor(listaValores)))\n\n if valor==-1:\n print(\"\"\"\\n1. Collect insects\n2. Find the largest\n3. Quit\"\"\")\n seleccion = int(input(\"Enter an option: \"))\n\n #quit option\n if seleccion==3:\n menu=False\n print(\"\\nThanks for your visit. Come back soon!\")\nmain()","sub_path":"tarea6.py","file_name":"tarea6.py","file_ext":"py","file_size_in_byte":2929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"506844676","text":"from django.db import models\nfrom django.utils.translation import gettext_lazy as _\n\nfrom apps.core.models.mixins import TimestampMixin\n\n\nclass Video(TimestampMixin, models.Model):\n title = models.CharField(\n max_length=255,\n verbose_name=_('VN__TITLE'),\n help_text=_('HT__TITLE'),\n )\n source_id = models.CharField(\n max_length=255,\n verbose_name=_('VN__SOURCE_ID'),\n help_text=_('HT__SOURCE_ID'),\n )\n url = models.URLField(\n verbose_name=_('VN__URL'),\n help_text=_('HT__URL'),\n )\n application = models.ForeignKey(\n 'application.Application',\n models.CASCADE,\n related_name='videos',\n )\n word = models.ForeignKey(\n 'accumulator.Word',\n models.CASCADE,\n related_name='videos',\n )\n\n class Meta:\n verbose_name = _('VN__VIDEO')\n verbose_name_plural = _('VN__VIDEOS')\n\n def __str__(self):\n return self.title\n","sub_path":"server/apps/accumulator/models/video.py","file_name":"video.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"455031091","text":"import torch\nfrom codes import mvtecad\nfrom functools import reduce\nfrom torch.utils.data import DataLoader\nfrom codes.datasets import *\nfrom codes.networks import *\nfrom codes.networks import multi_center_res_backbone_test\nfrom codes.inspection import eval_encoder_NN_multiK\nfrom codes.utils import *\nfrom datasets import Multi_Center_Dataset\nimport numpy as np\nimport cv2\nimport my_resnet\nfrom my_resnet import Bottleneck\nimport glob\ndef cnn_output_size(H, K, S=1, P=0) -> int:\n \"\"\"\n\n :param int H: input_size\n :param int K: 
filter_size\n :param int S: stride\n :param int P: padding\n :return:\n \"\"\"\n return 1 + (H - K + 2 * P) // S\n\ndef crop_CHW(image, i, j, K, S=1):\n if S == 1:\n h, w = i, j\n else:\n h = S * i\n w = S * j\n return image[:, h: h + K, w: w + K]\n\n\n\n\nenc = my_resnet.ResNet(Bottleneck,[3, 4, 6, 3],1000).cuda()\n\nckpt = torch.load(\"./model/enc.pth\")\n\nenc.load_state_dict(ckpt)\n\n\n\n\ndef img_infer(img_name):\n a = np.zeros(20)\n img = cv2.imread(img_name)\n mean = np.mean(img)\n img = (img - mean) / 255\n img = cv2.resize(img,(1024,1024))\n # cv2.imshow(\"11\",img)\n # cv2.waitKey(0)\n row = cnn_output_size(1024,256,48)\n col = cnn_output_size(1024,256,48)\n img = np.transpose(img, [2, 0, 1])\n\n for i in range(row):\n for j in range(col):\n\n img_p = crop_CHW(img, i, j, 256, 48)\n aa = np.transpose(img_p, [1, 2, 0])\n in_tensor = torch.from_numpy(img_p.astype(np.float32)).contiguous()\n in_tensor = in_tensor.unsqueeze(0).cuda()\n out = enc(in_tensor)\n out, dis = multi_cls(out)\n\n print(out, \" \", dis)\n # out_f = torch.nn.functional.softmax(out_f, dim=1)\n if a[out[0]] < dis:\n a[out[0]] = dis\n print(a)\n # cv2.namedWindow(\"p\",0)\n # cv2.imshow(\"p\", aa)\n #\n #\n # cv2.waitKey(0)\n if dis >0.6 and out[0]==8:\n aa =cv2.resize(aa,(256,256))\n cv2.imshow(\"p\",aa)\n cv2.waitKey(0)\n # print(torch.argmax(out_f,dim=1))\n return a\n\n\n\nif __name__ == \"__main__\":\n D = 1000\n multi_cls = multi_center_res_backbone_test(D, 20).cuda()\n multi_cls.load_state_dict(torch.load(\"./model/mul.pth\"))\n enc.eval()\n multi_cls.eval()\n\n a = np.zeros(20)\n\n\n a = img_infer(\"./20210311175003974.jpg\")\n print(a)\n\n\n # [0.41824254 0. 0.38242683 1.041839 0. 1.16508436\n # 0.81420916 0.96830255 0. 0.45293742 1.25321531 0.90334588\n # 0. 0.03888164 0.73888123 0. 0. 0.\n # 0. 
0.]\n\n #ok center_ dist","sub_path":"res_backbone_test.py","file_name":"res_backbone_test.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"626442315","text":"list01 = []\nresult = []\nlist03 = []\nlist04 = []\ncount_01 = 0\ncount_02 = 0\nwhile count_01 < 7:\n count_02 +=1\n count_01 = 0\n list01 = []\n result = []\n list03 = []\n list04 = []\n code_count = 1\n\n import random\n\n while code_count <= 6:\n random_number = random.randint(1, 33)\n if random_number not in list01:\n list01.append(random_number)\n code_count += 1\n\n code_count = 1\n while code_count <= 6:\n random_number = random.randint(1, 33)\n if random_number not in result:\n result.append(random_number)\n code_count += 1\n\n code_count = 1\n while code_count <= 1:\n random_number = random.randint(1, 16)\n if random_number not in list03:\n list03.append(random_number)\n code_count += 1\n\n code_count = 1\n while code_count <= 1:\n random_number = random.randint(1, 16)\n if random_number not in list04:\n list04.append(random_number)\n code_count += 1\n\n\n print(list01, result, list03, list04)\n for item in result:\n if item in list01:\n count_01 += 1\n\n for item in list04:\n if item in list03:\n count_01 += 1\n\n print(count_02)\n print(count_01)\n","sub_path":"part_01_python_base/python_core/day5/homework04_extra.py","file_name":"homework04_extra.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"342121606","text":"import raop.helper as helper\nimport raop.pipeline as pipeline\nfrom sklearn.naive_bayes import GaussianNB\n\nkeysInFileName = \"resources/train.json\"\nkeysOutFileName = \"output.json\"\ninFileName = \"output.json\"\noutFileName = \"output2.json\"\n\n\npipeline.removeNonNeededKeys(keysInFileName,keysOutFileName)\npipeline.addPreprocessedKeyVals(inFileName,outFileName)\n\nactualX, actualY = pipeline.getFeatures(outFileName)\n\ngnb = GaussianNB()\ny_pred = gnb.fit(actualX, actualY).predict(actualX)\n","sub_path":"sandbox/sklearnExperiment.py","file_name":"sklearnExperiment.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"348898576","text":"#5.3\n#In homework 5.3, we have to create a graph \"same\" as the one in the given pdf file\n#There are some problems by creating the exactly same graph, therefore line chart is allowed\n#the blue line in 5.3 is also allowed to show in line chart instead of smooth curve\n\nimport matplotlib.pyplot as plt\n\nCPU_burst = [6,6,4,6,4,13,13,13]\nguess_time = [10,8,6,6,5,9,11,12]\n\n#output figure of 5.3\n#from scipy.interpolate import spline\nx = range(len(guess_time))\ny = guess_time\nplt.figure(figsize=(6,8))\nplt.xlim(1,max(x)+1)\nplt.ylim(0,max(CPU_burst)+1)\nplt.plot(x,guess_time)\nplt.plot(x,CPU_burst)\nplt.legend(['guess (Taui)','CPU burst (ti)'], loc='upper left')\nplt.title('Prediction of the Length of the Next CPU Burst')\nplt.xlabel('time ->')\nplt.axes().set_aspect(0.4)\nplt.suptitle('wolfe', style='italic', y=-0.3, fontsize=12)\nplt.grid(True)\n#plt.xticks(0,8,1)\n#cell_text = ['CPU burst (ti)']\nTable = plt.table(cellText=[CPU_burst+[\"...\"],guess_time+[\"...\"]],rowLabels=['CPU burst (ti)','\"guess\"(Taui)'],loc=\"bottom\", bbox=[0.3, -0.5, 0.7, 
0.3])\n\nplt.show()\n","sub_path":"HW3_5.3.py","file_name":"HW3_5.3.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"221471980","text":"##\n## Python Programming\n## ===========================================================================\n##\n## For the file `data.csv`, print the sum of column 2 for each \n## letter in column 4, sorted alphabetically.\n##\n## Answer:\n## a,114\n## b,40\n## c,91\n## d,65\n## e,79\n## f,110\n## g,35\n##\n## >>> Write your code from this point on <<<\n##\n\ndatos = open('data.csv','r').readlines()\ndatos = [r.replace('\\n','') if r[-1] == '\\n' else r for r in datos]\ndatos = [r.split('\\t') for r in datos]\n\nletras = [[r[1],r[3].split(',')] for r in datos]\nletras = [[r[0], e] for r in letras for e in r[1]]\n\ndicc = {}\nfor valor, letra in letras:\n if letra in dicc.keys():\n dicc[letra] += int(valor)\n else:\n dicc[letra] = int(valor)\n\nfor k in sorted(dicc.keys()):\n print(k + \",\" + str(dicc[k]))\n\n","sub_path":"03-python=1/q11=1/question.py","file_name":"question.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"431088258","text":"#!/usr/bin/env python\n\n\"\"\"\nThis script employs a VERY basic heuristic ('porn' in webpage.lower()) to check\nif we are not 'age_limit' tagging some porn site\n\"\"\"\n\n# Allow direct execution\nimport os\nimport sys\nsys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\nfrom test.helper import get_testcases\nfrom youtube_dl.utils import compat_urllib_request\n\nfor test in get_testcases():\n try:\n webpage = compat_urllib_request.urlopen(test['url'], timeout=10).read()\n except:\n print('\\nFail: {0}'.format(test['name']))\n continue\n\n webpage = webpage.decode('utf8', 'replace')\n\n if 'porn' in webpage.lower() and ('info_dict' not in test\n or 'age_limit' not in test['info_dict']\n or test['info_dict']['age_limit'] != 18):\n print('\\nPotential missing age_limit check: {0}'.format(test['name']))\n\n elif 'porn' not in webpage.lower() and ('info_dict' in test and\n 'age_limit' in test['info_dict'] and\n test['info_dict']['age_limit'] == 18):\n print('\\nPotential false negative: {0}'.format(test['name']))\n\n else:\n sys.stdout.write('.')\n sys.stdout.flush()\n\nprint()\n","sub_path":"devscripts/check-porn.py","file_name":"check-porn.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"359733985","text":"# Author J'yrens Christenvie , Please acknowledge the author if you are using his code for your game\r\n# People acknowledged : Eric Matthes \r\n\r\n\r\n# 15 / 05 / 2020\r\n# This class controls the Alien that will be placed on the screen\r\n\r\n\r\nimport pygame\r\n\r\nfrom pygame.sprite import Sprite\r\n\r\nclass Alien(Sprite):\r\n \"\"\"A class to represent a single Alien in a fleet \"\"\"\r\n\r\n def __init__(self,ai_game):\r\n \"\"\"Initialize the alien and set its starting position \"\"\"\r\n super().__init__()\r\n self.screen = ai_game.screen\r\n self.settings = ai_game.settings\r\n\r\n # Load the alien image and set its rect attribute \r\n self.image = pygame.image.load('images/first_alien.bmp')\r\n self.rect = self.image.get_rect()\r\n\r\n # start each new alien near the top left of the screen.\r\n self.rect.x = self.rect.width\r\n self.rect.y = 
self.rect.height\r\n\r\n # Store the Alien exact horizontal position\r\n self.x = float(self.rect.x)\r\n \r\n def check_edges(self):\r\n \"\"\"Return true if an alien is at the edge of the screen. \"\"\"\r\n screen_rect = self.screen.get_rect()\r\n\r\n if self.rect.right >= screen_rect.right or self.rect.left <= 0:\r\n return True\r\n\r\n def update(self):\r\n \"\"\"Move an alien to the right or left \"\"\"\r\n self.x += (self.settings.alien_speed * \r\n self.settings.fleet_direction)\r\n self.rect.x = self.x \r\n\r\n\r\n \r\n\r\n","sub_path":"alien.py","file_name":"alien.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"606812130","text":"# _*_ coding:utf-8 _*_\r\nfrom keras.applications.resnet50 import ResNet50\r\n\r\nfrom keras import layers\r\nfrom keras.layers import Dense\r\nfrom keras import utils\r\nfrom keras.models import Model\r\nfrom keras import backend as K\r\nfrom attention_module import attach_attention_module\r\n\r\n# from . import get_submodules_from_kwargs\r\n# from . import imagenet_utils\r\n# from .imagenet_utils import decode_predictions\r\n# from .imagenet_utils import _obtain_input_shape\r\n\r\nWEIGHTS_PATH = ('https://github.com/fchollet/deep-learning-models/'\r\n 'releases/download/v0.2/'\r\n 'resnet50_weights_tf_dim_ordering_tf_kernels.h5')\r\nWEIGHTS_PATH_NO_TOP = ('https://github.com/fchollet/deep-learning-models/'\r\n 'releases/download/v0.2/'\r\n 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5')\r\n\r\n\r\n\r\ndef identity_block(input_tensor, kernel_size, filters, stage, block, bn_axis=3):\r\n filters1, filters2, filters3 = filters\r\n conv_name_base = 'res' + str(stage) + block + '_branch'\r\n bn_name_base = 'bn' + str(stage) + block + '_branch'\r\n\r\n x = layers.Conv2D(filters1, (1, 1),\r\n kernel_initializer='he_normal',\r\n name=conv_name_base + '2a')(input_tensor)\r\n x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)\r\n x = layers.Activation('relu')(x)\r\n\r\n x = layers.Conv2D(filters2, kernel_size,\r\n padding='same',\r\n kernel_initializer='he_normal',\r\n name=conv_name_base + '2b')(x)\r\n x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)\r\n x = layers.Activation('relu')(x)\r\n\r\n x = layers.Conv2D(filters3, (1, 1),\r\n kernel_initializer='he_normal',\r\n name=conv_name_base + '2c')(x)\r\n x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)\r\n\r\n x = layers.add([x, input_tensor])\r\n x = layers.Activation('relu')(x)\r\n return x\r\n\r\n\r\ndef conv_block(input_tensor,\r\n kernel_size,\r\n filters,\r\n stage,\r\n block,\r\n strides=(2, 2),\r\n bn_axis=3):\r\n filters1, filters2, filters3 = filters\r\n conv_name_base = 'res' + str(stage) + block + '_branch'\r\n bn_name_base = 'bn' + str(stage) + block + '_branch'\r\n\r\n x = layers.Conv2D(filters1, (1, 1), strides=strides,\r\n kernel_initializer='he_normal',\r\n name=conv_name_base + '2a')(input_tensor)\r\n x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)\r\n x = layers.Activation('relu')(x)\r\n\r\n x = layers.Conv2D(filters2, kernel_size, padding='same',\r\n kernel_initializer='he_normal',\r\n name=conv_name_base + '2b')(x)\r\n x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)\r\n x = layers.Activation('relu')(x)\r\n\r\n x = layers.Conv2D(filters3, (1, 1),\r\n kernel_initializer='he_normal',\r\n name=conv_name_base + '2c')(x)\r\n x = layers.BatchNormalization(axis=bn_axis, 
name=bn_name_base + '2c')(x)\r\n\r\n shortcut = layers.Conv2D(filters3, (1, 1), strides=strides,\r\n kernel_initializer='he_normal',\r\n name=conv_name_base + '1')(input_tensor)\r\n shortcut = layers.BatchNormalization(\r\n axis=bn_axis, name=bn_name_base + '1')(shortcut)\r\n\r\n x = layers.add([x, shortcut])\r\n x = layers.Activation('relu')(x)\r\n return x\r\n\r\ndef finite_difference(input_feature):\r\n channel = input_feature._keras_shape[-1]\r\n finite_feature=layers.concatenate(\r\n [K.expand_dims(K.abs(layers.subtract([input_feature[...,i+1],\r\n input_feature[...,i]])),axis=-1) for i in range(channel-1)],axis=-1)\r\n return finite_feature\r\n\r\ndef res_Net50(input,classes=51,attention_module=None):\r\n #global backend, layers, models, keras_utils\r\n #backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)\r\n #x = layers.Lambda(finite_difference)(input)\r\n #print(x.get_shape())\r\n #exit()\r\n if attention_module is not None:\r\n x=attach_attention_module(input,'fcbam_block')\r\n x = layers.ZeroPadding2D(padding=(3, 3), name='conv1_pad')(input)\r\n x = layers.Conv2D(128, (7, 7),\r\n strides=(2, 2),\r\n padding='valid',\r\n kernel_initializer='he_normal',\r\n name='conv1_he_normal')(x)\r\n x = layers.BatchNormalization( name='bn_conv1_he_normal')(x)\r\n x = layers.Activation('relu')(x)\r\n x = layers.ZeroPadding2D(padding=(1, 1), name='pool1_pad_he_normal')(x)\r\n x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)\r\n\r\n\r\n if attention_module is not None:\r\n x=attach_attention_module(x,attention_module)\r\n\r\n x = conv_block(x, 3, [64, 64, 256], stage=2, block='a_he_normal', strides=(1, 1))\r\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='b_he_normal')\r\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='c_he_normal')\r\n\r\n\r\n\r\n if attention_module is not None:\r\n x=attach_attention_module(x,attention_module)\r\n\r\n x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')\r\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')\r\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')\r\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')\r\n\r\n\r\n if attention_module is not None:\r\n x=attach_attention_module(x,attention_module)\r\n\r\n x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')\r\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')\r\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')\r\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')\r\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')\r\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')\r\n\r\n\r\n if attention_module is not None:\r\n x=attach_attention_module(x,attention_module)\r\n\r\n x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')\r\n x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')\r\n x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')\r\n x = layers.GlobalAveragePooling2D(name='avg_pool')(x)\r\n\r\n # linear = layers.Dense(units=512,activation='sigmoid',name='dense_layer_1')(x)\r\n # linear = layers.Dropout(rate=0.75)(linear)\r\n\r\n linear = layers.Dense(units=classes, activation='softmax',name='dense_layer')(x)\r\n\r\n model = Model(inputs=input, outputs=linear)\r\n\r\n weights_path = utils.get_file(\r\n 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',\r\n WEIGHTS_PATH_NO_TOP,\r\n cache_subdir='models',\r\n md5_hash='a268eb855778b3df3c7506639542a6af')\r\n model.load_weights(weights_path,by_name=True)\r\n return 
model\r\n\r\n\r\n #x = layers.Dense(self.classes, activation='softmax', name='fc10')(x)\r\n #model=Model(self.input,x,name='resnet50')\r\n #return model","sub_path":"models/resnet50.py","file_name":"resnet50.py","file_ext":"py","file_size_in_byte":7044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"551637870","text":"#!/usr/bin/env python\n\nimport csv\n\ncueTags = {'B', 'I', 'U' , 'L'}\n\ndef backToX(resultFile):\n X = []\n with open(resultFile) as f:\n for line in f:\n line = line.strip('\\n')\n if line:\n # arr is the word/token (0), pos (1), assigned tag (2)\n X.append(line.split('\\t'))\n else:\n #denotes end of sentence\n X.append([\"\"])\n return X\n\ndef getRanges(X):\n ranges = []\n inRange = False\n for i in range(len(X)):\n t = X[i]\n if t[2] in cueTags and not inRange:\n ranges.append(str(i)+\"-\")\n inRange = True\n elif inRange and t[2] == 'O':\n ranges[-1] += str(i-1)\n inRange = False\n if inRange:\n ranges[-1] += str(len(X)-1)\n return ranges\n\ndef removeNewLineTuples(X):\n newX = []\n for t in X:\n if t[0] != \"\":\n newX.append(t)\n return newX\n\ndef uncertainRangeDetection(isPublic):\n fileName = \"priv\"\n if isPublic:\n fileName = \"pub\"\n fileName += \"results.txt\"\n\n X = backToX(fileName)\n X = removeNewLineTuples(X)\n ranges = getRanges(X)\n\n return ranges\n\ndef tuplesToSentences(tuples):\n taggedSentences = []\n currSentence = []\n for t in tuples:\n if t[0]==\"\" and len(currSentence) > 0:\n taggedSentences.append(currSentence)\n currSentence = []\n else:\n currSentence.append(t)\n return taggedSentences\n\n#currently determining if it has a tag in it\ndef isSentenceUncertain(sentence):\n # thresh = THRESHOLD\n # uncCount = 0.0\n for t in sentence:\n if t[2] in cueTags:\n return True\n # uncCount += 1.0\n # return uncCount/float(len(sentence)) >= thresh\n return False\n\ndef indicesOfTaggedSentences(sentences):\n indices = []\n for i in range(len(sentences)):\n s = sentences[i]\n if isSentenceUncertain(s):\n indices.append(i)\n return indices\n\ndef uncertainSentenceDetection(isPublic):\n fileName = \"priv\"\n if isPublic:\n fileName = \"pub\"\n fileName += \"results.txt\"\n\n X = backToX(fileName)\n sentences = tuplesToSentences(X)\n sentenceIndices = indicesOfTaggedSentences(sentences)\n\n return sentenceIndices\n\ndef formatArray(arr):\n s = \"\"\n for i in range(len(arr)-1):\n s += str(arr[i]) + ' '\n s += str(arr[-1])\n return s\n\ndef writeRanges(publicRanges, privateRanges):\n with open('CRFkaggleSubmission1.csv', 'w') as f:\n writer = csv.writer(f)\n writer.writerow(['Type', 'Spans'])\n writer.writerow(['CUE-public', formatArray(publicRanges)])\n writer.writerow(['CUE-private', formatArray(privateRanges)])\n\ndef writeSentences(publicSentences, privateSentences):\n with open('CRFkaggleSubmission2.csv', 'w') as f:\n writer = csv.writer(f)\n writer.writerow(['Type', 'Indices'])\n writer.writerow(['SENTENCE-public', formatArray(publicSentences)])\n writer.writerow(['SENTENCE-private', formatArray(privateSentences)])\n\nif __name__ == \"__main__\":\n publicRanges = uncertainRangeDetection(1)\n privateRanges = uncertainRangeDetection(0)\n\n publicSentences = uncertainSentenceDetection(1)\n privateSentences = uncertainSentenceDetection(0)\n\n writeRanges(publicRanges, privateRanges)\n writeSentences(publicSentences, 
privateSentences)\n","sub_path":"P4/postProcess.py","file_name":"postProcess.py","file_ext":"py","file_size_in_byte":3376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"233707922","text":"\nimport numpy as np\nimport logging\n\nimport MDAnalysis\n\nfrom boxutils import center_mol, rotate_mol\n\nfrom constants import SEL_SPEC_HEAVIES_NOWALL\n\nfrom mdtools import ParallelTool\n\nfrom MDAnalysis.analysis.rms import rmsd\n\nimport sys\n\nfrom IPython import embed\n\nclass Trajconv(ParallelTool):\n prog='trajconv'\n description = '''\\\nCenter and align a structure (PDB or GRO) or trajectory (XTC or TRR) to a reference\nstructure (Requires a GRO and a TPR file)\n\nAutomatically treats PBC for selected molecule group, assuring molecule to be centered\n and aligned is whole in each frame. \n\n NOTE: **This tool assumes the reference structure is whole** and will not work correctly\n otherwise (it will throw an error if it finds the reference structure is broken)\n The reference structure will automaticall be centered according to its COM before any\n fitting or alignment\n\n\n-----------------------------------------------------------------------------\nCommand-line options\n-----------------------------------------------------------------------------\n'''\n \n def __init__(self):\n super(Trajconv,self).__init__()\n \n # Parallel processing by default (this is not actually necessary, but it is\n # informative!)\n self.wm_env.default_work_manager = self.wm_env.default_parallel_work_manager\n\n self.ref_univ = None\n self.other_univ = None\n self.outputfilename = None\n\n # are we processing a trajectory?\n self.do_traj = False\n\n self.start_frame = None\n self.end_frame = None\n\n self.sel_spec = None\n self.rmsd_spec = None\n\n self.sel_spec_other = None\n self.rmsd_spec_other = None\n\n self.rmsd_out = None\n\n self.center_only = None\n\n # Shape: (n_frames, n_rms_specs+1)\n self.rmsd_arr = None\n # rms per-atom (for each frame) for rms on the rmsdspec (empty if none supplied)\n self.rms_per_atom = None\n\n @property\n def n_frames(self):\n return self.last_frame - self.start_frame\n\n \n def add_args(self, parser):\n \n sgroup = parser.add_argument_group('Trajconv options')\n sgroup.add_argument('-s1', '--tprfile1', metavar='TPR', type=str, required=True,\n help='Input topology file (tpr) for ref structure')\n sgroup.add_argument('-s2', '--tprfile2', metavar='TPR', type=str, required=True,\n help='Input topology file (tpr) for structure/trajectory to fit')\n sgroup.add_argument('-c', '--grofile', metavar='GRO', type=str, required=True,\n help='Input reference structure file')\n sgroup.add_argument('-f', '--fitfile', metavar='XTC', type=str, required=True,\n help='Input file to fit to reference. can be GRO or XTC')\n sgroup.add_argument('-b', '--start', type=int, default=0,\n help='First timepoint (in ps)')\n sgroup.add_argument('-e', '--end', type=int, \n help='Last timepoint (in ps) - default is last available')\n sgroup.add_argument('--fitspec', type=str, default=SEL_SPEC_HEAVIES_NOWALL,\n help='MDAnalysis selection string for fitting. 
Default selects all protein heavy atoms')\n sgroup.add_argument('--fitspec-other', type=str,\n help='Fit spec for selecting the other structure to fit (default: same as fitspec)')\n sgroup.add_argument('--center-only', action='store_true', \n help='If true, only center molecule (no fitting)')\n sgroup.add_argument('--rmsdspec', type=str, \n help='MDAnalysis selection string for rmsd (after fitting on fitspec). Optional.')\n sgroup.add_argument('--rmsdspec-other', type=str,\n help='Sel spec for other structure rmsd (default: same as rmsdspec)')\n agroup = parser.add_argument_group('other options')\n agroup.add_argument('-o', '--outfile', type=str, default='fit.gro',\n help='Output file to write fitted trajectory or structure. File type determined by input')\n agroup.add_argument('-orms', '--outrmsd', type=str, default='rmsd_fit.dat',\n help='Output rmsd values for each frame after fitting')\n\n def process_args(self, args):\n\n #try:\n self.ref_univ = MDAnalysis.Universe(args.tprfile1, args.grofile)\n\n ext = args.fitfile.split('.')[-1]\n if ext in ['trr', 'xtc']:\n self.do_traj = True\n self.other_univ = other_univ = MDAnalysis.Universe(args.tprfile2, args.fitfile)\n elif ext == 'gro' or ext == 'pdb':\n self.other_univ = other_univ = MDAnalysis.Universe(args.tprfile2, args.fitfile)\n else:\n print(\"unknown or missing extension\")\n sys.exit()\n #except:\n # print(\"Error processing input files: {} and {}\".format(args.grofile, args.fitfile))\n # sys.exit()\n\n\n if (args.start > (other_univ.trajectory.n_frames * other_univ.trajectory.dt)):\n raise ValueError(\"Error: provided start time ({} ps) is greater than total time ({} ps)\"\n .format(args.start, (other_univ.trajectory.n_frames * other_univ.trajectory.dt)))\n\n self.start_frame = int(args.start / other_univ.trajectory.dt)\n if args.end is not None:\n self.last_frame = args.end\n else:\n self.last_frame = other_univ.trajectory.n_frames\n\n self.sel_spec = args.fitspec\n self.rmsd_spec = args.rmsdspec\n\n self.sel_spec_other = args.fitspec_other or args.fitspec\n self.rmsd_spec_other = args.rmsdspec_other or args.rmsdspec\n\n self.outfile = args.outfile.split('.')[0]\n self.rmsd_out = args.outrmsd\n\n self.center_only = args.center_only\n\n\n\n def go(self):\n\n header_str = \"fitspec: {}; rmsdspec: {}; fitspec_other: {}; rmsd_spec_other: {}\".format(self.sel_spec, self.rmsd_spec, self.sel_spec_other, self.rmsd_spec_other)\n\n n_frames = self.last_frame - self.start_frame\n\n ndim = 2 if self.rmsd_spec is None else 3\n self.rmsd_arr = np.zeros((self.n_frames, ndim))\n\n self.ref_univ.atoms.write('fit_ref.gro')\n \n if self.rmsd_spec is not None:\n ref_struct = self.ref_univ.select_atoms(self.rmsd_spec)\n other_struct = self.other_univ.select_atoms(self.rmsd_spec_other)\n \n assert ref_struct.n_atoms == other_struct.n_atoms\n \n self.rms_per_atom = np.zeros((self.n_frames, ref_struct.n_atoms))\n\n if self.do_traj:\n with MDAnalysis.Writer(self.outfile + \".xtc\", self.other_univ.atoms.n_atoms) as W:\n for i_frame in range(self.start_frame, self.last_frame):\n if i_frame % 100 == 0:\n print(\"doing frame {} of {}\".format(i_frame, self.last_frame))\n sys.stdout.flush()\n curr_ts = self.other_univ.trajectory[i_frame]\n\n center_mol(self.other_univ, do_pbc=False)\n if not self.center_only:\n rms = rotate_mol(self.ref_univ, self.other_univ, ref_spec=self.sel_spec, other_spec=self.sel_spec_other)\n self.rmsd_arr[i_frame-self.start_frame, 0] = curr_ts.time\n self.rmsd_arr[i_frame-self.start_frame, 1] = rms \n\n if i_frame == 
self.start_frame:\n self.other_univ.atoms.write('first_frame_fit.gro')\n W.write(self.other_univ.atoms)\n\n\n if self.rmsd_spec is not None and not self.center_only:\n rms_other = rmsd(ref_struct.atoms.positions, other_struct.atoms.positions)\n self.rmsd_arr[i_frame-self.start_frame, 2] = rms_other\n\n self.rms_per_atom[i_frame-self.start_frame, :] = np.sqrt( np.sum((ref_struct.atoms.positions - other_struct.atoms.positions)**2, axis=1) )\n\n\n else:\n center_mol(self.other_univ, do_pbc=False, check_broken=False)\n rms = rotate_mol(self.ref_univ, self.other_univ, ref_spec=self.sel_spec, other_spec=self.sel_spec_other)\n self.other_univ.atoms.write(self.outfile + \".gro\")\n\n self.rmsd_arr[0,0] = 0.0\n self.rmsd_arr[0,1] = rms\n if self.rmsd_spec is not None:\n rms_other = rmsd(ref_struct.atoms.positions, other_struct.atoms.positions)\n self.rmsd_arr[0,2] = rms_other\n\n self.rms_per_atom[0,:] = np.sqrt( np.sum((ref_struct.atoms.positions - other_struct.atoms.positions)**2, axis=1) )\n\n if self.rmsd_spec is not None:\n avg_rms_per_atom = self.rms_per_atom.mean(axis=0)\n self.other_univ.add_TopologyAttr('tempfactors')\n other_struct.tempfactors = avg_rms_per_atom\n other_struct.write('fit_per_atom_rmsd.pdb', bonds=None)\n\n self.ref_univ.add_TopologyAttr('tempfactors')\n ref_struct.tempfactors = avg_rms_per_atom\n ref_struct.write('fit_ref_per_atom_rmsd.pdb', bonds=None)\n\n # Save output\n np.savetxt(self.rmsd_out, self.rmsd_arr, header=header_str)\n np.savez_compressed('rms_per_atom.dat', header=self.rmsd_spec, rms_per_atom=self.rms_per_atom)\n\n\nif __name__=='__main__':\n Trajconv().main()\n","sub_path":"trajconv.py","file_name":"trajconv.py","file_ext":"py","file_size_in_byte":9471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"41111382","text":"class HashNode:\n def __init__(self, key, value):\n self.key = key\n self.value = value\n\nclass HashTable:\n def __init__(self, capacity=16):\n self.capacity = capacity\n self.size = 0\n self.table = [None] * capacity\n\n def __hash(self, k, m):\n hash_val = 0\n\n for c in k:\n hash_val = 37 * hash_val + ord(c)\n\n return hash_val % m\n\n def __get(self, property):\n x = []\n\n for i in range(len(self.table)):\n if self.table[i] != None and self.table[i] != 'DELETED':\n if property == 'keys':\n x.append(self.table[i].key)\n elif property == 'values':\n x.append(self.table[i].value)\n\n return x\n\n def keys(self):\n return self.__get('keys')\n\n def values(self):\n return self.__get('values')\n\n def get_capacity(self):\n return self.capacity\n\n def get_size(self):\n return self.size\n\n def add(self, key, value):\n idx = self.__hash(key, self.capacity)\n\n while self.table[idx] != None and self.table[idx] != \"DELETED\":\n if self.table[idx].key == key:\n self.table[idx].value = value \n return\n idx = (idx + 1) % self.capacity\n\n self.table[idx] = HashNode(key, value)\n self.size += 1\n\n def get(self, key):\n idx = self.__hash(key, self.capacity)\n\n while self.table[idx] != None and self.table[idx] != \"DELETED\":\n if self.table[idx].key == key:\n return self.table[idx].value\n idx = (idx + 1) % self.capacity\n\n return None\n\n def remove(self, key):\n idx = self.__hash(key, self.capacity)\n\n while self.table[idx] != None:\n if self.table[idx].key == key:\n self.table[idx] = \"DELETED\"\n self.size -= 1\n idx = (idx + 1) % self.capacity\n\n return 
None\n","sub_path":"hash-tables/hashtable.py","file_name":"hashtable.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"567875551","text":"#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import Int32\nfrom geometry_msgs.msg import PoseStamped, Pose\nfrom styx_msgs.msg import TrafficLightArray, TrafficLight\nfrom styx_msgs.msg import Lane\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge\nfrom light_classification.tl_classifier import TLClassifier\nimport tf\nimport cv2\nimport yaml\nimport os\nimport time\n\nfrom scipy.spatial import KDTree\n\nSTATE_COUNT_THRESHOLD = 3\n\nclass TLDetector(object):\n def __init__(self):\n rospy.init_node('tl_detector')\n\n self.pose = None\n self.waypoints = None\n self.camera_image = None\n self.waypoints_2d = None\n self.waypoint_tree = None\n self.lights = []\n self.state = TrafficLight.UNKNOWN\n self.last_state = TrafficLight.UNKNOWN\n self.last_wp = -1\n self.state_count = 0\n self.pred_count = 0\n\n '''\n /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and\n helps you acquire an accurate ground truth data source for the traffic light\n classifier by sending the current color state of all traffic lights in the\n simulator. When testing on the vehicle, the color state will not be available. You'll need to\n rely on the position of the light and the camera image to predict it.\n '''\n \n sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)\n sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)\n sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)\n sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)\n\n config_string = rospy.get_param(\"/traffic_light_config\")\n self.config = yaml.load(config_string)\n\n self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)\n\n self.bridge = CvBridge()\n self.light_classifier = TLClassifier()\n self.listener = tf.TransformListener()\n \n self.out_images_debug_path = '/home/workspace/out_imgs'\n if os.path.exists(self.out_images_debug_path):\n os.removedirs(self.out_images_debug_path)\n os.makedirs(self.out_images_debug_path)\n\n rospy.spin()\n\n def pose_cb(self, msg):\n self.pose = msg\n\n def waypoints_cb(self, waypoints):\n self.waypoints = waypoints\n if not self.waypoints_2d:\n self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]\n self.waypoint_tree = KDTree(self.waypoints_2d)\n\n def traffic_cb(self, msg):\n self.lights = msg.lights\n\n def image_cb(self, msg):\n \"\"\"Identifies red lights in the incoming camera image and publishes the index\n of the waypoint closest to the red light's stop line to /traffic_waypoint\n Args:\n msg (Image): image from car-mounted camera\n \"\"\"\n self.has_image = True\n self.camera_image = msg\n# rospy.loginfo(\"Processing traffic lights\")\n light_wp, state = self.process_traffic_lights()\n\n '''\n Publish upcoming red lights at camera frequency.\n Each predicted state has to occur `STATE_COUNT_THRESHOLD` number\n of times till we start using it. 
Otherwise the previous stable state is\n used.\n '''\n if self.state != state:\n self.state_count = 0\n self.state = state\n elif self.state_count >= STATE_COUNT_THRESHOLD:\n self.last_state = self.state\n light_wp = light_wp if ((state == TrafficLight.RED) or (state == TrafficLight.YELLOW)) else -1\n self.last_wp = light_wp\n self.upcoming_red_light_pub.publish(Int32(light_wp))\n else:\n self.upcoming_red_light_pub.publish(Int32(self.last_wp))\n self.state_count += 1\n\n def get_closest_waypoint(self, x ,y):\n \"\"\"Identifies the closest path waypoint to the given position\n https://en.wikipedia.org/wiki/Closest_pair_of_points_problem\n Args:\n pose (Pose): position to match a waypoint to\n Returns:\n int: index of the closest waypoint in self.waypoints\n \"\"\"\n \n if all(i is not None for i in [self.pose and self.waypoint_tree]):\n closest_idx = self.waypoint_tree.query([x, y],1)[1]\n return closest_idx\n\n def get_light_state(self, light):\n \"\"\"Determines the current color of the traffic light\n Args:\n light (TrafficLight): light to classify\n Returns:\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n \"\"\"\n \n if(not self.has_image):\n self.prev_light_loc = None\n return TrafficLight.UNKNOWN\n \n cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, \"bgr8\")\n cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)\n cv_image = cv2.resize(cv_image, (224, 224))\n cv_image = cv_image/255.\n\n #Get classification\n t0 = time.time()\n light_id = self.light_classifier.get_classification(cv_image)\n # print(\"Pred time: \", time.time() - t0)\n \n def get_light_text(id):\n if id == TrafficLight.GREEN:\n return \"GREEN\"\n elif id == TrafficLight.RED:\n return \"RED\"\n elif id == TrafficLight.UNKNOWN:\n return \"UNKNOWN\"\n elif id == TrafficLight.YELLOW:\n return \"YELLOW\"\n\n print('Predicted State: \\t\\t{}'.format(get_light_text(light_id)))\n print('Actual State: \\t\\t{}'.format(get_light_text(light.state)))\n\n# # Saving the image\n# self.pred_count += 1\n# file_name = os.path.join(self.out_images_debug_path, 'img_{}_{}.png'.format(self.pred_count, light_id))\n# cv2.imwrite(file_name, cv_image)\n \n return light_id\n\n def process_traffic_lights(self):\n \"\"\"Finds closest visible traffic light, if one exists, and determines its\n location and color\n Returns:\n int: index of waypoint closes to the upcoming stop line for a traffic light (-1 if none exists)\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n \"\"\"\n closest_light = None \n line_wp_idx = None \n\n # List of positions that correspond to the line to stop in front of for a given intersection\n stop_line_positions = self.config['stop_line_positions']\n if(self.pose):\n car_wp_idx = self.get_closest_waypoint(self.pose.pose.position.x, self.pose.pose.position.y)\n diff = 100 # TODO: Replace hardcoding with param\n for i, light in enumerate(self.lights):\n # Get stop line waypoint index\n line = stop_line_positions[i]\n temp_wp_idx = self.get_closest_waypoint(line[0], line[1])\n d = temp_wp_idx - car_wp_idx\n if d >= 0 and d < diff:\n print(\"Diff, d:\", diff, d)\n diff = d \n closest_light = light \n line_wp_idx = temp_wp_idx\n\n if closest_light:\n# rospy.loginfo(\"Closest light found\")\n state = self.get_light_state(closest_light)\n # print('State: ',state)\n return line_wp_idx, state\n \n return -1, TrafficLight.UNKNOWN\n\nif __name__ == '__main__':\n try:\n TLDetector()\n except rospy.ROSInterruptException:\n rospy.logerr('Could not start traffic 
node.')\n","sub_path":"ros/src/tl_detector/tl_detector.py","file_name":"tl_detector.py","file_ext":"py","file_size_in_byte":7643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"456654754","text":"import subprocess\nimport os\n\nimport ujson\n\nfrom flask import Flask, render_template, request\n\nfrom sqlalchemy import not_\n\nfrom Database import Session, Photo\n\n\napp = Flask(__name__)\n\napp.debug = True\n\n\n@app.route('/')\ndef index():\n session = Session()\n s = '
<html><body>'+'<br>'.join(str(_) for _ in session.query(Photo))+'</body></html>
'\n session.close()\n return s\n\n\n@app.route('/test')\ndef test_gallery():\n session = Session()\n photos = [p for p in session.query(Photo).order_by(Photo.time.desc())]\n output = render_template('GalleryTemplate.html', photos=[p.filename for p in photos], indices=[p.id for p in photos])\n session.close()\n return output\n\n\n@app.route('/get_updates')\ndef fetch(*args, **kwargs):\n known_indices = ujson.loads(request.query_string)\n session = Session()\n new_photos = session.query(Photo).filter(not_(Photo.id.in_(known_indices))).order_by(Photo.time).all()\n return ujson.dumps({'ids': [p.id for p in new_photos],\n 'filenames': [p.filename for p in new_photos],\n })\n\n@app.route('/get_comments')\ndef get_comments():\n session = Session()\n image_name = request.query_string.decode('UTF-8')\n print(image_name)\n image = session.query(Photo).filter_by(filename=image_name).first()\n comments = image.comments\n session.close()\n return comments\n\n\n@app.route('/update_comments', methods=['PUT'])\ndef update_comments():\n image_name = os.path.basename(request.form.get('imagename'))\n comments = request.form.get('comments')\n print(image_name, comments)\n session = Session()\n image = session.query(Photo).filter_by(filename=image_name).first()\n image.comments = comments\n session.commit()\n session.close()\n return \"Updated\"\n\n\n@app.route('/print')\ndef print_command(*args, **kwargs):\n filename = os.path.basename(request.query_string[3:-3]).decode('UTF-8')\n print(\"called print_command with {}\".format(filename))\n if filename in os.listdir('/Users/jonathan/Pictures/PhotoBooth'):\n print_photo('/Users/jonathan/Pictures/PhotoBooth/{}'.format(filename))\n return \"Good!\"\n\ndef print_photo(image_name):\n subprocess.call(['lpr', '-P', 'EPSON_PictureMate_PM_225', '{}'.format(image_name)])\n\nif __name__ == '__main__':\n app.run()\n\n","sub_path":"BaseServer.py","file_name":"BaseServer.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"237905702","text":"from keras.models import Sequential\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D\nfrom keras.layers.core import Activation, Dense, Flatten\n\nclass Modle_phase_one:\n @staticmethod\n def build_model(width, height, depth, classes):\n #Initialize he model\n model = Sequential()\n input_shape = (height,width, depth)\n activ = 'relu'\n kernel_size = (5,5)\n\n #First Convolutional layer:\n model.add(Conv2D(30, kernel_size = kernel_size, padding='same', input_shape=input_shape))\n model.add(Activation(activation=activ))\n model.add(MaxPooling2D(pool_size=(2,2), strides= (2,2)))\n\n #Second Convolutional layer:\n model.add(Conv2D(50, kernel_size= kernel_size, padding='same'))\n model.add(Activation(activation=activ))\n model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))\n\n #Second Convolutional layer:\n # model.add(Conv2D(50, kernel_size= kernel_size, padding='same'))\n # model.add(Activation(activation=activ))\n # model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))\n\n #Flatten layer:\n model.add(Flatten())\n model.add(Dense(500))\n model.add(Activation(activation=activ))\n\n #Output layer:\n model.add(Dense(classes))\n model.add(Activation(activation='softmax'))\n\n return model\n\n#Modle_phase_one.build_model(96,96,1,2)","sub_path":"Phase_one_model.py","file_name":"Phase_one_model.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} 
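A minimal usage sketch for the Modle_phase_one builder from the Phase_one_model.py record above. The module path, optimizer, and the 96x96 grayscale, 2-class input shape are assumptions taken from the record's sub_path and its commented-out build_model(96,96,1,2) call, not part of the original file:

import numpy as np
from keras.optimizers import SGD
# Assumed import path, mirroring the record's sub_path "Phase_one_model.py".
from Phase_one_model import Modle_phase_one

# Build the two-block CNN for 96x96 grayscale images and 2 output classes.
model = Modle_phase_one.build_model(width=96, height=96, depth=1, classes=2)
# The model ends in a softmax Dense layer, so categorical cross-entropy
# with one-hot labels is the natural training setup.
model.compile(loss='categorical_crossentropy', optimizer=SGD(lr=0.01), metrics=['accuracy'])

# Dummy batch only to illustrate the expected shapes: (samples, height, width, depth).
X = np.random.rand(8, 96, 96, 1).astype('float32')
y = np.eye(2)[np.random.randint(0, 2, size=8)]  # one-hot labels
model.fit(X, y, batch_size=4, epochs=1)

Given the old-style keras.layers.convolutional imports in the record, this sketch targets Keras 2.x with a channels-last backend.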
+{"seq_id":"354725228","text":"# Documentation:\n# qute://help/configuring.html\n# qute://help/settings.html\n\n# Name of the session to load by default\nc.session.default_name = \"default\"\n\n# Automatically save the current session\nc.auto_save.session = True\n\n# Disable