diff --git "a/4702.jsonl" "b/4702.jsonl" new file mode 100644--- /dev/null +++ "b/4702.jsonl" @@ -0,0 +1,678 @@ +{"seq_id":"49246937","text":"user1_answer = input(\"Player 1, do you want to choose rock, paper or scissors?\")\r\nuser2_answer = input(\"Player 2, do you want to choose rock, paper or scissors?\")\r\n\r\nif user1_answer == user2_answer:\r\n print(\"It's a tie! The scores weren't changed.\")\r\nelif user1_answer == 'rock':\r\n if user2_answer == 'scissors':\r\n print(\"Player 1 wins this round!\")\r\n else:\r\n print(\"Player 2 wins this round!\")\r\nelif user1_answer == 'scissors':\r\n if user2_answer == 'paper':\r\n print(\"Player 1 win this round!\")\r\n else:\r\n print(\"Player 2 wins this round!\")\r\nelif user1_answer == 'paper':\r\n if user2_answer == 'rock':\r\n print(\"Player 1 wins this round!\")\r\n else:\r\n print(\"Player 2 win this round!\")\r\nelse:\r\n print(\"Invalid input! You have not entered rock, paper or scissors, try again.\")","sub_path":"RPS/completed/RockPaperScissors.py","file_name":"RockPaperScissors.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"189079943","text":"class Empty(Exception):\n pass\n\nclass DoublyLinkedList:\n class Node:\n def __init__(self, data=None, prev=None, next=None):\n self.data = data\n self.prev = prev\n self.next = next\n\n def disconnect(self):\n self.data = None\n self.prev = None\n self.next = None\n\n\n def __init__(self):\n self.header = DoublyLinkedList.Node()\n self.trailer = DoublyLinkedList.Node()\n self.header.next = self.trailer\n self.trailer.prev = self.header\n self.size = 0\n\n def __len__(self):\n return self.size\n\n def is_empty(self):\n return len(self) == 0\n\n def first_node(self):\n if(self.is_empty()):\n raise Empty(\"List is empty\")\n return self.header.next\n\n def last_node(self):\n if(self.is_empty()):\n raise Empty(\"List is empty\")\n return self.trailer.prev\n\n def add_after(self, node, data):\n prev = node\n succ = node.next\n new_node = DoublyLinkedList.Node(data, prev, succ)\n prev.next = new_node\n succ.prev = new_node\n self.size += 1\n return new_node\n\n def add_first(self, data):\n return self.add_after(self.header, data)\n\n def add_last(self, data):\n return self.add_after(self.trailer.prev, data)\n\n def add_before(self, node, data):\n return self.add_after(node.prev, data)\n\n def delete_node(self, node):\n pred = node.prev\n succ = node.next\n pred.next = succ\n succ.prev = pred\n self.size -= 1\n data = node.data\n node.disconnect()\n return data\n\n def delete_first(self):\n if (self.is_empty()):\n raise Empty(\"List is empty\")\n self.delete_node(self.first_node())\n\n def delete_last(self):\n if (self.is_empty()):\n raise Empty(\"List is empty\")\n self.delete_node(self.last_node())\n\n def __iter__(self):\n if (self.is_empty()):\n return\n cursor = self.first_node()\n while cursor is not self.trailer:\n yield cursor.data\n cursor = cursor.next\n\n def __repr__(self):\n return \"[\" + \" <--> \".join([str(item) for item in self]) + \"]\"\n\nclass Integer:\n\n def __init__(self, num_str):\n self.data = DoublyLinkedList()\n for val in num_str:\n self.data.add_before(self.data.trailer, int(val))\n\n def __add__(self, other):\n if(self.data.size > other.data.size):\n res = Integer('0'*(self.data.size+1))\n else:\n res = Integer('0'*(other.data.size+1))\n node1 = self.data.last_node()\n node2 = other.data.last_node()\n node3 = res.data.last_node()\n while not (node1 == 
self.data.header and node2 == other.data.header):\n if(node1 == self.data.header):\n val1 = 0\n else:\n val1 = node1.data\n if(node2 == other.data.header):\n val2 = 0\n else:\n val2 = node2.data\n val = node3.data+val1+val2\n node3.data = val%10\n if(val >= 10):\n node3.prev.data = 1\n if(node1!=self.data.header):\n node1=node1.prev\n if(node2!=other.data.header):\n node2=node2.prev\n node3 = node3.prev\n while(res.data.first_node().data == 0):\n res.data.delete_first()\n return res\n\n\n def __repr__(self):\n return ''.join([str(item) for item in self.data])","sub_path":"CS-UY 1134/HW/HW6/sx670_hw6_q2.py","file_name":"sx670_hw6_q2.py","file_ext":"py","file_size_in_byte":3546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"464171448","text":"# coding: utf-8\nimport hashlib\n\nfrom flask import Flask, request\n\nTOKEN = 'syy'\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n user_agent = request.headers.get('User-Agent')\n return u'你的浏览器是%s' % user_agent\n\n@app.route('/user/')\ndef user(name):\n return u'
<h1>Hello, %s!</h1>
' % name\n\n@app.route('/wechat/', methods=['GET', 'POST'])\ndef wechat():\n \"\"\"\n 微信工作的主视图\n :return:\n \"\"\"\n if request.method == 'GET': # 接入微信\n arguments = request.args\n signature = arguments.get('signature', '')\n timestamp = arguments.get('timestamp', '')\n nonce = arguments.get('nonce', '')\n echostr = arguments.get('echostr', '')\n token = TOKEN\n if checkSignature(signature, timestamp, nonce, token):\n return echostr\n else:\n return ''\n elif request.method == 'POST':\n pass\n\ndef checkSignature(signature, timestamp, nonce, token):\n l = [timestamp, nonce, token]\n l.sort()\n s = ''.join(l)\n h = hashlib.sha1()\n h.update(s)\n tempstr = h.hexdigest()\n if tempstr == signature:\n return True\n else:\n return False\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0')\n","sub_path":"mysite.py","file_name":"mysite.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"12440091","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the maximumToys function below.\ndef maximumToys(prices, k):\n\tprices = sorted(prices)\n\tntoys = 0\n\ttotal_price = 0\n\tfor p in prices:\n\t\ttotal_price += p\n\t\tif total_price <= k:\n\t\t\tntoys += 1\n\treturn ntoys\n\nif __name__ == '__main__':\n\tn = 7\n\tk = 50\n\tprices = [1, 12, 5 ,111, 200, 1000, 10]\n\tresult = maximumToys(prices, k)\n\tprint(result)\n\n","sub_path":"code/python/hackerrank/maximumToys.py","file_name":"maximumToys.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"580082519","text":"#!/usr/bin/env python3\n\ntotal = 0.0\n\nwith open('portfolio.csv', 'r') as f:\n headers = next(f) # skip the first line of the file\n for myline in f:\n myline = myline.strip() # remove white space\n parts = myline.split(',') # split the line on the commas\n parts[0] = parts[0].strip('\"')\n parts[1] = parts[1].strip('\"')\n parts[2] = int(parts[2])\n parts[3] = float(parts[3])\n total += parts[2]*parts[3]\n\nprint('Total assets value: $', total)\n","sub_path":"Python5/assets.py","file_name":"assets.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"247010811","text":"import copy\nimport optparse\n\nfrom twisted.internet import defer, reactor\n\nfrom feat.agents.base import document, view\nfrom feat.agencies.net import database\nfrom feat.agencies.interface import ConflictError\nfrom feat.common import log\n\n\n_documents = []\n\n\ndef reset_documents(documents):\n global _documents\n\n _documents = documents\n\n\ndef get_current_initials():\n global _documents\n return copy.deepcopy(_documents)\n\n\ndef initial_data(doc):\n global _documents\n\n if callable(doc) and issubclass(doc, document.Document):\n doc = doc()\n if not isinstance(doc, document.Document):\n raise AttributeError(\n 'First argument needs to be an instance or class of something '\n 'inheriting from feat.agents.base.document.Document!')\n if doc.doc_id:\n for x in _documents:\n if x.doc_id == doc.doc_id:\n _documents.remove(x)\n _documents.append(doc)\n\n\ndef create_connection(host, port, name):\n db = database.Database(host, port, name)\n return db.get_connection()\n\n\n@defer.inlineCallbacks\ndef push_initial_data(connection):\n global _documents\n\n for doc in _documents:\n try:\n yield connection.save_document(doc)\n except 
ConflictError:\n log.error('script', 'Document with id %s already exists!',\n doc.doc_id)\n\n design = view.generate_design_doc()\n yield connection.save_document(design)\n\n\nDEFAULT_DB_HOST = database.DEFAULT_DB_HOST\nDEFAULT_DB_PORT = database.DEFAULT_DB_PORT\nDEFAULT_DB_NAME = database.DEFAULT_DB_NAME\n\n\ndef parse_options():\n usage = \"%prog -H host -P port -N name push\"\n parser = optparse.OptionParser(usage)\n # database related options\n parser.add_option('-H', '--dbhost', dest=\"db_host\",\n help=\"host of database server to connect to\",\n metavar=\"HOST\", default=DEFAULT_DB_HOST)\n parser.add_option('-P', '--dbport', dest=\"db_port\",\n help=\"port of messaging server to connect to\",\n metavar=\"PORT\", default=DEFAULT_DB_PORT, type=\"int\")\n parser.add_option('-N', '--dbname', dest=\"db_name\",\n help=\"host of database server to connect to\",\n metavar=\"NAME\", default=DEFAULT_DB_NAME)\n return parser.parse_args()\n\n\ndef create_db(connection):\n\n def display_warning(f):\n log.warning('script', 'Creating of database failed, reason: %s',\n f.value)\n\n d = connection.create_database()\n d.addErrback(display_warning)\n return d\n\n\ndef script():\n with dbscript() as (d, args):\n\n def body(connection):\n log.info('script', \"I will push %d documents.\", len(_documents))\n d = create_db(connection)\n d.addCallback(lambda _: push_initial_data(connection))\n return d\n\n d.addCallback(body)\n\n\nclass dbscript(object):\n\n def __enter__(self):\n opts, args = parse_options()\n self.connection = create_connection(\n opts.db_host, opts.db_port, opts.db_name)\n\n log.FluLogKeeper.init()\n log.FluLogKeeper.set_debug('5')\n log.info('script', \"Using host: %s, port: %s, db_name; %s\",\n opts.db_host, opts.db_port, opts.db_name)\n self._deferred = defer.Deferred()\n return self._deferred, args\n\n def __exit__(self, type, value, traceback):\n self._deferred.addBoth(lambda _: reactor.stop())\n reactor.callWhenRunning(self._deferred.callback, self.connection)\n reactor.run()\n","sub_path":"src/feat/agents/base/dbtools.py","file_name":"dbtools.py","file_ext":"py","file_size_in_byte":3546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"88563213","text":"from .visitor import NodeVisitor\nfrom .token_type import TokenType\nfrom .memory import CallStack, ActivationRecord, ARType\nfrom .ast import *\n\nSHOULD_LOG_STACK = False\n\ndef enable_log():\n global SHOULD_LOG_STACK\n SHOULD_LOG_STACK = True\n\nclass Interpreter(NodeVisitor):\n def __init__(self, ast):\n self.ast = ast\n self.call_stack = CallStack()\n\n def get_var_value(self, var_name):\n var_name = var_name.upper()\n ar = self.call_stack.peek()\n return self.ar.get(var_name)\n\n def log(self, msg):\n if SHOULD_LOG_STACK:\n print(msg)\n\n def visit_Program(self, node):\n program_name = node.variable.token.value\n\n ar = ActivationRecord(\n name=program_name,\n type=ARType.PROGRAM,\n nesting_level=1\n )\n self.call_stack.push(ar)\n\n self.log(f'ENTER: PROGRAM {program_name}')\n self.log(self.call_stack)\n\n self.visit(node.block)\n\n self.log(f'LEAVE: PROGRAM {program_name}')\n self.log(self.call_stack)\n\n self.call_stack.pop()\n\n def visit_Block(self, node):\n for decl in node.declarations:\n self.visit(decl)\n self.visit(node.compound_statement)\n\n def visit_VarDecl(self, node):\n pass\n\n def visit_ProcDecl(self, node):\n pass\n\n def visit_ProcCall(self, node: ProcCall):\n proc_name = node.name\n proc_symbol = node.symbol\n\n ar = ActivationRecord(\n 
name=proc_name,\n type = ARType.PROCEDURE,\n nesting_level=proc_symbol.scope_level+1,)\n\n formal_params = proc_symbol.params\n actual_params = node.actual_params\n\n for param_symbol, argument_node in zip(formal_params, actual_params):\n ar[param_symbol.name] = self.visit(argument_node)\n\n self.call_stack.push(ar)\n self.log(f'ENTER: PROCEDURE {proc_name}')\n self.log(self.call_stack)\n\n self.visit(proc_symbol.body)\n\n self.log(f'LEAVE: PROCEDURE {proc_name}')\n self.log(self.call_stack)\n self.call_stack.pop()\n\n def visit_Type(self, node):\n pass\n\n def visit_Compound(self, node):\n for leaf in node.leaves:\n self.visit(leaf)\n\n def visit_Assign(self, node):\n var_tok = node.left.token\n var_nam = var_tok.value\n var_val = self.visit(node.right)\n ar = self.call_stack.peek()\n ar[var_nam] = var_val\n\n def visit_Var(self, node):\n ar = self.call_stack.peek()\n var_nam = node.token.value\n var_val = ar.get(var_nam)\n\n if var_val is None:\n raise NameError(repr(var_nam))\n else:\n return var_val\n\n def visit_NoOp(self, node):\n pass\n\n def visit_BinOp(self, node):\n if node.token.type == TokenType.PLUS:\n return self.visit(node.left) + self.visit(node.right)\n elif node.token.type == TokenType.MINUS:\n return self.visit(node.left) - self.visit(node.right)\n elif node.token.type == TokenType.MUL:\n return self.visit(node.left) * self.visit(node.right)\n elif node.token.type == TokenType.INTEGER_DIV:\n return self.visit(node.left) // self.visit(node.right)\n elif node.token.type == TokenType.FLOAT_DIV:\n return self.visit(node.left) / self.visit(node.right)\n\n def visit_Num(self, node):\n return node.value\n\n def visit_UnOp(self, node):\n value = self.visit(node.factor)\n if node.token.type == TokenType.MINUS:\n return -value\n elif node.token.type == TokenType.PLUS:\n return value\n\n def interpret(self):\n self.visit(self.ast)\n","sub_path":"interpreter/ex19/interpreter.py","file_name":"interpreter.py","file_ext":"py","file_size_in_byte":3713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"48491272","text":"#!/usr/bin/env python3\n\nimport csv\nimport sys\n\nfr = open(sys.argv[1], 'rt')\nfw = open(sys.argv[2], 'wt')\n\ntry:\n\treader = csv.reader(fr)\n\twriter = csv.writer(fw)\n\tfor row in reader:\n\t\t#print(row)\n\t\twriter.writerow(row)\n\nfinally:\n\tfr.close()\n\tfw.close()\n","sub_path":"csvdemo.py","file_name":"csvdemo.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"590949873","text":"#\n # ......................我佛慈悲......................\n # _oo0oo_\n # o8888888o\n # 88\" . \"88\n # (| -_- |)\n # 0\\ = /0\n # ___/`---'\\___\n # .' \\\\| |// '.\n # / \\\\||| : |||// \\\n # / _||||| -卍-|||||- \\\n # | | \\\\\\ - /// | |\n # | \\_| ''\\---/'' |_/ |\n # \\ .-\\__ '-' ___/-. /\n # ___'. .' /--.--\\ `. .'___\n # .\"\" '< `.___\\_<|>_/___.' >' \"\".\n # | | : `- \\`.;`\\ _ /`;.`/ - ` : | |\n # \\ \\ `_. 
\\_ __\\ /__ _/ .-` / /\n # =====`-.____`.___ \\_____/___.-`___.-'=====\n # `=---='\n #\n # ..................佛祖开光 ,永无BUG...................\n # ..................佛祖保佑,永不加班...................\n\nfrom urllib import request\nif __name__ == \"__main__\":\n #以 CSDN 为例, CSDN 不更改 User Agent 是无法访问的\n url = 'http://www.csdn.net/'\n head = {}\n # 写入 User Agent 信息\n head['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 ' \\\n '(KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'\n # 创建 Request 对象\n req = request.Request(url, headers=head)\n # 也可以通过调用 Request.add_header() 添加/修改一个特定的 header\n req.add_header(\"Connection\", \"keep-alive\")\n # req.add_header(\"User-Agent\", \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36\")\n # 也可以通过调用 Request.get_header()来查看 header 信息\n print(req.get_header(header_name=\"Connection\"))\n # print(req.get_header(header_name=\"User-Agent\"))\n print(req.get_header(\"User-agent\"))\n # 传入创建好的 Request 对象\n response = request.urlopen(req)\n # 读取响应信息并解码\n html = response.read().decode('utf-8')\n # 打印信息\n\n print('file.getcode,HTTPResponse类型:', response.getcode)\n print('file.info 返回当前环境相关的信息:', response.info())\n\n # print(html)","sub_path":"Urlib_stu/urlib_04_user-agent.py","file_name":"urlib_04_user-agent.py","file_ext":"py","file_size_in_byte":2209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"515179692","text":"from django.http import HttpResponse, JsonResponse, FileResponse\nfrom django.views import View\nfrom django.template import loader\nfrom .models import PlanningSession, User, Temp, Route\nimport json\nimport os\nimport rbBot.botbrain.logic as Logic\nimport rbBot.botbrain.reply as Reply\ntry:\n # HERE API Keys\n import rb.settings._secrets as secure\n SECRET_KEY_3 = secure.SECRET_KEY_3\n SECRET_KEY_4 = secure.SECRET_KEY_4\nexcept ImportError:\n SECRET_KEY_3 = \"error_token\"\n SECRET_KEY_4 = \"error_token\"\n\ndef index(request):\n return HttpResponse(\"Hello, world. 
This is the bot app.\")\n\ndef route(request):\n template = loader.get_template(\"route.html\")\n routeQuery = Route.objects.filter(id=request.GET['route'])\n if routeQuery.exists():\n info = routeQuery.get().info\n context = {\n 'route' : info['routes'][0],\n 'apiKey' : os.getenv(\"SECRET_KEY_3\", SECRET_KEY_3),\n }\n return HttpResponse(template.render(context, request))\n return JsonResponse({\"ERROR\": \"Route Does Not Exists\"})\n\ndef settings(request):\n template = loader.get_template(\"settings.html\")\n if request.method == 'POST':\n search_id = request.POST.get('textfield', None)\n print(search_id)\n return HttpResponse(\"no such user\") \n else:\n return HttpResponse(template.render({}, request))\n\ndef organiser(request):\n template = loader.get_template(\"organiser.html\")\n routeQuery = Route.objects.filter(id=request.GET['route'])\n if routeQuery.exists():\n info = routeQuery.get().destinations\n context = {\n 'dests' : info\n }\n return HttpResponse(template.render(context, request))\n return JsonResponse({\"ERROR\": \"Route Does Not Exists\"})\n \n# https://api.telegram.org/bot/setWebhook?url=\nclass rbHookView(View):\n\n def get(self, request, *args, **kwargs):\n return JsonResponse({\"ok\": \"POST request processed\"})\n\n def post(self, request, *args, **kwargs):\n telegramData = json.loads(request.body)\n ##################\n # DEBUGGING LINE #\n ##################\n print(telegramData)\n ########################\n # COMMENT OUT TO RESET #\n ########################\n self.postHandler(telegramData)\n return JsonResponse({\"ok\": \"POST request processed\"})\n \n def postHandler(self,data):\n \n if 'callback_query' in data:\n query = data['callback_query']\n self.callbackHandler(query)\n elif 'message' in data:\n message = data['message']\n if 'text' not in message:\n Reply.no_reply(message['from']['id'])\n return\n else:\n key = message['text']\n\n if key[1:] != \"start\":\n sender = message['from']['id']\n if Logic.hasUser(User,sender):\n if Logic.isActive(sender):\n pass \n elif Logic.hasUser(Temp,sender):\n pass\n else:\n Reply.no_reply(sender)\n return\n\n if 'entities' in message:\n if message['entities'][0]['type'] == \"bot_command\":\n self.commandHandler(key[1:],message)\n else:\n self.replyHandler(key,message)\n else: \n return\n\n def commandHandler(self,command,message):\n \n sender = message['from']['id']\n message_id = message['message_id']\n target = message['chat']['id']\n\n if not Logic.hasUser(User,sender):\n Logic.addTmp(message)\n Reply.registerQuery(target)\n return\n\n if command == \"start\":\n Reply.welcome(target,sender)\n Reply.provideChoice(target)\n Logic.activate(message_id,sender)\n elif command == \"plan\":\n return \n elif command == \"quit\":\n if Logic.isPlanning(sender):\n Logic.stopPlanning(sender)\n Logic.delPlanningSession(message,-1)\n Reply.commandQuit(target,sender)\n Logic.deactivate(message_id,sender)\n else:\n Reply.no_reply(target)\n\n def callbackHandler(self, callbackData):\n \n sender = callbackData['from']['id']\n instance = callbackData['id']\n message_id = callbackData['message']['message_id']\n target = callbackData['message']['chat']['id']\n \n #################################\n # TEMPORARY ERROR HANDLING LINE #\n #################################\n if not Logic.hasUser(User,sender):\n Reply.invalidCallback(instance)\n Reply.no_reply(target)\n return\n\n if message_id <= Logic.getUser(User,sender).latest_message:\n Reply.invalidCallback(instance)\n return\n \n if callbackData['data'] == \"plan\":\n if 
Logic.isEditing(sender):\n Logic.stopEditing(sender)\n if Logic.isPlanning(sender):\n Reply.prePlanCallback(instance)\n Reply.prePlanClicked(target,message_id)\n else:\n Logic.startPlanning(sender)\n Logic.addPlanningSession(callbackData,-1)\n Reply.planCallback(instance)\n Reply.planClicked(target,message_id)\n elif callbackData['data'] == \"reset_plan_clicked\":\n Logic.stopPlanning(sender)\n Logic.delPlanningSession(callbackData,-1)\n Logic.startPlanning(sender)\n Logic.addPlanningSession(callbackData,-1)\n Reply.postResetCallback(instance)\n Reply.planClicked(target,message_id)\n elif callbackData['data'][:9] == \"use_saved\":\n if Logic.isPlanning(sender):\n Logic.stopPlanning(sender)\n Logic.delPlanningSession(callbackData,0)\n Logic.startPlanning(sender)\n route_id = int(callbackData['data'][10:])\n if callbackData['data'][9] == \"_\":\n Logic.addPlanningSession(callbackData,route_id)\n Reply.planCallback(instance)\n Reply.useOld(target,message_id,Logic.retrieveDests(sender))\n elif callbackData['data'][9] == \"#\":\n if Logic.getRoute(sender,route_id).get().logged:\n Logic.addPlanningSession(callbackData,-1)\n Logic.duplicateRoute(route_id,sender)\n else:\n Logic.addPlanningSession(callbackData,route_id)\n Reply.planCallback(instance)\n dests = json.loads(Logic.getRoute(Logic.getUser(User,sender),route_id).get().destinations)\n Reply.useOld(target,message_id,dests)\n else:\n Reply.no_reply(target)\n elif callbackData['data'] == \"not_plan\":\n Reply.notplanCallback(instance)\n Reply.notplanClicked(target,message_id)\n elif callbackData['data'] == \"settings\":\n Reply.notplanCallback(instance)\n Reply.notplanClicked(target,message_id)\n elif callbackData['data'] == \"plan_help\":\n Reply.planHelpCallback(instance)\n elif callbackData['data'] == \"resume_plan\":\n Reply.resumePlanCallback(instance)\n Reply.resumePlanClicked(target,message_id,Logic.retrieveDests(sender))\n elif callbackData['data'] == \"reset_plan\":\n Reply.warningCallback(instance)\n Reply.resetPlanClicked(target,message_id)\n elif callbackData['data'] == \"pre_plan\":\n Reply.prePlanCallback(instance)\n Reply.prePlanClicked(target,message_id)\n elif callbackData['data'] == \"edit\":\n if len(Logic.retrieveDests(sender)) < 1:\n Reply.invalidPlanCallback(instance)\n Reply.invalidEditPlanClicked(target,message_id)\n else:\n Logic.startEditing(sender)\n Reply.editCallback(instance)\n Reply.editPlan(sender,target,message_id,Logic.retrieveDests(sender))\n elif callbackData['data'] == \"edit_dests\":\n Reply.editDestsCallback(instance)\n Reply.editDests(target,message_id,Logic.retrieveDests(sender))\n elif callbackData['data'] == \"change_dest\" or callbackData['data'][:12] == \"change_dest,\":\n Reply.changeDestCallback(instance)\n Reply.changeDest(target,message_id)\n if len(callbackData['data']) > 10:\n Logic.setDestChange(callbackData['data'][12:],sender)\n elif callbackData['data'] == \"change_dest_next\":\n if not Logic.canChange(sender):\n Reply.invalidPlanCallback(instance)\n Reply.noReplacementFound(target,message_id)\n else:\n Reply.warningCallback(instance)\n Reply.changeDestNext(target,message_id)\n elif callbackData['data'] == \"confirm_change_dest\":\n Reply.confirmDestChangeCallback(instance)\n Logic.replaceInRoute(sender)\n Reply.editDests(target,message_id,Logic.retrieveDests(sender))\n elif callbackData['data'] == \"options\":\n Reply.backCallback(instance)\n Reply.backToOptions(target,message_id)\n elif callbackData['data'] == \"plan_next\":\n if len(Logic.retrieveDests(sender)) < 2:\n 
Reply.invalidPlanCallback(instance)\n Reply.invalidPlanClicked(target,message_id,Logic.retrieveDests(sender))\n else:\n Reply.warningCallback(instance)\n Reply.preRouting(target,message_id,Logic.retrieveDests(sender))\n elif callbackData['data'] == \"visualise\":\n Logic.stopPlanning(sender)\n Logic.confirmRoute(sender)\n Reply.visualiseCallback(instance)\n Reply.visualise(sender,target,message_id)\n Logic.delPlanningSession(callbackData,-1)\n elif callbackData['data'] == \"get_saved\" or callbackData['data'][:10] == \"get_saved#\":\n if len(callbackData['data']) > 9:\n Reply.routeDetailsCallback(instance)\n Reply.routeDetails(sender,target,message_id,int(callbackData['data'][10:]))\n else:\n Reply.getSavedCallback(instance)\n Reply.getSaved(sender,target,message_id)\n elif callbackData['data'] == \"pre_wipe\":\n Reply.warningCallback(instance)\n Reply.preWipe(target,message_id)\n elif callbackData['data'] == \"wipe\":\n Reply.wipeCallback(instance)\n Logic.wipeData(callbackData)\n Reply.provideChoice(target)\n Logic.activate(message_id,sender)\n elif callbackData['data'] == \"pre_quit\":\n Reply.warningCallback(instance)\n Reply.preQuit(target,message_id)\n elif callbackData['data'] == \"quit\":\n if Logic.isPlanning(sender):\n Logic.stopPlanning(sender)\n Logic.delPlanningSession(callbackData,-1)\n Reply.quitCallback(instance)\n Reply.inlineQuit(target,message_id,sender)\n Logic.deactivate(message_id,sender)\n else:\n Reply.no_reply(target)\n \n def replyHandler(self,command,message):\n sender = message['from']['id']\n message_id = message['message_id']\n \n if 'animation' in message or 'document' in message:\n print(\"sent invalid file\")\n Reply.no_reply(sender)\n return\n\n if Logic.hasUser(User, sender):\n if Logic.isEditing(sender):\n Reply.delete_message(sender,message_id)\n current = User.objects.get(user_id=sender)\n session = PlanningSession.objects.get(user=current)\n oldDest = Logic.retrieveDests(sender)[Logic.getUser(User,sender).key_for_dest]\n if Logic.locExists(message['text']):\n result = Logic.getResult(message['text'])\n Logic.logDest(result,sender)\n Logic.logReplacement(message['text'],sender)\n Reply.replaceDestValid(session.chat_id,session.message_id,oldDest,message['text'])\n else:\n Reply.replaceDestInvalid(session.chat_id,session.message_id,[oldDest,message['text']])\n elif Logic.isPlanning(sender): \n Reply.delete_message(sender,message_id)\n current = User.objects.get(user_id=sender)\n session = PlanningSession.objects.get(user=current)\n if Logic.locExists(message['text']):\n result = Logic.getResult(message['text'])\n Logic.logDest(result,sender)\n Logic.addToRoute(result,message['text'],sender)\n Reply.addDest(session.chat_id,session.message_id,Logic.retrieveDests(sender))\n else:\n Reply.invalidDest(session.chat_id,session.message_id,Logic.retrieveDests(sender))\n else:\n Reply.no_reply(sender)\n elif Logic.hasUser(Temp,sender):\n if Logic.isRegistering(message['text']):\n Logic.deleteTmp(message)\n if message['text'] == 'Register':\n Logic.addUser(message)\n Reply.registered(sender)\n Logic.activate(message_id,sender);\n Reply.provideChoice(sender)\n elif message['text'] == 'Back':\n Reply.nextTime(sender)\n else:\n Reply.no_reply(sender)\n else:\n Reply.no_reply(sender)\n","sub_path":"rbBot/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"482522729","text":"\"\"\"\nAuthor: audreyc\nLast Updated: 04/04/16\n\nThis file parses 
Everaldo's benchmark dataset, saved in DS_request format.\n- Removes bad_chars from OCR text and replaces with the chars indicated\n- Prints out one example result (last one)\n- Saves out new pickle files to X_train, X_test, Y_train, and Y_test. Ready for modeling.\n\"\"\"\n\nimport pandas as pd\nimport json\nimport string\nimport DS_type_mapping\nimport pickle\nfrom collections import Counter\nfrom sklearn import cross_validation\nimport time\n\n\nbad_chars = {'\\\\r': ' ', '\\\\n': ' ', '•': ' '}\n # '1': '•', '2': '•', '3': '•', '4': '•', '5': '•',\n # '6': '•', '7': '•', '8': '•', '9': '•', '0': '•'}\noutput_dir = 'sampledata2/'\n\n\ndef parse_input(file_name, cross_validate=True, ds_types=False):\n start = time.time()\n df = pd.read_csv(file_name, sep='\\t')\n ds_suffix = 'DS' if ds_types else 'ET'\n type_order, type_dict = DS_type_mapping.query_file()\n\n doc_list = []\n target_list = []\n ds_target_list = []\n info_list = []\n user_hist = {}\n company_hist = {}\n vendor_hist = {}\n allowed_dict = {}\n count_violations = 0\n\n for i in df.index:\n\n sub_df = json.loads(df.ix[i, 'ds_request'])\n t = sub_df['ocrText']\n this_expense_key = df.ix[i, 'expense_type_legacy_key']\n this_expense_name = df.ix[i, 'expense_type_name']\n this_expense_name = ' '.join(word.strip(string.punctuation) for word in this_expense_name.split())\n this_expense_name = this_expense_name.lower()\n this_allowed_list = {x['ExpKey']: x['Name'] for x in sub_df['userExpenseTypes']}\n entity = sub_df['entityId']\n # if entity != 'p00425z4gu':\n # continue\n user_key = sub_df['entityId'] + '-' + sub_df['userId']\n vendor = sub_df['vendor']\n\n if this_expense_key not in this_allowed_list.keys():\n # In this very rare case of violations, just drop...\n count_violations += 1\n # print(\"Not found: \" + df.ix[i, 'expense_type_legacy_key']) # + str(sub_df['userExpenseTypes']))\n # continue\n\n this_expense_ds_key = DS_type_mapping.to_ds_types(df.ix[i, 'expense_type_legacy_key'],\n df.ix[i, 'expense_type_name'], type_order, type_dict)\n\n target_list.append(this_expense_key)\n ds_target_list.append(this_expense_ds_key)\n\n info_list.append({'datekey': df.ix[i, 'trans_date_key'], 'vendor': vendor,\n 'entity': entity, 'userid': sub_df['userId'],\n 'amount': sub_df['receiptAmt'], 'ds_request': sub_df})\n\n # This line for alpha characters only. 
Actually it doesn't work very well.\n # t = re.sub('[^a-zA-Z \\-]', ' ', df.ix[i, 'ocr_text'])\n\n # These lines are for replacing the bad characters (listed above) with some other token.\n for bc, rw in bad_chars.items():\n t = t.replace(bc, rw)\n # print(t)\n t = t.lower()\n s = t.split()\n s = [x for x in s if len(x) > 1]\n doc_list.append(' '.join(s))\n\n if i > 16812:\n continue\n\n if user_key not in user_hist.keys():\n user_hist[user_key] = Counter()\n user_hist[user_key][this_expense_key] += 1\n\n if entity not in company_hist.keys():\n company_hist[entity] = Counter()\n company_hist[entity][this_expense_key] += 1\n\n if vendor not in vendor_hist.keys():\n vendor_hist[vendor] = Counter()\n vendor_hist[vendor][this_expense_name] += 1\n\n if entity not in allowed_dict.keys():\n allowed_dict[entity] = this_allowed_list\n\n if cross_validate:\n # x_train, x_test, y_train, y_test = cross_validation.train_test_split(\n # doc_list, target_list, test_size=0.1, random_state=1)\n\n ninety = int(len(doc_list) * 0.9)\n print(ninety)\n x_train = doc_list[:ninety]\n x_test = doc_list[ninety:]\n y_train = target_list[:ninety]\n y_ds_train = ds_target_list[:ninety]\n y_test = target_list[ninety:]\n y_ds_test = ds_target_list[ninety:]\n info_train = info_list[:ninety]\n info_test = info_list[ninety:]\n\n pickle.dump(x_test, open(output_dir + 'sampled_X_test.pkl', 'wb'))\n pickle.dump(y_test, open(output_dir + 'sampled_Y_testET.pkl', 'wb'))\n pickle.dump(y_ds_test, open(output_dir + 'sampled_Y_testDS.pkl', 'wb'))\n pickle.dump(info_test, open(output_dir + 'sampled_info_test.pkl', 'wb'))\n else:\n x_train = doc_list\n y_train = target_list\n y_ds_train = ds_target_list\n info_train = info_list\n\n pickle.dump(x_train, open(output_dir + 'sampled_X_train.pkl', 'wb'))\n pickle.dump(y_train, open(output_dir + 'sampled_Y_trainET.pkl', 'wb'))\n pickle.dump(y_ds_train, open(output_dir + 'sampled_Y_trainDS.pkl', 'wb'))\n pickle.dump(info_train, open(output_dir + 'sampled_info_train.pkl', 'wb'))\n\n pickle.dump(vendor_hist, open(output_dir + 'sampled_vendorhist' + ds_suffix + '.pkl', 'wb'))\n pickle.dump(company_hist, open(output_dir + 'sampled_companyhist' + ds_suffix + '.pkl', 'wb'))\n pickle.dump(user_hist, open(output_dir + 'sampled_userhist' + ds_suffix + '.pkl', 'wb'))\n pickle.dump(allowed_dict, open(output_dir + 'sampled_allowedtypes.pkl', 'wb'))\n\n # print(len(doc_list))\n # print(len(target_list))\n # print(t)\n print(doc_list[0])\n print(target_list[0])\n print(\"violations skipped (expense key not in allowed list): \" + str(count_violations))\n\n end = time.time()\n print(\"Time: %.2f seconds for %d entries\" % (end-start, len(target_list)))\n\nif __name__ == '__main__':\n input_file = 'C:/Users/audreyc/PyCharm/ecgm-new/new_ds_model/expenseit_token_benchmark_dataset.tsv'\n parse_input(input_file)\n","sub_path":"token-mod.py","file_name":"token-mod.py","file_ext":"py","file_size_in_byte":5884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"470797484","text":"import unittest\nfrom re import search\nfrom expected_type_decorator import expected_type, UnexpectedTypeException\nfrom find_parents import main_parents\nfrom matrix_determinant import determinant\nfrom multifilter_iterator import multifilter\nfrom permutations import permutations\nfrom regex_pass_validation import regex\nfrom RGB_to_hex import rgb\nfrom validate_sudoku import Sudoku\n\n\nclass ExpectedTypes(unittest.TestCase):\n @staticmethod\n @expected_type((str,))\n def 
return_something(something):\n return something\n\n def test_expected_types(self):\n self.assertEqual(self.return_something('The quick brown fox jumps over the lazy dog.'),\n 'The quick brown fox jumps over the lazy dog.')\n\n def test_false(self):\n with self.assertRaises(UnexpectedTypeException):\n self.return_something(322)\n\n\nclass FindParents(unittest.TestCase):\n def test_first(self):\n self.assertEqual(main_parents(\"\"\"[{\"name\": \"B\", \"parents\": [\"A\", \"C\"]}, {\"name\": \"C\", \"parents\": [\"A\"]},\n {\"name\": \"A\", \"parents\": []}, {\"name\": \"D\", \"parents\": [\"C\", \"F\"]},\n {\"name\": \"E\", \"parents\": [\"D\"]}, {\"name\": \"F\", \"parents\": []}]\"\"\"),\n \"A : 5\\nB : 1\\nC : 4\\nD : 2\\nE : 1\\nF : 3\")\n\n\nclass MatrixDeterminant(unittest.TestCase):\n m1 = [[1, 3], [2, 5]]\n m2 = [[2, 5, 3], [1, -2, -1], [1, 3, 4]]\n\n def test_first(self):\n self.assertEqual(determinant([[1]]), 1,)\n\n def test_second(self):\n self.assertEqual(determinant(self.m1), -1)\n\n def test_third(self):\n self.assertEqual(determinant(self.m2), -20)\n\n\nclass MultifilterIterator(unittest.TestCase):\n @staticmethod\n def mul3(x):\n return x % 3 == 0\n\n @staticmethod\n def mul2(x):\n return x % 2 == 0\n\n @staticmethod\n def mul5(x):\n return x % 5 == 0\n\n a = [i for i in range(31)]\n\n def test_multifilter(self):\n self.assertEqual(list(multifilter(self.a, self.mul2, self.mul3, self.mul5)),\n [0, 2, 3, 4, 5, 6, 8, 9, 10, 12, 14, 15, 16, 18, 20, 21, 22, 24, 25, 26, 27, 28, 30])\n self.assertEqual(list(multifilter(self.a, self.mul2, self.mul3, self.mul5, judge=multifilter.judge_all)),\n [0, 30])\n self.assertEqual(list(multifilter(self.a, self.mul2, self.mul3, self.mul5, judge=multifilter.judge_half)),\n [0, 6, 10, 12, 15, 18, 20, 24, 30])\n\n\nclass Permutations(unittest.TestCase):\n def test_permutations(self):\n self.assertEqual(sorted(permutations('a')), ['a'])\n self.assertEqual(sorted(permutations('ab')), ['ab', 'ba'])\n self.assertEqual(sorted(permutations('aabb')), ['aabb', 'abab', 'abba', 'baab', 'baba', 'bbaa'])\n\n\nclass RegexPasswordValidation(unittest.TestCase):\n def test_pass_validation(self):\n self.assertEqual(bool(search(regex, 'fjd3IR9')), True)\n self.assertEqual(bool(search(regex, 'ghdfj32')), False)\n self.assertEqual(bool(search(regex, 'DSJKHD23')), False)\n self.assertEqual(bool(search(regex, 'dsF43')), False)\n self.assertEqual(bool(search(regex, '4fdg5Fj3')), True)\n self.assertEqual(bool(search(regex, 'DHSJdhjsU')), False)\n self.assertEqual(bool(search(regex, 'fjd3IR9.;')), False)\n self.assertEqual(bool(search(regex, 'fjd3 IR9')), False)\n self.assertEqual(bool(search(regex, 'djI38D55')), True)\n self.assertEqual(bool(search(regex, 'a2.d412')), False)\n self.assertEqual(bool(search(regex, 'JHD5FJ53')), False)\n self.assertEqual(bool(search(regex, '!fdjn345')), False)\n self.assertEqual(bool(search(regex, 'jfkdfj3j')), False)\n self.assertEqual(bool(search(regex, '123')), False)\n self.assertEqual(bool(search(regex, 'abc')), False)\n self.assertEqual(bool(search(regex, '123abcABC')), True)\n self.assertEqual(bool(search(regex, 'ABC123abc')), True)\n self.assertEqual(bool(search(regex, 'Password123')), True)\n\n\nclass RGBToHex(unittest.TestCase):\n def test_rbg_to_hex(self):\n self.assertEqual(rgb(0, 0, 0), \"000000\", \"testing zero values\")\n self.assertEqual(rgb(1, 2, 3), \"010203\", \"testing near zero values\")\n self.assertEqual(rgb(255, 255, 255), \"FFFFFF\", \"testing max values\")\n self.assertEqual(rgb(254, 253, 252), \"FEFDFC\", 
\"testing near max values\")\n self.assertEqual(rgb(-20, 275, 125), \"00FF7D\", \"testing out of range values\")\n\n\nclass ValidateSudoku(unittest.TestCase):\n good_sudoku1 = Sudoku([\n [7, 8, 4, 1, 5, 9, 3, 2, 6],\n [5, 3, 9, 6, 7, 2, 8, 4, 1],\n [6, 1, 2, 4, 3, 8, 7, 5, 9],\n\n [9, 2, 8, 7, 1, 5, 4, 6, 3],\n [3, 5, 7, 8, 4, 6, 1, 9, 2],\n [4, 6, 1, 9, 2, 3, 5, 8, 7],\n\n [8, 7, 6, 3, 9, 4, 2, 1, 5],\n [2, 4, 3, 5, 6, 1, 9, 7, 8],\n [1, 9, 5, 2, 8, 7, 6, 3, 4]\n ])\n\n good_sudoku2 = Sudoku([\n [1, 4, 2, 3],\n [3, 2, 4, 1],\n\n [4, 1, 3, 2],\n [2, 3, 1, 4]\n ])\n\n # Invalid Sudoku\n bad_sudoku1 = Sudoku([\n [0, 2, 3, 4, 5, 6, 7, 8, 9],\n [1, 2, 3, 4, 5, 6, 7, 8, 9],\n [1, 2, 3, 4, 5, 6, 7, 8, 9],\n\n [1, 2, 3, 4, 5, 6, 7, 8, 9],\n [1, 2, 3, 4, 5, 6, 7, 8, 9],\n [1, 2, 3, 4, 5, 6, 7, 8, 9],\n\n [1, 2, 3, 4, 5, 6, 7, 8, 9],\n [1, 2, 3, 4, 5, 6, 7, 8, 9],\n [1, 2, 3, 4, 5, 6, 7, 8, 9]\n ])\n\n bad_sudoku2 = Sudoku([\n [1, 2, 3, 4, 5],\n [1, 2, 3, 4],\n [1, 2, 3, 4],\n [1]\n ])\n\n def test_sudoku(self):\n self.assertTrue(self.good_sudoku1.is_valid())\n self.assertTrue(self.good_sudoku2.is_valid())\n self.assertFalse(self.bad_sudoku1.is_valid())\n self.assertFalse(self.bad_sudoku2.is_valid())\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":5784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"409372272","text":"from selenium import webdriver\n\n#browser = webdriver.PhantomJS()\nbrowser = webdriver.Chrome(\"./chromedriver\")\nbrowser.implicitly_wait(3)\n\nurl_login = \"https://www.google.co.jp/\"\nbrowser.get(url_login)\n\ne = browser.find_element_by_id(\"lst-ib\")\ne.clear()\ne.send_keys('猫')\n\nform = browser.find_element_by_id(\"tsf\")\nform.submit()\n\n\n\n#browser.save_screenshot(\"webtest.png\")\n#browser.quit()\n","sub_path":"selenium-google.py","file_name":"selenium-google.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"408056560","text":"import logging\nimport subprocess\nimport fastapi\nTHRE=15 #threshold of scores\n\n\napp = fastapi.FastAPI()\nlogger = logging.getLogger(\"api\")\n\n##########################################################\n# Launch a command with pipes\np = subprocess.Popen(['python -m parlai.scripts.interactive -mf models/covid7 --single-turn True'], shell=True,\n stdout=subprocess.PIPE,\n stdin=subprocess.PIPE)\n\n# Wait for the parlai CLI to initialize\nwhile True:\n line = p.stdout.readline()\n line = line.strip().decode()\n if line == '[ polyencoder_type: codes ]':\n logger.info(\"parlai ready for commands\")\n break\n\n@app.get(\"/\")\ndef root(question: str = \"What is mask?\"):\n logger.info(\"Question: {}\".format(question))\n # Send the question and get the output\n p.stdin.write(bytes(question, 'utf-8'))\n p.stdin.write(bytes(\"\\n\", 'utf-8'))\n p.stdin.flush()\n line=''\n while '[Polyencoder]' not in line: # Exclude warnings and other messages\n line = p.stdout.readline()\n line = line.strip().decode() # To interpret as text, decode\n result = line.split('[Polyencoder]:')\n #print(result)\n result =result[-1].split('|')\n score=result[0]\n if float(score)>THRE:\n answer=result[1]\n else: #skip if score<15\n answer=\"Sorry, I don't know.\"\n logger.info(\"Answer: {}\".format(answer))\n logger.info(\"Score: {}\".format(score))\n return {\"question\": question, \"answer\": 
answer,\"score\":score}\n\n","sub_path":"fastapi_covid.py","file_name":"fastapi_covid.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"515831647","text":"# 3. Определить, какие из слов «attribute», «класс», «функция», «type» невозможно записать в байтовом типе.\n\nstrings = ['attribute', 'класс', 'функция', 'type']\n\nfor s, string in enumerate(strings):\n try:\n binary_string = bytes(string, encoding='ascii')\n print('\"', string, '\"', 'in ascii: ', binary_string)\n except UnicodeEncodeError:\n print('\"', string, '\"', 'cannot be encoded as ascii string')\n","sub_path":"Lesson_1/task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"431463578","text":"import os\ndef main():\n infile = open('/Users/Python/Desktop/mypython/mypython-4/employees2.txt','r')\n outfile = open('/Users/Python/Desktop/mypython/mypython-4/behidemidterm/file/temp.txt','w') \n oldname = input('Enter old name : ')\n newname = input('Enter new name : ')\n for line in infile :\n rec = (line.rsplit('|'))\n if rec[0] == oldname :\n outfile.write(newname+'|'+rec[1]+'|'+rec[2])\n else:\n outfile.write(line)\n infile.close()\n outfile.close()\n\nmain()\n\n","sub_path":"sheet final/behidemidtermwhile_if/file/modifyfile.py","file_name":"modifyfile.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"295572925","text":"#!/usr/bin/env python3\n\nimport sqlite3\nimport json\n\nDBNAME = 'Nov2008'\nFILENAME = 'RC_2008-11'\n\ndb = sqlite3.connect(DBNAME + '.sqlite')\nc = db.cursor()\n\nc.execute('drop table if exists {0}'.format(DBNAME))\nc.execute('''\n create table if not exists {0}(\n id integer primary key,\n timestamp text,\n subreddit text,\n body text\n )\n'''.format(DBNAME))\n\ninsert_query = 'insert into {0} values (?,?,?,?)'.format(DBNAME)\ncolumns = ['created_utc' , 'subreddit', 'body']\n\nwith open(FILENAME) as infile:\n for idx, line in enumerate(infile):\n data = json.loads(line)\n keys = (idx, ) + tuple(data[col] for col in columns)\n c = db.cursor()\n c.execute(insert_query, keys)\n c.close()\ndb.commit()\ndb.close()\n","sub_path":"HW1/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"8052640","text":"\nfrom pandas import read_csv\nimport numpy as np\nimport gbUtils as gb\n\n# Read input files\nlongFileName = \"routes_EBK_2_orig_long.csv\"\nlong = np.array(gb.readFile(longFileName))\n#longTable.drop(axis = 1, labels = [10, 11, 12, 13, 14], inplace = True)\n\nlatFileName = \"routes_EBK_2_orig_lat.csv\"\nlat = np.array(gb.readFile(latFileName))\n#latTable.drop(axis = 1, labels = [10, 11, 12, 13, 14], inplace = True)\n\npredicted_path = 5\n\nmo = gb.load_model(\"sandbox/valami\")\n\n# mlat = (mo.lat)[0:1, :-predicted_path]\n# mlong = (mo.long)[0:1, :-predicted_path]\nmlat = (lat)[5678:5682, :-predicted_path]\nmlong = (long)[5678:5682, :-predicted_path]\nprint(\"test set lat dims: \", mlat.shape)\nprint(\"test set long dims: \", mlong.shape)\n\ns,f,l,a = mo.ll_predict(mlat, mlong)\nprint(\"original: \", mlat)\nprint(\"predicted s:\", s)\nprint(\"original: \", mlong)\nprint(\"predicted f:\", f)\n\nmo.save_model(\"sandbox/valami2\")\n\n\n# inputTableSimple = []\n# inputTableSimpleAngle = 
[]\n# inputTablePair = []\n# complexTable = []\n# numberOfRows = latTable.shape[0]\n# for indexCounter in range(numberOfRows):\n\n# lat = latTable.iloc[indexCounter]\n# long = longTable.iloc[indexCounter]\n# (length, angle) = convert.convert2DiffPolarInput(lat, long)\n# complex = convert.ConvertPolar2ComplexInput(length, angle)\n# inputDataSimple, inputDataSimpleFlipped, inputDataPair = convert.convert2PairInput(\n# length, angle)\n# indexCounter += 1\n# inputTableSimple.append(inputDataSimple)\n# inputTableSimpleAngle.append(inputDataSimpleFlipped)\n# inputTablePair.append(inputDataPair)\n# complexTable.append(complex)\n\n# # print(np.shape(inputTableSimple))\n# # print(np.shape(inputTableSimple))\n# # print(np.shape(inputTablePair))\n\n# dataa = np.array(inputTableSimpleAngle)\n# X = dataa[:, :-10]\n# y = dataa[:, -10]\n# # print(np.shape(X))\n# # print(np.shape(y))\n# # print(data[1000])\n# # print(X[1000])\n# # print(y[1000])\n\n# # Split input data into train and test dataset based on test size\n# testSize = 0.1\n# X_train, aX_test, y_train, y_test = train_test_split(\n# X, y, test_size=testSize, random_state=1)\n\n# print(np.shape(X_train))\n# print(np.shape(aX_test))\n# print(np.shape(y_train))\n# # Create and train the model\n# amodel = XGBRegressor()\n# amodel.fit(X_train, y_train)\n# #model = XGBRegressor(n_estimators=1000, max_depth=7, eta=0.1, subsample=0.7, colsample_bytree=0.8)\n\n# # Maked predictions on the test dataset\n# y_pred = amodel.predict(aX_test)\n# print(y_pred[100])\n\n# datal = np.array(inputTableSimple)\n# X = datal[:, :-10]\n# y = datal[:, -10]\n# # print(np.shape(X))\n# # print(np.shape(y))\n# # print(data[1000])\n# # print(X[1000])\n# # print(y[1000])\n\n# # Split input data into train and test dataset based on test size\n# testSize = 0.1\n# X_train, lX_test, y_train, y_test = train_test_split(\n# X, y, test_size=testSize, random_state=1)\n\n# print(np.shape(X_train))\n# print(np.shape(lX_test))\n# print(np.shape(y_train))\n# # Create and train the model\n# lmodel = XGBRegressor()\n# lmodel.fit(X_train, y_train)\n# #model = XGBRegressor(n_estimators=1000, max_depth=7, eta=0.1, subsample=0.7, colsample_bytree=0.8)\n\n# # Maked predictions on the test dataset\n# y_pred = lmodel.predict(lX_test)\n# print(y_pred[100])\n\n# data_orig = datal[4567]\n# print(data_orig)\n\n# dl = datal[4567:4568, :-10]\n# # print(dl)\n# da = dataa[4567:4568, :-10]\n# print(da)\n\n\n# dlpred = lmodel.predict(dl)\n# print(dlpred)\n# dapred = amodel.predict(da)\n# print(dapred)\n","sub_path":"prb_load.py","file_name":"prb_load.py","file_ext":"py","file_size_in_byte":3442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"375813418","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 12 21:34:58 2019\n\n@author: xiaohezhang\n\"\"\"\nimport requests\nfrom bs4 import BeautifulSoup\nfrom google_images_download import google_images_download\n\n#getting the breed list\nurl = 'https://tica.org/breeds/browse-all-breeds'\nsoup = BeautifulSoup(requests.get(url).content,'lxml')\ntable = soup.find_all('a',{'data-parent':'#set-rl_sliders-1'})\nbreed_list = []\nfor i in table:\n breed_list.append(i.text[3:])\n#this is the list of all breed\nbreed_list \n\ndef download_image(x): \n down = google_images_download.googleimagesdownload()\n arguments = {\"keywords\":x,\n \"limit\":99,\n 'related_images':True,\n 'no_directory':True\n }\n return down.download(arguments) \n\ndownload_image('Birman cat') 
\n\n","sub_path":"code/google_image_collect.py","file_name":"google_image_collect.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"51816624","text":"#coding:utf-8\r\n\r\nimport requests\r\nimport traceback\r\nimport datetime\r\n\r\nclass logger():\r\n \"\"\"Log errors in a log.txt file and receive them by telegram.\"\"\"\r\n def __init__(self, chat_id:str=None, bot_token:str=None):\r\n self.chat_id = str(chat_id) # the telegram chat_id you want to receive the logs on\r\n self.bot_token = str(bot_token) # the token of the telegram bot which sends you the logs\r\n self.isdebugging = True \r\n\r\n def log_func(self, message, disable_notification=False):\r\n try :\r\n with open('log.txt','a') as f:\r\n written = str(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")) + \"\\n\" + message + \"\\n\"\r\n f.write('{}\\n'.format(written)) \r\n without_notification = \"true\" if disable_notification else \"false\"\r\n send_text = 'https://api.telegram.org/bot' + self.bot_token + '/sendMessage?chat_id=' + self.chat_id + '&text=' + str(message) + \"&disable_notification=\" + str(without_notification).lower()\r\n requests.get(send_text)\r\n except :\r\n try :\r\n message = \"WARNING : Error in sending log by telegram, {}.\\n\".format(traceback.format_exc()) + message\r\n with open('log.txt','a') as f:\r\n f.write('{}\\n'.format(message)) \r\n except : pass\r\n\r\n def debug(self, message) :\r\n if not self.isdebugging : return None\r\n else :\r\n message = \"- DEBUG -\\n\" + message\r\n self.log_func(message, disable_notification=True)\r\n\r\n def info(self, message) :\r\n message = \"- INFO -\\n\" + message\r\n self.log_func(message)\r\n\r\n def error(self, message) :\r\n message = \"- ERROR -\\n\" + message\r\n self.log_func(message)","sub_path":"tglog/tglog.py","file_name":"tglog.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"406349065","text":"#!/usr/bin/env python3\n\nimport sys\n\nimport colorama\n\ncolorama.init()\n\n\ndef colprint(s):\n print(colorama.Fore.RED + s, end='')\n print(colorama.Style.RESET_ALL)\n\n\ntemplate = \"\"\"DROP PROCEDURE\nIF EXISTS {procedure_name};\ndelimiter ;;\n\nCREATE PROCEDURE {procedure_name} (IN record_id INT(11))\nBEGIN\n\nDECLARE my_sql VARCHAR (500) ;\nSET my_sql = concat('UPDATE {table_name} SET enabled = 0 WHERE id=', record_id);\nSET @ms = my_sql ; \nPREPARE s1 FROM @ms ; \nEXECUTE s1 ; \nDEALLOCATE PREPARE s1 ; \nSELECT count(id) as success FROM {table_name} WHERE\tid = record_id AND enabled = 0 ;\n\nEND;;\ndelimiter ;\n\"\"\"\n\nif __name__ == '__main__':\n colprint('Please make sure that the virtual delete column name is *enabled*!')\n procedure_name = sys.argv[1]\n table_name = sys.argv[2]\n with open('sql_delete_record.sql', 'w') as f:\n f.write(template.format(procedure_name=procedure_name, table_name=table_name))\n print(\"Job finished\")\n","sub_path":"sql_generator_delete.py","file_name":"sql_generator_delete.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"42640916","text":"from chainer import Link, Chain, ChainList\nimport chainer.links as L\nimport chainer.functions as F\nimport chainerrl\n\n\nclass RealPPOModel(Chain):\n def __init__(self, n_actions):\n super(RealPPOModel, self).__init__()\n with self.init_scope():\n self.conv1 = 
L.Convolution2D(None, 32, 8, stride=4)\n self.conv2 = L.Convolution2D(None, 64, 4, stride=2)\n #self.conv3 = L.Convolution2D(None, 64, 3, stride=1)\n self.l1 = L.Linear(None, 512)\n self.l2_pi = L.Linear(None, 256)\n self.l2_val = L.Linear(None, 256)\n self.pi = L.Linear(None, n_actions)\n self.val = L.Linear(None, 1)\n self.gaussianPolicy = chainerrl.policies.GaussianHeadWithStateIndependentCovariance(\n action_size=n_actions,\n var_type='diagonal',\n var_func=lambda x: F.exp(2 * x), # Parameterize log std\n var_param_init=0, # log std = 0 => std = 1\n )\n\n def forward(self, x):\n # shared layers\n im = x['retina']\n im = F.relu(self.conv1(im))\n im = F.relu(self.conv2(im))\n #im = F.relu(self.conv3(im))\n im = self.l1(im)\n imx = F.concat([im, x['joint_positions'], x['touch_sensors']])\n\n # pi layers\n l2_pi = F.relu(self.l2_pi(imx))\n pi = self.pi(l2_pi)\n pi = self.gaussianPolicy(pi)\n\n # value layers\n value = F.relu(self.l2_val(imx))\n value = self.val(value)\n return pi, value\n","sub_path":"real_model.py","file_name":"real_model.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"580657766","text":"import json\n\n\nclass Preprocess_tri():\n def __init__(self, lam):\n self.train_sen_argu = self.load_json('train_sen_argu.json')\n self.train_sen_trig = self.load_json('train_sen_trig.json')\n self.train_part_argu = self.load_json('train_part_argu.json')\n self.train_part_trig = self.load_json('train_part_trig.json')\n self.lam = lam\n\n def load_json(self, path):\n with open(path) as f:\n dic = json.load(f)\n return dic\n\n def pre(self, lst_part, dic_sen, lam):\n # initialize\n emiss = dict()\n emiss['start'] = dict()\n emiss['start']['*S*'] = 0\n for k in range(0, len(lst_part)):\n k = lst_part[k]\n emiss[k] = dict()\n emiss[k]['UNK'] = 0\n\n for i in dic_sen:\n emiss['start']['*S*'] += 1\n sen = dic_sen[i]\n for j in range(0, len(sen)):\n word = sen[str(j)][0]\n part = sen[str(j)][1]\n if word not in emiss[part].keys():\n emiss[part][word] = 1\n else:\n emiss[part][word] += 1\n\n for a in emiss.keys():\n for b in emiss[a].keys():\n emiss[a][b] += lam # smoothing\n return emiss\n\n def emissionProb(self, emiss):\n for i in emiss.keys():\n s = sum(emiss[i].values())\n emiss[i] = {k: v / s for k, v in emiss[i].items()}\n return emiss\n\n def StateTransProb(self, sen, lst_part, lam): # using Trigram\n # initialize trigram\n trans = dict()\n trans['bstart'] = dict()\n trans['start'] = dict()\n trans['bstart']['start'] = dict()\n for j in lst_part:\n trans['bstart']['start'][j] = 0\n trans['start'][j] = dict()\n trans['start'][j]['end'] = 0\n trans[j] = dict()\n for i in lst_part:\n trans['start'][j][i] = 0\n trans[j][i] = dict()\n trans[j][i]['end'] = 0\n for k in lst_part:\n trans[j][i][k] = 0\n\n # initialize unigram\n trans_uni = dict()\n trans_uni['end'] = 0\n\n for i in sen.keys():\n dic_sen = sen[i]\n if len(dic_sen) == 0:\n continue\n elif len(dic_sen) == 1:\n trans_uni['end'] += 1\n trans['bstart']['start'][dic_sen.values()] += 1\n elif len(dic_sen) == 2:\n trans_uni['end'] += 1\n w1 = dic_sen['0'][1]\n w2 = dic_sen['1'][1]\n trans['bstart']['start'][w1] += 1\n trans['start'][w1][w2] += 1\n trans[w1][w2]['end'] += 1\n else:\n trans_uni['end'] += 1\n for j in dic_sen.keys():\n j = int(j)\n u = dic_sen[str(j)][1]\n if u not in trans_uni.keys():\n trans_uni[u] = 1\n else:\n trans_uni[u] += 1\n if j == 0:\n s1 = 'bstart'\n s2 = 'start'\n s3 = dic_sen[str(j)][1]\n elif (j+1) % 3 == 
0:\n s1 = dic_sen[str(j-2)][1]\n s2 = dic_sen[str(j-1)][1]\n s3 = dic_sen[str(j)][1]\n elif j == len(dic_sen)-1:\n s1 = dic_sen[str(j-2)][1]\n s2 = dic_sen[str(j-1)][1]\n s3 = 'end'\n else:\n continue\n trans[s1][s2][s3] += 1\n\n for m in trans.keys():\n for n in trans[m].keys():\n for l in trans[m][n].keys():\n trans[m][n][l] += 1\n s = sum(trans[m][n].values())\n trans[m][n] = {k: v / s for k, v in trans[m][n].items()}\n\n s_uni = sum(trans_uni.values())\n trans_uni = {k: v / s_uni for k, v in trans_uni.items()}\n return trans, trans_uni\n\n def save(self, to_save, filename):\n s = json.dumps(to_save)\n f = open(filename + \".json\", \"w\")\n f.write(s)\n f.close()\n\n def run(self):\n argu = self.StateTransProb(self.train_sen_argu, self.train_part_argu, self.lam)\n trig = self.StateTransProb(self.train_sen_trig, self.train_part_trig, self.lam)\n self.save(argu[0], 'train_StateTransProb_argu_tri')\n self.save(trig[0], 'train_StateTransProb_trig_tri')\n self.save(argu[1], 'train_StateTransProb_argu_uni')\n self.save(trig[1], 'train_StateTransProb_trig_uni')\n self.save(self.emissionProb(self.pre(self.train_part_argu, self.train_sen_argu, self.lam)), 'train_emissionProb_argu_tri')\n self.save(self.emissionProb(self.pre(self.train_part_trig, self.train_sen_trig, self.lam)), 'train_emissionProb_trig_tri')","sub_path":"program/preprocess_tri.py","file_name":"preprocess_tri.py","file_ext":"py","file_size_in_byte":4940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"67736257","text":"import csv;\nimport sys\nsys.path.append('C:\\Kyan_Project\\Git_Versioned\\KyanToolKit_Unix')\nimport KyanToolKit_Py;\nktk = KyanToolKit_Py.KyanToolKit_Py();\n\n# defs\ndef search_local_db(_cmd, _user, _pswd, _sql):\n ktk.runCmd(_cmd + \" \"\n + \"-u\" + _user + \" \"\n + \"-p\" + _pswd + \" \"\n + \"-e\" + \" \" + '\"' + _sql + '\"');\n\n# define db vars\nmysql_cmd = \"C:\\\\Kyan_Software\\\\Wnmp\\\\mariadb\\\\bin\\\\mysql\";\ndb_user = \"root\"\ndb_pswd = \"password\"\n\n# open csv\nf = open('mingdan.csv', 'r')\nrdr = csv.reader(f, dialect='excel')\nwhile(True):\n ktk.clearScreen();\n person_name = ktk.getInput(\"Enter Person Name:\");\n search_local_db(mysql_cmd, db_user, db_pswd, \"USE ticket; SELECT name, company, duty, phone FROM `customer` WHERE name = '\" + person_name + \"';\")\n ktk.pressToContinue();\n","sub_path":"SearchPhone/SearchPhoneByName.py","file_name":"SearchPhoneByName.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"11310102","text":"from flask import Flask, request, json, make_response\n\n# Define app\napp = Flask(__name__)\n\n\n@app.route('/')\ndef hello_world():\n return 'Hello World!'\n\n\ndef sum_num(num1, num2):\n return num1 + num2\n\n\ndef subtract_num(num1, num2):\n return num1 - num2\n\n\ndef multiply_num(num1, num2):\n return num1 * num2\n\n\ndef divide_num(num1, num2):\n return num1 / num2\n\n\ndef prepare_response(message):\n # build response body to return back to Dialogflow\n request_response = {\n \"speech\": message,\n \"displayText\": message,\n \"data\": None,\n \"contextOut\": [],\n \"source\": \"MY-CHATBOT-API\"\n }\n request_response = json.dumps(request_response, indent=4)\n response = make_response(request_response)\n response.headers['Content-Type'] = 'application/json'\n print(message)\n return response\n\n\n@app.route('/chatbot', methods=['POST'])\ndef chatbot():\n message = '' # going to store message for 
Chatbot to say\n\n # convert request from Dialogflow to JSON\n req = request.get_json(silent=True, force=True)\n # print out request JSON\n print(json.dumps(req, indent=4))\n # dissect the incoming message from dialogflow\n try:\n # if action is 'add_numbers' do addition\n if req['result']['action'] == 'add_numbers':\n # grab two numbers from dialogflow parameters\n num1 = int(req['result']['parameters']['num1'])\n num2 = int(req['result']['parameters']['num2'])\n # use sum_num() function to find sum of two numbers\n message = \"Why the fuck are you asking me. I don't know!\".format(num1, num2, sum_num(num1, num2))\n return prepare_response(message)\n\n elif req['result']['action'] == 'subtract_numbers':\n # grab two numbers from dialogflow parameters\n num1 = int(req['result']['parameters']['num1'])\n num2 = int(req['result']['parameters']['num2'])\n # use sum_num() function to find sum of two numbers\n message = \"The subtraction of {} and {} is {}\".format(num1, num2, subtract_num(num1, num2))\n return prepare_response(message)\n\n elif req['result']['action'] == 'multiply_numbers':\n # grab two numbers from dialogflow parameters\n num1 = int(req['result']['parameters']['num1'])\n num2 = int(req['result']['parameters']['num2'])\n # use sum_num() function to find sum of two numbers\n message = \"The product of {} and {} is {}\".format(num1, num2, multiply_num(num1, num2))\n return prepare_response(message)\n\n elif req['result']['action'] == 'divide_numbers':\n # grab two numbers from dialogflow parameters\n num1 = int(req['result']['parameters']['num1'])\n num2 = int(req['result']['parameters']['num2'])\n if num2 == 0:\n return prepare_response(\"You can't divide by 0\")\n else:\n # use sum_num() function to find sum of two numbers\n message = \"The quotient of {} and {} is {}\".format(num1, num2, divide_num(num1, num2))\n return prepare_response(message)\n\n except:\n message = \"Oops there was an error, try again\"\n return prepare_response(message)\n\n\nif __name__ == '__main__':\n app.run() # kick off our API here\n","sub_path":"my_app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"323603457","text":"'''\nStatement\nThe first line contains the number of records. After that, each entry contains the name of the candidate and the number of votes they got in some state. Count the results of the elections: sum the number of votes for each candidate. 
Print candidates in the alphabetical order.\n\nExample input\n5\nMcCain 10\nMcCain 5\nObama 9\nObama 8\nMcCain 1\n\nExample output\nMcCain 16\nObama 17\n\nTheory\nIf you don't know how to start solving this assignment, please, review a theory for this lesson:\nhttps://snakify.org/lessons/dictionaries_dicts/\n\n'''\n\ndict = {}\nlist1 = []\nlist2 = []\n\nfor i in range(int(input())):\n w1, w2 = input().split()\n if w1 not in list1:\n list1 += [w1]\n list2 += [w2]\n else:\n list2[int(list1.index(w1))] = int(list2[list1.index(w1)])+int(w2)\n\nlist3 = sorted(list1)\n\nfor k in range(len(list1)):\n q = int(list1.index(list3[k]))\n print(list3[k], list2[q])\n\n\n# Model Solution\n\n# n = int(input())\n# votes_total = {}\n# for i in range(n):\n# candidate, num_votes = input().split()\n# if candidate not in votes_total:\n# votes_total[candidate] = 0\n# votes_total[candidate] += int(num_votes)\n# for candidate in sorted(votes_total):\n# print(candidate, votes_total[candidate])","sub_path":"python_exercise/.py/Dicts_Elections.py","file_name":"Dicts_Elections.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"258340211","text":"DEBUG = True\nSECRET_KEY = \"secret\"\nTEMPLATE_DEBUG = True\nALLOWED_HOSTS = []\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.staticfiles',\n 'django.contrib.contenttypes',\n 'django_app_lti',\n)\nMIDDLEWARE_CLASSES = ()\nROOT_URLCONF = 'django_app_lti.urls'\nDATABASES = {}\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n","sub_path":"django_app_lti/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"593984431","text":"\nimport os\nfrom os import listdir\nfrom os.path import isfile, join\nimport sys\n\ndef progress(count, total, suffix=''):\n bar_len = 60\n filled_len = int(round(bar_len * count / float(total)))\n\n percents = round(100.0 * count / float(total), 1)\n bar = '=' * filled_len + '-' * (bar_len - filled_len)\n\n sys.stdout.write('[%s] %s%s ...%s\\r' % (bar, percents, '%', suffix))\n sys.stdout.flush() \n\nif __name__ == \"__main__\":\n if len(sys.argv) != 4:\n print(\"Please input the args as \")\n exit()\n else:\n\n # Get the folder path\n folderpath = sys.argv[1]\n\n # Move into the directory\n os.chdir(folderpath)\n\n # Get the output file type\n filetype = sys.argv[2]\n len_file_type = len(filetype)\n\n # Get resume from value\n resume_from = int(sys.argv[3])\n\n # Get the names of all the files\n files = [f for f in listdir(\".\") if isfile(join(\".\", f))]\n\n # Get the total files\n total_files = len(files)\n\n # Calculate the start point\n c = 0\n if resume_from != -1:\n c = resume_from\n\n for f in files:\n\n x = int(f[:len(filetype)])\n\n if x >= resume_from :\n # Update the progress\n progress(c, total_files)\n\n # Get the name of the new file\n new_file = str(c)+ filetype\n\n # Rename the file\n os.rename(f,new_file)\n \n # Increment the count\n c += 1\n\n print(\"Done! 
Please wait a while until the finder updates the changes\")\n","sub_path":"rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"299145604","text":"import pygame\r\nfrom tkinter import Tk\r\n\r\nroot = Tk()\r\n\r\nDEFAULT_CAPTION = \"Windoner app!\"\r\nDEFAULT_WIDTH = root.winfo_screenwidth() // 2\r\nDEFAULT_HEIGHT = root.winfo_screenheight() // 2\r\nDEFAULT_ICON = pygame.Surface((1, 1))\r\nDEFAULT_BACKGROUND = (25, 25, 25)\r\n\r\nDEFAULT_FPS = 60\r\n","sub_path":"Windoner/consts.py","file_name":"consts.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"310586542","text":"# Copyright 2017 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nimport os\n\nimport entity_type_management\nimport session_entity_type_management\n\nPROJECT_ID = os.getenv('GCLOUD_PROJECT')\nSESSION_ID = 'fake_session_for_testing'\nENTITY_TYPE_DISPLAY_NAME = 'fake_display_name_for_testing'\nENTITY_VALUES = ['fake_entity_value_1', 'fake_entity_value_2']\n\n\ndef test_create_session_entity_type(capsys):\n # Create an entity type\n entity_type_management.create_entity_type(\n PROJECT_ID, ENTITY_TYPE_DISPLAY_NAME, 'KIND_MAP')\n\n session_entity_type_management.create_session_entity_type(\n PROJECT_ID, SESSION_ID, ENTITY_VALUES, ENTITY_TYPE_DISPLAY_NAME,\n 'ENTITY_OVERRIDE_MODE_SUPPLEMENT')\n session_entity_type_management.list_session_entity_types(\n PROJECT_ID, SESSION_ID)\n\n out, _ = capsys.readouterr()\n\n assert SESSION_ID in out\n assert ENTITY_TYPE_DISPLAY_NAME in out\n for entity_value in ENTITY_VALUES:\n assert entity_value in out\n\n\ndef test_delete_session_entity_type(capsys):\n session_entity_type_management.delete_session_entity_type(\n PROJECT_ID, SESSION_ID, ENTITY_TYPE_DISPLAY_NAME)\n session_entity_type_management.list_session_entity_types(\n PROJECT_ID, SESSION_ID)\n\n out, _ = capsys.readouterr()\n assert ENTITY_TYPE_DISPLAY_NAME not in out\n for entity_value in ENTITY_VALUES:\n assert entity_value not in out\n\n # Clean up entity type\n entity_type_ids = entity_type_management._get_entity_type_ids(\n PROJECT_ID, ENTITY_TYPE_DISPLAY_NAME)\n for entity_type_id in entity_type_ids:\n entity_type_management.delete_entity_type(\n PROJECT_ID, entity_type_id)\n","sub_path":"dialogflow/cloud-client/session_entity_type_management_test.py","file_name":"session_entity_type_management_test.py","file_ext":"py","file_size_in_byte":2229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"389522905","text":"import requests\nfrom bs4 import BeautifulSoup\nimport os,sys\nfrom datetime import datetime\n\n\n#this method is responsible for web scraping and finding the imdb link for the tvseries which we are searching for\ndef getserieslink(baselink,s,seriesname):\n serieslink=\"\"\n while 1==1 :\n 
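# keep fetching IMDb result pages until an exact (case-insensitive) title match is found or no \"Next\" link is left\n        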
#print(baselink+s)\n r = requests.get(baselink+s)\n c=r.content\n soup = BeautifulSoup(c,\"html.parser\")\n try:\n nextall = soup.find_all(\"div\",{\"class\":\"lister-item mode-detail\"})\n for item in nextall:\n next1 = item.find_all(\"h3\",{\"class\":\"lister-item-header\"})[0]\n temp=next1.find(\"a\").text\n #print(temp.lower())\n #print(temp.lower())\n if(temp.lower() == seriesname.lower()):\n href = next1.find_all(\"a\")[0]\n href = href[\"href\"]\n serieslink=href\n if(len(serieslink)>0):\n break\n else:\n \n nextpage = soup.find(\"div\",{\"class\":\"list-pagination\"})\n #print(nextpage)\n nextpage = nextpage.find_all(\"a\")[1]\n #print(nextpage[\"href\"])\n if(nextpage[\"href\"] != \"#\"):\n s=nextpage[\"href\"]\n else:\n break\n \n \n except:\n nextall = soup.find_all(\"div\",{\"class\":\"lister-item mode-advanced\"})\n for item in nextall:\n next1 = item.find_all(\"h3\",{\"class\":\"lister-item-header\"})[0]\n temp=next1.find(\"a\").text\n #print(temp.lower())\n #print(temp.lower())\n if(temp.lower() == seriesname.lower()):\n href = next1.find_all(\"a\")[0]\n href = href[\"href\"]\n serieslink=href\n break\n if(len(serieslink)>0):\n break\n else:\n nextpage = soup.find(\"div\",{\"class\":\"nav\"})\n #print(nextpage)\n try:\n nextpage = nextpage.find_all(\"a\")\n temp=nextpage[0]\n if(temp.text.lower()==\"next\"):\n s=temp[\"href\"]\n break\n else:\n temp=nextpage[1]\n if(temp.text.lower()==\"next\"):\n s=temp[\"href\"]\n break\n else:\n temp=nextpage[2]\n if(temp.text.lower()==\"next\"):\n s=temp[\"href\"]\n break\n else:\n break\n s=\"/search/title\"+s\n except:\n break\n return serieslink\n","sub_path":"serieslink.py","file_name":"serieslink.py","file_ext":"py","file_size_in_byte":2941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"290803353","text":"import pygame\n\nfrom keyshift.Resources import Resources\n\nclass Audio:\n current_music = None\n @staticmethod\n def play_music(name):\n \"\"\"\n Play the given music file if it is not already playing.\n Suggested file type: .ogg.\n :param name: File name\n :return: None\n \"\"\"\n if Audio.current_music == name:\n return\n music_data = Resources.load_music(name)\n pygame.mixer.music.load(music_data)\n pygame.mixer.music.play(-1)\n Audio.current_music = name","sub_path":"keyshift/Audio.py","file_name":"Audio.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"147778888","text":"import torch, torch.nn as nn, numpy as np, matplotlib.pyplot as plt\nfrom topological_loss import PersistenceDgm\nimport time\ndef circlefn(i, j, n):\n r = np.sqrt((i - n/2.)**2 + (j - n/2.)**2)\n return np.exp(-(r - n/3.)**2/(n*2))\n\n\ndef gen_circle(n):\n beta = np.empty((n,n))\n for i in range(n):\n for j in range(n):\n beta[i,j] = circlefn(i,j,n)\n return beta\n\ndef savepersistence(n, beta_t, ground_t, beta, beta_ols, path):\n ########### save persistence Diagram #######\n outplot = PersistenceDgm((n,n))\n z, f = outplot.dgmplot(beta_t)\n fig, ax = plt.subplots(nrows=2, ncols=3, figsize=(15,10))\n ax[0][2].set(xlim=(-1.1, 1.1), ylim=(-1.1, 1.1))\n if len(z)>0:\n \tax[0][2].plot(z[:,0], z[:,1],'bo')\n if len(f)>0:\n \tax[0][2].plot(f[:,0], f[:,1],'ro')\n ax[0][2].set_title(\"output PersistenceDgm\")\n \n inplot = PersistenceDgm((n,n))\n z, f = inplot.dgmplot(ground_t)\n ax[0][0].set(xlim=(-1.1, 1.1), ylim=(-1.1, 1.1))\n if len(z)>0:\n \tax[0][0].plot(z[:,0], z[:,1],'bo')\n if len(f)>0:\n 
\tax[0][0].plot(f[:,0], f[:,1],'ro')\n ax[0][0].set_title(\"Ground Truth PersistenceDgm\")\n \n for i in range(2):\n ax[0][i].set_xlabel('Death')\n ax[0][i].set_ylabel('Birth')\n\n ############ save outputs #############\n beta_est = beta_t.detach().numpy()\n ax[1][0].imshow(beta)\n ax[1][0].set_title(\"Truth\")\n ax[1][1].imshow(beta_ols)\n ax[1][1].set_title(\"OLS\")\n ax[1][2].imshow(beta_est)\n ax[1][2].set_title(\"Topology Regularization\")\n for i in range(3):\n ax[1][i].set_yticklabels([])\n ax[1][i].set_xticklabels([])\n ax[1][i].tick_params(bottom=False, left=False)\n t = time.time()\n plt.savefig(path+'imgs/'+'persistence_dgm'+str(t)+'.png')","sub_path":"examples/levelset/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"329652588","text":"h = int ( input(\"Height?\"))\nw = int ( input(\"Weight?\"))\n\n\nfor row in range(0,h +1):\n for col in range(0, w +1):\n if( row == 0 or row == h or col == 0 or col == w):\n print('*', end = ' ')\n else:\n print(' ', end = ' ')\n\n print()\n\n","sub_path":"01-week/4-friday/labs/box.py","file_name":"box.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"637407637","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport smtplib\nimport argparse\nimport sys\nimport time\nimport mysql.connector\nfrom datetime import datetime\n\nimport numpy as np\nimport tensorflow as tf\n\n\n\ndef load_graph(model_file):\n graph = tf.Graph()\n graph_def = tf.GraphDef()\n\n with open(model_file, \"rb\") as f:\n graph_def.ParseFromString(f.read())\n with graph.as_default():\n tf.import_graph_def(graph_def)\n\n return graph\n\ndef read_tensor_from_image_file(file_name, input_height=299, input_width=299,\n\t\t\t\tinput_mean=0, input_std=255):\n input_name = \"file_reader\"\n output_name = \"normalized\"\n file_reader = tf.read_file(file_name, input_name)\n if file_name.endswith(\".png\"):\n image_reader = tf.image.decode_png(file_reader, channels = 3,\n name='png_reader')\n elif file_name.endswith(\".gif\"):\n image_reader = tf.squeeze(tf.image.decode_gif(file_reader,\n name='gif_reader'))\n elif file_name.endswith(\".bmp\"):\n image_reader = tf.image.decode_bmp(file_reader, name='bmp_reader')\n else:\n image_reader = tf.image.decode_jpeg(file_reader, channels = 3,\n name='jpeg_reader')\n float_caster = tf.cast(image_reader, tf.float32)\n dims_expander = tf.expand_dims(float_caster, 0);\n resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])\n normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])\n sess = tf.Session()\n result = sess.run(normalized)\n\n return result\n\ndef load_labels(label_file):\n label = []\n proto_as_ascii_lines = tf.gfile.GFile(label_file).readlines()\n for l in proto_as_ascii_lines:\n label.append(l.rstrip())\n return label\ndef convertToBinaryData(filename):\n # Convert digital data to binary format\n with open(filename, 'rb') as file:\n binaryData = file.read()\n return binaryData\n \nif __name__ == \"__main__\":\n file_name = \"../download.jpeg\"\n model_file = \"files/retrained_graph.pb\"\n label_file = \"files/retrained_labels.txt\"\n input_height = 224\n input_width = 224\n input_mean = 128\n input_std = 128\n input_layer = \"input\"\n output_layer = \"final_result\"\n date = 
datetime.today().strftime('%Y-%m-%d')\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--image\", help=\"image to be processed\")\n parser.add_argument(\"--graph\", help=\"graph/model to be executed\")\n parser.add_argument(\"--labels\", help=\"name of file containing labels\")\n parser.add_argument(\"--input_height\", type=int, help=\"input height\")\n parser.add_argument(\"--input_width\", type=int, help=\"input width\")\n parser.add_argument(\"--input_mean\", type=int, help=\"input mean\")\n parser.add_argument(\"--input_std\", type=int, help=\"input std\")\n parser.add_argument(\"--input_layer\", help=\"name of input layer\")\n parser.add_argument(\"--output_layer\", help=\"name of output layer\")\n args = parser.parse_args()\n\n if args.graph:\n model_file = args.graph\n if args.image:\n file_name = args.image\n if args.labels:\n label_file = args.labels\n if args.input_height:\n input_height = args.input_height\n if args.input_width:\n input_width = args.input_width\n if args.input_mean:\n input_mean = args.input_mean\n if args.input_std:\n input_std = args.input_std\n if args.input_layer:\n input_layer = args.input_layer\n if args.output_layer:\n output_layer = args.output_layer\n\n graph = load_graph(model_file)\n t = read_tensor_from_image_file(file_name,\n input_height=input_height,\n input_width=input_width,\n input_mean=input_mean,\n input_std=input_std)\n\n input_name = \"import/\" + input_layer\n output_name = \"import/\" + output_layer\n input_operation = graph.get_operation_by_name(input_name);\n output_operation = graph.get_operation_by_name(output_name);\n\n with tf.Session(graph=graph) as sess:\n start = time.time()\n results = sess.run(output_operation.outputs[0],\n {input_operation.outputs[0]: t})\n end=time.time()\n results = np.squeeze(results)\n\n top_k = results.argsort()[-5:][::-1]\n labels = load_labels(label_file)\n print(top_k)\n print('\\nEvaluation time (1-image): {:.3f}s\\n'.format(end-start))\n template = \"{} (score={:0.5f})\"\n for i in top_k:\n if results[i]> 0.7:\n print(template.format(labels[i], results[i]))\n if labels[i] == \"not clean\":\n mydb = mysql.connector.connect(\n host=\"urhost\",\n user=\"uruname\",\n passwd=\"urpass\",\n database=\"urdbname\"\n )\n mycursor = mydb.cursor()\n blob = convertToBinaryData(file_name)\n mySql_insert_query = \"\"\"Insert into Locations(name,address,image,reward,status,date) values('Lorem Ipsum','VIT Vellore',%s,250,'Available',%s)\"\"\"\n recordTuple = (blob,date)\n mycursor.execute(mySql_insert_query, recordTuple)\n mydb.commit()\n print(\"Record inserted successfully\")\n \n \n","sub_path":"scripts/detect.py","file_name":"detect.py","file_ext":"py","file_size_in_byte":5226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"223056127","text":"import torch as t\nimport torchvision\nfrom advertorch.attacks import FGSM\nfrom torch import nn\nfrom torchvision import transforms\nfrom torch.utils.data import DataLoader\n\nfrom Model.DefenseNet import DefenseNet\nfrom Model.DefenseNet import reduce_precision_np\nfrom Model.GoogleNet import GoogleNetCifar\nimport foolbox as fb\n\n\nif __name__ == '__main__':\n \n learning_rate = 1e-3 \n batch_size = 128 \n epochs = 20 \n t.manual_seed(0)\n use_cuda = t.cuda.is_available()\n device = t.device(\"cuda\" if use_cuda else \"cpu\")\n\n test_dataset = torchvision.datasets.CIFAR10(root='../dataset',\n train=False,\n transform=transforms.Compose([transforms.ToTensor()]),\n download=True)\n test_loader = 
DataLoader(test_dataset, batch_size=batch_size, shuffle=False)\n\n model1 = GoogleNetCifar\n model1.load_state_dict(t.load('../SavedNetworkModel/CIFAR/GoogleNet/googlenet_cifar_39.pth'))\n model2 = DefenseNet()\n model2.load_state_dict(t.load('../SavedNetworkModel/CIFAR/GoogleNet/Defense_DeepFool/googlenet_defense_deepfool_cifar_59.pth'))\n model1.to(device)\n model2.to(device)\n\n epsilons = [.005]\n for eps in epsilons:\n \n correct = 0\n for img in test_loader:\n data, label = img\n data, label = data.to(device), label.to(device)\n\n bounds = (0, 1)\n fmodel = fb.PyTorchModel(model1, bounds)\n attack = fb.attacks.LinfDeepFoolAttack()\n raw, adv_untargeted, adv = attack(fmodel, data, label, epsilons=0.005)\n # clean dataset\n # adv_untargeted = adv_untargeted.cpu().detach().numpy()\n # data1 = reduce_precision_np(adv_untargeted, 8)\n # data1 = t.tensor(data1)\n # data1 = data1.to(device)\n #\n # output = model2(data1)\n output = model1(adv_untargeted)\n pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability\n correct += pred.eq(label.data.view_as(pred)).cpu().sum() \n print('epsilon: {} Accuracy: {}/{} ({:.0f}%)'.format(eps, correct, len(test_loader.dataset), 100 * correct / len(test_loader.dataset)))\n","sub_path":"cifar/googlenet_test_defense_deepfool_cifar.py","file_name":"googlenet_test_defense_deepfool_cifar.py","file_ext":"py","file_size_in_byte":2302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"408367066","text":"from django.http import HttpResponseRedirect, HttpResponse\nfrom django.shortcuts import render, redirect\n\nfrom products.models import Product, Category\nfrom .forms import SearchForm\nfrom .models import Setting, Contact ,ContactForm\nfrom django.contrib import messages\n\nfrom django.core.mail import send_mail\n\n# Create your views here.\n\ndef index(request):\n setting = Setting.objects.get(pk = 1)\n product_first = Product.objects.all().order_by('category')[:9]\n product_last = Product.objects.all().order_by('-id')[:9]\n product_random = Product.objects.all().order_by('?')[:9]\n context = {\n 'setting':setting ,\n 'product_first':product_first,\n 'product_last': product_last,\n 'product_random': product_random,\n }\n return render(request, 'hometemps/index.html',context)\n\n\ndef about(request):\n setting = Setting.objects.get(pk = 1)\n context = {\n\n 'setting':setting\n }\n return render(request, 'hometemps/about.html',context)\n\ndef contact(request):\n if request.method == 'POST': # if method is post\n form = ContactForm(request.POST)\n if form.is_valid():\n data = Contact() # creating relation with model\n\n data.first_name = form.cleaned_data['first_name']\n data.last_name = form.cleaned_data['last_name']\n data.email = form.cleaned_data['email']\n data.subject = form.cleaned_data['subject']\n data.message = form.cleaned_data['message']\n data.ip = request.META.get('REMOTE_ADDR') # take ip address of requested user\n data.save() # save the data\n\n messages.success(request,'your message has been received.Thank you for your interest')\n # send email code goes here\n sender_name = form.cleaned_data['first_name']\n sender_email = form.cleaned_data['email']\n\n message = \"{0} has sent you a new message:\\n\\n{1}\".format(sender_name, form.cleaned_data['message'])\n send_mail('New Enquiry', message, sender_email, ['test@gmail.com'])\n return HttpResponseRedirect('/contact')\n else:\n setting = Setting.objects.get(pk=1)\n form = ContactForm()\n context = {\n 
'setting':setting,'form':form\n }\n return render(request,'hometemps/contact.html',context)\n\ndef test(request):\n category = Category.objects.all()\n return render(request, 'hometemps/header.html',context={'category':category,\n })\n\ndef catwise_product(request,id,slug):\n count = 0\n brand_product = Product.objects.filter(category_id=id)\n for c in brand_product:\n count += count\n print(count)\n products = Product.objects.filter(category__parent_id=id)\n context = {\n 'products':products,\n 'brand_product':brand_product,\n 'count':count\n }\n return render(request,'product_temps/category.html',context)\n\ndef search(request):\n if request.method == 'GET':\n form = SearchForm(request.GET)\n if form.is_valid():\n query = form.cleaned_data['query']\n # catid = form.cleaned_data['catid']\n\n search_prod = Product.objects.filter(keywords__icontains=query)\n context = {\n 'serach_prod':search_prod\n }\n return render(request,'product_temps/search.html',context)\n else:\n return HttpResponse('something went wrong')\n return HttpResponse('/')","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"104608008","text":"# Лабораторная работа №1 Вариант 2.\r\nfrom math import tan, acosh\r\nprint(\"Здравствуйте\")\r\n\r\n# Ввод значений\r\na = int(input(\"Введите а:\"))\r\nx = int(input(\"Введите x:\"))\r\n\r\n# Функции\r\nG = (7 * ((-15 * a**2) + (22 * a * x) + (5 * x**2))) / ((4 * a**2) + (7 * a * x) + (3 * x**2))\r\nF = -tan((4 * a**2) - (3 * a * x) - (7 * x**2))\r\nY = acosh((-7 * a**2) + (20 * a * x) + (3 * x**2) + 1)\r\n\r\n# Результат\r\nprint('G = {0}, F = {1}, Y = {2}'.format(G, F, Y))\r\n","sub_path":"Lab1.py","file_name":"Lab1.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"206922069","text":"import os\nimport shlex, subprocess\nimport sys\nimport configparser\nfrom pathlib import Path\nimport os, fnmatch\nfrom git import Repo\n\ndef find(pattern, path):\n result = []\n for root, dirs, files in os.walk(path):\n for name in files:\n if fnmatch.fnmatch(name, pattern):\n result.append(name)\n return result\n\n#read a .properties file without section headers.\ndef add_section_header(properties_file, header_name):\n\t# configparser.ConfigParser requires at least one section header in a properties file.\n\t# Our properties file doesn't have one, so add a header to it on the fly.\n\tyield '[{}]\\n'.format(header_name)\n\tfor line in properties_file:\n\t\tyield line\n\ndef read_config_file(filename):\n\tconfig = configparser.RawConfigParser()\n\tif not os.path.isfile(filename):\n\t\tfile = open(filename, \"w+\", encoding=\"utf_8\")\n\telse:\n\t\tfile = open(filename, encoding=\"utf_8\")\n\tconfig.read_file(add_section_header(file, 'asection'), source=filename)\n\treturn config['asection']\n\ndef cmake(src,build_path,flags):\n\twd=os.getcwd()\n\tPath(build_path).mkdir(True,exist_ok=True)\n\tos.chdir(build_path)\n\tcmakeCmd = [\"cmake.exe\", '-G','Visual Studio 16 2019', os.path.relpath(src, build_path)]\n\tretCode = subprocess.check_call(cmakeCmd+flags, stderr=subprocess.STDOUT, 
shell=True)\n\tsln=find('*.sln','.')[0]\n\tprint(MSBUILD+'/p:Configuration=Release'+'/p:Platform=x64'+sln)\n\tpid=subprocess.Popen([MSBUILD,'/p:Configuration=Release','/p:Platform=x64',sln])\n\tpid.poll()\n\tprint(MSBUILD+'/p:Configuration=Debug'+'/p:Platform=x64'+sln)\n\tpid=subprocess.Popen([MSBUILD,'/p:Configuration=Debug','/p:Platform=x64',sln])\n\tpid.poll()\n\tos.chdir(wd)\n\ndef GetMSBuild():\n\tVSW=os.environ['ProgramFiles(x86)']+'/Microsoft Visual Studio/Installer/vswhere.exe'\n\tprocess = subprocess.Popen([VSW,'-latest','-find','MSBuild\\\\**\\\\Bin\\\\MSBuild.exe'], stdout=subprocess.PIPE)\n\t#'-requires','Microsoft.Component.MSBuild', not useful.\n\tMSB = process.stdout.readline().strip().decode('UTF-8')\n\tprocess.poll()\n\tprint('MSB '+MSB)\n\treturn MSB\n\ndef execute():\n\trepo = Repo(os.getcwd())\n\t#We can update from the main repo; doesn't require credentials in submodules\n\trepo.git.submodule('update', '--init') \n\t#sms = repo.submodules\n\t#for sm in sms:\n\t#\tsm.update()\n\n\tglfwflags=["-DGLFW_BUILD_DOCS=false","-DGLFW_BUILD_EXAMPLES=false","-DGLFW_BUILD_TESTS=false","-DGLFW_INSTALL=false","-DCMAKE_C_FLAGS_DEBUG=/MTd /Zi /Ob0 /Od /RTC1","-DCMAKE_C_FLAGS_RELEASE=/MT /O2 /Ob2 /DNDEBUG","-DCMAKE_ARCHIVE_OUTPUT_DIRECTORY=../lib"]\n\tcmake('External/glfw','External/glfw/build_md',glfwflags)\n\tcmake('External/glfw','External/glfw/build_mt',glfwflags+["-DUSE_MSVC_RUNTIME_LIBRARY_DLL=false"])\n\tassimpflags=["-DASSIMP_BUILD_DOCS=false","-DASSIMP_BUILD_EXAMPLES=false","-DASSIMP_BUILD_TESTS=false","-DASSIMP_INSTALL=false","-DCMAKE_C_FLAGS_DEBUG=/MTd /Zi /Ob0 /Od /RTC1","-DCMAKE_C_FLAGS_RELEASE=/MT /O2 /Ob2 /DNDEBUG","-DCMAKE_ARCHIVE_OUTPUT_DIRECTORY=../lib"]\n\tcmake('External/assimp','External/assimp/build_md',assimpflags)\n\tcmake('External/assimp','External/assimp/build_mt',assimpflags+["-DUSE_MSVC_RUNTIME_LIBRARY_DLL=false"])\n\tplatform_flags=[]\n\tcmake('.','build',platform_flags)\n\n\nversion=read_config_file('version.properties')\nuser=read_config_file('user.properties')\nMSBUILD=user.get('MSBUILD',GetMSBuild())\nexecute()","sub_path":"Setup.py","file_name":"Setup.py","file_ext":"py","file_size_in_byte":3289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"12288313","text":"#!/usr/bin/env python\n\"\"\" Script to quickly investigate failed estimation runs.\n\"\"\"\n# standard library\nimport numpy as np\nimport sys\nimport os\n\n# project library\nimport modules.battery as development_tests\n\n# GRMPY import\nsys.path.insert(0, os.environ['GRMPY'])\n#import grmpy\nfrom grmpy.tests.test import Tests as package_tests\n\n\n''' Request\n'''\nlabel, seed = '0', 484788\n\n''' Error Reproduction\n'''\n# Define list of admissible tests\ntest_labels = ['0']\npackage_labels = ['1', '2', '3', '4', '5']\nlabels = test_labels + package_labels\n\n\nif label in test_labels:\n    test = getattr(development_tests, 'test_' + label)\nelif label in package_labels:\n    test = getattr(package_tests, 'test_' + label)\nelse:\n    raise AssertionError\n\nnp.random.seed(seed)\n\n# This is required to set the seeds identical to the\n# case in the run.py script.\nlabel = np.random.choice(['0', '1', '2', '3', '4', '5'])\n\ntest()\n","sub_path":"testing/random/investigate.py","file_name":"investigate.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"75716215","text":"from tensorflow.keras import layers\n\nimport 
numpy as np\nimport tensorflow as tf\n\nclass LogisticCumulativeLink(layers.Layer):\n\n def __init__(self, num_classes, **kwargs):\n super(LogisticCumulativeLink, self).__init__()\n\n num_cutpoints = num_classes - 1\n # cutpoints = np.arange(num_cutpoints).astype(\"float32\") - num_cutpoints / 2\n cutpoints = np.arange(num_cutpoints).astype(\"float32\") + 0.5\n cutpoints = cutpoints.reshape((1, -1))\n self.cutpoints = tf.compat.v1.get_variable(\"cutpoints\",\n initializer=cutpoints,\n trainable=True)\n self.num_classes = num_classes\n\n def call(self, inputs):\n sigmoids = tf.math.sigmoid(self.cutpoints - inputs)\n link_mat = sigmoids[:, 1:] - sigmoids[:, :-1]\n link_mat = tf.concat((\n tf.reshape(sigmoids[:, 0], (-1, 1)),\n link_mat,\n (1 - tf.reshape(sigmoids[:, -1], (-1, 1)))\n ),\n axis=1\n )\n return link_mat\n\n def get_config(self):\n config = super(LogisticCumulativeLink, self).get_config()\n config.update({'num_classes': self.num_classes})\n return config","sub_path":"deliverable/ord_regressor_layer.py","file_name":"ord_regressor_layer.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"572036440","text":"from sys import argv\nimport random\nr = random.randint #rename random number generator for convenience\nrandom.seed(1000) # seed the random number generator so we all have the same tests\nalphabet=\"qazwsxedcrfvtgbyhnujmikolpQAZWSXEDCRFVTGBYHNUJMIKOLP\"\ndigits = \"1234567890\"\n\n\ndef gen_x_digits(x):\n dig = str(r(1,9))\n for i in range(0,x-1):\n dig+=digits[random.randint(0,9)]\n return dig\n\ndef gen_x_letters(x):\n dig = ''\n for i in range(0,x):\n dig+=alphabet[random.randint(0,len(alphabet)-1)]\n return dig\n\ndef gen_x_words(x):\n words = [gen_x_letters(random.randint(2,10)) for i in range(0,x)]\n return ' '.join(words)\n\nfrom os import path\ndef main(folder):\n for eachTest in range(0,6):\n input_filename = path.join(folder,\"test{0}\".format(eachTest))\n test_file = open(input_filename, \"w\")\n number_of_lines_in_file = pow(2,8)\n for i in range(0, number_of_lines_in_file):\n test_file.writelines('%s\\n'%gen_x_words(random.randint(10,20)))\n\n test_file.close()\n\n","sub_path":"project_1_tests/utility/create_tests.py","file_name":"create_tests.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"357859901","text":"import os\nimport pandas as pd\n\n\nclass TransferData:\n def __init__(self):\n self.label_dict = {\n '疾病和诊断': 'DISEASE',\n '影像检查': 'TESTPROC',\n '实验室检验': 'TESTLAB',\n '手术': 'TREATMENT',\n '解剖部位': 'BODY',\n '药物': 'DRUGS',\n }\n\n self.cate_dict = {\n 'O': 0,\n 'DISEASE-B': 1,\n 'DISEASE-I': 2,\n 'TESTPROC-B': 3,\n 'TESTPROC-I': 4,\n 'TESTLAB-B': 5,\n 'TESTLAB-I': 6,\n 'BODY-B': 7,\n 'BODY-I': 8,\n 'DRUGS-B': 9,\n 'DRUGS-I': 10,\n 'TREATMENT-B': 11,\n 'TREATMENT-I': 12,\n }\n self.entity_dirpath = \"data/yidu\"\n self.train_filepath = \"train/yidu_train.txt\"\n return\n\n def transfer(self):\n f = open(self.train_filepath, 'w+', encoding='utf-8')\n for root, dirs, files in os.walk(self.entity_dirpath):\n for file in files:\n json_path = root+\"/\"+file\n data = pd.read_json(json_path)\n if data.size == 0:\n continue\n res_dict = {}\n content = data[\"originalText\"][0]\n for i in enumerate(data[\"entities\"]):\n start = int(i[1]['start_pos'])\n end = int(i[1]['end_pos'])\n label = i[1][\"label_type\"]\n label_id = self.label_dict.get(label)\n for i in range(start, 
end):\n if i == start:\n label_cate = label_id + '-B'\n else:\n label_cate = label_id + '-I'\n res_dict[i] = label_cate\n for indx, char in enumerate(content):\n char_label = res_dict.get(indx, 'O')\n word_list = ['。']\n if char in word_list:\n char_label = 'O'\n if char != ' ':\n f.write(char + '\\t' + char_label + '\\n')\n print(\"%s 完成!\" % json_path)\n f.close()\n return\n\n\nif __name__ == '__main__':\n handler = TransferData()\n train_datas = handler.transfer()\n","sub_path":"transfer_yidu.py","file_name":"transfer_yidu.py","file_ext":"py","file_size_in_byte":2307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"630387361","text":"import csv\nimport logging\n\nfrom dipper.models.Reference import Reference\nfrom dipper.models.assoc.G2PAssoc import G2PAssoc\nfrom dipper.sources.Source import Source\nfrom dipper.sources.ZFIN import ZFIN\nfrom dipper.models.Model import Model\n\n\nLOG = logging.getLogger(__name__)\n# note: currently no log issued\n\n\nclass ZFINSlim(Source):\n \"\"\"\n zfin mgi model only containing Gene to phenotype associations\n Using the file here: https://zfin.org/downloads/phenoGeneCleanData_fish.txt\n \"\"\"\n files = {\n 'g2p_clean': {\n 'file': 'phenoGeneCleanData_fish.txt',\n 'url': 'https://zfin.org/downloads/phenoGeneCleanData_fish.txt',\n # https://zfin.org/downloads# header Documentation is burried in UI crap\n },\n 'zpmap': {\n 'file': 'id_map_zfin.tsv',\n 'url': 'http://purl.obolibrary.org/obo/zp/id_map_zfin.tsv'\n # ^^ Nico's updated mapping, May 2019\n }\n }\n\n def __init__(self,\n graph_type,\n are_bnodes_skolemized,\n data_release_version=None):\n super().__init__(\n graph_type=graph_type,\n are_bnodes_skized=are_bnodes_skolemized,\n data_release_version=data_release_version,\n name='zfinslim',\n ingest_title='Simplified ZFIN',\n ingest_url='https://zfin.org/',\n ingest_logo=\"source-zfin.png\",\n license_url=None,\n data_rights='http://zfin.org/warranty.html',\n # file_handle=None\n )\n self.dataset.set_citation(\n 'https://wiki.zfin.org/display/general/ZFIN+db+information')\n\n def fetch(self, is_dl_forced=False):\n self.get_files(is_dl_forced)\n\n def parse(self, limit=None):\n zfin_parser = ZFIN(self.graph_type, self.are_bnodes_skized)\n model = Model(self.graph)\n zp_file = '/'.join((self.rawdir, self.files['zpmap']['file']))\n g2p_file = '/'.join((self.rawdir, self.files['g2p_clean']['file']))\n zfin_parser.zp_map = zfin_parser._load_zp_mappings(zp_file)\n\n with open(g2p_file, 'r', encoding=\"utf8\") as csvfile:\n filereader = csv.reader(csvfile, delimiter='\\t', quotechar='\\\"')\n for row in filereader:\n (internal_id,\n symbol,\n gene_id,\n subterm1_id,\n subterm1_label,\n pc_rel_id,\n pc_rel_label,\n superterm1_id,\n superterm1_label,\n quality_id,\n quality_name,\n modifier,\n subterm2_id,\n subterm2_label,\n pc_rel2_id,\n pc_rel2_label,\n superterm2_id,\n superterm2_label,\n fish_id,\n fish_label,\n start_stage,\n end_stage,\n environment,\n pub_id,\n figure_id\n ) = row\n\n if modifier != \"abnormal\":\n LOG.warning(\"skipping phenotype with modifier != abnormal: \" + modifier)\n continue\n\n zp_id = zfin_parser._map_octuple_to_phenotype(subterm1_id,\n pc_rel_id,\n superterm1_id,\n quality_id,\n subterm2_id,\n pc_rel2_id,\n superterm2_id,\n modifier)\n\n gene_curie = \"ZFIN:{0}\".format(gene_id)\n model.makeLeader(gene_curie)\n pub_curie = \"ZFIN:{0}\".format(pub_id)\n if zp_id:\n assoc = G2PAssoc(self.graph, self.name, gene_curie, zp_id)\n if pub_id:\n reference = 
Reference(self.graph, pub_curie,\n self.globaltt['document'])\n reference.addRefToGraph()\n assoc.add_source(pub_curie)\n\n assoc.add_evidence(\n self.globaltt['experimental phenotypic evidence'])\n assoc.add_association_to_graph()\n","sub_path":"dipper/sources/ZFINSlim.py","file_name":"ZFINSlim.py","file_ext":"py","file_size_in_byte":4503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"159994529","text":"import requests\nimport folium\n\nURL = \"https://api.radio.com/v1/stations\"\nlocations = []\nPARAM = {\n 'page[number]': 1,\n 'page[size]': 10\n}\n\nr = requests.get(url=URL, params=PARAM)\n\n\ndef fetch_locations(response):\n response_JSON = response.json()\n # print(len(locations))\n for row in response_JSON['data']:\n id = row['id']\n name = row['attributes']['name']\n lat = row['attributes']['latitude']\n long = row['attributes']['longitude']\n city = row['attributes']['city']\n state = row['attributes']['state']\n locations.append({'id': id, 'name': name, 'city': city, 'state': state, 'lat': lat, 'long': long})\n if 'next' in response_JSON['links']:\n fetch_locations(requests.get(url=response_JSON['links']['next']))\n\n\nfetch_locations(r)\n\nmap = folium.Map(\n location=[39.8283, -98.5795],\n zoom_start=4\n\n)\ntooltip = 'Click me!'\n\nfor line in locations:\n # print(line['name'])\n # print(line['lat'])\n # print(line['long'])\n if line['lat'] is not None and line['long'] is not None:\n folium.Marker(location=[line['lat'], line['long']], popup=f\"{line['lat']}
{line['long']}\", tooltip=(line['name'])).add_to(map)\n\nmap.save('index.html')\nprint('Done!')\n","sub_path":"StationLocations/Locationtest.py","file_name":"Locationtest.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"551622629","text":"import csv\nimport re\nimport gensim\nimport pymorphy2\nimport pickle\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.ensemble import RandomForestClassifier\nimport pickle\n\n\ndef load_file(file_name, observer):\n\t\"\"\"Загрузка файла с данными\n\n\tArgs:\n\t\tfile_name (string): имя файлы\n\t\tobserver (func): обработчик для строки\n\t\"\"\"\n\tfile_obj = open(file_name, \"r\")\n\tfor line in file_obj:\n\t\tobserver(line)\n\ndef clear_null_bytes(file_name):\n\t\"\"\"Очистка строки от NULL-bytes символов\n\n\t\tArgs:\n\t\t\tfile_name (string): наименование файла\n\t\"\"\"\n\tdata = open(file_name, 'r').read()\n\tf = open('clear.data', 'w')\n\tdata = data.replace('\\x00', '')\n\tf.write(data)\n\tf.close()\n\ndef clear_twitter_message(message):\n\t\"\"\"Очистка текста твита. Оставляем только русские слова и выражения\n\n\tArgs:\n\t\tmessage (string): твит сообщения\n\n\tReturns:\n\t\tmessage (string): очищенный твит\n\t\"\"\"\n\tglobal LIST\n\tmessage = message.strip().lower()\n\tLIST.append(gensim.utils.simple_preprocess(message))\n\ndef build_word2vec_model(documents):\n\t\"\"\"Документы. Список предложений\n\n\t\tArgs:\n\t\t\tdocuments (list): список документов\n\n\t\tReturns:\n\t\t\tword2vec (object): модель\n\t\"\"\"\n\tmodel = gensim.models.Word2Vec(documents,size=150, window=10, min_count=2, workers=10, iter=100)\n\treturn model\n\ndef to_list_messages(message):\n\tglobal LIST\n\tmessage = re.sub(r'\\n', ' ', message[3])\n\tmessage = re.sub(r'RT', '', message)\n\tmessage = re.sub(r'htt[^\\s]+', '', message)\n\tmessage = re.sub(r'#[^\\s]+', '', message)\n\tmessage = re.sub(r'(^|\\s)@($|\\s)', '', message)\n\tmessage = re.sub(r'\\@[^\\s]+', 'Name', message)\n\tmessage = re.sub(r'\\s+', ' ', message)\n\tmessage = message.strip().lower()\n\tLIST.append(message)\n\ndef lemma_file(source_file, dest_file):\n\t\"\"\"\n\tЛемматизация сообщений с сохранением в файл\n\n\tArgs:\n\t\tsource_file (string): наименование файла источника\n\t\tdest_file (string): наименование файла назначения\n\t \n\t\"\"\"\n\tfile = open(source_file, \"r\")\n\tsave_file = open(dest_file, \"w\")\n\tmorph = pymorphy2.MorphAnalyzer()\n\tfor message in file.read().split(\"\\n\"):\n\t\tlemma = lemma_message(message, morph)\n\t\tsave_file.write(lemma + \"\\n\")\n\t\tprint(lemma)\n\tfile.close()\n\tsave_file.close()\n\tprint(\"Lemmatization end\")\n\ndef lemma_message(message, morph = None):\n\t\"\"\"\n\tЛемматизировать сообщение\n\n\tArgs:\n\t\tmessage (string): сообщение\n\n\tReturns:\n\t\t(string): лемматизированное сообщение\n\t\"\"\"\n\tif not morph:\n\t\tmorph = pymorphy2.MorphAnalyzer()\n\tlemma = []\n\tmessage = re.sub(r\"[\\(\\)\\{\\}\\[\\],\\.:;\\+\\-]\", \"\", message)\n\tfor word in message.split(\" \"):\n\t\tres = morph.parse(word)[0]\n\t\tif res.normal_form:\n\t\t\tres = res.normal_form\n\t\telse:\n\t\t\tres = word \n\t\tlemma.append(res)\n\treturn \" \".join(lemma)\n\ndef get_dataset(positive_file_name, negative_file_name):\n\t\"\"\"\n\tЗагрузить датасет позитивных и негативных сообщений\n\n\tArgs:\n\t\tpositive_file_name(string): наименование файла с позитивными сообщениями\n\t\tnegative_file_name(string): наименование файла с негативными 
сообщениями\n\n\tReturns:\n\t\t(list, list): датасет данных + вектор тональностей\n\t\"\"\"\n\tpositive_list = open(\"positive_list.data\").read().split(\"\\n\")\n\tnegative_list = open(\"negative_list.data\").read().split(\"\\n\")\n\tfull_list = positive_list + negative_list\n\tmarks = [1 for p in positive_list] + [0 for n in negative_list]\n\treturn (full_list, marks)\n\ndef get_vectorizer():\n\t\"\"\"\n\tПолучить векторизатор\n\n\tArgs:\n\t\tdataset (list): список сообщений\n\n\tReturns:\n\t\tvectorizer (object): объект векторизатор \n\t\"\"\"\n\tvectorizer = CountVectorizer(analyzer = \"word\", tokenizer = None, preprocessor = None, stop_words = None, max_features = 5000) \n\treturn vectorizer\n\ndef get_classifier(train_data_features, marks):\n\t\"\"\"\n\tОбучить и получить классификатор\n\n\tArgs:\n\t\ttrain_data_features (list): датасет сообщений\n\n\tReturns:\n\t\t \n\t\"\"\"\n\tforest = RandomForestClassifier(n_estimators = 100) \n\tforest = forest.fit(train_data_features, marks)\n\treturn forest\n\ndef get_dataset_and_learn_neural_network():\n\t\"\"\"\n\tЗагрузить датасет и обучить нейронную сеть \n\t\n\t\"\"\"\n\tpositive_list = open(\"positive_list.data\").read().split(\"\\n\")\n\tnegative_list = open(\"negative_list.data\").read().split(\"\\n\")\n\n\tvectorizer = CountVectorizer(analyzer = \"word\", tokenizer = None, preprocessor = None, stop_words = None, max_features = 5000) \n\n\tdataset = positive_list + negative_list\n\tmarks = [1 for p in positive_list] + [0 for n in negative_list]\n\ttrain_data_features = vectorizer.fit_transform(dataset)\n\ttrain_data_features = train_data_features.toarray()\n\n\tforest = RandomForestClassifier(n_estimators = 100) \n\tforest = forest.fit(train_data_features, marks)\n\n\tforest_dump = open(\"forest_dump.object\", \"wb\")\n\tpickle.dump(forest, forest_dump, pickle.HIGHEST_PROTOCOL)\n\tforest_dump.close()\n\tvect_dump = open(\"vect_dump.object\", \"wb\")\n\tpickle.dump(vectorizer, vect_dump, pickle.HIGHEST_PROTOCOL)\n\tvect_dump.close()\n\nif __name__ == '__main__':\n\t#load_file(\"negative_list.data\", clear_twitter_message)\n\t#build_word2vec_model(LIST)\n\t#lemma_file(\"positive_list.data\", \"lemma_positive_list.data\")\n\t#lemma_file(\"negative_list.data\", \"lemma_negative_list.data\")\n\tif False:\n\t\tdataset = get_dataset(\"lemma_positive_list.data\", \"lemma_negative_list.data\")\n\t\tvectorizer = get_vectorizer(dataset)\n\t\ttrain_data_features = vectorizer.fit_transform(dataset[0])\n\t\ttrain_data_features = train_data_features.toarray()\n\t\tclassifier = get_classifier(train_data_features, dataset[1])\n\t\tclassifier_dump = open(\"classifier_dump.object\", \"wb\")\n\t\tpickle.dump(classifier, classifier_dump, pickle.HIGHEST_PROTOCOL)\n\t\tclassifier_dump.close()\n\t\tvect_dump = open(\"vect_dump.object\", \"wb\")\n\t\tpickle.dump(vectorizer, vect_dump, pickle.HIGHEST_PROTOCOL)\n\t\tvect_dump.close()\n\tfile_classifier = open(\"classifier_dump.object\", \"rb\")\n\tclassifier = pickle.load(file_classifier)\n\tfile_vect = open(\"vect_dump.object\", \"rb\")\n\tvect = pickle.load(file_vect)\n\tfile_classifier.close()\n\tfile_vect.close()\n\td = vect.transform([lemma_message(\"ужасный день\")])[0]\n\tprint( classifier.predict(d) )\n\n\n\n","sub_path":"learn_neural_network.py","file_name":"learn_neural_network.py","file_ext":"py","file_size_in_byte":6597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"590672649","text":"import os\nfrom email.mime.multipart import MIMEMultipart 
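\nimport smtplib  # needed for smtplib.SMTP in send_mail() below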
\nfrom email.mime.text import MIMEText \nfrom email.mime.application import MIMEApplication \n\ndef send_mail():\n    DATA_DIR = \"T://CASA//James//hyde//B//UMA\" #檔案路徑\n    _user = \"hyde@gmail.com\" #送件人\n    _pwd = \"***\" #密碼\n    _to = \"freddy@gmail.com\" #收件人 \n    \n    msg = MIMEMultipart() \n    msg[\"Subject\"] = \"寄信測試\" \n    msg[\"From\"] = _user \n    msg[\"To\"] = _to \n\t\n\n    for filename in os.listdir(DATA_DIR):\n        print (\"Loading: %s\" % filename)\n\t\n        loadFile = open(os.path.join(DATA_DIR, filename), 'rb')\n        part = MIMEApplication(loadFile.read()) \n        part.add_header('Content-Disposition', 'attachment', filename=filename) \n        msg.attach(part) \n        #file_data.append(loadFile.read())\n        loadFile.close()\n\t\t\n    s = smtplib.SMTP(\"smtp.qq.com\", timeout=30)\n    s.login(_user, _pwd)\n    s.sendmail(_user, _to, msg.as_string())\n    s.close() \n\t\nif __name__ == '__main__':\n    send_mail()\n    \n","sub_path":"GoLearing/src/main/java/com/python/semdmail.py","file_name":"semdmail.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"577650197","text":"checkExisting = True\nisInteractive = True\ncheckforDoi = True\ncheckForArxiv = True\nsaveAfterElements = 100\nfileLiteraturBib = \"../data/literature.bib\"\nfolderPDFPrefix = \"../data/\"\nfolderTEIPrefix = \"../teibib/\"\nfileGraphModel = \"../data/graph-model.json\"\nbibKeyPrefix = \"reviz_key\"\nbibRefPrefix = \"reviz_ref\"\nbibKeyID = \"ID\" # \"bibtex_key\"\nbibKeyType = \"ENTRYTYPE\" # \"document_type\"\nbibKeyFile = \"file\" # \"note\"\nbibKeyTitle = \"title\"\nbibKeyAuthor = \"author\"\nbibKeyDoi = \"doi\"\nbibKeyUrl = \"url\"\nbibKeyYear = \"year\"\nbibKeyEprint = \"eprint\"\nbibKeyCheckedDoi = \"reviz_doi\"\nbibKeyCheckedTei = \"reviz_tei\"\nbibKeyCheckedEprint = \"reviz_eprint\"\nmaxManualQueueLen = 100\n","sub_path":"modules/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"440496271","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport argparse\n\n\nclass Node:\n\tdef __init__(self, x, y):\n\t\tself.x = x\n\t\tself.y = y\n\n\ndef attractive_potential(x, y, goal, K_att, func):\n\tif func == 'p':\n\t\treturn K_att * (np.hypot(x - goal.x, y - goal.y))**2\n\telse:\n\t\treturn K_att * (np.hypot(x - goal.x, y - goal.y))\n\n\ndef repulsive_potential(x, y, K_rep):\n\tglobal obslist\n\tinfluence_region = 1\n\tmin_dist = float(\"inf\")\n\tdist_list = [np.hypot(x-o[0], y-o[1]) for o in obslist]\n\tclosest_index = dist_list.index(min(dist_list))\n\tdq = np.hypot(x - obslist[closest_index][0], y - obslist[closest_index][1])\n\tregion = obslist[closest_index][2] + influence_region\n\tif dq <= region:\n\t\tif dq <= 0.3:\n\t\t\tdq = 0.3\n\t\treturn 0.5 * K_rep * (1/dq - 1/region) ** 2 # gamma = 2\n\telse:\n\t\treturn 0\n\ndef get_potential_matrix(goal, grid_size, params, func, minx=0, maxx=30):\n\tglobal obslist\n\tmin_x = min_y = minx\n\tmax_x = max_y = maxx\n\tx_grid = int(round((max_x - min_x) / grid_size))\n\ty_grid = int(round((max_y - min_y) / grid_size))\n\n\tpotential_grid = [[0.0 for i in range(y_grid)] for i in range(x_grid)]\n\n\tfor i in range(x_grid):\n\t\tx = i * grid_size + min_x\n\t\tfor j in range(y_grid):\n\t\t\ty = j * grid_size + min_y\n\t\t\tu_att = attractive_potential(x, y, goal, params[0], 
func)\n\t\t\tu_rep = repulsive_potential(x, y, params[1])\n\t\t\tuf = u_rep + u_att\n\t\t\tpotential_grid[i][j] = uf\n\n\treturn potential_grid, min_x, min_y\n\n\ndef potential_field_planning(start, goal, grid_size, params, func):\n\tpmap, minx, miny = get_potential_matrix(goal, grid_size, params, func)\n\trobot_path = Node(start.x, start.y)\n\tmove = []\n\tdirections = [0,1,1]\t# move at max 1 step\n\tfor i in directions:\n\t\tfor j in directions:\n\t\t\tmove.append([i,j])\n\tmove.remove([0,0])\t# [0,0] => robot doesnt move\n\tpath = [start]\n\td = np.hypot(start.x - goal.x, start.y - goal.y)\n\tlim = 0\n\twhile d >= grid_size:\n\t\tmin_potential = float(\"inf\")\n\t\tmin_pot_x, min_pot_y = -100, -100\n\t\tfor i, _ in enumerate(move):\n\t\t\tmoved_x = int(robot_path.x + move[i][0])\n\t\t\tmoved_y = int(robot_path.y + move[i][1])\n\t\t\tif moved_x >= len(pmap) or moved_y >= len(pmap[0]) or moved_x < 0 or moved_y < 0:\n\t\t\t\tp = float(\"inf\") # outside area\n\t\t\t\tprint(\"outside region!\")\n\t\t\telse:\n\t\t\t\tp = pmap[moved_x][moved_y]\n\t\t\tif min_potential > p:\n\t\t\t\tmin_potential = p\n\t\t\t\tmin_pot_x = moved_x\n\t\t\t\tmin_pot_y = moved_y\n\t\trobot_path.x = min_pot_x\n\t\trobot_path.y = min_pot_y\n\t\tx_final = robot_path.x * grid_size + minx\n\t\ty_final = robot_path.y * grid_size + miny\n\t\td = np.hypot(goal.x - x_final, goal.y - y_final)\n\t\tpath.append(Node(x_final, y_final))\n\t\tlim +=1\n\n\tprint(\"Done!!\")\n\treturn path, pmap\n\n\ndef main(grid_size, func, ETA, K):\n\tglobal obslist\n\tstart = Node(1,1)\n\tgoal = Node(20,20)\n\tobslist = [(4.5, 3, 2), (3, 12, 2), (15, 15, 3)] #[(x, y, radius)]\n\tgrid_size = 0.5\n\tpath, pmap = potential_field_planning(start, goal, grid_size, [K, ETA], func)\n\n\t\"\"\"\n\tPlotting\n\t\"\"\"\n\tfigure, axes = plt.subplots()\n\tplt.rcParams[\"figure.figsize\"] = (15,15)\n\n\t# plotting the obstacles\n\tfor obs in obslist:\n\t\tobstacle = plt.Circle((obs[0], obs[1]), obs[2], color=\"black\", fill=False)\n\t\taxes.add_artist(obstacle)\n\n\t# plotting path\n\tplt.plot(1,1,'kp') #start\n\tplt.plot(20,20,'kp') #goal\n\tx_cord = []\n\ty_cord = []\n\tfor v in path:\n\t\tx_cord.append(v.x)\n\t\ty_cord.append(v.y)\n\tplt.plot(x_cord, y_cord, \"r-\", linewidth=1, label='Final Path')\n\n\taxes.set_aspect(1)\n\tplt.xlim(0,30)\n\tplt.ylim(0,30)\n\tplt.legend()\n\tplt.title('Configuration Space')\n\tplt.savefig(\"./images/apf.png\")\n\t# plt.show()\n\treturn figure\n\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser(description='Artificial Potential Function')\n\tparser.add_argument('-g','--grid', type=float,\n\t\t\t\t\t\thelp='Grid size; Default=0.5', default=0.5)\n\tparser.add_argument('-f','--function', type=str,\n\t\t\t\t\t\thelp='Attractive Potential function(p=paraboloid, c=conical); Default=c', default='c')\n\tparser.add_argument('-a','--attractive', type=float,\n\t\t\t\t\t\thelp='Attractive Potential Gain; Default=1', default=1)\n\tparser.add_argument('-r','--repulsive', type=float,\n\t\t\t\t\t\thelp='Repuslive Potential Gain; Default=5000', default=5000)\n\targs = parser.parse_args()\n\n\n\tgrid_size = args.grid\n\tfunc = args.function\n\tK = args.attractive\n\tETA = args.repulsive\n\n\tmain(grid_size, func, ETA, K)\n","sub_path":"pf.py","file_name":"pf.py","file_ext":"py","file_size_in_byte":4256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"440888514","text":"import aiohttp\nimport aiohttp_jinja2\nimport asyncio\nimport jinja2\nimport pathlib\nimport 
uvloop\nimport ujson\nfrom aiohttp import web\n\nfrom aiohttp_handler import db\nfrom aiohttp_handler.routes import setup_routes\nfrom aiohttp_handler.settings import get_config\n\n\nCONFIG_ROOT = pathlib.Path(__file__).parent.parent / 'config'\nTEMPLATES_ROOT = pathlib.Path(__file__).parent / 'templates'\n\nasyncio.set_event_loop_policy(uvloop.EventLoopPolicy())\n\n\nasync def close_client_session(app):\n await app.client_session.close()\n\n\nasync def create_client_session(app):\n app.client_session = aiohttp.ClientSession(json_serialize=ujson.dumps)\n\n\ndef setup_jinja(app):\n jinja_loader = jinja2.FileSystemLoader(TEMPLATES_ROOT.as_posix())\n jinja_env = aiohttp_jinja2.setup(app, loader=jinja_loader)\n return jinja_env\n\n\ndef create_app(config_filename='config_local.yaml'):\n # used for adev runserver aiohttp_handler\n\n app = web.Application(\n middlewares=[\n web.normalize_path_middleware(\n append_slash=True,\n merge_slashes=True,\n )]\n )\n app.cfg = get_config(CONFIG_ROOT, config_filename)\n setup_jinja(app)\n setup_routes(app)\n\n app.on_startup.append(db.init_es)\n app.on_shutdown.append(db.close_es)\n\n app.on_startup.append(create_client_session)\n app.on_shutdown.append(close_client_session)\n\n if not app.cfg['is_test']:\n app.on_startup.append(db.periodic_updater_init)\n\n return app\n\n\ndef main(config_filename='config_local.yaml'):\n # used for python -m aiohttp_handler\n\n app = create_app(config_filename)\n web.run_app(\n app,\n host=app.cfg['host'],\n port=app.cfg['port'],\n access_log=None)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"aiohttp/aiohttp_handler/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"532544094","text":"#!/usr/bin/env python3\n# coding: utf-8\n\nfrom ctypes import *\nfrom binascii import a2b_hex\nfrom binascii import b2a_hex\n\nclass PCSCException(Exception):\n pass\n\nclass PCSC(object):\n \"\"\"PCSC Wrapper\"\"\"\n\n def __init__(self):\n # load pcsc library\n self.pcsclib = cdll.LoadLibrary('/System/Library/Frameworks/PCSC.framework/PCSC')\n\n def open(self):\n \"\"\"SCardEstablishContext -> SCardListReaders\"\"\"\n # establish\n scope_system = c_uint32(2)\n scard_context = c_int32(0)\n ret = self.pcsclib.SCardEstablishContext(scope_system, None, None, byref(scard_context))\n if ret != 0:\n raise PCSCException('SCardEstablishContext')\n\n self.scard_context = scard_context\n\n # list readers\n readers_buffer_size = c_uint32(1024)\n readers = create_string_buffer(readers_buffer_size.value)\n ret = self.pcsclib.SCardListReaders(scard_context, None, readers, byref(readers_buffer_size))\n if ret != 0:\n raise PCSCException('SCardListReaders')\n\n # use first reader\n self.reader = create_string_buffer(readers.value)\n\n def close(self):\n \"\"\"SCardReleaseContext\"\"\"\n ret = self.pcsclib.SCardReleaseContext(self.scard_context)\n # ignore result\n\n def _scard_connect_helper(self):\n \"\"\"SCardConnect\"\"\"\n share_mode_share = c_uint32(2)\n # T=1 only\n preferred_protocol_t1 = c_uint32(2)\n activate_protocol = c_uint32(0)\n scard_handle = c_int32(0)\n ret = self.pcsclib.SCardConnect(self.scard_context,\n self.reader,\n share_mode_share,\n preferred_protocol_t1,\n byref(scard_handle),\n byref(activate_protocol))\n if ret != 0:\n return None\n\n return scard_handle\n\n def _scard_disconnect_helper(self, scard_handle):\n \"\"\"SCardDisconnect\"\"\"\n disposition_leave = c_uint32(0)\n ret = 
self.pcsclib.SCardDisconnect(scard_handle, disposition_leave)\n if ret != 0:\n raise PCSCException('SCardDisconnect')\n\n def is_card_detect(self):\n \"\"\"SCardConnect -> SCardDisconnect\"\"\"\n scard_handle = self._scard_connect_helper()\n if scard_handle is None:\n return False\n\n self._scard_disconnect_helper(scard_handle)\n\n return True\n\n def transceive(self, command):\n \"\"\"SCardTransmit, transmit command and receive response\"\"\"\n scard_handle = self._scard_connect_helper()\n if scard_handle is None:\n raise PCSCException('SCardConnect')\n\n # command : hex string, ascii\n command_byte = a2b_hex(command.encode('ascii'))\n send_data = create_string_buffer(command_byte)\n send_data_length = c_uint32(len(command_byte))\n recv_buffer = create_string_buffer(1024)\n recv_buffer_length = c_uint32(1024)\n # T=1 only\n scard_pci_t1 = self.pcsclib.g_rgSCardT1Pci\n\n ret = self.pcsclib.SCardTransmit(scard_handle,\n scard_pci_t1,\n send_data,\n send_data_length,\n None,\n recv_buffer,\n byref(recv_buffer_length))\n if ret != 0:\n raise PCSCException('SCardTransmit')\n\n self._scard_disconnect_helper(scard_handle)\n\n return b2a_hex(recv_buffer[:recv_buffer_length.value]).upper().decode('ascii')\n\n def get_atr(self):\n \"\"\"SCardStatus\"\"\"\n scard_handle = self._scard_connect_helper()\n if scard_handle is None:\n raise PCSCException('SCardConnect')\n\n atr = create_string_buffer(128)\n atr_length = c_uint32(128)\n ret = self.pcsclib.SCardStatus(scard_handle, None, None, None, None, atr, byref(atr_length))\n if ret != 0:\n raise PCSCException('SCardStatus')\n\n self._scard_disconnect_helper(scard_handle)\n\n return b2a_hex(atr[:atr_length.value]).upper().decode('ascii')\n\n def acr1252u_transceive_iso7816_apdu(self, command):\n \"\"\"transmit and receive iso7816 apdu\"\"\"\n return self.transceive(command)\n\n def acr1252u_transceive_felica_apdu(self, command):\n \"\"\"transmit and receive felica apdu\"\"\"\n return self.transceive('FF000000' + command[:2] + command)\n\n\ndef atr():\n \"\"\"check ATR\"\"\"\n pcsc = PCSC()\n try:\n pcsc.open()\n while not pcsc.is_card_detect():\n pass\n\n print(pcsc.get_atr())\n\n except PCSCException as e:\n print(e)\n finally:\n pcsc.close()\n\n\ndef driver_license_ja():\n \"\"\"typeb card test\"\"\"\n pcsc = PCSC()\n try:\n pcsc.open()\n while not pcsc.is_card_detect():\n pass\n\n # password\n pin1 = '1111'\n pin2 = '2222'\n # commands\n select_mf = '00A40000'\n select_ef = '00A4020C02'\n select_df = '00A4040C10'\n verify = '0020008004'\n read_binary = '00B00000000000'\n commands = [\n ## read test\n # select mf\n select_mf,\n # select mf/ef01\n select_ef + '2f01',\n # read binary\n read_binary,\n ## verify pin1, 2\n # select mf\n select_mf,\n # select mf/ief01\n select_ef + '0001',\n # verify\n verify + b2a_hex(pin1.encode('ascii')).decode('ascii'),\n # select mf/ief02\n select_ef + '0002',\n # verify\n verify + b2a_hex(pin2.encode('ascii')).decode('ascii'),\n ## read df data\n # select df1\n select_df + 'A00000023101' + '00' * 10,\n # select df1/ef01, read_binary\n select_ef + '0001',\n read_binary,\n # select df1/ef02, read_binary\n select_ef + '0002',\n read_binary,\n ]\n for command in commands:\n print('COMMAND : {0}'.format(command))\n response = pcsc.acr1252u_transceive_iso7816_apdu(command)\n print('RESPONSE : {0}'.format(response))\n if len(response) < 4:\n raise PCSCException('Response APDU Length Error : {0}'.format(response))\n elif response[-4:] != '9000':\n raise PCSCException('Response APDU Error : {0}'.format(response))\n\n except 
PCSCException as e:\n print(e)\n finally:\n pcsc.close()\n\n\ndef main():\n #atr()\n driver_license_ja()\n\n\nif __name__ == '__main__':\n main()","sub_path":"python/nfcpython/nfcpython_driver.py","file_name":"nfcpython_driver.py","file_ext":"py","file_size_in_byte":6747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"9200861","text":"# 2020/12/19 by Typhoon\r\n# encoding: utf-8\r\n# Assemble header and footer of the file with the code section.\r\n\r\nimport sys\r\nimport shutil\r\n\r\nINFILE = sys.argv[1]\r\nHeader = INFILE + '_header.S'\r\nFooter = INFILE + '_footer.S'\r\nScheduled = INFILE + '_scheduled.S'\r\n\r\ndef main():\r\n in_file_header = open('output/0/'+Header, 'r')\r\n in_file_footer = open('output/0/'+Footer, 'r')\r\n in_file_scheduled = open('output/0/'+Scheduled, 'r')\r\n out_file = open('output/9/d_'+INFILE+'.S','w')\r\n\r\n in_head_lines = in_file_header.readlines()\r\n for line in xrange(len(in_head_lines)):\r\n code = in_head_lines[line]\r\n if code!='':\r\n out_file.writelines(code)\r\n\r\n in_scheduled_lines = in_file_scheduled.readlines()\r\n for line in xrange(len(in_scheduled_lines)):\r\n code = in_scheduled_lines[line]\r\n if code!='':\r\n out_file.writelines(code)\r\n\r\n in_footer_lines = in_file_footer.readlines()\r\n for line in xrange(len(in_footer_lines)):\r\n code = in_footer_lines[line]\r\n if code!='':\r\n out_file.writelines(code)\r\n\r\n in_file_header.close()\r\n in_file_scheduled.close()\r\n in_file_footer.close()\r\n out_file.close()\r\n\r\n shutil.copy('output/9/d_'+INFILE+'.S','output/d_'+INFILE+'.S')\r\n shutil.copy('output/9/d_'+INFILE+'.S','done/d_'+INFILE+'.S')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n\r\n\r\n\r\n","sub_path":"CPE/9_assemble.py","file_name":"9_assemble.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"20675553","text":"#!/usr/bin/python3\n# -*- coding:utf-8 -*-\n\nimport sys\nfrom PyQt5.QtWidgets import (QApplication, QWidget, QLabel, QHBoxLayout)\nfrom PyQt5.QtGui import QPixmap\n\n\nclass PixelmapDemo(QWidget):\n def __init__(self):\n super().__init__()\n\n self.initUI()\n\n def initUI(self):\n\n hbox = QHBoxLayout(self)\n pixmap = QPixmap(\"C:\\\\Users\\\\tohoe\\\\Desktop\\\\Balls.png\")\n\n l1 = QLabel(self)\n l1.setPixmap(pixmap)\n\n hbox.addWidget(l1)\n self.setLayout(hbox)\n\n self.move(300, 100)\n self.setWindowTitle(\"Balls.png\")\n self.show()\n\nif __name__ == \"__main__\":\n\n app = QApplication(sys.argv)\n pixel_map = PixelmapDemo()\n exit(app.exec_())","sub_path":"QPixmapDemo.py","file_name":"QPixmapDemo.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"374766423","text":"from sru.support.looper import looper\nfrom sru.support.app import start\nfrom sru.conf import settings\nimport time\nimport threading\n\ndef run_monitor():\n ENV_VAR = settings.ENV_VAR\n while True:\n if looper.threads.get(\"server\") == None:\n print(\"server starting\")\n server = threading.Thread(\n target=start, kwargs={\n \"host\":ENV_VAR.get(\"host\"),\n \"port\":ENV_VAR.get(\"port\"),\n \"ssl_cert\":ENV_VAR.get(\"ssl_cert\"),\n \"ssl_key\":ENV_VAR.get(\"ssl_key\"),\n \"loop\": looper.loop\n },\n name=\"server\")\n server.start()\n server.join()\n looper.threads[\"server\"] = server\n\n if looper.stopped() == True:\n if looper.status == \"restart\":\n looper.start()\n 
server = threading.Thread(\n target=start, \n kwargs={\n \"host\":ENV_VAR.get(\"host\"),\n \"port\":ENV_VAR.get(\"port\"),\n \"ssl_cert\":ENV_VAR.get(\"ssl_cert\"),\n \"ssl_key\":ENV_VAR.get(\"ssl_key\"),\n \"loop\": looper.loop\n }, name=\"server\")\n print(\"Server Stopped\")\n server.start()\n server.join()\n looper.threads[\"server\"] = server\n if looper.status == \"stop\":\n print(\"you asked for it to be stopped\")\n print(\"running\")\n time.sleep(10)\n","sub_path":"sru/packages/supervisor/monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"614671931","text":"import json\n\n\nclass Serializer(object):\n mimetype = \"application/json\"\n\n def __init__(self, scm):\n self._scm = scm\n\n def __repr__(self):\n rval = []\n\n all_states = set()\n for source_state, target_states in self._scm._state_graph.iteritems():\n all_states.add(source_state)\n for st in target_states:\n all_states.add(st)\n\n for source_state in all_states:\n state_info = {\n \"name\": source_state.full_name,\n \"current\": source_state is self._scm._current_state,\n \"next\": source_state is self._scm._next_state,\n \"visited\": source_state in self._scm._visited_states,\n \"failed\": source_state in self._scm._error_states\n }\n\n transitions = []\n for target_state, transition in source_state.transition_map.iteritems():\n transitions.append({\n \"cost\": transition.cost,\n \"target\": target_state.full_name,\n \"visited\": (source_state, target_state) in self._scm._visited_transitions,\n \"failed\": (source_state, target_state) in self._scm._error_transitions,\n })\n\n state_info[\"transitions\"] = transitions\n rval.append(state_info)\n return json.dumps(rval)\n","sub_path":"Lib/site-packages/state_machine_crawler/serializers/js.py","file_name":"js.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"176318713","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/peoplefinder/SaveData.py\n# Compiled at: 2019-02-25 18:56:18\n# Size of source mod 2**32: 721 bytes\nimport numpy as np\nfrom astropy.table import Table\n\ndef save_data(savedir, phone, filename, status):\n try:\n data = Table.read(savedir + 'data.csv')\n except FileNotFoundError:\n data = Table(names=('phone', 'status', 'path'), dtype=('U13', 'U280', 'U{}'.format(len(savedir) + 17)))\n p0 = '-------------'\n s0 = '-' * 280\n f0 = '-' * (len(savedir) + 17)\n data.add_row([p0, s0, f0])\n\n phones = np.array(data['phone'])\n if phone in data['phone']:\n data[(phones == phone)]['status'] = status\n data[(phones == phone)]['path'] = filename\n else:\n data.add_row([phone, status, filename])\n data.write((savedir + 'data.csv'), overwrite=True)","sub_path":"pycfiles/peoplefinder-1.0b1-py3.6/SaveData.cpython-36.py","file_name":"SaveData.cpython-36.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"40383900","text":"import math\n\nclass Point:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def __str__(self):\n return \"Point x={}, y={}.\".format(self.x, self.y)\n\n def __repr__(self):\n return \"Point x={}, y={}.\".format(self.x, self.y)\n\ndef compare_points_by_x(point_1, point_2):\n 
return point_1.x <= point_2.x\n\ndef compare_points_by_y(point_1, point_2):\n return point_1.y <= point_2.y\n\ndef euclidean_distance(point_1, point_2):\n if point_1 is None or point_2 is None:\n return math.inf\n return math.sqrt( (point_1.x - point_2.x) ** 2 + (point_1.y - point_2.y) ** 2)\n\ndef generate_two_sorted_arrays(array):\n array_x = array.copy()\n merge_sort(array_x, compare_points_by_x)\n array_y = array.copy()\n merge_sort(array_y, compare_points_by_y)\n return array_x, array_y\n\ndef merge_sort(array, compare_method):\n for k in range(int(math.log2(len(array)))+1):\n length = 2 ** k\n for i in range(0, len(array), length):\n if i + length < len(array):\n merge(array, i, length, compare_method)\n\ndef merge(array, start_first, length, compare_method):\n buffer = array[start_first: start_first + length]\n i = start_first\n j = 0\n finish_second = min(len(array), start_first + 2*length)\n k = start_first + length\n while j < len(buffer):\n if k < finish_second:\n if compare_method(buffer[j], array[k]):\n array[i] = buffer[j]\n i += 1\n j += 1\n else:\n array[i] = array[k]\n i += 1\n k += 1\n else:\n array[i:finish_second] = buffer[j:len(buffer)]\n break\n\ndef closest_2d_pair(array):\n array_x, array_y = generate_two_sorted_arrays(array)\n return closest_pair(array_x, array_y)\n\ndef closest_pair(array_sorted_by_x, array_sorted_by_y):\n if len(array_sorted_by_x) == 2:\n return array_sorted_by_x[0], array_sorted_by_x[1]\n elif len(array_sorted_by_x) < 2:\n return None, None\n left_x = array_sorted_by_x[:len(array_sorted_by_x) // 2]\n right_x = array_sorted_by_x[len(array_sorted_by_x) // 2:]\n middle_x = left_x[len(left_x) - 1]\n left_y = []\n right_y = []\n for i in array_sorted_by_y:\n if i.x <= middle_x.x:\n left_y.append(i)\n else:\n right_y.append(i)\n p1, q1 = closest_pair(left_x, left_y)\n p2, q2 = closest_pair(right_x, right_y)\n dist_1 = euclidean_distance(p1, q1)\n dist_2 = euclidean_distance(p2, q2)\n delta = min(dist_1, dist_2)\n p3, q3 = closest_split_pair(array_sorted_by_x, array_sorted_by_y, delta)\n dist_3 = euclidean_distance(p3, q3)\n if dist_3 < delta:\n return p3, q3\n elif dist_1 == delta:\n return p1, q1\n else:\n return p2, q2\n\n\ndef closest_split_pair(array_sorted_by_x, array_sorted_by_y, delta):\n if len(array_sorted_by_x) == 2:\n return array_sorted_by_x[0], array_sorted_by_x[1]\n elif len(array_sorted_by_x) < 2:\n return None, None\n p_ans = None\n q_ans = None\n middle_x = array_sorted_by_x[len(array_sorted_by_x) // 2]\n s_y = []\n for i in array_sorted_by_y:\n if abs(i.x - middle_x.x) <= delta:\n s_y.append(i)\n for i in range(len(s_y)):\n for j in range(1, min(8, len(s_y) - i )):\n if euclidean_distance(s_y[i], s_y[i+j]) < delta:\n p_ans = s_y[i]\n q_ans = s_y[i+j]\n return p_ans, q_ans\n\n\narray = []\narray.append(Point(1,2))\narray.append(Point(2,10))\narray.append(Point(2,500))\narray.append(Point(2,510))\narray.append(Point(1,9))\n\n\nprint(closest_2d_pair(array))","sub_path":"closest_2d_pair.py","file_name":"closest_2d_pair.py","file_ext":"py","file_size_in_byte":3642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"89546617","text":"'''\nA python script for plotting the scattering cossines for uncorrelated Angle-energy interactions from an HDF5 crossection file.\n\nInputs:\n\t1) The name of the crossection file, example Fe56.h5\n\t2) The MT number of the interaction\n\nThis script sits in the same directory as the nndc_hdf5 folder\n\n'''\n\n\nfrom mpl_toolkits.mplot3d import 
Axes3D\nfrom matplotlib.collections import PolyCollection\nfrom matplotlib import colors as mcolors\nimport numpy as np\nimport scipy as sp\nimport matplotlib.colors as colors\n\n\nimport os\nimport sys\n\nimport h5py\nimport matplotlib.pyplot as plt\nimport matplotlib.cm\n\nimport openmc.data\n\n\ndef cc(arg):\n return mcolors.to_rgba(arg, alpha=0.6)\n\nos.chdir(r'./nndc_hdf5')\n\nfile=sys.argv[1]\nMT=int(sys.argv[2])\n\nNUC = openmc.data.IncidentNeutron.from_hdf5(file)\n\nreaction =NUC[MT]\nproduct=reaction.products[0]\ndistribution=product.distribution[0]\nangle=distribution.angle\n\nnums=list(range(0,len(angle.energy[:])))\n\nen=[None]*len(nums)\nj=0\nfor i in nums:\n\n en[j]=(angle.energy[i]/1e6)\n j=j+1\n\nc=[None]*len(en)\nx=[None]*len(en)\nxs=[None]*len(en)\nys=[None]*len(en)\n\nj=0\nfor i in nums:\n\n c[j]=angle.mu[i].p\n x[j]=angle.mu[i].x\n j=j+1\n\nfor i in range(0,len(c)):\n xs[i]=np.concatenate([[-1],x[i],[1]]) \n ys[i]=np.concatenate([[0],c[i],[0]])\n\nfig = plt.figure()\nax = fig.gca(projection='3d')\n\nverts=[]\nfor i in range(0,len(en)):\n verts.append(list(zip(xs[i], ys[i])))\n\n\npoly = PolyCollection(verts)\npoly.set_alpha(0.6)\npoly.set_linestyle('-')\npoly.set_edgecolor('k')\nax.add_collection3d(poly, zs=en, zdir='y')\n\n\nplt.title(reaction)\nax.set_xlabel('cos')\nax.set_xlim3d(-1, 1)\nax.set_zlabel('probability/eV')\nax.set_zlim3d(0, 8)\nax.set_ylabel('energy(Mev)')\nax.set_ylim3d(en[0], en[-1])\n\nplt.show()\n\n","sub_path":"plottingTools/NDplotting/plotAngle3D.py","file_name":"plotAngle3D.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"156297293","text":"# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\nimport textwrap\nfrom textwrap import dedent\n\nfrom pants.engine.internals.native_engine import FileDigest\nfrom pants.jvm.resolve.common import ArtifactRequirement, Coordinate, Coordinates\nfrom pants.jvm.resolve.coursier_fetch import CoursierLockfileEntry, CoursierResolvedLockfile\nfrom pants.jvm.resolve.coursier_test_util import TestCoursierWrapper\nfrom pants.testutil.pants_integration_test import run_pants, setup_tmpdir\n\nEMPTY_RESOLVE = \"\"\"\n# --- BEGIN PANTS LOCKFILE METADATA: DO NOT EDIT OR REMOVE ---\n# {{\n# \"version\": 1,\n# \"generated_with_requirements\": [\n# ]\n# }}\n# --- END PANTS LOCKFILE METADATA ---\n\"\"\"\n\n\nDEFAULT_LOCKFILE = (\n TestCoursierWrapper(\n CoursierResolvedLockfile(\n (\n CoursierLockfileEntry(\n coord=Coordinate(\n group=\"org.scala-lang\", artifact=\"scala-library\", version=\"2.13.6\"\n ),\n file_name=\"org.scala-lang_scala-library_2.13.6.jar\",\n direct_dependencies=Coordinates(),\n dependencies=Coordinates(),\n file_digest=FileDigest(\n \"f19ed732e150d3537794fd3fe42ee18470a3f707efd499ecd05a99e727ff6c8a\", 5955737\n ),\n ),\n )\n )\n )\n .serialize(\n [\n ArtifactRequirement(\n coordinate=Coordinate(\n group=\"org.scala-lang\", artifact=\"scala-library\", version=\"2.13.6\"\n )\n )\n ]\n )\n .replace(\"{\", \"{{\")\n .replace(\"}\", \"}}\")\n)\n\nDEFAULT_SCALA_LIBRARY_TARGET = textwrap.dedent(\n \"\"\"\\\n jvm_artifact(\n name=\"org.scala-lang_scala-library_2.13.6\",\n group=\"org.scala-lang\",\n artifact=\"scala-library\",\n version=\"2.13.6\",\n )\n \"\"\"\n)\n\n\ndef test_java() -> None:\n sources = {\n \"src/org/pantsbuild/test/Hello.java\": dedent(\n \"\"\"\\\n package org.pantsbuild.test;\n\n public class Hello {{\n public static 
void main(String[] args) {{\n System.out.println(\"Hello, World!\");\n }}\n }}\n \"\"\"\n ),\n \"src/org/pantsbuild/test/BUILD\": dedent(\n \"\"\"\\\n java_sources()\n deploy_jar(\n name=\"test_deploy_jar\",\n main=\"org.pantsbuild.test.Hello\",\n dependencies=[\":test\"],\n )\n \"\"\"\n ),\n \"lockfile\": EMPTY_RESOLVE,\n }\n with setup_tmpdir(sources) as tmpdir:\n args = [\n \"--backend-packages=pants.backend.experimental.java\",\n f\"--source-root-patterns=['{tmpdir}/src']\",\n \"--pants-ignore=__pycache__\",\n f'--jvm-resolves={{\"empty\": \"{tmpdir}/lockfile\"}}',\n \"--jvm-default-resolve=empty\",\n \"run\",\n f\"{tmpdir}/src/org/pantsbuild/test:test_deploy_jar\",\n ]\n result = run_pants(args)\n assert result.stdout.strip() == \"Hello, World!\"\n\n\ndef test_scala() -> None:\n sources = {\n \"src/org/pantsbuild/test/Hello.scala\": dedent(\n \"\"\"\\\n package org.pantsbuild.test;\n\n object Hello {{\n def main(args: Array[String]): Unit = {{\n println(\"Hello, World!\")\n }}\n }}\n\n \"\"\"\n ),\n \"src/org/pantsbuild/test/BUILD\": dedent(\n \"\"\"\\\n scala_sources()\n deploy_jar(\n name=\"test_deploy_jar\",\n main=\"org.pantsbuild.test.Hello\",\n dependencies=[\":test\"],\n )\n \"\"\"\n ),\n \"BUILD\": DEFAULT_SCALA_LIBRARY_TARGET,\n \"lockfile\": DEFAULT_LOCKFILE,\n }\n with setup_tmpdir(sources) as tmpdir:\n args = [\n \"--backend-packages=pants.backend.experimental.scala\",\n f\"--source-root-patterns=['{tmpdir}/src']\",\n \"--pants-ignore=__pycache__\",\n f'--jvm-resolves={{\"jvm-default\": \"{tmpdir}/lockfile\"}}',\n \"--jvm-default-resolve=jvm-default\",\n \"run\",\n f\"{tmpdir}/src/org/pantsbuild/test:test_deploy_jar\",\n ]\n result = run_pants(args)\n assert result.stdout.strip() == \"Hello, World!\"\n","sub_path":"src/python/pants/jvm/run_deploy_jar_intergration_test.py","file_name":"run_deploy_jar_intergration_test.py","file_ext":"py","file_size_in_byte":4405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"595158887","text":"from collections import deque\nfrom itertools import permutations\nfrom sys import maxsize, stdin\n\n\ndef get_distances(map_, x, y):\n distances = {}\n queue = deque([(0, x, y)])\n visited = set()\n\n while queue:\n distance, x, y = queue.popleft()\n\n if (x, y) in visited:\n continue\n\n visited.add((x, y))\n\n if map_[y][x].isdigit():\n distances[int(map_[y][x])] = distance\n\n for dx, dy in [(0, -1), (0, 1), (-1, 0), (1, 0)]:\n if map_[y + dy][x + dx] != '#':\n queue.append((distance + 1, x + dx, y + dy))\n\n return distances\n\n\ndef get_all_distances(map_, locations):\n all_distances = {}\n\n for i, (x, y) in locations.items():\n distances = get_distances(map_, x, y)\n\n for j, distance in distances.items():\n all_distances[i, j] = distance\n\n return all_distances\n\n\ndef get_total_distance(all_distances, path):\n total_distance = 0\n i = 0\n\n for j in path:\n total_distance += all_distances[i, j]\n i = j\n\n return total_distance\n\n\ndef main():\n map_ = [list(line.strip()) for line in stdin]\n\n locations = {\n int(c): (x, y)\n for y, row in enumerate(map_)\n for x, c in enumerate(row)\n if c.isdigit()\n }\n\n all_distances = get_all_distances(map_, locations)\n\n min_total_distance = min(\n get_total_distance(all_distances, path)\n for path in permutations(range(1, 8)))\n\n print(min_total_distance)\n\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"python/2016/day_24/part_1.py","file_name":"part_1.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"321306193","text":"# main program to check if signature exists or not for SLI\n#\nimport numpy as np\nimport cv2\nimport os\nimport sys\n# import pyperclip\nimport logging\nfrom os import listdir\nfrom os.path import isfile, join\nfrom PIL import Image\nimport pytesseract\nimport correct_skew\nimport getcontour\nimport re\nimport genResultExcelFile\n\n\n'''\n## To use specific logger instead of root logger\n## Need to elaborate more\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.WARNING)\nformatter = logging.Formatter(\"%(asctime)s:%(levelname)s:%(filename)s:%(lineno)s: %(message)s\")\nfile_handler = logging.FileHandler(\"mylogger.log\")\nfile_handler.setLevel(logging.WARNING)\nfile_handler.setFormatter(formatter)\n\nstream_handler = logging.StreamHandler()\nstream_handler.setFormatter(formatter)\n\nlogger.addHandler(file_handler)\nlogger.addHandler(stream_handler)\n'''\n\nmargin = 3\nSignWhiteRate = 99.7\n\nglobal fullImage\n\ndef genExcelResult(desNo, custId, results, srcFilename):\n    genResultExcelFile.initExcelFile()\n    genResultExcelFile.writeToExcel(desNo, custId, results, srcFilename)\n    '''\n    ## To copy the result to clipboard\n\n    index = 0\n    texts = ''\n    for result in results:\n        if index == 0:\n            texts += ('계약자 성명: ' + result[2] + \"\\n\")\n        elif index == 1:\n            texts += ('계약자 서명: ' + result[2] + \"\\n\")\n        elif index == 2:\n            texts += ('예금주 성명: ' + result[2] + \"\\n\")\n        else:\n            texts += ('예금주 서명: ' + result[2] + \"\\n\")\n        index = index + 1\n    # copy the result to clipboard, in order for AA to get the result back\n    pyperclip.copy(texts)\n    logging.info(\"final result: {}, {}\".format(len(texts), texts.encode('utf-8').strip()))\n    '''\n\n\ndef getTextFromSubimage(fullImage):\n    desNo = None\n    custId = None\n\n    desNoImg = fullImage[400:510, 250:570]\n    cv2.imshow(\"Design No\", desNoImg)\n    cv2.waitKey(0)\n\n    try:\n        cv2.imwrite(\"subImage.png\", desNoImg)\n        ### desNo = pytesseract.image_to_string(Image.open(\"subImage.png\"), lang='kor')\n        desNo = pytesseract.image_to_string(Image.open(\"subImage.png\"))\n        logging.info(\"OCR Design No: [{}]\".format(desNo))\n        regex = re.compile('ID:\\\s*\\\d+$')\n        matchobj = regex.search(desNo)\n        desNo = matchobj.group(0)\n        logging.info(\"Design NO: {}\".format(desNo))\n        os.remove(\"subImage.png\")\n    except:\n        logging.exception(\"exception while getting DesignNo\")\n        return desNo, custId\n\n    try:\n        custIdImg = fullImage[510:580, 250:570]\n        cv2.imshow(\"Cust ID\", custIdImg)\n        cv2.waitKey(0)\n        cv2.imwrite(\"subImage.png\", custIdImg)\n        ### custId = pytesseract.image_to_string(Image.open(\"subImage.png\"), lang='kor')\n        custId = pytesseract.image_to_string(Image.open(\"subImage.png\"))\n        logging.info(\"OCR Cust ID: {}\".format(custId))\n        regex = re.compile('\\\d+$')\n        matchobj = regex.search(custId)\n        custId = matchobj.group(0)\n        logging.info(\"Cust ID: {}\".format(custId))\n        os.remove(\"subImage.png\")\n    except:\n        logging.exception(\"exception while getting CustId\")\n\n    return desNo, custId\n\n\ndef getIDfromFile(fullImage):\n    subImage = fullImage[200:550, 130:550]\n    cv2.imshow(\"Design-NO & Cust-ID\", subImage)\n    cv2.waitKey(0)\n\n    deskewedImg = correct_skew.deskewImage(fullImage, subImage)\n    cv2.imshow(\"Checking... skewed ..\", deskewedImg)\n    cv2.waitKey(0)\n\n    del subImage\n\n    desNo, custId = getTextFromSubimage(deskewedImg)\n    return deskewedImg, desNo, custId\n\n\ndef checkSignature(fullImage, desNo, custId, srcFilename):\n    rel_X, rel_Y, contourList = getcontour.getRectCoordinate(fullImage)\n    logging.info(\"contours Selected {}\".format(len(contourList)))\n\n    checkResult = []\n    for contour in contourList:\n        ix, iy, iw, ih = cv2.boundingRect(contour)\n        ### logging.info(\"Area to compare: {}, {}, {}, {}\".format(ix, iy, iw, ih) )\n\n        signatureROI = fullImage[rel_Y+iy+margin:rel_Y+iy+ih-margin, rel_X+ix+margin:rel_X+ix+iw-margin]\n        ## cv2.imshow(\"Deskewed\", deskewedImg)\n        cv2.imshow(str(ix)+\"SignatureROI\", signatureROI)\n\n        numNonBlack = cv2.countNonZero(signatureROI)\n        whiteRate = numNonBlack / ((iw - (2 * margin)) * (ih - (2 * margin))) * 100\n\n        resultText = 'O'\n        if whiteRate >= SignWhiteRate:\n            resultText = 'X'\n\n        logging.info(\"File:{}, result:{}, White rate(%): {:.3f}\".format(srcFilename, resultText, whiteRate))\n        checkResult.append([ix, whiteRate, resultText])\n\n    del fullImage\n    checkResult.sort()\n\n    cv2.waitKey(0)\n    cv2.destroyAllWindows()\n    genExcelResult(desNo, custId, checkResult, srcFilename)\n\n\nif __name__ == '__main__':\n    logging.basicConfig(filename='..\\\log\\\checkSign_folder.log', level=logging.DEBUG, format='%(asctime).19s:%(levelname).4s:%(module)-.10s(%(lineno)3d) %(message)s')\n    logging.info(\"------------ new starting -------------------------------------\")\n\n    if len(sys.argv) == 1:\n        # folder = sys.argv[1]\n        folder = \"F:\\\Image\\\HKFMI\"\n        fullname = ''\n        for eachFile in listdir(folder):\n            fullname = join(folder, eachFile)\n            logging.debug(fullname)\n            if not isfile(fullname):\n                logging.debug('{} is directory'.format(fullname))\n                continue\n\n            if fullname.split(\".\")[-1] != 'png' and fullname.split(\".\")[-1] != 'jpg':\n                continue\n\n            logging.debug('{} is file......'.format(eachFile))\n            fullImage = cv2.imread(fullname, cv2.IMREAD_GRAYSCALE)\n            fullImage, desNo, custId = getIDfromFile(fullImage)\n            logging.info(\"Design No:{}, Cust ID:{}\".format(desNo, custId))\n            checkSignature(fullImage, desNo, custId, eachFile)\n\n        logging.info(\"end of directory..\")\n\n","sub_path":"checkSIgnature/apps/checkSign_Folder.py","file_name":"checkSign_Folder.py","file_ext":"py","file_size_in_byte":5835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"320868594","text":"# -*- coding: utf-8 -*-\nfrom functools import partial\n\nfrom aldryn_client import forms\n\n\nclass Form(forms.BaseForm):\n    hide_user_management = forms.CheckboxField(\n        'Hide user management',\n        required=False,\n        initial='true'\n    )\n\n    def to_settings(self, data, settings):\n        from django.core.urlresolvers import reverse_lazy\n\n        from aldryn_addons.utils import djsenv\n\n        env = partial(djsenv, settings=settings)\n\n        settings['LOGIN_REDIRECT_URL'] = '/'\n\n        is_local_dev = env('STAGE') == 'local'\n        if is_local_dev:\n            settings['LOCAL_DEVELOPMENT'] = True\n\n        if env('SSO_DSN'):\n            # Expire user session every day because:\n            # User can change its data on Login's server.\n            # We cannot do a sync of \"recently changed\" user data due to these reasons:\n            # - security risk, leaking user data to unauthorized websites,\n            # - it would require some periodic tasks (celery?),\n            # - stage websites are being paused during which the sync wouldn't work\n            settings['CLOUD_USER_SESSION_EXPIRATION'] = 24 * 60 * 60  # 24h = 1day\n            settings['SSO_DSN'] = env('SSO_DSN')\n\n        if env('SSO_DSN') or 
is_local_dev:\n settings['ALDRYN_SSO_HIDE_USER_MANAGEMENT'] = data['hide_user_management']\n settings['ADDON_URLS'].append('aldryn_sso.urls')\n settings['INSTALLED_APPS'].insert(\n settings['INSTALLED_APPS'].index('django.contrib.admin'),\n 'aldryn_sso'\n )\n settings['CMSCLOUD_STATIC_URL'] = env('CMSCLOUD_STATIC_URL', 'https://static.aldryn.com/')\n else:\n # there is no SSO_DSN set and is not local dev.\n # No point in configuring anything else.\n return settings\n\n if env('STAGE') == 'test':\n position = settings['MIDDLEWARE_CLASSES'].index('django.contrib.auth.middleware.AuthenticationMiddleware') + 1\n settings['ALDRYN_SSO_LOGIN_WHITE_LIST'] = [reverse_lazy('simple-sso-login')]\n settings['MIDDLEWARE_CLASSES'].insert(position, 'aldryn_sso.middleware.AccessControlMiddleware')\n settings['SHARING_VIEW_ONLY_TOKEN_KEY_NAME'] = env('SHARING_VIEW_ONLY_TOKEN_KEY_NAME')\n settings['SHARING_VIEW_ONLY_SECRET_TOKEN'] = env('SHARING_VIEW_ONLY_SECRET_TOKEN')\n return settings\n","sub_path":"addons/aldryn-sso/aldryn_config.py","file_name":"aldryn_config.py","file_ext":"py","file_size_in_byte":2389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"341677320","text":"\ndef LDA(X,PC_LDA,y,class1,class2): \n #find within class mean and scatter\n from numpy.linalg import pinv\n import numpy as np\n X_class1 = X[class1==y]\n \n \n X_class2 = X[class2==y]\n X_mean_class_1=np.mean(X_class1,axis=0)\n X_mean_class_2=np.mean(X_class2,axis=0)\n \n X1_shifted=X_class1-np.transpose(X_mean_class_1)\n X2_shifted=X_class2-np.transpose(X_mean_class_2) \n #S1=np.dot(np.transpose(X1_shifted),X1_shifted)/(X_class1.shape[0])\n S1=np.cov(np.transpose(X1_shifted))/(X_class1.shape[0])\n #S2=np.dot(np.transpose(X2_shifted),X2_shifted)/(X_class2.shape[0])\n S2=np.cov(np.transpose(X2_shifted))/(X_class2.shape[0])\n S_w= S1+S2\n #find between class scatter\n Total_mean=np.mean(X,axis=0)\n mean_diff1=X_mean_class_1-Total_mean\n mean_diff2=X_mean_class_2-Total_mean\n print(X.shape[1])\n mean_diff1=mean_diff1.reshape(1,X.shape[1])\n mean_diff2=mean_diff2.reshape(1,X.shape[1])\n #print(X_class1.shape[0])\n #print(np.dot(np.transpose(mean_diff1),(mean_diff1)))\n #S_b = (X_class1.shape[0])*np.cov(np.transpose(mean_diff1))+(X_class2.shape[0])*np.cov(np.transpose(mean_diff2))\n S_b=(X_class1.shape[0])*np.dot(np.transpose(mean_diff1),(mean_diff1))+(X_class2.shape[0])*np.dot(np.transpose(mean_diff2),(mean_diff2))\n #S_b=np.multiply(59,np.dot(np.transpose(mean_diff1),(mean_diff1)))+np.multiply(71,np.dot(np.transpose(mean_diff2),(mean_diff2)))\n from numpy.linalg import matrix_rank\n #print(S_b.shape)\n U, s, V = np.linalg.svd((np.dot(pinv(S_w),S_b)), full_matrices=True)\n #U, s, V = np.linalg.svd(np.dot(S_b,pinv(S_b)), full_matrices=True)\n f,g =np.linalg.eig((np.dot(pinv(S_w),S_b)))\n Basis_LDA= U[:,0]\n Basis_LDA=Basis_LDA.reshape(Basis_LDA.shape[0],PC_LDA)\n #print(np.matmul(inv(S_w),S_b))\n #print(Basis_LDA)\n #X_std_lda_mine= np.dot(X, Basis_LDA)\n return Basis_LDA","sub_path":"src/LDA.py","file_name":"LDA.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"147598668","text":"import json\nimport requests\n\na = '{\"aa\":111, \"b\":True}' # str\nb = '{\"aa\":111, \"b\":true}' # str json\n\nc = json.loads(b)\nprint(c)\n\n# a怎么转字段 eval: 把字符串当成代码去识别\n\nd = eval(a)\nprint(d)\nprint(type(d))\n\nurl = \"http://httpbin.org/post\"\n\nbody = {\n \"uo\": \"hello world\",\n \"xx\": 
\"aaaa\"\n}\n\nr = requests.post(url, json = body)\nprint(r.text)\n\n# 第二种发送方式\nr2 = requests.post(url, data=json.dumps(body))\nprint(r2.text)\n","sub_path":"gongcheng/studay/day9/json3.py","file_name":"json3.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"173426434","text":"from __future__ import print_function\nfrom model import TFN\nfrom torch.utils.data import DataLoader\nfrom torch.autograd import Variable\nfrom sklearn.metrics import accuracy_score, precision_recall_fscore_support\nimport os\nimport argparse\nimport torch\nimport random\nimport torch.nn as nn\nimport torch.optim as optim\nimport numpy as np\nimport utils\nimport pickle # for debugging purposes\n\ndef preprocess(options):\n # parse the input args\n dataset = options['dataset']\n model_path = options['model_path']\n batch_size = options['batch_size']\n DTYPE = torch.FloatTensor\n if options['cuda']:\n DTYPE = torch.cuda.FloatTensor\n\n # prepare the paths for storing models\n model_path = os.path.join(\n model_path, \"tfn.pt\")\n print(\"Temp location for saving model: {}\".format(model_path))\n\n # define fields\n text_field = 'CMU_MOSI_TimestampedWordVectors_1.1'\n visual_field = 'CMU_MOSI_VisualFacet_4.1'\n acoustic_field = 'CMU_MOSI_COVAREP'\n label_field = 'CMU_MOSI_Opinion_Labels'\n \n # DEBUG ONLY\n recalc = not (os.path.exists('vars/dump') and os.path.isfile('vars/dump'))\n \n if recalc:\n # prepare the datasets\n print(\"Currently using {} dataset.\".format(dataset))\n DATASET = utils.download()\n dataset = utils.load(visual_field, acoustic_field, text_field)\n utils.align(text_field, dataset)\n utils.annotate(dataset, label_field)\n splits = utils.get_splits(DATASET)\n if not os.path.exists('./vars'):\n os.makedirs('./vars')\n f = open('./vars/dump', 'wb+')\n pickle.dump([splits, dataset], f)\n f.close()\n else:\n f = open('./vars/dump', 'rb')\n splits, dataset = pickle.load(f)\n f.close()\n\n input_dims = utils.get_dims_from_dataset(dataset, text_field, acoustic_field, visual_field)\n train, dev, test = utils.split(splits, dataset, label_field, visual_field, acoustic_field, text_field, batch_size)\n train_loader, dev_loader, test_loader = utils.create_data_loader(train, dev, test, batch_size, DTYPE)\n return train_loader, dev_loader, test_loader, input_dims\n\ndef display(test_loss, test_binacc, test_precision, test_recall, test_f1, test_septacc, test_corr):\n print(\"MAE on test set is {}\".format(test_loss))\n print(\"Binary accuracy on test set is {}\".format(test_binacc))\n print(\"Precision on test set is {}\".format(test_precision))\n print(\"Recall on test set is {}\".format(test_recall))\n print(\"F1 score on test set is {}\".format(test_f1))\n print(\"Seven-class accuracy on test set is {}\".format(test_septacc))\n print(\"Correlation w.r.t human evaluation on test set is {}\".format(test_corr))\n\ndef main(options):\n train_loader, valid_loader, test_loader, input_dims = preprocess(options)\n\n model = TFN(input_dims, (4, 16, 128), 64, (0.3, 0.3, 0.3, 0.3), 32)\n if options['cuda']:\n model = model.cuda()\n print(\"Model initialized\")\n criterion = nn.L1Loss(size_average=False)\n optimizer = optim.Adam(list(model.parameters())[2:]) # don't optimize the first 2 params, they should be fixed (output_range and shift)\n \n # setup training\n complete = True\n min_valid_loss = float('Inf')\n patience = options['patience']\n epochs = options['epochs']\n model_path = options['model_path']\n curr_patience = 
patience\n    for e in range(epochs):\n        model.train()\n        train_loss = 0.0\n        num_processed = 0\n        for batch in train_loader:\n            num_processed += batch[0].shape[0]\n            model.zero_grad()\n            t, v, a, y = batch\n            output = model(a, v, t)\n            loss = criterion(output, y)\n            loss.backward()\n            train_loss += loss.data.item() / len(train_loader.dataset)\n            optimizer.step()\n\n        print(\"Epoch {} complete! Average Training loss: {}\".format(e, train_loss))\n\n        # Terminate the training process if run into NaN\n        if np.isnan(train_loss):\n            print(\"Training got into NaN values...\\\n\\\n\")\n            complete = False\n            break\n\n        # On validation set we don't have to compute metrics other than MAE and accuracy\n        model.eval()\n        for batch in valid_loader:\n            t, v, a, y = batch\n            output_valid = model(a, v, t)\n            valid_loss = criterion(output_valid, y)\n            output_valid = output_valid.cpu().data.numpy().reshape(-1)\n            y = y.cpu().data.numpy().reshape(-1)\n\n        if np.isnan(valid_loss.data.item()):\n            print(\"Training got into NaN values...\\\n\\\n\")\n            complete = False\n            break\n\n        valid_binacc = accuracy_score(output_valid>=0, y>=0)\n\n        print(\"Validation loss is: {}\".format(valid_loss.data.item() / len(valid_loader.dataset)))\n        print(\"Validation binary accuracy is: {}\".format(valid_binacc))\n\n        if (valid_loss.data.item() < min_valid_loss):\n            curr_patience = patience\n            min_valid_loss = valid_loss.data.item()\n            torch.save(model, model_path)\n            print(\"Found new best model, saving to disk...\")\n        else:\n            curr_patience -= 1\n        \n        if curr_patience <= 0:\n            break\n        print(\"\\\n\\\n\")\n\n    if complete:\n        \n        best_model = torch.load(model_path)\n        best_model.eval()\n        for batch in test_loader:\n            t, v, a, y = batch\n            output_test = best_model(a, v, t)\n            loss_test = criterion(output_test, y)\n            test_loss = loss_test.data.item()\n            output_test = output_test.cpu().data.numpy().reshape(-1)\n            y = y.cpu().data.numpy().reshape(-1)\n\n        test_binacc = accuracy_score(output_test>=0, y>=0)\n        test_precision, test_recall, test_f1, _ = precision_recall_fscore_support(y>=0, output_test>=0, average='binary')\n        test_septacc = (output_test.round() == y.round()).mean()\n\n        # compute the correlation between true and predicted scores\n        test_corr = np.corrcoef([output_test, y])[0][1] # corrcoef returns a matrix\n        test_loss = test_loss / len(test_loader.dataset)\n\n        display(test_loss, test_binacc, test_precision, test_recall, test_f1, test_septacc, test_corr)\n    return\n\nif __name__ == \"__main__\":\n    OPTIONS = argparse.ArgumentParser()\n    OPTIONS.add_argument('--dataset', dest='dataset',\n                         type=str, default='MOSI')\n    OPTIONS.add_argument('--epochs', dest='epochs', type=int, default=50)\n    OPTIONS.add_argument('--batch_size', dest='batch_size', type=int, default=32)\n    # PATIENCE SET LOW FOR TEST PURPOSES, must increase the default back to 20\n    OPTIONS.add_argument('--patience', dest='patience', type=int, default=5)\n    OPTIONS.add_argument('--cuda', dest='cuda', type=bool, default=False)\n    OPTIONS.add_argument('--model_path', dest='model_path',\n                         type=str, default='models')\n    OPTIONS.add_argument('--max_len', dest='max_len', type=int, default=20)\n    PARAMS = vars(OPTIONS.parse_args())\n    main(PARAMS)\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"630611639","text":"# Zeros to the Right\n# Write a function that takes an array of integers and moves each non-zero integer to the left side of the array. The function should return the mutated array. 
The order of the non-zero integers does not matter in the mutated array.\n# Examples\n# Sample input: [0, 3, 1, 0, -2]\n# Expected output: [3, 1, -2, 0, 0]\n# Sample input: [4, 2, 1, 5]\n# Expected output: [4, 2, 1, 5]\n\ndef zeros_to_the_right(num_array):\n    try:\n        num_array.index(0)  # raises ValueError when there is no zero to move\n        num_array.sort(reverse=True)\n        while num_array[-1] != 0:\n            num_array = num_array[-1:] + num_array[:-1]\n        return num_array\n    except ValueError:\n        return num_array\n\nprint(zeros_to_the_right([0, 3, 1, 0, -2]))\nprint(zeros_to_the_right([4, 2, 1, 5]))\nprint(zeros_to_the_right([0, 3, 1, 0, -2, -1, -5]))\n","sub_path":"src/whiteboarding.py","file_name":"whiteboarding.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"94021014","text":"\n#1\ntext = \"Star Trek: Birth of the Federation is a 4X turn-based strategy video game developed by MicroProse and published by Hasbro Interactive. The game was initially released on May 25, 1999 for Windows personal computers.\"\nh_letters = [letter for letter in text if letter != 'a']\ngoogle = ''.join(h_letters)\nprint(google)\n\n#2\nwithout_a = text.replace(\"a\", \"\")\nprint(without_a)\n\n#3\ncut = text[135:]\nprint(cut)\n\n\n#4\ntext_split = text.split()\nprint(text_split)\n\n","sub_path":"4th/practice/pr_1.py","file_name":"pr_1.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"300415131","text":"from pywps import Process, LiteralInput, LiteralOutput\n\n\nclass Square(Process):\n    def __init__(self):\n        inputs = [LiteralInput(identifier='arg', title='Input number', data_type='integer', abstract=\"Argument\")]\n        outputs = [LiteralOutput(identifier='response', title='Output response', data_type='integer', abstract=\"Result\")]\n\n        super(Square, self).__init__(\n            self.handler,\n            identifier='square',\n            title='Square Process',\n            abstract='Squares the input',\n            version='1.0.0',\n            inputs=inputs,\n            outputs=outputs,\n            store_supported=True,\n            status_supported=True\n        )\n\n    def handler(self, request, response):\n        response.outputs['response'].data = request.inputs['arg'][0].data ** 2\n        return response\n","sub_path":"processes/square.py","file_name":"square.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"231780509","text":"#inserting an image into the database\n\nimport sqlite3\n\n\n#connect to the database\ndb = sqlite3.connect('images.sqlite')\n\n#configure to allow binary inserts\ndb.text_factory = bytes\n\n#read the image file to insert\nr = open('../imageapp/dice.png', 'rb').read()\n\n#insert your stuff\ndb.execute('INSERT INTO image_store (image) VALUES (?)', (r,))\ndb.commit()\n","sub_path":"sqlite/insert.py","file_name":"insert.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"88700904","text":"import array\nimport random\nimport time\nimport numpy\nfrom math import sqrt\nimport cluster\nfrom deap import algorithms\nfrom deap import base\nfrom deap import benchmarks\nfrom deap.benchmarks.tools import diversity, convergence\nfrom deap import creator\nfrom deap import tools\nimport os\nfrom population import *\nimport network  # needed below for network.softmax\nfrom network import Neterr\nfrom chromosome import Chromosome, crossover\nimport traceback\nn_hidden = 100\nindim = 32\noutdim = 5\n\n\n\nnetwork_obj_tar = Neterr(indim, outdim, 
n_hidden, change_to_target=1, rng=random)\n# creator.create(\"FitnessMin\", base.Fitness, weights=(-1.0, -1.0, 0.0, 0.0))\ncreator.create(\"FitnessMin\", base.Fitness, weights=(-1.0, -1.0, -1.0))\ncreator.create(\"Individual\", Chromosome, fitness=creator.FitnessMin)\nprint(\"here network object created\")\ntoolbox = base.Toolbox()\n\n\n\n\ndef minimize_tar(individual):\n\toutputarr = network_obj_tar.feedforward_ne(individual, final_activation=network.softmax)\n\n\tneg_log_likelihood_val = give_neg_log_likelihood(outputarr, network_obj_tar.resty)\n\tmean_square_error_val = give_mse(outputarr, network_obj_tar.resty)\n\tcomplexity = lambda ind: len(ind.conn_arr) * ind.node_ctr\n\tind_complexity = complexity(individual)\n\t# anyways not using these as you can see in 'creator.create(\"FitnessMin\", base.Fitness, weights=(-1.0, -1.0, 0.0, 0.0))'\n\t# return neg_log_likelihood_val, mean_square_error_val, false_positve_rat, false_negative_rat\n\treturn neg_log_likelihood_val, mean_square_error_val, ind_complexity\n\n\ndef mycross(ind1, ind2, gen_no):\n\tchild1 = crossover(ind1, ind2, gen_no, inputdim=indim, outputdim=outdim)\n\tchild2 = crossover(ind1, ind2, gen_no, inputdim=indim, outputdim=outdim)\n\n\treturn child1, child2\n\n\ndef mymutate(ind1):\n\tnew_ind = ind1.do_mutation(rate_conn_weight=0.2,rate_conn_itself= 0.1,rate_node= 0.05,weight_factor = 1, inputdim= indim, outputdim=outdim, max_hidden_unit=n_hidden,rng = random)\n\treturn new_ind\n\n\ndef initIndividual(ind_class, inputdim, outputdim):\n\tind = ind_class(inputdim, outputdim)\n\treturn ind\n\nold_chromosome = None\ntoolbox.register(\"individual\", initIndividual, creator.Individual, indim, outdim)\ntoolbox.register(\"population\", tools.initRepeat, list, toolbox.individual)\n\ntoolbox.register(\"mate\", mycross)\ntoolbox.register(\"mutate\", mymutate)\ntoolbox.register(\"select\", tools.selNSGA2)\n\nbp_rate = 0.05\n\n\ndef main(seed=None, play=0, NGEN=40, MU=4 * 10):\n\t# random.seed(seed)\n\n\t# MU has to be a multiple of 4. 
period.\n\tCXPB = 0.9\n\n\tstats = tools.Statistics(lambda ind: ind.fitness.values[1])\n\t# stats.register(\"avg\", numpy.mean, axis=0)\n\t# stats.register(\"std\", numpy.std, axis=0)\n\tstats.register(\"min\", numpy.min, axis=0)\n\tstats.register(\"max\", numpy.max, axis=0)\n\n\tlogbook = tools.Logbook()\n\tlogbook.header = \"gen\", \"evals\", \"std\", \"min\", \"avg\", \"max\"\n\ttoolbox.register(\"evaluate\", minimize_tar)\n\ttime1 = time.time()\n\tpop_tar = toolbox.population(n=MU)\n\ttime2 = time.time()\n\tprint(\"After population initialisation\", time2 - time1)\n\n\tinvalid_ind = [ind for ind in pop_tar if not ind.fitness.valid]\n\n\tfitnesses = toolbox.map(toolbox.evaluate, invalid_ind)\n\tfor ind, fit in zip(invalid_ind, fitnesses):\n\t\tind.fitness.values = fit\n\n\t# This is just to assign the crowding distance to the individuals\n\t# no actual selection is done\n\tpop_tar = toolbox.select(pop_tar, len(pop_tar))\n\t# print( \"first population selected, still outside main loop\")\n\t# print(pop)\n\trecord = stats.compile(pop_tar)\n\tlogbook.record(gen=0, evals=len(invalid_ind), **record)\n\tprint(logbook.stream)\n\tmaxi = 0\n\tstri = ''\n\tflag = 0\n\t# Begin the generational process\n\t# print(pop.__dir__())\n\tfor gen in range(1, NGEN):\n\n\t\t# Vary the population\n\t\tprint()\n\t\tprint(\"here in gen no.\", gen)\n\t\toffspring = tools.selTournamentDCD(pop_tar, len(pop_tar))\n\t\toffspring = [toolbox.clone(ind) for ind in offspring]\n\t\tif play:\n\t\t\tif play == 1:\n\t\t\t\tpgen = NGEN * 0.1\n\t\t\telif play == 2:\n\t\t\t\tpgen = NGEN * 0.9\n\n\t\t\tif gen == int(pgen):\n\t\t\t\tprint(\"gen:\", gen, \"doing clustering\")\n\t\t\t\tto_bp_lis = cluster.give_cluster_head(offspring, int(MU * bp_rate))\n\t\t\t\tassert (to_bp_lis[0] in offspring)\n\t\t\t\tprint(\"doing bp\")\n\t\t\t\t[item.modify_thru_backprop(indim, outdim, network_obj_tar.rest_setx, network_obj_tar.rest_sety,\n\t\t\t\t\t\t\t\t\t\t epochs=10, learning_rate=0.1, n_par=10) for item in to_bp_lis]\n\t\t\t\t# Evaluate the individuals with an invalid fitness\n\t\t\t\tinvalid_ind = [ind for ind in offspring if not ind.fitness.valid]\n\t\t\t\tfitnesses = toolbox.map(toolbox.evaluate, invalid_ind)\n\t\t\t\tfor ind, fit in zip(invalid_ind, fitnesses):\n\t\t\t\t\tind.fitness.values = fit\n\t\tdum_ctr = 0\n\t\tfor ind1, ind2 in zip(offspring[::2], offspring[1::2]):\n\t\t\t# print(ind1.fitness.values)\n\t\t\t\"\"\"if not flag :\n\t\t\t\tind1.modify_thru_backprop(indim, outdim, network_obj.rest_setx, network_obj.rest_sety, epochs=10, learning_rate=0.1, n_par=10)\n\t\t\t\tflag = 1\n\t\t\t\tprint(\"just testing\")\n\t\t\t\"\"\"\n\t\t\tflag = 0\n\t\t\tif random.random() <= CXPB:\n\t\t\t\tind1, ind2 = toolbox.mate(ind1, ind2, gen)\n\t\t\t\tind1 = creator.Individual(indim, outdim, ind1)\n\t\t\t\tind2 = creator.Individual(indim, outdim, ind2)\n\t\t\t\tflag = 1\n\t\t\tmaxi = max(maxi, ind1.node_ctr, ind2.node_ctr)\n\t\t\ttoolbox.mutate(ind1)\n\t\t\ttoolbox.mutate(ind2)\n\n\t\t\toffspring[dum_ctr] = ind1\n\t\t\toffspring[dum_ctr + 1] = ind2\n\t\t\tdel offspring[dum_ctr].fitness.values, offspring[dum_ctr + 1].fitness.values\n\t\t\tdum_ctr += 2\n\n\t\t# Evaluate the individuals with an invalid fitness\n\t\tinvalid_ind = [ind for ind in offspring if not ind.fitness.valid]\n\t\tfitnesses = toolbox.map(toolbox.evaluate, invalid_ind)\n\t\tfor ind, fit in zip(invalid_ind, fitnesses):\n\t\t\tind.fitness.values = fit\n\n\t\t# Select the next generation population\n\t\tpop_tar = toolbox.select(pop_tar + offspring, MU)\n\n\t\trecord = 
stats.compile(pop_tar)\n\t\tlogbook.record(gen=gen, evals=len(invalid_ind), **record)\n\t\tanost = logbook.stream\n\t\tliso = [item.rstrip() for item in anost.split(\"\\t\")]\n\t\tmse = float(liso[3])\n\n\t\tprint(anost)\n\t\tstri += anost + '\\n'\n\t\tprint(\"generation done\")\n\t\t# file_ob.write(str(logbook.stream))\n\t\t# print(len(pop))\n\t\t# file_ob.close()\n\t# print(stri)\n\n\t##from here starting target\n\treturn pop_tar, logbook\n\n\ndef note_this_string(new_st, stringh):\n\t\"\"\"flag_ob = open(\"flag.txt\",\"r+\")\n\n ctr = None\n st = flag_ob.read()\n flag = int(st.rstrip())\n while flag ==1:\n flag_ob.seek(0)\n st = flag_ob.read()\n flag = int(st.rstrip())\n time.sleep(3)\n if flag == 0:\n flag = 1\n flag_ob.seek(0)\n flag_ob.write(\"1\\n\")\n flag_ob.close()\n '/home/robita/forgit/neuro-evolution/05/state/tf/indep_pima/input/model.ckpt.meta'\n \"\"\"\n\tname = \"./ctr_folder/ctr\" + stringh + \".txt\"\n\tif not os.path.isfile(name):\n\t\tnew_f = open(name, \"w+\")\n\t\tnew_f.write(\"0\\n\")\n\t\tnew_f.close()\n\n\tctr_ob = open(name, \"r+\")\n\tstrin = ctr_ob.read().rstrip()\n\tassert (strin is not '')\n\tctr = int(strin)\n\tctr_ob.seek(0)\n\tctr_ob.write(str(ctr + 1) + \"\\n\")\n\tctr_ob.close()\n\t\"\"\" \n flag_ob = open(\"flag.txt\",\"w\")\n flag_ob.write(\"0\\n\")\n flag_ob.close()\n \"\"\"\n\n\tnew_file_ob = open(\"log_folder/log\" + stringh + \".txt\", \"a+\")\n\tnew_file_ob.write(str(ctr) + \" \" + new_st + \"\\n\")\n\tnew_file_ob.close()\n\treturn ctr\n\n\n\n\ndef test_it_with_bp(play=1, NGEN=100, MU=4 * 25, play_with_whole_pareto=0):\n\tpop, stats = main(play=play, NGEN=NGEN, MU=MU)\n\tstringh = \"_with_bp_just_tar\" + str(play) + \"_\" + str(NGEN)\n\tfronts = tools.sortNondominated(pop, len(pop))\n\n\t'''file_ob = open(\"./log_folder/log_for_graph.txt\", \"w+\")\n for item in fronts[0]:\n st = str(item.fitness.values[0]) + \" \" + str(item.fitness.values[1])+\"\\n\"\n file_ob.write( st )\n file_ob.close()'''\n\n\tif play_with_whole_pareto or len(fronts[0]) < 30:\n\t\tpareto_front = fronts[0]\n\telse:\n\n\t\tpareto_front = random.sample(fronts[0], 30)\n\tprint(\"Pareto Front: \")\n\tfor i in range(len(pareto_front)):\n\t\tprint(pareto_front[i].fitness.values)\n\n\tprint(\"\\ntest: test on one with min validation error\",\n\t\t network_obj_tar.test_err(min(pop, key=lambda x: x.fitness.values[1])))\n\ttup = network_obj_tar.test_on_pareto_patch_correctone(pareto_front)\n\n\tprint(\"\\n test: avg on sampled pareto set\", tup)\n\n\tst = str(network_obj_tar.test_err(min(pop, key=lambda x: x.fitness.values[1]))) + \" \" + str(tup)\n\tprint(note_this_string(st, stringh))\n\n\nif __name__ == \"__main__\":\n\tlogf = open(\"log_error_just_tar.txt\", \"a\")\n\ttry:\n\t\ttest_it_with_bp(play=1, NGEN=100, MU=4 * 25, play_with_whole_pareto=1)\n\texcept Exception as e:\n\t\tprint(\"Error! Error! 
Error!\")\n\t\tlogf.write('\\\n\\\n')\n\t\tlocaltime = time.localtime(time.time())\n\t\tlogf.write(str(localtime)+'\\\n')\n\t\ttraceback.print_exc(file=logf)\n\t\tlogf.write('\\\n\\\n')\n\tfinally:\n\t\tlogf.close()\n\t# file_ob.write( \"test on one with min validation error \" + str(neter.test_err(min(pop, key=lambda x: x.fitness.values[1]))))\n\n\t# print(stats)\n\t'''\n    import matplotlib.pyplot as plt\n    import numpy\n\n    front = numpy.array([ind.fitness.values for ind in pop])\n    plt.scatter(front[:,0], front[:,1], c=\"b\")\n    plt.axis(\"tight\")\n    plt.show()'''\n","sub_path":"main_just_tar.py","file_name":"main_just_tar.py","file_ext":"py","file_size_in_byte":8885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"641521749","text":"\nfrom os import path\nimport time\nimport sys\nimport json\nfrom sweetest.data import testsuite_format, testsuite2data\nfrom sweetest.parse import parse\nfrom sweetest.elements import e\nfrom sweetest.globals import g\nfrom sweetest.windows import w\nfrom sweetest.testsuite import TestSuite\nfrom sweetest.testcase import TestCase\nfrom sweetest.utility import Excel, get_record\nfrom sweetest.log import logger\nfrom sweetest.report import Report\nfrom sweetest.config import _testcase, _elements, _report\n\n\nclass Autotest:\n    def __init__(self, file_name, sheet_name, desired_caps={}, server_url=''):\n        g.start_time = time.strftime(\"@%Y%m%d_%H%M%S\", time.localtime())\n\n        if desired_caps:\n            self.desired_caps = desired_caps\n        else:\n            self.desired_caps = {'platformName': 'Desktop', 'browserName': 'Chrome'}\n        self.server_url = server_url\n\n        g.project_name = file_name.split('-')[0]\n        self.testcase_file = path.join(\n            'testcase', file_name + '-' + _testcase + '.xlsx')\n        self.elements_file = path.join(\n            'element', g.project_name + '-' + _elements + '.xlsx')\n        self.report_file = path.join(\n            'report', file_name + '-' + _report + '.xlsx')\n        self.report_xml = path.join(\n            'junit', file_name + '-' + _report + '.xml')\n\n        self.testcase_workbook = Excel(self.testcase_file, 'r')\n        self.sheet_names = self.testcase_workbook.get_sheet(sheet_name)\n\n        self.report_workbook = Excel(self.report_file.split('.')[\n                                     0] + g.start_time + '.xlsx', 'w')\n\n    def plan(self):\n        self.code = 0  # return code\n        # 1. parse the config file\n        try:\n            e.get_elements(self.elements_file)\n        except:\n            logger.exception('*** Parse config file fail ***')\n            self.code = -1\n            sys.exit(self.code)\n\n        self.report = Report()\n        self.report_ts = {}\n\n        # 2. run the test suites one by one\n        for sheet_name in self.sheet_names:\n            g.sheet_name = sheet_name\n            # initialize the xml test report\n            self.report_ts[sheet_name] = self.report.create_suite(\n                g.project_name, sheet_name)\n            self.report_ts[sheet_name].start()\n\n            self.run(sheet_name)\n\n        self.report_workbook.close()\n\n        with open(self.report_xml, 'w', encoding='utf-8') as f:\n            self.report.write(f)\n\n        sys.exit(self.code)\n\n    def run(self, sheet_name):\n        # 1. get the testsuite from Excel\n        try:\n            data = self.testcase_workbook.read(sheet_name)\n            testsuite = testsuite_format(data)\n            logger.info('Testsuite imported from Excel:\\\n' +\n                        json.dumps(testsuite, ensure_ascii=False, indent=4))\n            logger.info('From Excel import testsuite success')\n        except:\n            logger.exception('*** From Excel import testsuite fail ***')\n            self.code = -1\n            sys.exit(self.code)\n\n        # 2. initialize the global objects\n        try:\n            g.init(self.desired_caps, self.server_url)\n            g.set_driver()\n            # if the test data file exists, read one record from it and assign it to the global variable list\n            data_file = path.join(\n                'data', g.project_name + '-' + sheet_name + '.csv')\n            if path.exists(data_file):\n                g.var = 
get_record(data_file)\n            w.init()\n        except:\n            logger.exception('*** Init global object fail ***')\n            self.code = -1\n            sys.exit(self.code)\n\n        # 3. parse the testsuite\n        try:\n            parse(testsuite)\n            logger.debug('testsuite has been parsed:\\\n' + str(testsuite))\n        except:\n            logger.exception('*** Parse testsuite fail ***')\n            self.code = -1\n            sys.exit(self.code)\n\n        # 4. run the testsuite\n        ts = TestSuite(testsuite, self.report_ts[sheet_name])\n        ts.run()\n\n        # 5. check the test results\n        if self.report_ts[sheet_name].high_errors + self.report_ts[sheet_name].medium_errors + \\\\n                self.report_ts[sheet_name].high_failures + self.report_ts[sheet_name].medium_failures:\n            self.code = -1\n\n        # 6. save the test results\n        try:\n            data = testsuite2data(testsuite)\n            self.report_workbook.write(data, sheet_name)\n        except:\n            logger.exception('*** Save the report fail ***')\n","sub_path":"sweetest/sweetest/autotest.py","file_name":"autotest.py","file_ext":"py","file_size_in_byte":4482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"23815110","text":"\"\"\"\nmulti_port_simulation.py\n\nAuthor:\n    Sequoia Ploeg\n\nDependencies:\n- tkinter\n- simphony\n- scipy\n- numpy\n- os\n- matplotlib\n\nThis file mainly provides the GUI for running simulations. It creates a \nSimulation object, runs it, and provides controls and windows for displaying\nand exporting the results.\n\"\"\"\n\nimport pya\nimport tkinter as tk\nfrom tkinter import filedialog\n\nfrom .graph import Graph, DataSet, MenuItem\nfrom simphony.simulation import MultiInputSimulation\n\nimport scipy.io as sio\nimport numpy as np\nimport os\n\nfrom matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg, NavigationToolbar2Tk)\nfrom matplotlib.backend_bases import key_press_handler\nfrom matplotlib.figure import Figure\n\nclass CircuitAnalysisGUI(tk.Tk):\n    # Some constants\n    tera = 1e12\n    nano = 1e9\n\n    def __init__(self):\n        tk.Tk.__init__(self)\n        self.withdraw()\n        self.after(0, self.deiconify)\n        self.protocol(\"WM_DELETE_WINDOW\", self.on_closing)\n\n        # Title the window\n        self.title('Multi-Input Circuit Simulation')\n\n        # Run the simulation\n        cell = pya.Application.instance().main_window().current_view().active_cellview().cell\n        _, _, ann_netlist_model, self.components = cell.spice_netlist_export_ann()\n        self.simulation = MultiInputSimulation(ann_netlist_model)\n\n        # Object paddings\n        self.padx = 5\n        self.pady = 5\n\n        # One frame to rule them all\n        bbox = tk.Frame(padx=self.padx, pady=self.pady)\n        bbox.pack()\n\n        # Controls frame\n        self.controls = tk.Frame(bbox, bd=1)\n        self.controls.grid(row=0, column=0, sticky='EW')\n        self.generate_controls()\n\n        # Schematic figure initialization\n        self.schematic = tk.Frame(bbox)\n        self.schematic.grid(row=1, column=0)\n        self.generate_schematic()\n\n        # Now that everything is in place, show the window.\n        self.after(0, self.deiconify)\n\n    def on_closing(self):\n        self.withdraw()\n        self.quit()\n        self.destroy()\n\n    def generate_controls(self):\n        io_group = tk.LabelFrame(self.controls, text=\"Input/Output\", padx=self.padx, pady=self.pady)\n        io_group.pack(fill=tk.BOTH, expand=1)\n        tk.Label(io_group, text=\"Input ports (period separated):\").grid(row=0, column=0, sticky='EW')\n        self.in_port = tk.Entry(io_group)\n        self.in_port.grid(row=1, column=0, sticky='EW')\n        tk.Label(io_group, text=\"Output ports (period separated):\").grid(row=0, column=1, sticky='EW')\n        self.out_port = tk.Entry(io_group)\n        self.out_port.grid(row=1, column=1, sticky='EW')\n        tk.Button(io_group, text=\"Run Simulation\", command=self.plot).grid(row=2, column=1, 
sticky='E')\n self.bind('', self.plot)\n self.bind('', self.plot)\n\n def generate_schematic(self):\n \"\"\"\n This function creates a figure object and places it within the \n schematic slot in the parent tkinter window. It then calls 'draw' to \n plot the layout points on the canvas.\n \"\"\"\n # The only real objects we'll need to interact with to plot and unplot\n # self.components = self.simulation.external_components\n self.fig = Figure(figsize=(5, 4), dpi=100)\n\n # Objects needed simply for the sake of embedding the graph in tk\n self.canvas = FigureCanvasTkAgg(self.fig, master=self.schematic)\n self.canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)\n self.fig.clear()\n self.ax = self.fig.add_subplot(111)\n self.draw()\n self.fig.tight_layout()\n\n def draw(self):\n for comp in self.components:\n name, pos, nets = comp\n self.ax.plot(pos[0], pos[1], 'ro')\n externals = [(-1) * x - 1 for x in nets if x < 0 ]\n self.ax.text(pos[0], pos[1], \" Port \" + str(externals[0]) + \": \" + name)\n self.ax.axis('off')\n self.canvas.draw()\n\n def plot(self, *args, **kwargs):\n in_ports = [int(i) for i in self.in_port.get().split('.')]\n out_ports = [int(i) for i in self.out_port.get().split('.')]\n self.simulation.multi_input_simulation(inputs=in_ports)\n plt = Graph(self, \"Figure\")\n for output in out_ports:\n plt.plot(*self.get_magnitude_by_frequency_thz(output), label=(\"out_\" + str(output)))\n plt.title('Multiple Input Simulation')\n plt.xlabel('Frequency (THz)')\n plt.ylabel('Gain')\n plt.legend(True)\n\n def get_magnitude_by_frequency_thz(self, output_port):\n \"\"\"\n Parameters\n ----------\n output_port : int\n Gets the values at that output port (0-indexed).\n \"\"\"\n freq = np.divide(self.simulation.freq_array, 1e12)\n mag = np.power(np.absolute(self.simulation.simulated_matrix[:, output_port]), 2)\n return freq, mag\n\n def get_magnitude_by_wavelength_nm(self, output_port):\n \"\"\"\n Parameters\n ----------\n output_port : int\n Gets the values at that output port (0-indexed).\n \"\"\"\n wl = self.frequencyToWavelength(self.simulation.freq_array) / 1e-9\n mag = np.power(np.absolute(self.simulation.simulated_matrix[:, output_port]), 2)\n return wl, mag\n \ndef circuit_analysis():\n try:\n cell = pya.Application.instance().main_window().current_view().active_cellview().cell\n app = CircuitAnalysisGUI()\n app.mainloop()\n except Exception:\n raise\n\nif __name__ == \"__main__\":\n circuit_analysis()","sub_path":"klayout_dot_config/python/SiEPIC_Simphony/multi_port_simulation.py","file_name":"multi_port_simulation.py","file_ext":"py","file_size_in_byte":5531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"2036384","text":"# -*- coding: utf-8 -*-\n\n# ./main.py => contains main system\n\n############\n# IMPORTS\n############\nimport mainsys.byfile as bf\nimport mainsys.shell as sh\nimport utils.explorer as ep\nimport settings.checker as chk\nimport settings.tests_runner as tstrn\n\n###########\n# System\n###########\ncmd = ['file','shell','help','quit','explorer','run_test']\nstatus = True\nhelp_message = '\\n => command list:\\n file : run a file\\n shell : run a shell\\n help : show this help message\\n quit : close the main system \"MAIN SYS\"\\n\\n run_test : Launch a test on all test program (use that only if you have the tests folder provided with source code)\\n'\nhelp_message += '\\n Shell command list:\\n help_shell : show the help message about instructions if you are in shell\\n stop_shell : quit the 
shell if you are in shell\\n'\nhelp_message += '\\n >>> Other \\\"MAIN SYS\\\" command:\\n explorer : open the \\\"Explorer SYS\\\"\\n'\n\nchk.print_info()\nprint('\\n\\n Type help to show the help message \\n\\n Thanks for using this language :) \\n\\n')\n\nwhile status:\n inp = ''\n while not inp in cmd:\n inp = input('MAIN SYS > ')\n\n if inp == cmd[0]:\n bf.run()\n if inp == cmd[1]:\n sh.run()\n if inp == cmd[2]:\n print(help_message)\n if inp == cmd[3]:\n status = False\n if inp == cmd[4]:\n ep.run()\n if inp == cmd[5]:\n tstrn.run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"284721813","text":"# move files and zip files\nimport zipfile\nimport shutil\nimport re\nimport os\n# create new directory\nif not os.path.exists(r'.\\quiz'):\n os.makedirs(r'.\\quiz')\nif not os.path.exists(r'.\\answer'):\n os.makedirs(r'.\\answer')\n# create new zip file\nquizzip=zipfile.ZipFile('quiz.zip','w')\nanswerzip=zipfile.ZipFile('answer.zip','w')\n\nquizre=re.compile(r'.*quiz(\\d)+.*')\nanswerre=re.compile(r'.*quiz_answer(\\d)+.*')\n\n# move and zip\nfor filename in os.listdir(r'.'):\n source = os.path.abspath(filename)\n quizfile=quizre.search(filename)\n if quizfile != None:\n dest=os.path.abspath(r'.\\quiz')\n shutil.move(source,dest)\n print('%s move to %s' % (source,dest))\n newquizfile=os.path.join(dest,filename)\n quizzip.write(newquizfile)\n print('%s zipped to %s' % (filename, 'quiz.zip'))\n answerfile=answerre.search(filename)\n if answerfile != None:\n dest=os.path.abspath(r'.\\answer')\n shutil.move(source, dest)\n print('%s move to %s' % (source, dest))\n newanswerfile=os.path.join(dest,filename)\n answerzip.write(newanswerfile)\n print('%s zipped to %s' % (filename, 'answer.zip'))\n\nquizzip.close()\nanswerzip.close()\n\n","sub_path":"批量移动和压缩文件.py","file_name":"批量移动和压缩文件.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"432503437","text":"# encoding: UTF-8\n\nimport sys\nfrom datetime import date\nfrom time import sleep, time\nimport shelve\n\nfrom mdserver import Md\nfrom schedule import MdScheduler\n\n\nclass MdEngine:\n def __init__(self, pubserver, is_mock=False):\n\n self.pubserver = pubserver\n self._is_mock = is_mock\n\n self.md = Md()\n\n self.dictInstrument = {}\n if not self._is_mock:\n self.md.start()\n self._schedule = MdScheduler(self.reconnectmd)\n self._schedule.start()\n\n def reconnectmd(self):\n if not self._is_mock:\n print(\"reconnect md\")\n self.md.start()\n\n def print_log(self, event):\n _log = event.dict_['log']\n print(\"LOG:\", _log.encode())\n\n def get_error(self, event):\n _msg = event.dict_['log']\n print(\"ERROR\", _msg)\n self.pubserver.send_multipart(\"ERROR\", event.dict_)\n\n def get_data(self, event):\n _data = event.dict_['data']\n ask_1 = _data['AskPrice1']\n askv_1 = _data['AskVolume1']\n bid_1 = _data['BidPrice1']\n bidv_1 = _data['BidVolume1']\n instrument_id = _data['InstrumentID']\n update_time = _data['UpdateTime']\n price = (ask_1 + bid_1) / 2.0\n data_str = \"%s,%s,%s,%s,%s,%s\" % (price, ask_1, askv_1, bid_1, + bidv_1, update_time)\n print(instrument_id, data_str)\n self.pubserver.send_multipart(instrument_id, _data)\n\n def init_get(self, event):\n\n f = shelve.open('setting.vn')\n\n try:\n d = f['instrument']\n\n today = date.today()\n if d['date'] == today:\n self.dictInstrument = 
d['dictInstrument']\n\n            else:\n                self.getInstrument()\n        except KeyError:\n            self.getInstrument()\n\n        f.close()\n\n        self.exchangeid = \"\"\n        self.subscribe(self.symbol, self.exchangeid)\n\n    # ----------------------------------------------------------------------\n    def exit(self):\n        self.md = None\n\n    # ----------------------------------------------------------------------\n    def saveInstrument(self):\n        f = shelve.open('setting.vn')\n        d = {}\n        d['dictInstrument'] = self.dictInstrument\n        d['date'] = date.today()\n        f['instrument'] = d\n        f.close()\n","sub_path":"mdserver/instrument.py","file_name":"instrument.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}{"seq_id":"319464478","text":"# Given an integer array, determine whether any element appears more than once. \n# \n# If any value appears in the array at least twice, the function returns true; if every element is distinct, it returns false. \n# \n# \n# \n# Example 1: \n# \n# Input: [1,2,3,1]\n# Output: true \n# \n# Example 2: \n# \n# Input: [1,2,3,4]\n# Output: false \n# \n# Example 3: \n# \n# Input: [1,1,1,3,3,4,3,2,4,2]\n# Output: true \n# Related Topics: Array, Hash Table \n# 👍 292 👎 0\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nfrom typing import List\n\n\nclass Solution:\n    def containsDuplicate(self, nums: List[int]) -> bool:\n        if nums is None or len(nums) == 0:\n            return False\n        d = {}\n        for n in nums:\n            if n in d:\n                return True\n            else:\n                d[n] = n\n        return False\n\n\n# leetcode submit region end(Prohibit modification and deletion)\n\n# s = Solution()\n# print(s.containsDuplicate([1, 2, 3, 4]))\n# print(s.containsDuplicate([1, 2, 3, 1]))\n\nimport random\n\n\ndef check(nums):\n    a = nums[0]\n    for i in nums[1:]:\n        if a <= i:\n            a = i\n        else:\n            return False\n    return True\n\n\ndef monkey_sort(nums):\n    fuck = 0\n    while True:\n        if check(nums):\n            break\n        random.shuffle(nums)\n        fuck += 1\n    print(\"monkey done. 
%d\" % fuck)\n    print(nums)\n\n\nmonkey_sort([3, 5, 6, 1])\nmonkey_sort([1, 2, 3, 4])\nmonkey_sort([1, 3, 2, 4])\n","sub_path":"python/leetcode/editor/cn/[217]存在重复元素.py","file_name":"[217]存在重复元素.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}{"seq_id":"422588124","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals, division, print_function, absolute_import\n\nimport testdata\nimport pout  # debug-print helper used by the tests below\n\nfrom popbak import EmailMsg, ByteString\n\n\nclass TestCase(testdata.TestCase):\n    def get_email(self, fileroot):\n        body = testdata.get_contents(fileroot)\n        lines = body.splitlines(False)\n        contents = (b'+OK message follows', [ByteString(l) for l in lines], len(lines))\n        return EmailMsg(1, contents)\n\n\nclass EmailMsgTest(TestCase):\n    def test_parse_multipart(self):\n        em = self.get_email(\"emoji-html-attachment\")\n\n        self.assertTrue(em.has_attachments())\n        self.assertEqual(1, len(list(em.attachments())))\n        self.assertEqual(\"foo@example.com\", em.from_addr)\n\n        emoji = b'\\xf0\\x9f\\x98\\x82\\xf0\\x9f\\x98\\x8e\\xf0\\x9f\\x91\\x8d'\n        self.assertTrue(emoji in em.plain)\n        self.assertTrue(emoji in em.html)\n\n    def test_parse_simple(self):\n        em = self.get_email(\"simple-text\")\n\n        self.assertFalse(em.has_attachments())\n        self.assertEqual(\"\", em.html)\n\n        shrug = b'\\xc2\\xaf\\\\_(\\xe3\\x83\\x84)_/\\xc2\\xaf'\n        self.assertTrue(shrug in em.plain)\n\n    def test_parse_subject_multi_to(self):\n        em = self.get_email(\"no-subject\")\n        self.assertEqual(2, len(em.to_addrs))\n        self.assertTrue(\"(no subject)\" in em.subject)\n\n    def test_parse_cc(self):\n        em = self.get_email(\"cc\")\n        self.assertEqual(\"foo@example.com\", em.from_addr)\n\n    def test_save(self):\n        basedir = testdata.create_dir()\n        em = self.get_email(\"emoji-html-attachment\")\n        em.save(basedir)\n        pout.v(basedir)\n\n        em = self.get_email(\"cc\")\n        em.save(basedir)\n\n        em = self.get_email(\"no-subject\")\n        em.save(basedir)\n\n        em = self.get_email(\"simple-text\")\n        em.save(basedir)\n\n    def test_bad_subject(self):\n        em = self.get_email(\"bad-1\")\n        self.assertEqual(\n            \"PitchBook PE & VC News: Changing Course — PE Pivots Away from B2C Education, Toward B2B\",\n            em.subject\n        )\n\n    def test_bad_2(self):\n        basedir = testdata.create_dir()\n        em = self.get_email(\"bad-2\")\n        em.save(basedir)\n        pout.v(basedir)\n\n        email_dir = basedir.children[0].children[0]\n        email_dir = basedir.first_dir().first_dir()\n        email_dir = basedir.child_dir().child_dir()\n        self.assertEqual(5, len(email_dir.files))\n\n\n        pout.v(em.recipient_addrs)\n\n","sub_path":"popbak_test.py","file_name":"popbak_test.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}{"seq_id":"534092018","text":"# coding:utf-8\nimport json\nfrom django.http import HttpResponse\nfrom django.views.generic import View\n\n\nclass JSONView(View):\n    def dispatch(self, request, *args, **kwargs):\n        if request.method.lower() in self.http_method_names:\n            handler = getattr(self, request.method.lower(),\n                              self.http_method_not_allowed)\n        else:\n            handler = self.http_method_not_allowed\n\n        data = None\n\n        if request.method == 'POST':\n            data = json.loads(request.body)\n        response = handler(request, data, *args, **kwargs)\n        return HttpResponse(\n            content=json.dumps(response),\n            content_type='application/json'\n        ) if type(response) in (dict, list) else 
response\n","sub_path":"sportssite/common/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}{"seq_id":"47887421","text":"import pickle\nimport struct\nimport socket\nimport threading\n\nBUFSIZE = 64\nPORT = 2020\nIP = socket.gethostbyname(socket.gethostname())\nADDR = (IP, PORT)\n\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver.bind(ADDR)\n\ndef divisors(n: int) -> list:\n    res = []\n    for i in range(2, n//2+1):\n        if n % i == 0:\n            res.append(i)\n    return res\n\ndef handle_client(conn: socket.socket, addr):\n    print(f\"Active connections: {threading.activeCount() - 1}\")\n    connected = True\n    while connected:\n        n = conn.recv(BUFSIZE)\n        if n:\n            n = struct.unpack(\"!H\", n)[0]\n            #n = pickle.loads(n)\n            if n == 0:\n                connected = False\n            divs = divisors(n)\n            length = len(divs)\n            conn.send(struct.pack(\"!H\", length))\n            for div in divs:\n                conn.send(struct.pack(\"!H\", div))\n            #conn.send(pickle.dumps(divs))\n    print(f\"Client {addr} disconnected\")\n    conn.close()\n\ndef start():\n    server.listen()\n    print(f\"Server is listening on {IP}\")\n    while True:\n        conn, addr = server.accept()\n        thread = threading.Thread(target=handle_client, args=(conn, addr))\n        thread.start()\n\nif __name__ == '__main__':\n    print(\"Server is starting...\")\n    start()","sub_path":"lab1/p5/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}{"seq_id":"600686777","text":"import cv2\nimport numpy as np\nimport math\nimport filters\n\nclass ROI():\n    def __init__(self, arc_image, image, cnt):\n        hull = cv2.convexHull(cnt)\n\n        x, y, w, h = cv2.boundingRect(hull)\n        rect = cv2.minAreaRect(hull)\n\n        roi_mask = np.zeros(image.shape[0:2], np.uint8)\n        cv2.drawContours(roi_mask, [hull], 0, 255, -1)\n        image_masked = cv2.bitwise_and(image, image, mask=roi_mask)\n        \n        self.roi = image_masked[y:y+h, x:x+w]\n        self.rect = rect\n        self.hull = hull\n        self.arc_image = arc_image\n        self.image = image\n        if not self.validate():\n            raise ValueError(\"Failed validation test.\")\n    \n    def validate(self):\n        #check area of the hull compared to the area of the rect\n        hull_area = cv2.contourArea(self.hull)\n        rect_cnt = cv2.boxPoints(self.rect)\n        rect_cnt = np.int0(rect_cnt)\n        rect_area = cv2.contourArea(rect_cnt)\n\n        if (rect_area*.7) > hull_area:\n            return False\n\n        #Calculate aspect ratio of rotated bounding box\n        tl, tr, br, bl = self.order_points(rect_cnt)\n        if self.dist(tl, bl) == 0:\n            ar = 0\n        else:\n            ar = self.dist(tl, tr)/self.dist(tl, bl) \n        \n        if not (0.3 < ar < 3):\n            return False\n        \n        return True\n\n    def order_points(self, pts):\n        s = pts.sum(axis = 1)\n        diff = np.diff(pts, axis = 1)\n        tl = pts[np.argmin(s)]\n        tr = pts[np.argmin(diff)]\n        br = pts[np.argmax(s)]\n        bl = pts[np.argmax(diff)]\n        \n        return (tl, tr, br, bl)\n    \n    def dist(self, pt1, pt2):\n        x1, y1 = pt1\n        x2, y2 = pt2\n        return math.sqrt((x2-x1)*(x2-x1) + (y2-y1)*(y2-y1))\n","sub_path":"ROI.py","file_name":"ROI.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}{"seq_id":"449644984","text":"'''\nGet all the running instances and stop them\n'''\n\nimport boto3\n\nec2 = boto3.resource('ec2')\n\n# Filter by instance state\n\nfilter =[\n    {\n        'Name': 'instance-state-name',\n        'Values': [\n            'running'\n        ]\n    }\n    ]\n\ninstances = 
ec2.instances.filter(Filters=filter)\nfor x in instances:\n print(x)\n x.stop()","sub_path":"stop_running_ec2_instances.py","file_name":"stop_running_ec2_instances.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"511343959","text":"import os\nimport subprocess\n\nfrom nmigen.build import *\nfrom nmigen.vendor.lattice_ice40 import *\nfrom nmigen_boards.resources import *\n\n__all__ = [\"TinyFPGABXPlatformCustomFreq\"]\n\nFREQ = 10\n\n# Run: icetime fpga_mt_rand/src/build/top.asc\n# to get timing analysis\nclass TinyFPGABXPlatformCustomFreq(LatticeICE40Platform):\n device = \"iCE40LP8K\"\n package = \"CM81\"\n default_clk = \"clk\" + str(FREQ)\n resources = [\n Resource(\"clk\" + str(FREQ), 0, Pins(\"B2\", dir=\"i\"),\n Clock(FREQ * 1e6), Attrs(IO_STANDARD=\"SB_LVCMOS\")),\n\n *LEDResources(pins=\"B3\", attrs=Attrs(IO_STANDARD=\"SB_LVCMOS\")),\n\n Resource(\"usb\", 0,\n Subsignal(\"d_p\", Pins(\"B4\", dir=\"io\")),\n Subsignal(\"d_n\", Pins(\"A4\", dir=\"io\")),\n Subsignal(\"pullup\", Pins(\"A3\", dir=\"o\")),\n Attrs(IO_STANDARD=\"SB_LVCMOS\")\n ),\n\n *SPIFlashResources(0,\n cs=\"F7\", clk=\"G7\", mosi=\"G6\", miso=\"H7\", wp=\"H4\", hold=\"J8\",\n attrs=Attrs(IO_STANDARD=\"SB_LVCMOS\")\n ),\n ]\n connectors = [\n Connector(\"gpio\", 0,\n # Left side of the board\n # 1 2 3 4 5 6 7 8 9 10 11 12 13\n \" A2 A1 B1 C2 C1 D2 D1 E2 E1 G2 H1 J1 H2 \"\n # Right side of the board\n # 14 15 16 17 18 19 20 21 22 23 24\n \" H9 D9 D8 C9 A9 B8 A8 B7 A7 B6 A6 \"\n # Bottom of the board\n # 25 26 27 28 29 30 31\n \"G1 J3 J4 G9 J9 E8 J2\"\n ),\n ]\n\n def toolchain_program(self, products, name):\n tinyprog = os.environ.get(\"TINYPROG\", \"tinyprog\")\n with products.extract(\"{}.bin\".format(name)) as bitstream_filename:\n subprocess.check_call([tinyprog, \"-p\", bitstream_filename])\n","sub_path":"fpga_mt_rand/src/tinyfpga_bx.py","file_name":"tinyfpga_bx.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"562740531","text":"from math import *\nfrom tkinter import *\n\nSIZE_X = 600\nSIZE_Y = 500\nSTEP = 10\n\n\nclass Koch:\n def __init__(self, canvas):\n self.canvas = canvas\n\n def make(self, level):\n r = SIZE_Y / 2 - STEP\n x1 = SIZE_X / 2\n y1 = SIZE_Y / 2 - r * sin(pi / 2)\n x2 = SIZE_X / 2 + r * cos(pi / 6)\n y2 = SIZE_Y / 2 - r * sin(-pi / 6)\n x3 = SIZE_X / 2 - r * cos(-pi / 6)\n y3 = SIZE_Y / 2 - r * sin(-pi / 6)\n self.segment(x1, y1, x2, y2, level)\n self.segment(x2, y2, x3, y3, level)\n self.segment(x3, y3, x1, y1, level)\n\n def segment(self, x1, y1, x2, y2, lev):\n if lev > 0:\n angle = atan2(y2 - y1, x2 - x1)\n r = sqrt((x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1))\n\n new_x1 = x1 + r * cos(angle) / 3\n new_y1 = y1 + r * sin(angle) / 3\n new_x2 = new_x1 + r * cos(angle - pi / 3) / 3\n new_y2 = new_y1 + r * sin(angle - pi / 3) / 3\n new_x3 = x1 + 2 * r * cos(angle) / 3\n new_y3 = y1 + 2 * r * sin(angle) / 3\n\n self.segment(x1, y1, new_x1, new_y1, lev - 1)\n self.segment(new_x1, new_y1, new_x2, new_y2, lev - 1)\n self.segment(new_x2, new_y2, new_x3, new_y3, lev - 1)\n self.segment(new_x3, new_y3, x2, y2, lev - 1)\n else:\n self.canvas.create_line(x1, y1, x2, y2, fill=\"black\")\n\n\nroot = Tk()\nroot.title(\"Lab4.Made by Vitaliy Viflinzider\")\nc = Canvas(root, width=SIZE_X, height=SIZE_Y, bg=\"white\")\nc.pack()\ns = 
Koch(c)\ns.make(3)\n\nroot.mainloop()\n","sub_path":"Lab4/koh.py","file_name":"koh.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}{"seq_id":"368342210","text":"# -*- coding: utf-8 -*-\n'''\nThis package defines \n    - an abstract Game class\n    - an abstract Player class\n    - a Human_player class\n    \n\nCreated on Sat 20 Aug 2016\n\n@author: f.maire@qut.edu.au\n    \n    \n'''\n\n# for python 2 compatibility\ntry:\n    input = raw_input\nexcept NameError:\n    pass\n\n\nclass IllegalMove(Exception):\n    pass\n\n\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \n\nclass Game(object):\n    '''\n    Abstract class to represent a game (match) between two players.\n    An instance of this class manages the history of the moves, \n    the current state of the board, and whose turn it is.\n    \n    Attributes: \n        self.turn : color of the next player to move (-1 Black or +1 White)\n        self.history : list of the moves played so far\n        \n    '''\n\n    def display(self):\n        '''\n        Display the current state\n        '''\n        raise NotImplementedError  # could raise a NotImplemented exception\n\n    def clone(self):\n        '''\n        Make a clone of this game.\n        Implementation left to subclasses.\n        '''\n        raise NotImplementedError\n\n    def do_move(self, m, color=None):\n        '''\n        Perform move 'm' \n        if color == None, \n            put a stone of color 'self.turn' \n            update self.turn\n        else:\n            add a stone of color 'color' but\n            do not update self.turn\n        '''\n        raise NotImplementedError\n\n    def undo_move(self):\n        raise NotImplementedError\n\n    def is_terminal(self):\n        '''\n        Return True if the game is over\n        otherwise return False\n        '''\n        raise NotImplementedError\n\n    def legal_moves(self):\n        '''\n        Return the list of legal moves for the current player\n        '''\n        raise NotImplementedError\n\n    def set_turn(self, c):\n        self.turn = c\n\n\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \n\n\nclass Player(object):\n    '''\n    Abstract player class\n    \n    Attributes \n        self.color : -1 or +1 \n        self.game : own copy of the game\n    '''\n\n    def play(self, game, opp_move):\n        '''\n        Given 'opp_move', the last move of the opponent \n        return a move \n        '''\n        raise NotImplementedError\n\n    def set_color(self, c):\n        self.color = c\n\n\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n\nclass Human_player(Player):\n    '''\n    Allow a user player to play a game at the console\n    '''\n\n    def __init__(self, game):\n        self.game = game  # player's own private copy of the board\n\n    def play(self, opp_move):\n        if opp_move:\n            self.game.do_move(opp_move)\n        while True:\n            # move is an integer specifying the index of the cell played\n            m = int(input(\"Your Move -> \"))\n            if m not in self.game.legal_moves():\n                print (\"Error: illegal move\")\n                continue\n            break\n        self.game.do_move(m)\n        return m\n\n    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n\n\ndef play(game, player1, player2, verbose=1):\n    ''' \n    Run a game between two players. 
\n Can raise IllegalMove exception\n \n PRE:\n - players have been created\n '''\n\n # Create dictionary color -> player\n assert player1.color == -player2.color\n if player1.color == -1:\n dict_color_player = {-1: player1, +1: player2}\n else:\n dict_color_player = {-1: player2, +1: player1}\n\n last_move = None\n\n if verbose:\n print ('** Player {} starts **'.format('Black' if game.turn == -1 else 'White'))\n\n while not game.is_terminal():\n # get the next move from current player\n if verbose:\n game.display()\n print ('Player {} to move '.format('Black' if game.turn == -1 else 'White'))\n print(\"legal moves: \" + str(game.legal_moves()))\n move = dict_color_player[game.turn].play(last_move)\n if move not in game.legal_moves():\n raise IllegalMove\n # update the master board\n game.do_move(move)\n # print(move, game.legal_moves(), game.history)\n last_move = move\n\n # display terminal state\n if verbose:\n print('** Game over **')\n game.display()\n\n\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n\n\nclass Minmax_player(Player):\n '''\n A player that uses minmax to computes its moves.\n\n Attributes:\n self.maxply : depth of the lookahead (default is 2)\n self.eval_fn : the evaluation function to use at the leaves\n '''\n\n def __init__(self, game, eval_fn=lambda x: 0):\n self.game = game # player's own copy of the board\n self.maxply = 0 #\n self.eval_fn = eval_fn # default evaluation function\n\n def play(self, opp_move):\n '''\n Return a move using the minmax algorithm\n \n PRE:\n Game state not terminal \n '''\n if opp_move:\n self.game.do_move(opp_move)\n\n best = None # pair (value, move)\n # try each move\n # print(self.game.legal_moves())\n for m in self.game.legal_moves():\n self.game.do_move(m)\n\n val = self.minimax_value(self.maxply)\n\n if best is None or val > best[0]:\n best = (val, m)\n\n self.game.undo_move()\n m = best[1] # best move found\n\n # need to play this move on our own copy of the board\n self.game.do_move(m)\n return m\n\n def minimax_value(self, maxply):\n \"\"\"Find the utility value of the game state w.r.t. 
the current player.\"\"\"\n\n        # if we have reached the maximum depth, the utility is approximated\n        # with the evaluation function\n        if maxply == 0 or self.game.is_terminal():\n            return self.eval_fn(self.game)\n\n        best_val = None  # just a value, not a move\n\n        # try each move\n        for m in self.game.legal_moves():\n\n            # “INSERT YOUR CODE HERE”\n            # print(maxply)\n            self.game.do_move(m)\n            score = self.minimax_value(maxply - 1)\n            self.game.undo_move()\n            # even plies maximize, odd plies minimize; keep the best score seen so far\n            if maxply % 2 == 0:\n                if best_val is None or score > best_val:\n                    best_val = score\n            else:\n                if best_val is None or score < best_val:\n                    best_val = score\n\n        return best_val\n","sub_path":"w5/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":6634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}{"seq_id":"81440091","text":"# Copyright (C) 2020 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions\n# and limitations under the License.\n\nimport os\n\nfrom common.action_recognition_test_case import create_action_recognition_export_test_case  # pylint: disable=import-error\n\n\nkwargs = dict(\n    problem_name='gesture-recognition',\n    ann_file='test.txt',\n    img_root=os.path.dirname(__file__) + '/../../../data/gesture_recognition'\n)\n\n\nclass GestureRecognitionExportTestCaseJester(\n        create_action_recognition_export_test_case(\n            model_name='s3d-rgb-mobilenet-v3-stream-jester',\n            **kwargs,\n        )\n):\n    \"\"\" Test case for s3d-rgb-mobilenet-v3-stream-jester model export. \"\"\"\n\n\nclass GestureRecognitionExportTestCaseMSASL(\n        create_action_recognition_export_test_case(\n            model_name='s3d-rgb-mobilenet-v3-stream-msasl',\n            **kwargs,\n        )\n):\n    \"\"\" Test case for s3d-rgb-mobilenet-v3-stream-msasl model export. \"\"\"\n\nclass GestureRecognitionExportTestCaseCSL(\n        create_action_recognition_export_test_case(\n            model_name='s3d-rgb-mobilenet-v3-stream-csl',\n            **kwargs,\n        )\n):\n    \"\"\" Test case for s3d-rgb-mobilenet-v3-stream-csl model export. 
\"\"\"\n","sub_path":"models/action_recognition/tests/export_tests_gesture_recognition.py","file_name":"export_tests_gesture_recognition.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"469429530","text":"# Dependencies\nimport os\nimport csv\n\n# Create file path\ncsvpath = os.path.join('Resources', 'budget_data.csv')\n\n# Lists to store data\nmonths = []\nrevenue = []\n\n# Read the CSV file\nwith open(csvpath, newline='') as csvfile:\n # CSV reader specifies delimiter and variable that holds contents\n csvreader = csv.reader(csvfile, delimiter=',')\n\n # Skip header\n next(csvreader, None)\n\n #Store month and revenue data in empty arrays\n for row in csvreader:\n months.append(row[0])\n revenue.append(int(row[1]))\n\n # The total number of months included in the dataset\n total_months = len(months)\n print(total_months)\n\n # The total net amount of \"Profit/Losses\" over the entire period\n # The greatest increase in profits (date and amount) over the entire period\n # The greatest decrease in losses (date and amount) over the entire period\n greatest_inc = revenue[0]\n greatest_dec = revenue[0]\n total_revenue = 0\n \n for r in range(len(revenue)):\n if revenue[r] >= greatest_inc:\n greatest_inc = revenue[r]\n great_inc_month = months[r]\n elif revenue [r] <= greatest_dec:\n greatest_dec = revenue[r]\n great_dec_month = months[r]\n total_revenue += revenue[r]\n \n # The average change in \"Profit/Losses\" between months over the entire period\n average_change = round(total_revenue/total_months, 2)\n\n print(average_change)\n print(greatest_dec)\n print(great_dec_month)\n print(greatest_inc)\n print(great_inc_month)\n print(total_revenue)\n\n print(\"Financial Analysis\")\n print(\"--------------------------------------------\")\n print(\"Total Months: \" + str(total_months))\n print(\"Total Revenue: $\" + str(total_revenue))\n print(\"Average Revenue Change: $\" + str(average_change))\n print(\"Greatest Increase in Revenue: \" +\n great_inc_month + \" ($\" + str(greatest_inc) + \")\")\n print(\"Greatest Decrease in Revenue: \" +\n great_dec_month + \" ($\" + str(greatest_dec) + \")\")\n\nf = open(\"Financial_Analysis.txt\", \"w\")\n\nf.write(str(average_change))\nf.write(str(greatest_dec))\nf.write(str(great_dec_month))\nf.write(str(greatest_inc))\nf.write(str(great_inc_month))\nf.write(str(total_revenue))\n\nf.close()\n","sub_path":"PyBank/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"598504528","text":"from mdns import MDNSOutgoingPacket, MDNSQueryRecord\nfrom mdns import _FLAGS_QR_QUERY, _CLASS_IN, _TYPE_PTR\nfrom mdns import MDNSIncomingPacket\nfrom mdns import _TYPE_SRV, _TYPE_TXT\n\nfrom twisted.internet import reactor\nfrom twisted.internet import defer\nfrom twisted.internet.task import LoopingCall\n\nclass ServiceBrowser(object):\n \n def __init__(self, zeroconf): \n self.service = ''\n self.discoverer = zeroconf\n self.discoverer.callback = self.update\n self.loopbrowse = LoopingCall(self.browse)\n reactor.callWhenRunning(self.browse)\n self.loopbrowse.start(120, False)\n \n def addService(self, service):\n self.defer = defer.Deferred()\n self.service = service\n return self.defer\n \n def browse(self):\n self.query = MDNSQueryRecord(self.service, _TYPE_PTR, _CLASS_IN)\n mdnsOut = MDNSOutgoingPacket(_FLAGS_QR_QUERY)\n 
mdnsOut.addQuery(self.query)\n \n self.discoverer.sendDatagram(mdnsOut.packet())\n\n def update(self, packet):\n if packet.isResponse(): \n for answer in packet.answerRecords:\n if self.query.isAnsweredBy(answer) and self.defer is not None:\n d, self.defer = self.defer, None\n d.callback(packet)\n","sub_path":"bonjour/service_browser.py","file_name":"service_browser.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"356397591","text":"from osgeo import ogr\nfrom osgeo.osr import SpatialReference\nimport json\n\nclass GMLParser:\n def __init__(self):\n self.points_dict = {}\n\n '''\n All functions that start with \"getPoints\" return list of lists of coordinates\n MultiPolygon and MultiLineString may have few parcels within them (few lists of coordinates)\n while MultiPoint, Polygon and LineString, describe only one parcel (one list of coordinates)\n\n !!!!! Points are in the form of (Longitude, Latitude) !!!!!\n '''\n def getPointsFromMultipolygon(self, geometry):\n polygonCount = geometry.GetGeometryCount()\n points = []\n for i in range(polygonCount):\n polygon = geometry.GetGeometryRef(i)\n points.append(self.getPointsFromPolygon(polygon)[0])\n return points\n\n def getPointsFromMultilinestring(self, geometry): #not sure\n lineStringCount = geometry.GetGeometryCount()\n points = []\n for i in range(lineStringCount):\n lineString = geometry.GetGeometryRef(i)\n points.append(self.getPointsFromLineString(lineString)[0])\n return [points]\n\n def getPointsFromPolygon(self, geometry):\n linearRing = geometry.GetGeometryRef(0)\n points = linearRing.GetPoints()\n return [points]\n\n def getPointsFromLineString(self, geometry): # not sure\n line = geometry.GetGeometryRef(0)\n points = line.GetPoints()\n return [points]\n\n def getPointsFromMultipoint(self, geometry): #not sure\n points = geometry.GetPoints()\n return [points]\n\n def getPointFromPoint(self, geometry):\n point = (geometry.getX(), geometry.getY())\n return [[point]]\n\n def getPoints(self, geometry):\n gtype = geometry.GetGeometryType()\n name = geometry.GetGeometryName()\n if gtype == 6 and name == \"MULTIPOLYGON\":\n return self.getPointsFromMultipolygon(geometry)\n elif gtype == 5 and name == \"MULTILINESTRING\": #not sure\n return self.getPointsFromMultilinestring(geometry)\n elif gtype == 4 and name == \"MULTIPOINT\": #not sure\n return self.getPointsFromMultipoint(geometry)\n elif gtype == 3 and name == \"POLYGON\":\n return self.getPointsFromPolygon(geometry)\n elif gtype == 2 and name == \"LINESTRING\": #not sure\n return self.getPointsFromLineString(geometry)\n elif gtype == 1 and name == \"POINT\": #not sure\n return self.getPointFromPoint(geometry)\n else:\n print(\"GMLParser: Unrecognized geometry type: \", name)\n return -1\n\n\n def getCoordinatesDictionary(self):\n return self.points_dict\n\n\n def parse(self, GMLfile):\n ogr.RegisterAll()\n inSource = ogr.Open(GMLfile)\n self.points_dict = {}\n for layerIndex in range(inSource.GetLayerCount()):\n ############################### LAYER #######################################\n inLayer = inSource.GetLayer(layerIndex)\n inLayer.ResetReading() # not neccessary, ensures iterating from begining\n\n ############################### FEATURE #####################################\n for featureIndex in range(inLayer.GetFeatureCount()):\n feature = inLayer.GetNextFeature()\n\n ############################### GEOMETRY #####################################\n geometry = 
feature.GetGeometryRef()\n                coord_system = geometry.GetSpatialReference()\n\n                targetReference = SpatialReference()\n                targetReference.ImportFromEPSG(4326) # WGS84\n                geometry.TransformTo(targetReference)\n\n                points = self.getPoints(geometry)\n                # print(points)\n\n                entryName = \"Layer-\" + str(layerIndex) + \" Feature-\" + str(featureIndex)\n                self.points_dict[entryName] = points\n                if 'coordinates' in self.points_dict:\n                    self.points_dict['coordinates'] = self.points_dict['coordinates'] + points\n                else:\n                    self.points_dict['coordinates'] = points\n\n\n    def exportToJSON(self):\n        with open('WGS84_coordinates_from_GML.json', 'w') as file:\n            json.dump(self.points_dict, file, indent=4)\n\n\nif __name__ == '__main__':\n    inSource = \"/home/ivan/Downloads/katastarski_plan_CESTICE.gml\"\n    # inSource = \"/home/ivan/Downloads/Building_9620123VK0192B.gml\"\n    # inSource = \"/home/ivan/Downloads/Building_9531109VK0193B.gml\"\n    # inSource = \"/home/ivan/Downloads/Building_9642901VK3794B.gml\"\n\n    parser = GMLParser()\n    parser.parse(inSource)\n    print(parser.getCoordinatesDictionary())\n","sub_path":"scripts/GMLParser.py","file_name":"GMLParser.py","file_ext":"py","file_size_in_byte":4717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}{"seq_id":"2351929","text":"#!/usr/bin/env python\n\nimport sys, logging\n\nDEFAULT_FILENAME = 'flight_paths.txt'\n\nif len(sys.argv) > 1:\n    infilename = sys.argv[1]\nelse:\n    infilename = DEFAULT_FILENAME\n\nlogger = logging.getLogger('coconut_delivery')\nlogger.setLevel(logging.INFO)\nlogger.addHandler(logging.StreamHandler())\n\npathfile = open(infilename)\n\nconst_energy = None\n\njet_streams = []\n\nfor line in pathfile:\n    if not const_energy:\n        const_energy = int(line)\n    else:\n        jet_streams.append([ int(i) for i in line.split(' ')])\n\nlogger.info(\"Constant Energy required to fly one mile: %s\" % const_energy)\nlogger.info(\"Jet Streams: %s\" % jet_streams)\n\nposition = 0\nold_position = None\nenergy_consumed = 0\nfor stream in jet_streams:\n    start = stream[0]\n    end = stream[1]\n    energy_required = stream[2]\n    if start == position:\n        old_position = position\n        position = end\n        energy_consumed += energy_required\n        logger.info(\"Moved from %s to %s using %s energy\" % (old_position, position, energy_required))\n    elif start > position:\n        energy_consumed += (start - position) * const_energy\n    else:\n        logger.info(\"Did not move from %s to %s using %s energy\" % (position, end, energy_required))\n\nlogger.info(\"Moved %s positions using %s total energy\" % (position, energy_consumed))\n\n\n","sub_path":"delivery.py","file_name":"delivery.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}{"seq_id":"126145105","text":"from django.shortcuts import render, HttpResponse\nfrom app.models import *\nfrom user.models import *\n\n\n# Create your views here.\ndef index(request):\n    shoes = Shoes.objects.all()\n    banners = Banner_shoe.objects.all()\n    brank_list = Brank.objects.all()[:6]\n    category = Category.objects.all()\n\n    cdx = {\n        'banner_list1': banners[0],\n        'banner_list2': banners[1:],\n        'shoe_list': shoes,\n        'brank_list': brank_list,\n        'category': category,\n    }\n    return render(request, '../../xiangmu/templates/index.html', cdx)\n\n\nbrank_list = Brank.objects.all()[:6]\ncategory = Category.objects.all()\n\n\ndef products(request, tid=-1, bid=-1):\n    if tid != -1:\n        shoe1_list = Shoes.objects.filter(brand=tid)\n    elif bid != -1:\n        shoe1_list = 
Shoes.objects.filter(category=bid)\n else:\n shoe1_list = Shoes.objects.all()\n\n cdx = {\n 'shoe_list': shoe1_list,\n 'brank_list': brank_list,\n 'category': category,\n }\n return render(request, '../../xiangmu/templates/products.html', cdx)\n\n\ndef contact_as(request):\n return render(request, '../../xiangmu/templates/contact.html')\n\n\ndef single(request, sid):\n shoe = Shoes.objects.get(id=sid)\n cdx = {\n 'shoe':shoe,\n }\n return render(request, '../../xiangmu/templates/single.html',cdx)\n","sub_path":"xiangmu1/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"586738879","text":"#!/usr/bin/python\n\n\nimport math\nimport sys\nimport csv\n\ngraphFile = sys.argv[1]\n\nrowlist\t= []\n\nwith open(graphFile) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=';')\n\n for row in csv_reader:\n if row[0] == 'partial':\n cyc = int(row[1])\n qta = int(row[2])\n qtb = int(row[3])\n sample = int(row[5])\n rowlist.append([qta, cyc, 'A'])\n rowlist.append([qtb, cyc, 'B'])\n elif int(row[2]) != -1:\n qta = int(row[0])\n qtb = int(row[1])\n cycfix = int(row[2])\n cnext = int(math.ceil(float(cycfix)/500)*500)\n for i in range(cnext, 5001, 500):\n rowlist.append([qta, i, 'A'])\n rowlist.append([qtb, i, 'B'])\n\n\n\nwith open(\"split_\"+graphFile, \"a+\") as csv_file:\n\twriter = csv.writer(csv_file)\n\n\tfor roww in rowlist:\n\t\twriter.writerow(roww)\n","sub_path":"plot/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"284940902","text":"from apps.workers import worker_login\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext, loader\nfrom ..models import Mail, ImageMailAttachments\nfrom .forms import MailReplyForm, MailAddForm\nfrom .. 
import timeDelta\nfrom apps.clients.models import Client\nfrom apps.ladies.models import Lady, ImagesLady\nfrom apps.shop.dashboard.forms import ModelProduct\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404\nfrom django.db.models import Q\nfrom django.db import IntegrityError\nfrom django.contrib import messages\nfrom django.http import HttpResponse\nfrom django.views.decorators.csrf import csrf_protect\nfrom django.forms.models import modelformset_factory, formset_factory\nfrom custom_user.models import EmailUser\nfrom apps.dashboard.forms import EmailUserForm\nfrom apps.dashboard import custom_pagination\nfrom django.core.exceptions import PermissionDenied\nfrom apps.dashboard.middleware.http import Http403\nfrom apps.workers.models import Worker\nfrom apps.workers import check_permission\nfrom django.db import connection\nimport datetime\nimport json\n\n\n@worker_login\ndef mailbox(request):\n check_permission(['mails_view', 'mails_view_all', 'mails_all'], request.session.get('permission_list'))\n return render_to_response('dashboard/mailbox.html', locals(), context_instance=RequestContext(request))\n\n\n@worker_login\n@csrf_protect\ndef mails_loader(request):\n check_permission(['mails_view', 'mails_view_all', 'mails_all'], request.session.get('permission_list'))\n data = dict()\n if request.is_ajax():\n page = int(request.POST.get('page', 1))\n count_to_page = int(request.POST.get('count_to_page', 10))\n search = request.POST.get('search', '')\n ladies_list = list()\n\n query = ''\n if \"filters[only-sms]\" in request.POST and \"filters[only-first]\" in request.POST:\n query += \" AND (`first`=1 OR `type`='sms')\"\n else:\n if \"filters[only-first]\" in request.POST:\n query += ' AND `first`=1'\n if \"filters[only-sms]\" in request.POST:\n query += \" AND `type`='sms'\"\n\n if \"filters[not-smile]\" in request.POST:\n query += \" AND `type` NOT IN ('smile')\"\n\n if \"filters[not-free]\" in request.POST:\n query += \" AND `free`=0\"\n\n if not check_permission(['ladies_view_all'], request.session.get('permission_list'), return_bool=True):\n worker = Worker.objects.get(user=request.user)\n ladies = Lady.objects.filter(worker=worker).values('id')\n for lady in ladies:\n ladies_list.append(str(lady['id']))\n ladies_li = ','.join(ladies_list)\n query += \" AND `to` IN (%s)\" % ladies_li\n\n if search:\n try:\n s_id = int(search)\n if check_permission(['ladies_view_all'], request.session.get('permission_list'), return_bool=True):\n if str(s_id) in ladies_list:\n query += \" AND (`fr`=%d OR `to`=%d)\" % (s_id, s_id)\n else:\n query += \" AND (`fr`=%d)\" % s_id\n else:\n\n query += \" AND (`fr`=%d OR `to`=%d)\" % (s_id, s_id)\n except Exception:\n pass\n\n cursor = connection.cursor()\n c = connection.cursor()\n\n if query:\n cursor.execute(\"SELECT `fr`,`to`,`date_send`,`date_re`,`date_open`,`type_send`,`subject`,`first`,`id`,`type`,`free` FROM `mails` WHERE (`active`=1 AND `type_send`='client' %s) OR (`active`=1 AND `type_send`='client_re' %s) ORDER BY `date_send` ASC LIMIT %d,%d\" % (query, query, count_to_page*(page-1), count_to_page))\n c.execute(\"SELECT count(*) FROM `mails` WHERE (`active`=1 AND `type_send`='client' %s) OR (`active`=1 AND `type_send`='client_re' %s)\" % (query, query))\n else:\n cursor.execute(\"SELECT `fr`,`to`,`date_send`,`date_re`,`date_open`,`type_send`,`subject`,`first`,`id`,`type`,`free` FROM `mails` WHERE (`active`=1 AND `type_send`='client') OR (`active`=1 AND `type_send`='client_re') ORDER BY `date_send` ASC LIMIT 
%d,%d\" % (count_to_page*(page-1), count_to_page))\n c.execute(\"SELECT count(*) FROM `mails` WHERE (`active`=1 AND `type_send`='client') OR (`active`=1 AND `type_send`='client_re')\")\n\n mails_list = cursor.fetchall()\n count = c.fetchone()[0]\n\n pagination = custom_pagination(page, count, count_to_page)\n template = loader.get_template('dashboard/_mailsTpl.html')\n mails_html = ''\n for mail in [\n {\n 'fr': item[0], 'to': item[1], 'date_send': item[2], 'date_re': item[3],\n 'date_open': item[4], 'type_send': item[5], 'subject': item[6], 'first': item[7],\n 'id': item[8], 'type': item[9], 'free': item[10]\n } for item in mails_list\n ]:\n\n client = Client.objects.get(pk=mail['fr'])\n lady = Lady.objects.get(pk=mail['to'])\n data_c = {\n 'date_send': mail['date_send'],\n 'date_re': mail['date_re'],\n 'date_open': mail['date_open'],\n 'time_delta': timeDelta(mail['date_send']),\n 'type_send': mail['type_send'],\n 'type': mail['type'],\n 'from': client,\n 'to': lady,\n 'subject': mail['subject'],\n 'first': mail['first'],\n 'free': mail['free'],\n 'id': mail['id']\n }\n\n try:\n data_c['worker'] = lady.worker\n data_c['worker_id'] = lady.worker.id\n except Exception:\n data_c['worker'] = None\n data_c['worker_id'] = None\n\n mails_html += template.render(RequestContext(request, data_c))\n data = {'result': mails_html, 'pagination': pagination, 'count': count}\n\n return HttpResponse(json.dumps(data), content_type=\"application/json\")\n\n\n@worker_login\n@csrf_protect\ndef mails_last_loader(request):\n check_permission(['mails_view', 'mails_view_all', 'mails_all'], request.session.get('permission_list'))\n data = dict()\n if request.is_ajax():\n page = int(request.POST.get('page', 1))\n count_to_page = int(request.POST.get('count_to_page', 10))\n search = request.POST.get('search', '')\n mails_list = list()\n\n if search:\n pass\n else:\n if not check_permission(['ladies_view_all'], request.session.get('permission_list'), return_bool=True):\n worker = Worker.objects.filter(user=request.user)\n ladies = Lady.objects.filter(worker=worker).values('id')\n ladies_list = list()\n for lady in ladies:\n ladies_list.append(lady['id'])\n mails_list = Mail.objects.filter(Q(active=True, type_send='client', to__in=ladies_list, mail_re=None) | Q(active=True, type_send='client_re', to__in=ladies_list, mail_re=None))[count_to_page*(page-1):count_to_page*page]\n count = Mail.objects.filter(Q(active=True, type_send='client', to__in=ladies_list, mail_re=None) | Q(active=True, type_send='client_re', to__in=ladies_list, mail_re=None)).count()\n else:\n mails_list = Mail.objects.filter(Q(active=True, type_send='client', mail_re=None) | Q(active=True, type_send='client_re', mail_re=None))[count_to_page*(page-1):count_to_page*page]\n count = Mail.objects.filter(Q(active=True, type_send='client', mail_re=None) | Q(active=True, type_send='client_re', mail_re=None)).count()\n\n pagination = custom_pagination(page, count, count_to_page)\n\n template = loader.get_template('dashboard/_mailsTpl.html')\n mails_html = ''\n for mail in mails_list:\n client = Client.objects.get(pk=mail.fr)\n lady = Lady.objects.get(pk=mail.to)\n data_c = {\n 'date_send': mail.date_send,\n 'date_open': mail.date_open,\n 'time_delta': mail.time_delta,\n 'type_send': mail.type_send,\n 'from': client,\n 'to': lady,\n 'subject': mail.subject,\n 'first': mail.first,\n 'id': mail.id\n }\n\n try:\n data_c['worker'] = lady.worker\n data_c['worker_id'] = lady.worker.id\n except Exception:\n data_c['worker'] = None\n data_c['worker_id'] = None\n\n 
mails_html += template.render(RequestContext(request, data_c))\n data = {'result': mails_html, 'pagination': pagination, 'count': count}\n\n return HttpResponse(json.dumps(data), content_type=\"application/json\")\n\n\n@worker_login\n@csrf_protect\ndef mails_lady_loader(request):\n check_permission(['mails_view', 'mails_view_all', 'mails_all'], request.session.get('permission_list'))\n lady_id = int(request.POST.get('lady'))\n\n if not check_permission(['ladies_view_all'], request.session.get('permission_list'), return_bool=True):\n lady = get_object_or_404(Lady, pk=lady_id)\n try:\n if lady.worker.user.id != request.user.id:\n raise Http403\n except Exception:\n raise Http403\n\n data = dict()\n if request.is_ajax():\n page = int(request.POST.get('page', 1))\n count_to_page = int(request.POST.get('count_to_page', 10))\n search = request.POST.get('search', '')\n mails_list = list()\n\n if search:\n pass\n else:\n mails_list = Mail.objects.filter(Q(to=lady_id, active=True, type_send='client') | Q(to=lady_id, active=True, type_send='client_re')).order_by('-date_send')[count_to_page*(page-1):count_to_page*page]\n count = Mail.objects.filter(Q(to=lady_id, active=True, type_send='client') | Q(to=lady_id, active=True, type_send='client_re')).count()\n\n pagination = custom_pagination(page, count, count_to_page)\n\n template = loader.get_template('dashboard/_mailsLadyTpl.html')\n mails_html = ''\n for mail in mails_list:\n client = Client.objects.get(pk=mail.fr)\n data_c = {\n 'date_send': mail.date_send,\n 'date_re': mail.date_re,\n 'date_open': mail.date_open,\n 'type_send': mail.type_send,\n 'from': client,\n 'to': lady,\n 'first': mail.first,\n 'id': mail.id\n }\n mails_html += template.render(RequestContext(request, data_c))\n data = {'result': mails_html, 'pagination': pagination, 'count': count}\n\n return HttpResponse(json.dumps(data), content_type=\"application/json\")\n\n\n@worker_login\ndef mail_view(request, id):\n mail = get_object_or_404(Mail, pk=id)\n if not mail.date_open:\n mail.date_open = datetime.datetime.now()\n mail.worker_open = Worker.objects.get(user=request.user)\n mail.save()\n client = Client.objects.get(pk=mail.fr)\n return render_to_response('dashboard/mail_view.html', locals(), context_instance=RequestContext(request))\n\n\n@worker_login\n@csrf_protect\ndef new_mail_lady(request, id):\n check_permission(['ladies_view', 'ladies_view_all', 'ladies_all'], request.session.get('permission_list'))\n lady = get_object_or_404(Lady, pk=id)\n\n if not check_permission(['ladies_view_all'], request.session.get('permission_list'), return_bool=True):\n try:\n if lady.worker.user.id != request.user.id:\n raise Http403\n except Exception:\n raise Http403\n\n form = MailAddForm(request.POST or None, request.FILES or None)\n if request.method == 'POST':\n if form.is_valid():\n to = request.POST.get('to', '')\n subject = request.POST.get('subject', '')\n content = request.POST.get('content', '')\n new_message = Mail(subject=subject, fr=lady.id, to=to, content=content, type='mail', type_send='lady', worker_send=request.user.worker)\n new_message.save()\n\n ids = request.POST.getlist('image_attachments[]')\n if ids:\n for img in [int(x) for x in ids]:\n img_at = ImageMailAttachments(mail=new_message, image=img, fr=lady.id, to=to)\n img_at.save()\n\n new_message.attachment = True\n new_message.save()\n\n messages.add_message(request, messages.SUCCESS, 'Mail send')\n return HttpResponseRedirect('/dashboard/mailbox')\n\n return render_to_response('dashboard/mail_lady_add.html', locals(), 
context_instance=RequestContext(request))\n\n\n@worker_login\n@csrf_protect\ndef mail_reply(request, id):\n mail = get_object_or_404(Mail, pk=id)\n if mail.date_re:\n return HttpResponseRedirect('/dashboard/mailbox/%d' % mail.id)\n if not mail.date_open:\n mail.date_open = datetime.datetime.now()\n mail.worker_open = Worker.objects.get(user=request.user)\n mail.save()\n client = Client.objects.get(pk=mail.fr)\n lady = Lady.objects.get(pk=mail.to)\n form = MailReplyForm(request.POST or None, request.FILES or None)\n re_subject = 'Re: %s' % mail.subject\n\n if request.method == 'POST':\n subject = request.POST.get('subject', 'Re: %s' % mail.subject)\n content = request.POST.get('content', '')\n if form.is_valid():\n new_message = Mail(subject=subject, fr=lady.id, to=client.id, content=content, type='mail', type_send='lady_re', worker_re=request.user.worker)\n new_message.save()\n\n ids = request.POST.getlist('image_attachments[]')\n if ids:\n for img in [int(x) for x in ids]:\n img_at = ImageMailAttachments(mail=new_message, image=img, fr=lady.id, to=client.id)\n img_at.save()\n\n new_message.attachment = True\n new_message.save()\n\n mail.date_re = datetime.datetime.now()\n mail.mail_re = new_message\n mail.save()\n\n messages.add_message(request, messages.SUCCESS, 'Mail send')\n return HttpResponseRedirect('/dashboard/mailbox')\n return render_to_response('dashboard/mail_reply.html', locals(), context_instance=RequestContext(request))\n\n\n@worker_login\ndef mails_history(request):\n try:\n from_id = int(request.GET.get('from', ''))\n client = Client.objects.get(pk=from_id)\n to_id = int(request.GET.get('to', ''))\n lady = Lady.objects.get(pk=to_id)\n except Exception:\n messages.add_message(request, messages.ERROR, 'Error data!')\n\n check_permission(['mails_view', 'mails_view_all', 'mails_all'], request.session.get('permission_list'))\n\n if not check_permission(['mails_view_all'], request.session.get('permission_list'), return_bool=True):\n try:\n if lady.worker.user.id != request.user.id:\n raise PermissionDenied\n except Exception:\n raise PermissionDenied\n\n cursor = connection.cursor()\n cursor.execute(\"SELECT `fr`,date(`date_send`) AS d,`date_send`,`subject`,`content`,`type`,`first`,`free`,`attachment`, `id` FROM `mails` WHERE (`fr`=%d AND `to`=%d AND `active`=1) OR (`fr`=%d AND `to`=%d AND `active`=1) ORDER BY d ASC\" % (from_id, to_id, to_id, from_id))\n history = cursor.fetchall()\n\n history_list = list()\n item_date = None\n item_dict = dict()\n for item in history:\n if item[8]:\n attachments_data = ImageMailAttachments.objects.filter(mail=item[9]).values('image', 'date_accept')\n attachments = ImagesLady.objects.filter(pk__in=[x['image'] for x in attachments_data])\n attachments_data_list = list()\n for item_att in attachments_data:\n if item_att['date_accept']:\n attachments_data_list.append(item_att['image'])\n else:\n attachments_data = None\n attachments_data_list = list()\n attachments = None\n\n if item[0] == from_id:\n if client.image:\n fr_img_none = False\n fr_img = client.image\n else:\n fr_img_none = True\n fr_img = '/static/img/boy.png'\n else:\n if lady.image:\n fr_img_none = False\n fr_img = lady.image\n else:\n fr_img_none = True\n fr_img = '/static/img/girl.png'\n\n if item_date != item[1]:\n if item_date:\n history_list.append(item_dict)\n\n item_date = item[1]\n item_dict = dict()\n item_dict['date'] = item_date\n item_dict['items'] = list()\n item_dict['items'].append({'fr': item[0], 'fr_img': fr_img, 'date_send': item[2], 'subject': item[3], 'content': 
item[4], 'type': item[5], 'first': item[6], 'free': item[7], 'fr_img_none': fr_img_none, 'attachment': item[8], 'attachments': attachments, 'attachments_data_list': attachments_data_list})\n else:\n item_dict['items'].append({'fr': item[0], 'fr_img': fr_img, 'date_send': item[2], 'subject': item[3], 'content': item[4], 'type': item[5], 'first': item[6], 'free': item[7], 'fr_img_none': fr_img_none, 'attachment': item[8], 'attachments': attachments, 'attachments_data_list': attachments_data_list})\n\n history_list.append(item_dict)\n\n return render_to_response('dashboard/mails_history.html', {'client': client, 'lady': lady, 'history_list': history_list, 'history': history}, context_instance=RequestContext(request))\n","sub_path":"apps/mails/dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":17668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"148363562","text":"import pickle \nfrom flask import Flask, render_template, request\n\napp= Flask(__name__)\nloadedModel= pickle.load(open('Model.pkl', 'rb'))\n\n@app.route('/')\ndef home():\n return render_template('iris.html')\n\n@app.route('/prediction', methods=['POST'])\ndef prediction():\n SepalLengthCm = request.form['SepalLengthCm']\n SepalWidthCm = request.form['SepalWidthCm']\n PetalLengthCm = request.form['PetalLengthCm']\n PetalWidthCm = request.form['PetalWidthCm']\n\n prediction = loadedModel.predict([[SepalLengthCm,SepalWidthCm,PetalLengthCm,PetalWidthCm]])[0]\n\n if prediction == 0:\n prediction = \"Setosa\"\n elif prediction == 1:\n prediction = \"Versicolor\"\n else:\n prediction = \"Virginica\"\n\n return render_template('iris.html',output=prediction)\n\nif __name__=='__main__':\n app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"258937113","text":"# -*- coding: utf-8 -*-\n# \nfrom Components.Converter.Converter import Converter\nfrom Components.Element import cached\nfrom Components.Language import language\n\n#simple and dirty translation\nif language.getLanguage() == 'pl_PL':\n translationsDict = {'Support for skin:': 'Wsparcie:', 'http://forum.dvhk.pl': 'http://forum.dvhk.to', 'Flash Free:': 'Wolne:',\n 'Load Avg:': 'Obciążenie CPU:', 'Uptime:': 'Czas pracy:', 'Memory:': 'Pamięć RAM:', 'Box Type:': 'Typ tunera:', 'Flash:': 'Pamięć Flash:',\n 'Flash Free:': 'Wolne:'}\nelse:\n translationsDict = {} \n \nclass j00zekTranslator(Converter, object):\n def __init__(self, LabelText):\n Converter.__init__(self, LabelText)\n self.translatedLabel = translationsDict.get(LabelText, LabelText)\n\n @cached\n def getText(self):\n return self.translatedLabel\n \n text = property(getText)\n","sub_path":"e2components/Components/Converter/j00zekTranslator.py","file_name":"j00zekTranslator.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"550979780","text":"def sequence(n):\n seq = []\n for i in range(n):\n a = 0\n for j in range(i):\n if seq[j] == seq[-j - 1]:\n a += 1\n seq.append(a)\n print(a)\n\n\nif __name__ == \"__main__\":\n num = int(input())\n sequence(num)\n","sub_path":"num_13/13.3.py","file_name":"13.3.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"547749854","text":"#!/usr/bin/env python\n#-*- 
coding: utf-8 -*-\n\nimport os\nimport sys\nimport rospy\nimport numpy as np\nimport pyrealsense2 as rs\n\nimport cv2\nfrom sensor_msgs.msg import Image, CompressedImage\nfrom cv_bridge import CvBridge, CvBridgeError\n\nclass RealsenseCamera(object):\n    \"\"\"Runs the Realsense camera.\"\"\"\n\n    def __init__(self):\n        # Set up global variables\n        self.bridge = CvBridge()\n\n        # Set up publishers\n        # self.color_image_pub = rospy.Publisher(\"camera/color/image_raw\", Image, queue_size=1)\n        # self.colorful_depth_image_pub = rospy.Publisher(\"camera/colorful_depth/image_raw\", Image, queue_size=1)\n        self.depth_image_pub = rospy.Publisher(\"camera/depth/image_raw\", Image, queue_size=1)\n        self.compressed_color_image_pub = rospy.Publisher(\"camera/color/image_raw/compressed\", CompressedImage, queue_size=1)\n\n        # Check connected devices\n        ctx = rs.context()\n        devices = ctx.query_devices()\n        for dev in devices:\n            print(\"Information about the connected camera:\")\n            print(\"Device : {}\".format(dev.get_info(rs.camera_info.name)))\n            print(\"S/N : {}\".format(dev.get_info(rs.camera_info.serial_number)))\n\n        # Configure the camera\n        # Create a pipeline\n        self.pipeline = rs.pipeline()\n\n        # Create a config and configure the pipeline to stream\n        rs_config = rs.config()\n        rs_config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)\n        rs_config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)\n\n        # Start streaming\n        profile = self.pipeline.start(rs_config)\n\n        # Getting the depth sensor's depth scale (see rs-align example for explanation)\n        depth_sensor = profile.get_device().first_depth_sensor()\n        depth_scale = depth_sensor.get_depth_scale()\n\n        # Create an align object\n        # rs.align allows us to perform alignment of depth frames to other frames\n        # The \"align_to\" is the stream type to which we plan to align depth frames.\n        align_to = rs.stream.color\n        self.align = rs.align(align_to)\n\n        # Run\n        self.start_camera()\n\n    def start_camera(self):\n        \"\"\"\n        \"\"\"\n        while not rospy.is_shutdown():\n            # Get frameset of color and depth\n            frames = self.pipeline.wait_for_frames()\n\n            aligned_frames = self.align.process(frames)\n\n            # Get aligned frames\n            aligned_depth_frame = aligned_frames.get_depth_frame() # aligned_depth_frame is a 640x480 depth image\n            color_frame = aligned_frames.get_color_frame()\n\n            # Intrinsics\n            aligned_depth_intrin = aligned_depth_frame.profile.as_video_stream_profile().intrinsics\n            # print(\"Aligned depth intrinsics : {}\".format(type(aligned_depth_intrin)))\n            # print(\"Aligned depth intrinsics coeffs : {}\".format(aligned_depth_intrin.coeffs))\n            # print(\"Aligned depth intrinsics fx : {}\".format(aligned_depth_intrin.fx))\n            # print(\"Aligned depth intrinsics fy : {}\".format(aligned_depth_intrin.fy))\n            # print(\"Aligned depth intrinsics height : {}\".format(aligned_depth_intrin.height))\n            # print(\"Aligned depth intrinsics width : {}\".format(aligned_depth_intrin.width))\n            # print(\"Aligned depth intrinsics model : {}\".format(aligned_depth_intrin.model))\n            # print(\"Aligned depth intrinsics ppx : {}\".format(aligned_depth_intrin.ppx))\n            # print(\"Aligned depth intrinsics ppy : {}\".format(aligned_depth_intrin.ppy))\n\n            \n            color_intrin = color_frame.profile.as_video_stream_profile().intrinsics\n            \n            # Validate that both frames are valid\n            if not aligned_depth_frame or not color_frame:\n                continue\n\n            # Convert images to numpy arrays\n            depth_image = np.asanyarray(aligned_depth_frame.get_data())\n            color_image = np.asanyarray(color_frame.get_data())\n\n            # Apply colormap on depth image (image must be converted to 8-bit per pixel first)\n            depth_colormap 
= cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)\n\n compressed_color_image = CompressedImage()\n compressed_color_image.header.stamp = rospy.Time.now()\n compressed_color_image.format = \"jpeg\"\n compressed_color_image.data = cv2.imencode('.jpg', color_image)[1].tostring()\n\n try:\n # self.color_image_pub.publish(self.bridge.cv2_to_imgmsg(color_image, \"bgr8\"))\n # self.colorful_depth_image_pub.publish(self.bridge.cv2_to_imgmsg(depth_colormap, \"bgr8\"))\n self.depth_image_pub.publish(self.bridge.cv2_to_imgmsg(depth_image, \"16UC1\"))\n self.compressed_color_image_pub.publish(compressed_color_image)\n\n except CvBridgeError as e:\n print(e)\n\nif __name__ == '__main__':\n rospy.init_node('pyrealsense2', anonymous=False)\n realsense_camera = RealsenseCamera()\n rospy.spin()\n","sub_path":"pedestrian_tracking/src/pyrealsense2.py","file_name":"pyrealsense2.py","file_ext":"py","file_size_in_byte":5066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"542892116","text":"import numpy as np\nimport cv2, cv\n\ndef diffImg(t0, t1, t2):\n d1 = cv2.absdiff(t2, t1)\n d2 = cv2.absdiff(t1, t0)\n return cv2.bitwise_and(d1, d2)\n\ndef diff(t0, t1):\n d1 = cv2.absdiff(t0, t1)\n return d1\n\ncap = cv2.VideoCapture(0)\nfunction = 1\nwindow = 'Video Imaging'\n\nten = 0\nlast10 = cv2.cvtColor(cap.read()[1], cv2.COLOR_BGR2GRAY)\nprev = cv2.cvtColor(cap.read()[1], cv2.COLOR_BGR2GRAY)\ncurrent = cv2.cvtColor(cap.read()[1], cv2.COLOR_BGR2GRAY)\nnext = cv2.cvtColor(cap.read()[1], cv2.COLOR_BGR2GRAY)\n\nwhile(True):\n # Read frame by frame\n ret, frame = cap.read()\n\n prev = current\n current = next\n next = cv2.cvtColor(cap.read()[1], cv2.COLOR_BGR2GRAY)\n\n # operations on the frame\n edges = cv2.Canny(frame, 100, 100)\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n current = cv2.GaussianBlur(current, (5,5), 0)\n difference = diffImg(prev, current, next)\n\n prev = current\n current = next\n next = cv2.cvtColor(cap.read()[1], cv2.COLOR_BGR2GRAY)\n\n # edge detect diff10\n diffedge = cv2.Canny(difference, 100, 100)\n\n # find contours\n ret,thresh = cv2.threshold(diffedge, 2, 255, cv2.THRESH_BINARY)\n cv2.GaussianBlur(frame, (5,5), 0)\n contours, hierarchy = cv2.findContours(thresh,cv2.RETR_CCOMP,cv2.CHAIN_APPROX_SIMPLE)\n\n cv2.drawContours(diffedge, contours, -1, (255,255,255), 3)\n\n for number in contours:\n [x, y, w, h] = cv2.boundingRect(number)\n cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)\n \n # if cv2.contourArea(number) > 50:\n # # sufficiently large contour to possibly be a number\n # [x, y, w, h] = cv2.boundingRect(number)\n \n # if h > 28:\n # # tall enough to possibly be a number\n # # draw the bounding box on the image\n # cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)\n # roi = thresh[y:y+h, x:x+w]\n # roi_small = cv2.resize(roi, (10, 10))\n\n\n if function == 1:\n cv2.imshow(window, frame)\n elif function == 2:\n cv2.imshow(window, edges)\n elif function == 3:\n cv2.imshow(\"regular\", frame)\n cv2.imshow(\"contours\", diffedge)\n cv2.imshow(window, difference)\n\n # Control window\n control = cv2.waitKey(1) & 0xFF\n if control == ord('q'):\n break\n elif control == ord('1'):\n function = 1\n elif control == ord('2'):\n function = 2\n elif control == ord('3'):\n function = 3\n\n# When everything done, release the capture\ncap.release()\ncv2.destroyAllWindows()","sub_path":"Programming - 
Python/MotionCapture/motionCapture.py","file_name":"motionCapture.py","file_ext":"py","file_size_in_byte":2570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"311673143","text":"from django.urls import path\r\nfrom . import views\r\n\r\n\r\nurlpatterns = [\r\n    path(\"\", views.home, name=\"home\"),\r\n    path(\"about\", views.about, name=\"about\"),\r\n    path(\"contact\", views.contact, name=\"contact\"),\r\n    path(\"form\", views.myform, name=\"myform\"),\r\n    path(\"formprocess\",views.process,name=\"process\"),\r\n    path('slist',views.studentlist.as_view(),name='s1')\r\n]\r\n","sub_path":"django/myproject/firstapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"275207938","text":"#!/usr/bin/python3\nif __name__ == '__main__':\n    from sys import argv\n    leng = len(argv)\n    if leng == 1:\n        print(\"0 arguments.\")\n    elif leng == 2:\n        print(\"1 argument:\")\n    else:\n        print(\"{} arguments:\".format(leng - 1))\n    for i in range(1, len(argv)):\n        print(\"{}: {}\".format(i, argv[i]))\n","sub_path":"0x02-python-import_modules/2-args.py","file_name":"2-args.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"108608837","text":"import scrapy\r\nfrom scrapy import Spider\r\nfrom scrapy.spiders import Rule,CrawlSpider # For setting rules and crawling; Rule and CrawlSpider handle rule setting and crawling, respectively\r\nfrom balaan.items import BTQitem # For saving the files # for sending requests by http # For response inspection\r\nfrom scrapy.linkextractors import LinkExtractor # To extract the link\r\nfrom scrapy.http import Request\r\nimport json\r\nimport logging\r\nimport datetime\r\nfrom scrapy.spiders import Spider\r\nfrom scrapy.selector import Selector\r\nfrom scrapy.spiders import CSVFeedSpider\r\nfrom scrapy import FormRequest\r\n\r\n\r\nlogging.getLogger()\r\nlogging.basicConfig(filename='error.log',level=logging.DEBUG)\r\nlogging.debug('This message should go to the log file')\r\nlogging.info('So should this')\r\nlogging.warning('And this, too')\r\n\r\n\r\nclass THECLUTCHER_Spider(CSVFeedSpider):\r\n    name = \"STYLECSV\"\r\n\r\n    start_urls = [\"https://www.styleisnow.com/en/coltorti_customcustomer/index/download/mode/new\"]\r\n    delimiter = ';'\r\n    headers = ['Product Url','Image','Image1','Image2','Image3','Brand','Sku Styleisnow','Season','Year','Sku Supplier','Variant','Color Styleisnow','Color Supplier','Made in','Material','Name','Description',\r\n               'Categories','Qty','Retail Price','Discount','Size','Qty Detail','Bag length','Bag height',\t'Bag weight',\t'Handle height','Shoulder bag length','Belt length','Belt height','Accessory length','Accessory height','Accessory weight',\r\n               'Heel height','Plateau height','Insole length']\r\n\r\n    login_url = 'https://www.styleisnow.com/en/customer/account/login/'\r\n\r\n    def start_requests(self):\r\n        # let's start by sending a first request to the login page\r\n        yield scrapy.Request(self.login_url, self.parse_login)\r\n\r\n    def parse_login(self, response):\r\n        return FormRequest.from_response(response,\r\n                                         formdata={'login[username]': 'coo@balaan.co.kr',\r\n                                                   'login[password]': 'Qkffks18.!'\r\n                                                   }, callback = self.start_crawl,dont_filter = True)\r\n\r\n\r\n\r\n    def start_crawl(self,response):\r\n        for url in self.start_urls:\r\n            yield 
scrapy.Request(url)\r\n\r\n\r\n\r\n\r\n\r\n def parse_row(self, response, row):\r\n\r\n\r\n item = BTQitem()\r\n if row['Brand'] != \"Brand\":\r\n item['brand'] = row['Brand']\r\n item['SKU'] = row['Sku Supplier'] + \" \" + row['Variant']\r\n item['price'] = float(str.replace(str.replace(str.replace(row['Retail Price'],'Retail Price','0'),'Eur ',''),',',''))\r\n item['gd_name'] = row['Name']\r\n item['color'] = row['Color Styleisnow']\r\n img = [row['Image']] + [row['Image1']] + [row['Image2']] + [row['Image3']]\r\n i = 0\r\n image = []\r\n while i < 4:\r\n if img[i] != '':\r\n image.append(img[i])\r\n i += 1\r\n item['img'] = json.dumps(image)\r\n item['link'] = row['Product Url']\r\n\r\n item['desc'] = row['Description'] + \" | Season : \" + row['Season'] + row['Year'] + \" | Material : \" + row['Material']\r\n item['generation'] = 'adult'\r\n shopurlId_temp = row['Categories']\r\n if \"Women > Clothing\" in shopurlId_temp:\r\n shopurlId = '247'\r\n elif \"Women > Shoes\" in shopurlId_temp:\r\n shopurlId = '248'\r\n elif \"Women > Bags\" in shopurlId_temp:\r\n shopurlId = '249'\r\n elif \"Women > Accessories\" in shopurlId_temp:\r\n shopurlId = '250'\r\n elif \"Men > Clothing\" in shopurlId_temp:\r\n shopurlId = '252'\r\n elif \"Men > Shoes\" in shopurlId_temp:\r\n shopurlId = '253'\r\n elif \"Men > Bags\" in shopurlId_temp:\r\n shopurlId = '254'\r\n elif \"Men > Accessories\" in shopurlId_temp:\r\n shopurlId = '255'\r\n else:\r\n shopurlId = '241'\r\n item['shopurl_id'] = shopurlId\r\n item['shop_id'] = '9'\r\n item['updated_at'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\r\n item['created_at'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\r\n item['crawl_last_time'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\r\n discount_rate = float(str.replace(str.replace(row['Discount'],'%',''),'Discount','0'))\r\n opt = row[\"Size\"].split(',')\r\n stock = row['Qty Detail'].split(',')\r\n Soldout_check = row['Qty']\r\n optqty = []\r\n j = 0\r\n\r\n for idx, op in enumerate(opt):\r\n temp_obj = {'Size': str.upper((str.replace(str.replace(op,'u','UNIQUE'),'os','UNIQUE'))).strip()\r\n ,\"stock\": str.replace(stock[idx], '.0000', '')\r\n ,\"goods_consumer\": float(\r\n str.replace(str.replace(str.replace(row['Retail Price'], 'Retail Price', '0'), 'Eur ', ''),\r\n ',', ''))\r\n ,\"goods_price\": round(float(\r\n str.replace(str.replace(str.replace(row['Retail Price'], 'Retail Price', '0'), 'Eur ', ''),\r\n ',', '')) * (1 - discount_rate / 100), 2)\r\n }\r\n optqty.append(temp_obj)\r\n\r\n indexed_optqty = []\r\n for idx, opt in enumerate(optqty):\r\n indexed_optqty.append(dict({idx + 1: opt}))\r\n\r\n item['optqty'] = \"Sold out\" if len(indexed_optqty) == 0 else json.dumps(indexed_optqty)\r\n\r\n item[\"discount\"] = row['Discount']\r\n yield item","sub_path":"balaan/spiders/STYLEISNOW_CSV.py","file_name":"STYLEISNOW_CSV.py","file_ext":"py","file_size_in_byte":5807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"441425844","text":"import pytest\n\nfrom malleefowl.tests.common import WpsTestClient, TESTDATA, assert_response_success\n\nimport tempfile\nimport yaml\n\n@pytest.mark.online\ndef test_wps_dummy():\n wps = WpsTestClient()\n datainputs = \"[dataset={0}]\".format(TESTDATA['noaa_nc_1'])\n resp = wps.get(service='wps', request='execute', version='1.0.0', identifier='dummy',\n datainputs=datainputs)\n assert_response_success(resp)\n\n@pytest.mark.online\ndef test_wps_thredds_workflow():\n doc = \"\"\"\n 
workflow:\n      name: test_thredds_workflow\n      source:\n        thredds:\n          catalog_url: {0}\n      worker:\n        identifier: dummy\n        url: http://localhost:8091/wps\n        resource: dataset\n        inputs: []\n    \"\"\".format(TESTDATA['noaa_catalog_1'])\n    fp = tempfile.NamedTemporaryFile(suffix=\".txt\")\n    yaml.dump(yaml.load(doc), fp)\n    \n    wps = WpsTestClient()\n    datainputs = \"[workflow=file://{0}]\".format(fp.name)\n    resp = wps.get(service='wps', request='execute', version='1.0.0', identifier='workflow',\n                   datainputs=datainputs)\n    assert_response_success(resp)\n","sub_path":"malleefowl/tests/test_wps_workflow.py","file_name":"test_wps_workflow.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"451362730","text":"# Given a binary tree, return the zigzag level order traversal of its nodes' values. \n# (ie, from left to right, then right to left for the next level and alternate between).\n# https://leetcode.com/problems/binary-tree-zigzag-level-order-traversal/\nfrom collections import defaultdict,deque\n\n# Time O(N), Space: O(N)\ndef zizagOrder(root):\n    if root == None:\n        return []\n    queue = deque()\n    levels = defaultdict(deque)\n    queue.append([root, 0])\n    while len(queue) > 0:\n        node, l_idx = queue.popleft()\n        if l_idx % 2 == 0:\n            levels[l_idx].append(node.val)\n        else:\n            levels[l_idx].appendleft(node.val)\n        if node.left != None:\n            queue.append([node.left, l_idx + 1])\n        if node.right != None:\n            queue.append([node.right, l_idx + 1])\n    return list(map(list,levels.values()))","sub_path":"Tree/zigzag_level_order.py","file_name":"zigzag_level_order.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"549186288","text":"#pip install scikit-learn\n#pip install psutil\nfrom sklearn import datasets\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.naive_bayes import MultinomialNB\n\n\n\n\ndata_train=datasets.fetch_20newsgroups(\n\tsubset='train',#subset='all','train','test'\n\tremove=['headers','footers','quotes']#strip the post headers\n\t)\ndata_test=datasets.fetch_20newsgroups(\n\tsubset='test',#subset='all','train','test'\n\tremove=['headers','footers','quotes']#strip the post headers\n\t)\nprint(data_train.data[0])#view the first article\nprint(data_train.target_names)#view the category names\nprint(data_train.target_names[data_train.target[0]])#view the category of a given article#the output is rec.autos\n\n#count vectorization with scikit-learn\ncount_vect=CountVectorizer(stop_words='english')\n#stop words\tstop_words\tthe words to be filtered out\n#Chinese stop words can also be passed in once collected into a list\n#e.g. chinese_stop_words=['第二','啊']\n\n#count and transform the data with .fit_transform()\nx_train_counts=count_vect.fit_transform(data_train.data)\nprint(x_train_counts)\n\n#simple term-frequency counting\n#raw counts clearly differ between long and short articles\n\n#the TF-IDF method\n#core idea:\n#if a word has a high term frequency (TF) in one article and rarely appears\n#in other articles, the word is considered a good feature for classification\n#\n#term frequency of car: TF = 4/91 = 0.044\n#if car appears in 100 articles out of 10,000 in total, then\n#IDF = ln(10000/100) = 4.605\n#so the TF-IDF score of car is:\n#TF-IDF = 0.044*4.605 = 0.20262\n\n\ntfidf_transformer=TfidfTransformer().fit(x_train_counts)\nx_train_tfidf=tfidf_transformer.transform(x_train_counts)\n\n#the naive Bayes method\nclf=MultinomialNB().fit(x_train_tfidf,data_train.target)\n#used for document classification and spam tasks\n\n\n#build a list of texts to predict and compute their TF-IDF\ntext_test=['do you use windows','god loves you','opengl on the gup is fast']\nx_test_counts=count_vect.transform(text_test)\nx_test_tfidf=tfidf_transformer.transform(x_test_counts)\n#TF-IDF vectors are required as the prediction input\n\n#run the prediction\npredicted=clf.predict(x_test_tfidf)\n#the result is a category index that can be looked up in the data's target_names\n\nfor doc,category in 
zip(text_test,predicted):\n\tprint('{}=>{}'.format(doc,data_train.target_names[category]))\n\n\n\n\ndef 教程():\n\tzip()#zip packs multiple lists element-wise into tuples\n\tstudent=['li','wang','dd']\n\tscore=[95,59,34]\n\tzzip=list(zip(student,score))\n\tprint(zzip)\n\n","sub_path":"机器学习/jiqi.py","file_name":"jiqi.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"204781146","text":"import numpy as np\n\nfrom tests.matrices import MATRICES\n\n\nclass GlobalAlignment:\n    def __init__(self, string1, string2, gap_penalty, matrix):\n        \"\"\"\n        :param string1: first string to be aligned, string\n        :param string2: second string to be aligned, string\n        :param gap_penalty: gap penalty, integer\n        :param matrix: substitution matrix containing scores for amino acid\n                       matches and mismatches, dict\n\n        Attention! string1 is used to index columns, string2 is used to index rows\n        \"\"\"\n        self.string1 = string1\n        self.string2 = string2\n        self.gap_penalty = gap_penalty\n        self.substituion_matrix = matrix\n        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=np.int)\n        self.align()\n\n    def align(self):\n        \"\"\"\n        Align given strings using the Needleman-Wunsch algorithm,\n        store the alignments and the score matrix used to compute those alignments.\n        NB: score matrix and the substitution matrix are different matrices!\n        \"\"\"\n        len_string1 = len(self.string1) + 1\n        len_string2 = len(self.string2) + 1\n        self.score_matrix[0, :] = np.array([i * self.gap_penalty for i in range(len_string1)])\n        self.score_matrix[:, 0] = np.array([i * self.gap_penalty for i in range(len_string2)])\n\n        for i in range(1, len_string2):\n            for k in range(1, len_string1):\n                match = self.score_matrix[i-1, k-1] + \\\n                        self.substituion_matrix[self.string2[i-1]][self.string1[k-1]]\n                delete = self.score_matrix[i-1, k] + self.gap_penalty\n                insert = self.score_matrix[i, k-1] + self.gap_penalty\n                self.score_matrix[i, k] = max(match, delete, insert)\n\n    def get_best_score(self):\n        \"\"\"\n        :return: the highest score for the aligned strings, int\n        \"\"\"\n        return self.score_matrix[-1, -1]\n\n    def get_number_of_alignments(self):\n        \"\"\"\n        :return: number of found alignments with the best score\n        \"\"\"\n        return len(self.get_alignments())\n\n    def get_alignments(self):\n        \"\"\"\n        :return: list of alignments, where each alignment is represented\n                 as a tuple of aligned strings\n        \"\"\"\n        i = len(self.string2)\n        k = len(self.string1)\n        if i == 1 or k == 1:\n            if self.string1[0] != self.string2[0]:\n                return None\n\n        stack = []\n        stack.append(('', '', i, k))\n        alignments = []\n\n        while len(stack) > 0:\n            align_a, align_b, m, n = stack.pop()\n            if m == 0 and n == 0:\n                alignments.append((align_b, align_a))\n                continue\n\n            if m > 0 and n > 0 and self.score_matrix[m, n] == self.score_matrix[m-1, n-1] + \\\n                    self.substituion_matrix[self.string2[m-1]][self.string1[n-1]]:\n                align_a_1 = self.string2[m-1] + align_a\n                align_b_1 = self.string1[n-1] + align_b\n                stack.append((align_a_1, align_b_1, m-1, n-1))\n\n            if n > 0 and self.score_matrix[m, n] == self.score_matrix[m, n-1] + self.gap_penalty:\n                align_a_2 = '-' + align_a\n                align_b_2 = self.string1[n-1] + align_b\n                stack.append((align_a_2, align_b_2, m, n-1))\n\n            if m > 0 and self.score_matrix[m, n] == self.score_matrix[m-1, n] + self.gap_penalty:\n                align_a_3 = self.string2[m-1] + align_a\n                align_b_3 = '-' + align_b\n                stack.append((align_a_3, align_b_3, m-1, n))\n\n        return alignments\n\n    def get_score_matrix(self):\n        \"\"\"\n        :return: 
matrix built during the alignment process as a list of lists\n        \"\"\"\n        return self.score_matrix\n","sub_path":"codechecker/repos/3/collected_files/global_alignment/ru93tag.py","file_name":"ru93tag.py","file_ext":"py","file_size_in_byte":3795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"37354949","text":"#!/usr/bin/python\n\"\"\" Filter handling \"\"\"\nimport traceback\nimport os\nimport sys\nfrom flask import Blueprint, request, current_app\nfrom YON.user import get_user_data\nfrom YON.notification import be_fcm_init_process\nsys.path.append(os.path.dirname(__file__))\nfrom YON.common import Common, APP_NAME, get_user_from_session, ret_json_err_code, ret_json_with_object\nfrom YON.fcm_header import FCM_ACTION_ID_FILTER_CONTACT_READY\nimport urllib\nimport json\nimport socket\nfrom multiprocessing import Process\nimport time\nimport requests\nAPI = Blueprint('filter_phones', APP_NAME)\n\n@API.route('/filter_phones', methods=['POST'])\ndef filter_phones():\n    \"\"\" Get the phone numbers from the user and return only the phones that exist in the system (users). \"\"\"\n    from YON.common import be_get_jsonUser_data\n    data = request.json\n\n    if data is not None and 'contacts' in data:\n        user = get_user_from_session()\n        contacts = data['contacts']\n        phones = contacts.keys()\n        #current_app.logger.debug('contacts- {0}'.format(contacts))\n        #in_there_contact_p = Process(target=init_in_there_contacts, args=(phones, user))\n        #in_there_contact_p.daemon = True\n        #in_there_contact_p.start()\n        be_in_ther_contacts_init_process({'phones':phones, 'user':user}, current_app.logger)\n\n        #filter_contact_p = Process(target=be_filter_user_contacts, args=(user, contacts))\n        #filter_contact_p.daemon = True\n        #filter_contact_p.start()\n        ##filtered_contact = be_filter_user_contacts(user, contacts, current_app.logger)\n        url = \"https://us-central1-yorn-51648.cloudfunctions.net/FB_ContactsHandler/filterContacts\"\n        data = json.dumps({\"user_id\": user, \"contacts\": contacts})\n        headers = {'Content-type': 'application/json'}\n        fb_func_response = requests.post(url, data=data, headers=headers, verify=False)\n        #current_app.logger.debug('filter contacts FB response: {}'.format(fb_func_response))\n        filtered_contact_keys = json.loads(fb_func_response.text)\n        current_app.logger.debug('filter contacts FB response keys: {}'.format(filtered_contact_keys))\n        filter_contacts = []\n        for contact_id in filtered_contact_keys:\n            user_json_data = be_get_jsonUser_data(contact_id)\n            if user_json_data:\n                filter_contacts.append(user_json_data)\n        #current_app.logger.debug('filtered: {}'.format(filter_contacts))\n        be_init_save_contacts_to_db_process({'user_id':user, 'contacts':contacts}, current_app.logger)\n        #store_user_contact_p = Process(target=be_save_user_contacts_to_storage, args=(user, contacts))\n        #store_user_contact_p.daemon = True\n        #store_user_contact_p.start()\n\n\n        current_app.logger.debug('return ok for filter phones')\n        #return ret_json_err_code(0, 'OK', 'Status OK')\n        return ret_json_with_object(filter_contacts)\n    else:\n        return ret_json_err_code(8, 'missing post params', 'required: contacts')\n\n\n\"\"\"========================================================================\n*** Function: be_filter_contacts_process\n*** server: back end\n*** Description: send the message to the local worker service over a socket\n*** Arguments: the relevant data for a specific action id, as a dictionary\n*** Return: none\n=========================================================================\"\"\"\ndef 
be_filter_contacts_process(msg_json, logger):\n    \"\"\" Send msg_json to the local worker service over a socket. \"\"\"\n    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    ip_adrs = 'localhost'\n    ip_port = 10001\n    server_address = (ip_adrs, ip_port)\n    logger.debug(\"connecting to {} Port {}\".format(ip_adrs, ip_port))\n    sock.connect(server_address)\n    logger.debug('Connected')\n\n    raw_msg = json.dumps(msg_json)\n    sock.sendall(raw_msg)\n    logger.debug('Sent')\n\n\"\"\"========================================================================\n*** Function: be_save_user_contacts_to_storage\n*** server: back end\n*** Description: Save user contacts to storage\n*** Arguments: user_id (phone number), contacts (dictionary)\n*** Return: none\n=========================================================================\"\"\"\ndef be_save_user_contacts_to_storage(user_id, contacts, logger):\n    \"\"\" This function saves the user contacts into storage. \"\"\"\n    clean_dict = {}\n    # We should convert special chars to url-encoded strings\n    for key, val in contacts.items():\n        str_key = urllib.quote(key, safe='')\n        str_key = str_key.replace('.', '%2E')\n        clean_dict[str_key] = val\n\n    try:\n        #logger.debug('Saving user contacts to db {}'.format(clean_dict))\n        #we need to store all the user filtered contacts in storage\n        storage = Common().firebase.storage()\n\n        #check if the local directory exists\n        path_dir = '/tmp/users_contacts/'\n        if not os.path.exists(path_dir):\n            os.makedirs(path_dir)\n\n        # Create a file if it doesn't already exist\n        file_path = '/tmp/users_contacts/{0}_contacts.txt'.format(user_id)\n        with open(file_path, 'w') as file_desc:\n            # Write to the file\n            json.dump(clean_dict, file_desc)\n            # Close the connection to the file\n            file_desc.close()\n\n        db_path = 'users/{0}/contacts.txt'.format(user_id)\n        storage.child(db_path).put(file_path)\n\n        #db_con.child('users', 'active', user_id, 'contacts').set(clean_dict)\n    except Exception as e:\n        logger.debug('Exception in saving contacts to db {0}'.format(e))\n        traceback.print_exc()\n\n\n\"\"\"========================================================================\n*** Function: be_save_user_contacts_to_db\n*** server: back end\n*** Description: Save user contacts to the database\n*** Arguments: user_id (phone number), contacts (dictionary)\n*** Return: none\n=========================================================================\"\"\"\ndef be_save_user_contacts_to_db(user_id, contacts, logger):\n    \"\"\" This function saves the user contacts into the database. \"\"\"\n    logger.debug('saving contacts for user id {}'.format(user_id))\n    clean_dict = {}\n    # We should convert special chars to url-encoded strings\n    for key, val in contacts.items():\n        str_key = urllib.quote(key, safe='')\n        str_key = str_key.replace('.', '%2E')\n        clean_dict[str_key] = val\n\n    try:\n        #logger.debug('Saving user contacts to db {}'.format(clean_dict))\n        #we need to store all the user filtered contacts in the database\n        db_con = Common().get_con()\n        db_con.child('contacts', user_id).set(clean_dict)\n\n    except Exception as e:\n        logger.debug('Exception in saving contacts to db {0}'.format(e))\n        traceback.print_exc()\n\n\ndef be_filter_user_contacts(user_id, contacts, logger):\n    \"\"\" filter the phones posted by the user. 
return only phones that are active users \"\"\"\n    logger.debug('Filtering user contacts')\n    db_con = Common().get_con()\n\n    members = []\n    # Get the active users from firebase\n    db_active_users = (db_con.child('users', 'active').get().val()).keys()\n\n    #Hold only the contacts keys (array of phone numbers including the country code EXAMPLE: [972547760683, 972526509355, ... , n])\n    phones = contacts.keys()\n    #current_app.logger.debug('user contacts {0}'.format(phones))\n    #Get the intersection between the posted contacts and the active users from firebase\n    intersection_users = set(db_active_users) & set(phones)\n\n    #remove the user themselves from the list\n    if user_id in intersection_users:\n        intersection_users.remove(user_id)\n\n    # Get the full user data of the user from firebase\n    # and add the full name based on the posted contacts\n    for member_phone in intersection_users:\n        members.append(get_user_data(member_phone))\n        members[-1]['full_name'] = contacts[member_phone]['full_name']\n\n    #we need to store all the user filtered contacts in storage\n    storage = Common().firebase.storage()\n    \n    #check if the directory exists\n    path_dir = '/tmp/filter_contacts/'\n    if not os.path.exists(path_dir):\n        os.makedirs(path_dir)\n\n    # Create a file if it doesn't already exist\n    file_path = '/tmp/filter_contacts/{0}_filtered_contacts.txt'.format(user_id)\n    with open(file_path, 'w') as file_desc:\n        # Write to the file\n        json.dump(members, file_desc)\n        # Close the connection to the file\n        file_desc.close()\n\n    db_path = 'users/{0}/filter_contacts.txt'.format(user_id)\n    storage.child(db_path).put(file_path)\n    user_token = db_con.child('users', 'active', user_id, 'pn_token').get().val()\n    logger.debug('end filtering for user {0} with token {1}'.format(user_id, user_token))\n    #Create a data message for the application\n    fcm_ps_msg = {'fcm_data':{'contacts_url': 'user/filtered_contacts/get'},\n                  'meta_data': {'action_id':FCM_ACTION_ID_FILTER_CONTACT_READY, 'user_token': user_token,\\\n                                'user_id':str(user_id), 'modified_date':time.mktime(time.gmtime())}}\n\n\n    be_fcm_init_process(fcm_ps_msg, current_app.logger)\n    return \"user/filtered_contacts/get\"\n\ndef init_in_there_contacts(phones_keys, user, logger):\n    \"\"\" create the 'in there contacts' key for each user \"\"\"\n    logger.debug('init in there contacts')\n    # url = \"https://us-central1-yorn-51648.cloudfunctions.net/FB_ContactsHandler/unreg_contacts\"\n    # data = json.dumps({\"user_id\": user, \"contacts_keys\": phones_keys})\n    # headers = {'Content-type': 'application/json'}\n    # fb_func_response = requests.post(url, data=data, headers=headers, verify=False)\n    # logger.debug('in there contacts res {0}'.format(fb_func_response))\n    # return\n    db_con = Common().firebase.database()\n    #go over all the contacts that the user sent and set the 'userImInThereContacts' key\n    #unregistered = {}\n    for phone in phones_keys:\n        if str(phone) == str(user):\n            continue\n        try:\n            read_contact = db_con.child('users', 'active', phone).get().val()\n            if read_contact is None:\n                #unregistered[phone] = {'in_there_contacts':{str(phone):str(phone)}}\n                #unreg = db_con.child('users', 'unregisterd', phone).get().val()\n                #if unreg is not None:\n                db_con.child('users', 'unregisterd', phone, 'in_there_contacts', user).set(str(user))\n                #else:\n                #unregistered = {}\n                #unregistered[phone] = {'in_there_contacts':{str(phone):str(phone)}}\n                #db_con.child('users', 'unregisterd', phone).set(unregistered)\n                #db_con.child('users', 'unregisterd', 'in_there_contacts').set({str(phone):str(phone)})\n            else:#this contact is active\n                
db_con.child('users', 'active', phone, 'in_there_contacts', user).set(str(user))\n        except Exception as e:\n            logger.debug('read in there contacts error: {0}'.format(e))\n\ndef be_in_ther_contacts_init_process(json_data, logger):\n    \"\"\" Send the 'in there contacts' json to the local worker service over a socket. \"\"\"\n    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    ip_adrs = 'localhost'\n    ip_port = 10001\n    server_address = (ip_adrs, ip_port)\n    logger.debug(\"connecting to {} Port {}\".format(ip_adrs, ip_port))\n    try:\n        sock.connect(server_address)\n        logger.debug('Connected')\n        raw_msg = json.dumps(json_data)\n        sock.sendall(raw_msg)\n        logger.debug('Sent')\n    except Exception as e:\n        logger.debug('in_there_contacts service error: {0}'.format(e))\n\n\ndef be_init_save_contacts_to_db_process(json_data, logger):\n    \"\"\" Send the contacts json to the local save-contacts worker service over a socket. \"\"\"\n    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    ip_adrs = 'localhost'\n    ip_port = 10002\n    server_address = (ip_adrs, ip_port)\n    logger.debug(\"connecting to {} Port {}\".format(ip_adrs, ip_port))\n    try:\n        sock.connect(server_address)\n        logger.debug('Connected')\n        raw_msg = json.dumps(json_data)\n        sock.sendall(raw_msg)\n        logger.debug('Sent')\n    except Exception as e:\n        logger.debug('save contacts service error: {0}'.format(e))\n\n\n","sub_path":"filter_phones.py","file_name":"filter_phones.py","file_ext":"py","file_size_in_byte":12029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"275207938","text":"#!/usr/bin/env python3\nimport sys\nimport warnings\nimport logging\nimport csv\nimport pymysql\nimport time\nimport traceback\nimport inspect\nimport code\n\n# PARAMETERS\nperson_addressSET_NAME = '***'\nFILE_PATH = '***'\nDEFAULT_COUNTRY = '***'\nCHUNK_SIZE = 1000\n\n# CONSTANTS\nATT_OPTION_TYPE_POINTER = 10\nATT_OPTION_TYPE_INTEGER = 20\nATT_OPTION_TYPE_STRING = 30\n\n\nclass Attribute(list):\n    def __init__(self, values=None):\n        super().__init__()\n        self.type_pointer = False\n        self.values = []\n        if values:\n            self.values += values\n        self.pointers = []\n        self[:] = self.values\n\n    def set_as_pointer(self, b):\n        self.type_pointer = bool(b)\n        if not self.type_pointer:\n            self[:] = self.values\n        else:\n            self[:] = self.pointers\n\n\nclass PersonAddressEntry:\n    def __init__(self, attributes=None):\n        self.new = True\n        self.id = None\n        self.attributes = {}\n        if attributes:\n            self.attributes.update(attributes)\n\n\ndef execute_query(cursor, query, args=None):\n    times = []\n    while True:\n        t = time.time()\n        try:\n            cursor.execute(query, args)\n        except pymysql.err.InternalError:\n            continue\n        finally:\n            times.append(time.time() - t)\n        break\n    return times\n\n\ndef create_formatting(values=0, groups=0):\n    formatting = ''\n    for i in range(int(values)):\n        formatting += '%s,'\n    if not groups:\n        return formatting[:-1]\n    formatting_groups = ''\n    for i in range(int(groups)):\n        formatting_groups += '({0}),'.format(formatting[:-1])\n    return formatting_groups[:-1]\n\n\ndef main():\n    # Disable pymysql warnings\n    warnings.simplefilter('ignore', pymysql.err.Warning)\n\n    # Initialize logging\n    log = logging.getLogger(__name__)\n    log.setLevel(logging.DEBUG)\n    formatter = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s')\n    h = logging.StreamHandler(sys.stdout)\n    h.setFormatter(formatter)\n    log.addHandler(h)\n    log.info('PROGRAM START')\n    log.info(' -- person_addressset name: {0}'.format(person_addressSET_NAME))\n    log.info(' -- File name: {0}'.format(FILE_PATH))\n\n    # Main person_addressdb dict\n    person_addressdb = {}\n\n    # Open file, parse to 
person_addressdb\n with open('/home/petr/person_addressdb-files/' + FILE_PATH, encoding='iso8859-15') as f:\n reader = csv.reader(f)\n # reader.__next__()\n for row in reader:\n if not row:\n continue\n attributes = {}\n \"\"\"\n ***\n \"\"\"\n person_addressdb[person_address_address] = PersonAddressEntry(attributes)\n log.info('File read, {0} entries'.format(len(person_addressdb)))\n\n # Divide person_address addresses into chunks\n person_address_addresses = list(person_addressdb)\n person_address_addresses_chunks = []\n for i in range(0, len(person_address_addresses), CHUNK_SIZE):\n person_address_addresses_chunks.append(person_address_addresses[i:i + CHUNK_SIZE])\n log.info('Data divided to {0} chunks about {1} entries'.format(len(person_address_addresses_chunks), CHUNK_SIZE))\n\n # Connect to database\n db = '***'\n user = '***'\n host = '***'\n co = pymysql.connect(host=host, user=user, password='***', db=db, charset='utf8')\n co.autocommit(True)\n c = co.cursor()\n log.info('Connected to database {0} by {1} on {2}'.format(db, user, host))\n\n # Create cache\n # attribute table\n c.execute('SELECT attribute_id, attribute_type FROM attribute')\n attribute_cache = {row[0]: row[1] for row in c.fetchall()}\n # attribute_option table\n c.execute('SELECT attribute_id, attribute_option, attribute_option_id FROM attribute_option')\n attribute_option_cache = {(row[0], row[1]): row[2] for row in c.fetchall()}\n log.info('Downloaded table attribute_option to cache')\n # tld_country table\n c.execute('SELECT tld, country FROM tld_country')\n tld_country_cache = {row[0]: row[1] for row in c.fetchall()}\n log.info('Downloaded table tld_country to cache')\n\n # Get tags or set if not exist\n # Get/set person_addressSET tag id\n try:\n person_addressset_id = attribute_option_cache[(110, person_addressSET_NAME)]\n log.info('Read person_addressdb-set data-source attribute \"{0}\" with id {1}'.format(person_addressSET_NAME, person_addressset_id))\n except KeyError:\n c.execute('INSERT INTO attribute_option (attribute_id,attribute_option) VALUES (%s,%s)', (110, person_addressSET_NAME))\n attribute_option_cache[(110, person_addressSET_NAME)] = c.lastrowid\n person_addressset_id = c.lastrowid\n log.info('Set person_addressdb-set data-source attribute \"{0}\" with new id {1}'.format(person_addressSET_NAME, person_addressset_id))\n # Get/set person_addressFILE tag id\n person_addressfile = 'FILE_PATH:' + FILE_PATH\n try:\n person_addressfile_id = attribute_option_cache[(110, person_addressfile)]\n log.info('Read file data-source attribute \"{0}\" with id {1}'.format(person_addressfile, person_addressfile_id))\n except KeyError:\n c.execute('INSERT INTO attribute_option (attribute_id,attribute_option) VALUES (%s,%s)', (110, person_addressfile))\n attribute_option_cache[(110, person_addressfile)] = c.lastrowid\n person_addressfile_id = c.lastrowid\n log.info('Set file data-source attribute \"{0}\" with new id {1}'.format(person_addressfile, person_addressfile_id))\n\n # Resolve country tag by TLD\n for person_address_address in person_addressdb:\n try:\n tld = person_address_address.split('.')[-1]\n country = tld_country_cache[tld]\n except (IndexError, KeyError):\n country = DEFAULT_COUNTRY\n person_addressdb[person_address_address].attributes.setdefault(70, Attribute()).append(country)\n\n # Insert by chunks\n log.info('Starting loop with inserting by chunk')\n for chunk_count, person_address_addresses_chunk in enumerate(person_address_addresses_chunks):\n t_chunk_start = time.time()\n\n # Check for 
existing person_address addresses from this chunk\n values_person_address_address = person_address_addresses_chunk[:]\n c.execute('SELECT person_address_address, person_address_id FROM person_address_address WHERE person_address_address IN {0}'.format(\n create_formatting(len(values_person_address_address), 1)), values_person_address_address)\n unrecognized_person_address_address_present = False\n checked_person_address_addresses = []\n for person_address_address, person_address_id in c.fetchall():\n try:\n person_addressdb[person_address_address].id = person_address_id\n person_addressdb[person_address_address].new = False\n values_person_address_address.remove(person_address_address)\n checked_person_address_addresses.append(person_address_address)\n except KeyError:\n unrecognized_person_address_address_present = True\n except ValueError:\n for checked_person_address_address in checked_person_address_addresses:\n person_addressdb[checked_person_address_address].id = None\n person_addressdb[checked_person_address_address].new = True\n values_person_address_address = person_address_addresses_chunk[:]\n unrecognized_person_address_address_present = True\n break\n if unrecognized_person_address_address_present:\n for person_address_address in values_person_address_address:\n c.execute('SELECT person_address_id FROM person_address_address WHERE person_address_address = %s', (person_address_address,))\n row = c.fetchone()\n if row:\n person_addressdb[person_address_address].id = row[0]\n person_addressdb[person_address_address].new = False\n values_person_address_address.remove(person_address_address)\n if values_person_address_address:\n # Insert into main table person_address_address\n t_query_eadd = execute_query(c, 'INSERT IGNORE INTO person_address_address (person_address_address) VALUES {0}'.format(\n create_formatting(1, len(values_person_address_address))), values_person_address_address)\n inserted_count = c.rowcount\n inserted_first_id = c.lastrowid\n # Assign id if all person_address addresses was inserted\n if inserted_count is len(values_person_address_address):\n inserted_id_offset = 0\n for person_address_address in values_person_address_address:\n person_addressdb[person_address_address].id = inserted_first_id + inserted_id_offset\n inserted_id_offset += 1\n # Assign id if some person_address addresses was ignored\n else:\n for person_address_address in values_person_address_address:\n c.execute('SELECT person_address_id FROM person_address_address WHERE person_address_address = %s', (person_address_address,))\n person_addressdb[person_address_address].id = c.fetchone()[0]\n\n # Insert attribute options\n for person_address_address in person_address_addresses_chunk:\n for attribute_id, attribute_values in person_addressdb[person_address_address].attributes.items():\n try:\n attribute_type = attribute_cache[attribute_id]\n except KeyError:\n log.error('Can not find attribute in attribute table!')\n log.error('The program will be terminated')\n sys.exit()\n if attribute_type == ATT_OPTION_TYPE_POINTER:\n for attribute_value in attribute_values:\n try:\n attribute_option_id = attribute_option_cache[(attribute_id, attribute_value)]\n except KeyError:\n c.execute('INSERT INTO attribute_option (attribute_id,attribute_option) VALUES (%s,%s)',\n (attribute_id, attribute_value))\n attribute_option_cache[(attribute_id, attribute_value)] = c.lastrowid\n attribute_option_id = c.lastrowid\n 
person_addressdb[person_address_address].attributes[attribute_id].pointers.append(attribute_option_id)\n person_addressdb[person_address_address].attributes[attribute_id].set_as_pointer(True)\n\n # Insert person_address attributes\n #c.execute('SELECT .. FROM person_address_attribute WHERE')\n values_person_address_attribute = []\n for person_address_address in person_address_addresses_chunk:\n values_person_address_attribute += [person_addressdb[person_address_address].id, 110, person_addressset_id] + [person_addressdb[person_address_address].id, 110,\n person_addressfile_id]\n for attribute_id, attribute_values in person_addressdb[person_address_address].attributes.items():\n for attribute_value in attribute_values:\n if attribute_value != '':\n values_person_address_attribute += [person_addressdb[person_address_address].id, attribute_id, attribute_value]\n t_query_eatt = \\\n execute_query(c, 'INSERT INTO person_address_attribute (person_address_id,attribute_id,attribute_value) VALUES {0}'.format(\n create_formatting(3, int(len(values_person_address_attribute) / 3))), values_person_address_attribute)\n\n # Insert log for newly inserted person_address addresses\n values_log = []\n for person_address_address in person_address_addresses_chunk:\n if person_addressdb[person_address_address].new:\n values_log += [10, person_addressdb[person_address_address].id, person_addressset_id]\n if values_log:\n execute_query(c, 'INSERT INTO log (log_event_id,person_address_id,log_note1) VALUES {0}'.format(\n create_formatting(3, int(len(values_log) / 3))), values_log)\n t_chunk = time.time() - t_chunk_start\n\n log.info('Inserted {0}/{1}'.format(chunk_count * CHUNK_SIZE + len(person_address_addresses_chunk), len(person_addressdb)))\n log.info(' -- Newly inserted {0} entries'.format(inserted_count))\n log.info(' -- Inserting time {0}s'.format(round(t_chunk, 2)))\n log.info(' ------ Query person_address_address time(s) {0}s'.format([round(x, 2) for x in t_query_eadd]))\n log.info(' ------ Query person_address_attribute time(s) {0}s'.format([round(x, 2) for x in t_query_eatt]))\n\n\nif __name__ == '__main__':\n try:\n main()\n except KeyboardInterrupt:\n sys.exit()\n except:\n tb = traceback.format_exc()\n inspection = inspect.trace()\n code.interact(local=locals())\n","sub_path":"addressdb_import.py","file_name":"addressdb_import.py","file_ext":"py","file_size_in_byte":13135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"207327088","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /mnt/disk1/anaconda3/lib/python3.7/site-packages/topic_modeling/model_corpus_args_me_cluster.py\n# Compiled at: 2020-02-25 08:27:23\n# Size of source mod 2**32: 2493 bytes\nimport sys\nsys.path.insert(0, '/home/yamenajjour/git/topic-ontologies/')\nfrom argument_esa_model.esa import ESA\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.types import StructField, StructType, StringType, LongType\nimport pandas as pd, numpy as np\nspark = SparkSession.builder.appName('topic-ontologies').config('master', 'yarn').getOrCreate()\nargs_me = spark.read.format('csv').option('header', 'true').option('delimiter', ',')\nimport pickle, codecs\n\ndef dict_to_list(dictionary):\n vector = []\n for key in sorted(dictionary):\n vector.append(dictionary[key])\n\n pickled = codecs.encode(pickle.dumps(vector), 'base64').decode()\n return pickled\n\n\ndef 
project_arguments():\n esa_model_debatepedia = ESA('/mnt/ceph/storage/data-in-progress/args-topic-modeling/topic-models/esa/debatepedia.mat')\n\n def project_argument(argument):\n dict_vect = esa_model_debatepedia.process(argument, False)\n return dict_to_list(dict_vect)\n\n args_me_arguments_df = spark.read.format('csv').option('header', 'true').option('delimiter', '|').option('quote', '\"').load('/user/befi8957/topic-ontologies/args-me/corpus-args-me-preprocessed-documents.csv').na.drop()\n arguments = args_me_arguments_df.select('text').rdd.map(lambda r: r[0]).repartition(400)\n ids = args_me_arguments_df.select('argument-id').rdd.map(lambda r: r[0]).repartition(400)\n vectors = arguments.map(lambda argument: project_argument(argument))\n ids_with_vectors = vectors.zip(ids)\n ids_with_vectors.saveAsTextFile('/user/befi8957/args-me-esa-topic-vectors')\n\n\nproject_arguments()","sub_path":"pycfiles/topic-ontologies-0.2.0.linux-x86_64.tar/model_corpus_args_me_cluster.cpython-37.py","file_name":"model_corpus_args_me_cluster.cpython-37.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"626048858","text":"import string\nimport numpy as np\n\n\nclass GoldStdWord:\n \"\"\"\n Wrapper class for a goldstd context\n i.e. word - segmentation - segment_mapping\n \"\"\"\n def __init__(self, _word: str = '', _segmentation: str = '', _seg_mapping: list = []):\n \"\"\"\n :param _word: 'abounded'\n :param _segmentation: 'abound ed'\n :param _seg_mapping: ['B', 'M', 'M', 'M', 'M', 'M', 'B', 'M']\n \"\"\"\n self.word = _word\n self.segmentation = _segmentation\n self.seg_mapping = _seg_mapping\n\n\nclass Prediction:\n \"\"\"\n Wrapper class for the prediction context\n \"\"\"\n def __init__(self, word, pred):\n \"\"\"\n :param word: 'abounded'\n :param pred: ['B', 'M', ...]\n \"\"\"\n self.word = word\n self.prediction = pred\n\n\nclass Context:\n \"\"\"\n Runtime context\n \"\"\"\n def __init__(self):\n self.SEPARATOR = '\\t'\n self.BEGIN = 'B'\n self.MIDDLE = 'M'\n self.END = self.MIDDLE\n self.SINGLE = self.BEGIN\n self.START = ''\n self.STOP = ''\n self.ALPHABET = \\\n [self.START, self.STOP] + \\\n [\"'\", '-'] + \\\n list(string.ascii_lowercase) + \\\n ['å', 'ä', 'ö'] + \\\n ['í', 'ö', 'ü', 'ó', 'ő', 'ú', 'ű', 'é', 'á']\n self.input_map = None\n self.output_map = None\n\n def set_bmes_context(self):\n \"\"\"\n Sets the context for BMES classification\n \"\"\"\n self.END = 'E'\n self.SINGLE = 'S'\n\n def get_input_map(self):\n \"\"\"\n Returns a numpy matrix representing the one-hot vectors\n for the input alphabet\n :return: matrix\n \"\"\"\n if not self.input_map:\n identity = np.identity(len(self.ALPHABET))\n self.input_map = {key: identity[self.ALPHABET.index(key)] for key in self.ALPHABET}\n return self.input_map\n\n def get_output_map(self):\n \"\"\"\n Returns a numpy matrix representing the one-hot vectors\n for the output alphabet\n :return: matrix\n \"\"\"\n if not self.output_map:\n alphabet = list({self.BEGIN, self.SINGLE, self.END, self.MIDDLE})\n identity = np.identity(len(alphabet))\n self.output_map = {key: identity[alphabet.index(key)] for key in alphabet}\n return self.output_map\n\n\nclass EvaluationContext(Context):\n \"\"\"\n Evaluation specific Context\n \"\"\"\n def __init__(self):\n super().__init__()\n self.expected = []\n self.actual = []\n self.POS = [self.BEGIN, self.SINGLE]\n self.NEG = [self.MIDDLE, self.END]\n\n\nclass SegmentationContext(Context):\n \"\"\"\n Segmentation 
specific Context\n \"\"\"\n def __init__(self):\n super().__init__()\n self.windowtype = 0\n self.windowsize = 1\n\n\nclass MainContext(SegmentationContext):\n \"\"\"\n Train & Build specific Context\n \"\"\"\n def __init__(self, _windowsize: int, _windowtype: int, _hiddenlayer: int, _epochs: int,\n _activate: str, _optimize: str, _loss: str, _init: str, _earlystop: int,\n _training: str, _test: str, _verb: int, _devel: str = None,\n ):\n \"\"\"\n :param _windowsize: size of window\n :param _windowtype: type of window (0,1,2 --> left,center,right)\n :param _hiddenlayer: number of hidden layers in the network\n :param _epochs: number of epochs to run\n :param _activate: name of the activation function\n :param _optimize: name of the optimization method\n :param _loss: name of the loss function\n :param _init: name of the initialization method\n :param _earlystop: value for early-stopping\n :param _training: training data for segmentation learning\n :param _test: file containing the words to be segmented\n :param _verb: level of verbosity for logging\n :param _devel: file to be used for benchmark evaluation\n \"\"\"\n super().__init__()\n\n self.devel = _devel\n self.training = _training\n self.test = _test\n self.windowsize = _windowsize\n self.windowtype = _windowtype\n self.hiddenlayer = _hiddenlayer\n self.epochs = _epochs\n self.activate = _activate\n self.optimize = _optimize\n self.loss = _loss\n self.init = _init\n self.earlystop = _earlystop\n self.verbose = _verb\n\n def __str__(self):\n return '[Window size: ' + str(self.windowsize) + '; ' + \\\n 'Window type: ' + str(self.windowtype) + '; ' + \\\n 'Hidden layers: ' + str(self.hiddenlayer) + '; ' + \\\n 'Epoch size: ' + str(self.epochs) + '; ' + \\\n 'Activation: ' + self.activate + '; ' + \\\n 'Optimizer: ' + self.optimize + '; ' + \\\n 'Loss: ' + self.loss + '; ' + \\\n 'Initialization: ' + self.init + '; ' + \\\n 'Early stopping patience: ' + str(self.earlystop) + ']'\n","sub_path":"MorphemeSegmentation/context.py","file_name":"context.py","file_ext":"py","file_size_in_byte":4998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"549088333","text":"#!/usr/bin/env python3\nfrom collections import defaultdict\n\n# {state_name: {0: [val_to_write, dir, new_state], 1: [...]},}\ntest_input = {\n 'a': {\n 0: [1, 'r', 'b'],\n 1: [0, 'l', 'b'],\n },\n 'b': {\n 0: [1, 'l', 'a'],\n 1: [1, 'r', 'a'],\n }\n}\n\nreal_input = {\n 'a': {\n 0: [1,'r','b'],\n 1: [0,'l','c'],\n },\n 'b': {\n 0: [1,'l','a'],\n 1: [1,'r','c'],\n },\n 'c': {\n 0: [1,'r','a'],\n 1: [0,'l','d'],\n },\n 'd': {\n 0: [1,'l','e'],\n 1: [1,'l','c'],\n },\n 'e': {\n 0: [1,'r','f'],\n 1: [1,'r','a'],\n },\n 'f': {\n 0: [1,'r','a'],\n 1: [1,'r','e'],\n },\n}\n\n\ndef solve(states, steps):\n tape = defaultdict(lambda: 0, defaultdict(int))\n cursor = 0\n curr_state = 'a'\n\n for step in range(steps):\n operations = states[curr_state][tape[cursor]]\n tape[cursor] = operations[0]\n cursor += 1 if operations[1] == 'r' else -1\n curr_state = operations[2]\n \n return list(tape.values()).count(1)\n\n\nprint(solve(test_input, 6))\nprint(solve(real_input, 12261543))\n","sub_path":"2017/25.py","file_name":"25.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"579769964","text":"import database\nimport os\nimport nba_api_client as nba_client\n\nGetTeamNames = False\nAddPositionsToDb = False\nAddPlayersToDb = False\nAddScheduleToDb = 
False\nAddPlayerStats = False\n\n\"\"\"Only necessary for DB setup\"\"\"\n\n\"\"\"This should be commented out until team names are added to db\"\"\"\nNbaTeamIdToDbIdEnum = database.GetNbaTeamIdToDbIdEnum()\nprint(NbaTeamIdToDbIdEnum)\nPositionPlayerDict = {\"pg\": 1, \"sg\": 2, \"sf\": 3, \"pf\": 4, \"c\": 5}\n\n\ndef main():\n teamDict = getTeamNames()\n playerDict, playerIds = getAllPlayers(teamDict)\n if GetTeamNames:\n getTeamNames(True)\n if AddScheduleToDb:\n getTodaysSchedule(teamDict)\n if AddPlayersToDb:\n addPlayersToSystem(playerDict)\n if AddPlayerStats:\n GetPlayerStats()\n\n\ndef showApiInfo():\n dataDict = nba_client.GetApiInfoRoute()\n allRoutes = dataDict['links']\n for link in allRoutes:\n print(\"%s: %s\" % (link, allRoutes[link]))\n\n\ndef printAllTeamName():\n allNames = nba_client.GetAllNbaTeamNames()\n teamNames = allNames['league']\n for name in teamNames.keys():\n print(name)\n for detail in name:\n print(detail)\n\n\ndef getLeagueScoringScale():\n pointDict = {'DraftKing': {}, 'FanDuel': {}}\n cur_file = open('fantasy_details_scoring.csv', 'r')\n linesOfFile = cur_file.readlines()\n for j in range(1, len(linesOfFile)):\n curLine = linesOfFile[j].strip(\"\\n\").split(';')\n league = int(curLine[0])\n point_type = curLine[2].strip('\\\"')\n value = float(curLine[3])\n league_text = 'DraftKing' if league == 1 else 'FanDuel'\n pointDict[league_text][point_type] = value\n return pointDict\n\n\"\"\"This function needs to be broken out into two functions\n one for getting team names and one for actually writing\n all of the team names to the database\"\"\"\ndef getTeamNames(writeToDb=False):\n teamNames = nba_client.GetAllNbaTeamNames()\n allTeams = teamNames['league']\n teamDict = dict()\n for currentTeam in allTeams['standard']:\n if currentTeam.get('isNBAFranchise', False) == True:\n apiTeamId = int(currentTeam['teamId'])\n apiTeamName = currentTeam['urlName']\n apiTriCode = currentTeam['tricode']\n teamDict[apiTeamId] = (apiTeamName, apiTriCode)\n for team_id in sorted(teamDict.keys()):\n team_name = teamDict[team_id][0]\n triCode = teamDict[team_id][1].lower()\n #print(team_id, team_name, triCode)\n if writeToDb:\n database.AddTeamToDb(team_id, team_name, triCode)\n return teamDict\n\n\ndef getTodaysSchedule(teamDict):\n print(teamDict.keys())\n nbaID = set()\n yesterdayTeams = []\n teamMatchups = []\n schedule = nba_client.GetLeagueSchedule()\n for game in schedule['league']['standard']:\n gameId = game['gameId']\n startTime = game['startTimeUTC']\n nbaHomeTeamId = int(game['hTeam']['teamId'])\n nbaVisitorTeamId = int(game['vTeam']['teamId'])\n if nbaHomeTeamId in teamDict.keys() and nbaHomeTeamId in teamDict.keys():\n try:\n homeTeamScore = int(game['hTeam']['score'])\n visitorTeamScore = int(game['vTeam']['score'])\n except Exception as error:\n continue\n #database.AddGameToDb(gameId, startTime, nbaHomeTeamId, homeTeamScore, nbaVisitorTeamId, visitorTeamScore)\n\n\n\n\ndef add_teams_for_holdoer():\n startTime = ''\n yesterdayTeams = []\n nbaVisitorTeamId = ''\n nbaHomeTeamId = ''\n teamMatchups = []\n nbaID = []\n game = {}\n if startTime > '2018-12-02T08' and startTime < '2018-12-03T08':\n yesterdayTeams.append(nbaHomeTeamId)\n yesterdayTeams.append(nbaVisitorTeamId)\n if startTime > '2018-12-03T08' and startTime < '2018-12-04T08':\n nbaID.append(nbaHomeTeamId)\n nbaID.append(nbaVisitorTeamId)\n todaysMatchup = dict(HomeTeam=nbaHomeTeamId, VisitorTeam=nbaVisitorTeamId)\n teamMatchups.append(todaysMatchup)\n homeTeamScore = game['hTeam']['score'] if 
game['hTeam']['score'] != '' else 0\n visitorTeamScore = game['vTeam']['score'] if game['vTeam']['score'] != '' else 0\n\ndef getAllPlayers(teamDict):\n playerDict = dict()\n allPlayers = nba_client.GetLeagueRoster()\n playerIds = []\n for player in allPlayers['league']['standard']:\n fName = player['firstName'].strip(\",\").replace(' ', '').replace(\"'\", '').replace(\".\", '').lower()\n lName = player['lastName']\n nbaPlayerId = int(player['personId'])\n isActive = player['isActive']\n position = player['pos'].lower()\n if position == 'g':\n position = 'pg'\n elif position == 'g-f':\n position = 'sg'\n elif position == 'f':\n position = 'pf'\n elif position == 'f-g':\n position = 'sf'\n elif position == 'f-c':\n position = 'pf'\n elif position == 'f-g':\n position = 'sf'\n elif position == 'c-f':\n position = 'pf'\n try:\n position = PositionPlayerDict[position]\n except Exception as err:\n print('%s not in player dict' % position)\n nbaTeamId = int(player['teamId']) if player['teamId'] else ''\n if nbaTeamId:\n if nbaTeamId not in playerDict.keys():\n playerDict[nbaTeamId] = []\n playerIds.append(nbaPlayerId)\n playerDict[nbaTeamId].append(dict(PlayerID=nbaPlayerId, FirstName=fName, LastName=lName, PositionID=position, Stats=[]))\n #print('Marvin:',dict(PlayerID=nbaPlayerId, FirstName=fName, LastName=lName, PostionID=position, Stats=[]))\n for team in playerDict.keys():\n currentTeam = playerDict[team]\n teamName = teamDict[team][0]\n # print('\\nCurrent Roster for %s' % teamName)\n # print('PlayerID\\tFName\\tLastName\\tPostion')\n return playerDict, playerIds\n\n\ndef addPlayersToSystem(playerDict):\n for roster in playerDict:\n teamId = roster\n teamRoster = playerDict[roster]\n for team in teamRoster:\n nbaPlayerId = team.get('PlayerID')\n fName = team.get('FirstName')\n lName = team.get('LastName')\n positionId = team.get('PositionID')\n isActive = True\n database.AddPlayerToDb(nbaPlayerId,fName,lName,teamId,positionId)\n #print(nbaPlayerId, fName, lName, teamId, positionId, isActive)\n print('New Team')\n\ndef addPlayersToSystemLegacy(playerDict):\n allPlayers = nba_client.GetLeagueRoster()\n for player in allPlayers['league']['standard']:\n print('CurPlayer:',player)\n fName = player['firstName'].strip(\",\").replace(' ', '').replace(\"'\", '').replace('.', '').lower()\n lName = player['lastName']\n indexOfSpace = lName.find(' ')\n lName = lName[:indexOfSpace] if indexOfSpace != -1 else lName\n lName = lName.strip(\",\").replace(' ', '').replace(\"'\", '').replace('.', '').lower()\n nbaPlayerId = player['personId']\n isActive = player['isActive']\n position = player['pos'].lower()\n nbaTeamId = player['teamId']\n if nbaTeamId != '' and int(nbaTeamId) in NbaTeamIdToDbIdEnum.keys():\n currentTeam = NbaTeamIdToDbIdEnum[int(nbaTeamId)]['name']\n print(currentTeam)\n teamId = NbaTeamIdToDbIdEnum[int(nbaTeamId)]['id']\n print('ENUM:',NbaTeamIdToDbIdEnum)\n\n print('TEAMID:',teamId)\n print('Roster:',playerDict)\n print('PlayerDict:',playerDict)\n # teamRoster = playerDict[teamId]\n teamRoster = playerDict.keys()\n\n playerFound = False\n print('TEAMROSTER:',teamRoster)\n for possiblePlayer in teamRoster:\n print('PossiblePlayer:',possiblePlayer)\n tempFName = possiblePlayer[0].strip(\",\").replace(' ', '').replace(\"'\", '').replace('.', '').lower()\n tempLName = possiblePlayer[1].strip(\",\").replace(' ', '').replace(\"'\", '').replace('.', '').lower()\n if tempFName == fName and tempLName == lName:\n playerFound = True\n newPosition = possiblePlayer[2].strip(\",\").replace(' ', 
'').replace(\"'\", '').replace('.',\n '').lower()\n positionId = PositionPlayerDict[newPosition]\n database.AddPlayerToDb(nbaPlayerId,fName,lName,teamId,positionId,isActive)\n #print(nbaPlayerId,fName,lName,teamId,positionId,isActive)\n if not playerFound:\n print(\"{} {} could not be found on the {}\".format(fName, lName, currentTeam))\n else:\n pass\n\n\ndef GetPlayerStats(teamIds, playerDict, playerIds, teamsIds):\n gamesNotInDb = set()\n leagueSchedule = nba_client.GetLeagueSchedule()\n for game in leagueSchedule['league']['standard']:\n nbaGameId = game['gameId']\n dateString = game['startDateEastern']\n homeTeamId = int(game['hTeam']['teamId'])\n visitorTeamId = int(game['vTeam']['teamId'])\n\n if dateString > '20181016':\n print('GameID:%s Date:%s' % (nbaGameId, dateString))\n statsForGame = nba_client.GetGameStats(nbaGameId, dateString)\n for row in statsForGame['stats']['activePlayers']:\n nbaTeamId = int(row['teamId'])\n if nbaTeamId != '' and homeTeamId in teamIds and visitorTeamId in teamIds:\n gameInDb = database.GameInDb(nbaGameId)\n print('GameInDB:',gameInDb)\n if gameInDb:\n currentNbaPlayerId = int(row['personId'])\n try:\n ptsScored = int(row['points'])\n except Exception as error:\n ptsScored = 0\n try:\n threePointersMade = int(row['tpm'])\n except Exception as error:\n threePointersMade = 0\n try:\n rebounds = int(row['totReb'])\n except Exception as error:\n rebounds = 0\n try:\n assists = int(row['assists'])\n except Exception as error:\n assists = 0\n try:\n steals = int(row['steals'])\n except Exception as error:\n steals = 0\n try:\n turnovers = int(row['turnovers'])\n except Exception as error:\n turnovers = 0\n try:\n blocks = int(row['blocks'])\n except Exception as error:\n blocks = 0\n\n opposingTeamID = visitorTeamId if nbaTeamId == homeTeamId else homeTeamId\n if currentNbaPlayerId in playerIds:\n statDict = dict(Date=dateString, GameID=nbaGameId, OpposingTeamID=opposingTeamID,\n PointsScored=ptsScored, ThreesMade=threePointersMade, Rebounds=rebounds,\n Assists=assists, Steals=steals, Turnovers=turnovers, Blocks=blocks)\n correctTeam = playerDict[nbaTeamId]\n for player in correctTeam:\n if player['PlayerID'] == currentNbaPlayerId:\n player['Stats'].append(statDict)\n print(currentNbaPlayerId,nbaGameId,ptsScored,threePointersMade,rebounds,assists,steals,turnovers,blocks)\n\n # print(player)\n # database.UpsertPlayerGameStat(currentNbaPlayerId,nbaGameId,ptsScored,threePointersMade,rebounds,assists,steals,turnovers,blocks)\n break\n else:\n pass\n # print(currentNbaPlayerId,nbaGameId,ptsScored,threePointersMade,rebounds,assists,steals,turnovers,blocks)\n return playerDict\n\n\ndef addGameStats(scoreBoard, nbaGameId):\n gamesNotInDb = []\n for row in scoreBoard['stats']['activePlayers']:\n nbaTeamId = row['teamId']\n if nbaTeamId != '' and int(nbaTeamId) in NbaTeamIdToDbIdEnum.keys():\n gameInDb = database.GameInDb(nbaGameId)\n if not gameInDb:\n gamesNotInDb.append(nbaGameId)\n currentNbaPlayerId = int(row['personId'])\n ptsScored = int(row['points'])\n threePointersMade = int(row['tpm'])\n rebounds = int(row['totReb'])\n assists = int(row['assists'])\n steals = int(row['steals'])\n turnovers = int(row['turnovers'])\n blocks = int(row['blocks'])\n # database.UpsertPlayerGameStat(currentNbaPlayerId,nbaGameId,ptsScored,threePointtersMad,rebounds, assists, steals, turnovers, blocks)\n print(gamesNotInDb)\n\n\n\nif __name__ == \"__main__\":\n 
main()\n","sub_path":"src/fantasy_setup.py","file_name":"fantasy_setup.py","file_ext":"py","file_size_in_byte":13279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"376782915","text":"# -*- coding: utf-8 -*-\n\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom plotter import Ui_dialog as form\n\n\nclass Ui_Dialog(object):\n def setupUi(self, Dialog):\n Dialog.setObjectName(\"Dialog\")\n Dialog.resize(350, 250)\n self.col='#ffffff'\n self.gridLayout = QtWidgets.QGridLayout(Dialog)\n self.gridLayout.setObjectName(\"gridLayout\")\n self.verticalLayout = QtWidgets.QVBoxLayout()\n self.verticalLayout.setObjectName(\"verticalLayout\")\n self.horizontalLayout_4 = QtWidgets.QHBoxLayout()\n self.horizontalLayout_4.setSpacing(100)\n self.horizontalLayout_4.setObjectName(\"horizontalLayout_4\")\n self.label = QtWidgets.QLabel(Dialog)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())\n self.label.setSizePolicy(sizePolicy)\n font = QtGui.QFont()\n font.setFamily(\"Tw Cen MT\")\n font.setPointSize(10)\n self.label.setFont(font)\n self.label.setObjectName(\"label\")\n self.horizontalLayout_4.addWidget(self.label)\n self.label_2 = QtWidgets.QLabel(Dialog)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.label_2.sizePolicy().hasHeightForWidth())\n self.label_2.setSizePolicy(sizePolicy)\n font = QtGui.QFont()\n font.setFamily(\"Tw Cen MT\")\n font.setPointSize(10)\n self.label_2.setFont(font)\n self.label_2.setObjectName(\"label_2\")\n self.horizontalLayout_4.addWidget(self.label_2)\n self.verticalLayout.addLayout(self.horizontalLayout_4)\n self.horizontalLayout_2 = QtWidgets.QHBoxLayout()\n self.horizontalLayout_2.setSpacing(128)\n self.horizontalLayout_2.setObjectName(\"horizontalLayout_2\")\n self.colx = QtWidgets.QComboBox(Dialog)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.colx.sizePolicy().hasHeightForWidth())\n self.colx.setSizePolicy(sizePolicy)\n self.colx.setStyleSheet(\"QComboBox{\\n\"\n \"background-color: white;\\n\"\n \"color: blue;\\n\"\n \"border: 1px solid blue;\\n\"\n \"border-radius: 3px;\\n\"\n \"}\\n\"\n \"QComboBox::drop-down {\\n\"\n \" border-left-width: 1px;\\n\"\n \" border-left-color: blue;\\n\"\n \" border-left-style: solid;\\n\"\n \" border-top-right-radius: 3px;\\n\"\n \" border-bottom-right-radius: 3px;\\n\"\n \" width: 24px;\\n\"\n \"}\\n\"\n \"QComboBox::down-arrow {\\n\"\n \" image: url(Icons/arrow-blue.png);\\n\"\n \"}\"\n )\n self.colx.setObjectName(\"colx\")\n self.horizontalLayout_2.addWidget(self.colx)\n self.coly = QtWidgets.QComboBox(Dialog)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.coly.sizePolicy().hasHeightForWidth())\n self.coly.setSizePolicy(sizePolicy)\n self.coly.setStyleSheet(\"QComboBox{\\n\"\n \"background-color: white;\\n\"\n \"color: blue;\\n\"\n \"border: 1px solid blue;\\n\"\n \"border-radius: 3px;\\n\"\n \"}\\n\"\n \"QComboBox::drop-down 
{\\n\"\n \" border-left-width: 1px;\\n\"\n \" border-left-color: blue;\\n\"\n \" border-left-style: solid;\\n\"\n \" border-top-right-radius: 3px;\\n\"\n \" border-bottom-right-radius: 3px;\\n\"\n \" width: 24px;\\n\"\n \"}\\n\"\n \"QComboBox::down-arrow {\\n\"\n \" image: url(Icons/arrow-blue.png);\\n\"\n \"}\"\n )\n self.coly.setObjectName(\"coly\")\n self.horizontalLayout_2.addWidget(self.coly)\n self.verticalLayout.addLayout(self.horizontalLayout_2)\n self.gridLayout.addLayout(self.verticalLayout, 0, 0, 1, 1)\n self.horizontalLayout_3 = QtWidgets.QHBoxLayout()\n self.horizontalLayout_3.setObjectName(\"horizontalLayout_3\")\n self.color = QtWidgets.QPushButton(Dialog)\n font = QtGui.QFont()\n font.setFamily(\"TW Cen MT\")\n font.setPointSize(10)\n self.color.setFont(font)\n self.color.setStyleSheet(\"background-color: blue;\\n\"\n \"color: white;\\n\"\n \"border-radius: 5px;\\n\"\n \"padding: 8px 16px 8px 16px;\")\n self.color.setObjectName(\"color\")\n self.color.clicked.connect(self.colorpicker)\n self.horizontalLayout_3.addWidget(self.color)\n self.gridLayout.addLayout(self.horizontalLayout_3, 1, 0, 1, 1)\n self.horizontalLayout = QtWidgets.QHBoxLayout()\n self.horizontalLayout.setContentsMargins(10, -1, 10, -1)\n self.horizontalLayout.setSpacing(6)\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\n self.plot = QtWidgets.QPushButton(Dialog)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.plot.sizePolicy().hasHeightForWidth())\n self.plot.setSizePolicy(sizePolicy)\n self.plot.setStyleSheet(\"background-color: blue;\\n\"\n \"color: white;\\n\"\n \"border-radius: 5px;\\n\"\n \"padding: 8px 16px 8px 16px;\")\n self.plot.setObjectName(\"plot\")\n self.plot.clicked.connect(self.showplot)\n self.horizontalLayout.addWidget(self.plot)\n self.gridLayout.addLayout(self.horizontalLayout, 2, 0, 1, 1)\n\n self.retranslateUi(Dialog)\n QtCore.QMetaObject.connectSlotsByName(Dialog)\n\n def retranslateUi(self, Dialog):\n _translate = QtCore.QCoreApplication.translate\n Dialog.setWindowTitle(_translate(\"Dialog\", \"Dialog\"))\n self.label.setText(_translate(\"Dialog\", \"Column for X axis:\"))\n self.label_2.setText(_translate(\"Dialog\", \"Column for Y axis:\"))\n self.plot.setText(_translate(\"Dialog\", \"Plot\"))\n self.color.setText(_translate(\"Dialog\", \"Choose Color\"))\n\n def setdata(self, dataframe, name, Dialog):\n _translate = QtCore.QCoreApplication.translate\n col_list = dataframe.columns.values.tolist()\n self.colx.addItems(col_list)\n self.coly.addItems(col_list)\n self.df = dataframe\n self.name = name\n if name == 'Line Plot':\n Dialog.setWindowTitle(_translate(\"Dialog\", \"Line Plot\"))\n Dialog.setWindowIcon(QtGui.QIcon('Icons\\\\line-chart.png'))\n elif name == 'Scatter Plot':\n Dialog.setWindowTitle(_translate(\"Dialog\", \"Scatter Plot\"))\n Dialog.setWindowIcon(QtGui.QIcon('Icons\\\\scatter-chart.png'))\n\n def showplot(self):\n str1 = str(self.colx.currentText())\n str2 = str(self.coly.currentText())\n if str1.__eq__(str2):\n msgBox = QtWidgets.QMessageBox()\n msgBox.setIcon(QtWidgets.QMessageBox.Information)\n msgBox.setText(\"Please choose two different columns to plot.\")\n msgBox.setWindowTitle(\"Plotter\")\n msgBox.setWindowIcon(QtGui.QIcon('Icons\\\\icon.png'))\n msgBox.setIcon(QtWidgets.QMessageBox.Information)\n msgBox.addButton(QtWidgets.QMessageBox.Yes)\n msgBox.exec_()\n return\n else:\n dialog = 
QtWidgets.QDialog()\n dialog.ui = form()\n dialog.setWindowIcon(QtGui.QIcon('Icons\\\\plot.png'))\n dialog.ui.setupUi(dialog)\n dialog.ui.plotdoubleaxis(self.name, str1, str2, self.df, self.col)\n dialog.exec_()\n dialog.show()\n\n def colorpicker(self):\n col1 = QtWidgets.QColorDialog.getColor()\n self.col = col1.name()\n","sub_path":"doubleaxisplot.py","file_name":"doubleaxisplot.py","file_ext":"py","file_size_in_byte":9076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"519776712","text":"from faker import Faker\n\n\nclass WorksAdmission:\n def __init__(self, part_id, workres_id):\n faker = Faker()\n\n self.part = part_id\n self.res = workres_id\n\n def to_sql(self, start=True):\n values = \"(\" + str(self.part) + \",\" + str(self.res) + \")\"\n\n return \"INSERT INTO ZgloszeniaWarsztatow (IDUczestnika, IDRezerwacjiWarsztatu) \" \\\n \"VALUES \" + values if start else values","sub_path":"src/WorksAdmission.py","file_name":"WorksAdmission.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"107133765","text":"#encoding: utf-8\nfrom OpenOrange import *\nfrom Report import Report\nfrom PayMode import PayMode\n\nstext = [\"Sin Aprobacion\",\"Approved\"]\n\nclass LabelHistory(Report):\n\n\n def defaults(self):\n Report.defaults(self)\n pars = self.getRecord()\n pars.Status = 1 #approved\n pars.OrderBy = 0\n pars.ViewOption = 0\n pars.IncludeInvalids = False\n pars.s1 = True\n pars.s2 = True\n pars.s3 = True\n pars.s4 = True\n pars.s5 = True\n pars.s6 = True\n pars.s7 = True\n pars.s8 = True\n pars.FromDate = today()\n pars.ToDate = today()\n\n\n def getQuery(self):\n pars = self.getRecord()\n labelSet = [ pars.Label ]\n invs = \"SELECT inv.TransDate,{SerNr},{CustName},{Computer},{Currency},[ipr].{Name} as {Concept},{RowTotal} AS {Amount},{TransTime},[inv].{OfficialSerNr} AS {RefStr},{Status},{Invalid}, \\n\"\n invs += \"'Invoice' AS {OpType}\\n\"\n invs += \"FROM [InvoiceItemRow] [ipr] \\n\"\n invs += \"INNER JOIN [Invoice] [inv] ON [inv].{internalId} = [ipr].{masterId} \\n\"\n invs += self.TransFilter(pars.FromDate,pars.ToDate,pars.Status,pars.Office,pars.Shift,pars.ShiftNr,pars.User,pars.Computer,\"inv\")\n #invs += \"AND {OriginType}<>i|9| \\n\" microtel mendoza should not ignores Invoices from BarTab, \n if labelSet: invs += \"AND [ipr].{Labels} IN ('%s') \\n\" % \"','\".join(labelSet)\n if not pars.IncludeInvalids: invs += \"AND (inv.{Invalid}=i|0| OR inv.{Invalid} IS NULL) \\n\"\n invs += \"ORDER BY {TransTime}\\n\"\n\n rbos = \"SELECT r.TransDate,{SerNr},{CustName},{Computer},[r].{Currency},[rpr].{InvoiceNr} as {Concept},-[rpr].{InvoiceAmount} as {Amount},{TransTime},[r].{RefStr},{Status},{Invalid},\\n\"\n rbos += \"'Receipt' AS {OpType} \\n\"\n rbos += \"FROM [ReceiptInvoiceRow] [rpr] \\n\"\n rbos += \"INNER JOIN [Receipt] [r] ON [r].{internalId} = [rpr].{masterId} \\n\"\n rbos += self.TransFilter(pars.FromDate,pars.ToDate,pars.Status,pars.Office,pars.Shift,pars.ShiftNr,pars.User,pars.Computer,\"r\")\n if labelSet: rbos += \"AND [rpr].{Labels} IN ('%s') \\n\" % \"','\".join(labelSet)\n if pars.RefStr: rbos += \"AND {RefStr}=s|%s| \\n\" % pars.RefStr\n if not pars.IncludeInvalids: rbos += \"AND (r.{Invalid}=i|0| OR r.{Invalid} IS NULL) \\n\"\n rbos += \"ORDER BY {TransTime}\\n\"\n\n pinv = \"SELECT [inv].{TransDate},{SerNr},{SupName} AS CustName,{Computer},{Currency},[ipr].{Comment} as {Concept},{RowTotal} AS 
{Amount},{TransTime},[inv].{RefStr},{Status},{Invalid}, \\n\"\n pinv += \"'PurchaseInvoice' AS {OpType}\\n\"\n pinv += \"FROM [PurchaseInvoiceRow] [ipr] \\n\"\n pinv += \"INNER JOIN [PurchaseInvoice] [inv] ON [inv].{internalId} = [ipr].{masterId} \\n\"\n pinv += self.TransFilter(pars.FromDate,pars.ToDate,pars.Status,pars.Office,pars.Shift,pars.ShiftNr,pars.User,pars.Computer,\"inv\")\n if labelSet: pinv += \"AND [ipr].{Labels} IN ('%s') \\n\" % \"','\".join(labelSet)\n if not pars.IncludeInvalids: pinv += \"AND (inv.{Invalid}=i|0| OR inv.{Invalid} IS NULL) \\n\"\n pinv += \"ORDER BY {TransTime}\\n\"\n\n pays = \"SELECT r.TransDate,{SerNr},{SupName} as {CustName},{Computer},[r].{Currency},[prow].{InvoiceNr} as {Concept},-[prow].{InvoiceAmount} as {Amount},{TransTime},[r].{RefStr},{Status},{Invalid}, \\n\"\n pays += \"'Payment' AS {OpType} \\n\"\n pays += \"FROM [PaymentInvoiceRow] [prow] \\n\"\n pays += \"INNER JOIN [Payment] [r] ON [r].{internalId} = [prow].{masterId} \\n\"\n pays += self.TransFilter(pars.FromDate,pars.ToDate,pars.Status,pars.Office,pars.Shift,pars.ShiftNr,pars.User,pars.Computer,\"r\")\n if labelSet: pays += \"AND [prow].{Labels} IN ('%s') \\n\" % \"','\".join(labelSet)\n if(pars.RefStr): pays += \"AND [r].{RefStr}=s|%s| \\n\" % pars.RefStr\n if not pars.IncludeInvalids: pays += \"AND (r.{Invalid}=i|0| OR r.{Invalid} IS NULL) \\n\"\n pays += \"ORDER BY {TransTime}\\n\"\n\n cout = \"SELECT c.TransDate,{SerNr},[c].{Comment} AS {CustName},{Computer},{Currency},[cor].{Name} as {Concept},-[cor].{Amount},{TransTime},[c].{RefStr},{Status},{Invalid}, \\n\"\n cout += \"'CashOut' AS {OpType} \\n\"\n cout += \"FROM [CashOutCntRow] [cor] \\n\"\n cout += \"INNER JOIN [CashOut] [c] ON [c].{internalId} = [cor].{masterId} \\n\"\n cout += self.TransFilter(pars.FromDate,pars.ToDate,pars.Status,pars.Office,pars.Shift,pars.ShiftNr,pars.User,pars.Computer,\"c\")\n if labelSet: cout += \"AND [cor].{Labels} IN ('%s') \\n\" % \"','\".join(labelSet)\n if(pars.RefStr): cout += \"AND [c].{RefStr}=s|%s| \\n\" % pars.RefStr\n if not pars.IncludeInvalids: cout += \"AND (c.{Invalid}=i|0| OR c.{Invalid} IS NULL) \\n\"\n cout += \"ORDER BY {TransTime}\\n\"\n\n cin = \"SELECT c.TransDate,{SerNr},[c].{Comment} AS CustName,{Computer},{Currency},[cor].{Name} as {Concept},[cor].{Amount},{TransTime},[c].{RefStr},{Status},{Invalid}, \\n\"\n cin += \"'CashIn' AS {OpType} \\n\"\n cin += \"FROM [CashInCntRow] [cor] \\n\"\n cin += \"INNER JOIN [CashIn] [c] ON [c].{internalId} = [cor].{masterId} \\n\"\n cin += self.TransFilter(pars.FromDate,pars.ToDate,pars.Status,pars.Office,pars.Shift,pars.ShiftNr,pars.User,pars.Computer,\"c\")\n if labelSet: cin += \"AND [cor].{Labels} IN ('%s') \\n\" % \"','\".join(labelSet)\n if not pars.IncludeInvalids: cin += \"AND (c.{Invalid}=i|0| OR c.{Invalid} IS NULL) \\n\"\n cin += \"ORDER BY {TransTime}\\n\"\n\n brcpt = \"SELECT r.TransDate,{SerNr},[rpr].{CustName},{Computer},[r].{Currency},[rpr].{InvoiceNr} as {Concept},[rpr].{InvoiceAmount} as {Amount},{TransTime},[r].{RefStr},{Status},{Invalid},\\n\"\n brcpt += \"'BankReceipt' AS {OpType} \\n\"\n brcpt += \"FROM [BankReceiptRow] [rpr] \\n\"\n brcpt += \"INNER JOIN [BankReceipt] [r] ON [r].{internalId} = [rpr].{masterId} \\n\"\n brcpt += self.TransFilter(pars.FromDate,pars.ToDate,pars.Status,pars.Office,pars.Shift,pars.ShiftNr,pars.User,pars.Computer,\"r\")\n if labelSet: brcpt += \"AND [rpr].{Labels} IN ('%s') \\n\" % \"','\".join(labelSet)\n if pars.RefStr: brcpt += \"AND [r].{RefStr}=s|%s| \\n\" % pars.RefStr\n if not 
pars.IncludeInvalids: brcpt += \"AND (r.{Invalid}=i|0| OR r.{Invalid} IS NULL) \\n\"\n brcpt += \"ORDER BY {TransTime}\\n\"\n\n exp = \"SELECT [ex].{TransDate},{SerNr},{Comment} AS CustName,{Computer},{Currency},[exr].{Name} as {Concept},-{RowTotal} AS {Amount},{TransTime},[ex].{RefStr},{Status},{Invalid}, \\n\"\n exp += \"'Expenses' AS {OpType}\\n\"\n exp += \"FROM [ExpensesRow] [exr] \\n\"\n exp += \"INNER JOIN [Expenses] [ex] ON [ex].{internalId} = [exr].{masterId} \\n\"\n exp += self.TransFilter(pars.FromDate,pars.ToDate,pars.Status,pars.Office,pars.Shift,pars.ShiftNr,pars.User,pars.Computer,\"ex\")\n if labelSet: exp += \"AND [exr].{Labels} IN ('%s') \\n\" % \"','\".join(labelSet)\n if not pars.IncludeInvalids: exp += \"AND (ex.{Invalid}=i|0| OR ex.{Invalid} IS NULL) \\n\"\n exp += \"ORDER BY {TransTime}\\n\"\n\n\n nullquery = \"SELECT NULL AS {TransDate},NULL AS {SerNr},NULL AS {CustName}, NULL AS {Computer},NULL AS {Currency},NULL AS {Concept},NULL AS {Amount}, NULL AS {TransTime},NULL AS {RefStr}, NULL AS {Status}, NULL AS {Invalid},NULL AS {OpType} \\n\"\n\n if not pars.s1: invs = nullquery\n if not pars.s2: rbos = nullquery\n if not pars.s3: pinv = nullquery\n if not pars.s4: pays = nullquery\n if not pars.s5: cout = nullquery\n if not pars.s6: cin = nullquery\n if not pars.s7: brcpt = nullquery\n if not pars.s8: exp = nullquery\n\n query = Query()\n query.sql = \"(%s) UNION (%s) UNION (%s) UNION (%s) UNION (%s) UNION (%s) UNION (%s) UNION (%s)\\n\" % (invs,rbos,pinv,pays,cout,cin,brcpt,exp)\n query.sql += \"ORDER BY {TransDate},{TransTime}\\n\"\n return query\n\n def run(self):\n pmodes = {}\n comps = {}\n pars = self.getRecord()\n if (not pars.FromDate or not pars.ToDate):\n return\n\n self.printReportTitle(\"Label History\")\n self.startTable()\n if (pars.ViewOption==0):\n self.showReport1()\n else: \n ctot = self.showReport2(True)\n dtot = self.showReport2(False)\n self.startRow(Style=\"C\")\n self.addValue(\"\")\n self.addValue(\"\")\n self.addValue(\"\")\n self.addValue(\"\")\n self.addValue(\"\")\n self.addValue(\"Saldo\")\n self.addValue(\"\")\n self.addValue(ctot+dtot)\n self.endRow()\n self.endTable()\n\n\n def showReport1(self):\n query = self.getQuery()\n tot,ctot,dtot = 0,0,0\n zoomwin = {\"PurchaseInvoice\": \"PurchaseInvoiceWindow\", \"Invoice\":\"InvoiceWindow\",\"Receipt\":\"ReceiptWindow\",\"BankReceipt\":\"BankReceiptWindow\",\"CashOut\":\"CashOutWindow\",\"CashIn\":\"CashInWindow\",\"Payment\":\"PaymentWindow\",\"Expenses\":\"ExpensesWindow\"}\n pars = self.getRecord()\n self.startHeaderRow()\n self.addValue(\"Nr\")\n self.addValue(\"Date\")\n self.addValue(tr(\"Type\"))\n self.addValue(tr(\"Name\"))\n self.addValue(tr(\"Cur.\"))\n if (pars.showMachine):\n self.addValue(tr(\"Computer\"))\n self.addValue(tr(\"Concept\"))\n self.addValue(tr(\"Reference\"))\n if pars.ShowOption:\n self.addValue(tr(\"Amount\"))\n else:\n self.addValue(tr(\"Debit\"))\n self.addValue(tr(\"Credit\"))\n if not pars.Status:\n self.addValue(tr(\"Status\"))\n self.endHeaderRow()\n col = \"black\"\n if query.open():\n for r in query:\n #if not r.SerNr: continue # to fench off the null values\n col = \"black\"\n if r.Invalid: col = \"red\"\n self.startRow()\n self.addValue(r.SerNr,align=\"left\",Window=zoomwin.get(r.OpType,\"\"), FieldName=\"SerNr\",Color=col)\n self.addValue(r.TransDate,Color=col)\n self.addValue(tr(r.OpType),Color=col)\n self.addValue(r.CustName,Wrap=False,Color=col)\n self.addValue(r.Currency,Color=col)\n if (pars.showMachine):\n self.addValue(r.Computer,Color=col)\n 
self.addValue(r.Concept,Color=col)\n self.addValue(r.RefStr,Color=col)\n if pars.ShowOption:\n self.addValue(r.Amount,Color=col)\n else:\n if (r.Amount<0):\n self.addValue(\"\",Color=col)\n self.addValue(-r.Amount,Color=col)\n dtot += -r.Amount\n else:\n self.addValue(r.Amount,Color=col)\n self.addValue(\"\",Color=col)\n ctot += r.Amount\n if not pars.Status:\n if not r.Invalid:\n self.addValue(stext[r.Status],Color=col)\n else:\n self.addValue(tr(\"Invalid\"),Color=col)\n self.endRow()\n if not r.Invalid:\n tot += r.Amount\n query.close()\n self.startRow(Style=\"A\")\n self.addValue(\"\")\n self.addValue(\"\")\n if (pars.showMachine):\n self.addValue(\"\")\n self.addValue(\"\")\n self.addValue(\"\")\n self.addValue(\"\")\n self.addValue(\"Total\")\n self.addValue(\"\")\n if pars.ShowOption:\n self.addValue(tot,Color=col)\n else:\n self.addValue(ctot,Color=col)\n self.addValue(dtot,Color=col)\n self.endRow()\n self.startRow(Style=\"B\")\n self.addValue(\"\")\n self.addValue(\"\")\n self.addValue(\"\")\n self.addValue(\"\")\n self.addValue(\"\")\n self.addValue(\"Saldo\")\n self.addValue(\"\")\n self.addValue(\"\")\n self.addValue(ctot-dtot,Color=col)\n self.endRow()\n\n def showReport2(self,showpos=True):\n query = self.getQuery()\n tot = 0\n zoomwin = {\"PurchaseInvoice\": \"PurchaseInvoiceWindow\", \"Invoice\":\"InvoiceWindow\",\"Receipt\":\"ReceiptWindow\",\"BankReceipt\":\"BankReceiptWindow\",\"CashOut\":\"CashOutWindow\",\"CashIn\":\"CashInWindow\",\"Payment\":\"PaymentWindow\",\"Expenses\":\"ExpensesWindow\"}\n pars = self.getRecord()\n self.startRow(Style=\"B\")\n self.addValue(\"Nr\")\n self.addValue(\"Date\")\n self.addValue(tr(\"Type\"))\n self.addValue(tr(\"Name\"))\n self.addValue(tr(\"Cur.\"))\n if (pars.showMachine):\n self.addValue(tr(\"Computer\"))\n self.addValue(tr(\"Concept\"))\n self.addValue(tr(\"Reference\"))\n self.addValue(tr(\"Amount\"))\n if not pars.Status:\n self.addValue(tr(\"Status\"))\n self.endRow()\n col = \"black\"\n if query.open():\n for r in query:\n if (showpos and (r.Amount>=0)) or (not showpos and (r.Amount<0)):\n #if not r.SerNr: continue # to fench off the null values\n col = \"black\"\n if r.Invalid: col = \"red\"\n self.startRow()\n self.addValue(r.SerNr,align=\"left\",Window=zoomwin.get(r.OpType,\"\"), FieldName=\"SerNr\",Color=col)\n self.addValue(r.TransDate,Color=col)\n self.addValue(tr(r.OpType),Color=col)\n self.addValue(r.CustName,Wrap=False,Color=col)\n self.addValue(r.Currency,Color=col)\n if (pars.showMachine):\n self.addValue(r.Computer,Color=col)\n self.addValue(r.Concept,Color=col)\n self.addValue(r.RefStr,Color=col)\n self.addValue(r.Amount,Color=col)\n if not pars.Status:\n if not r.Invalid:\n self.addValue(stext[r.Status],Color=col)\n else:\n self.addValue(tr(\"Invalid\"),Color=col)\n self.endRow()\n if not r.Invalid:\n tot += r.Amount\n query.close()\n self.startRow(Style=\"A\")\n self.addValue(\"\")\n self.addValue(\"\")\n if (pars.showMachine):\n self.addValue(\"\")\n self.addValue(\"\")\n self.addValue(\"\")\n self.addValue(\"\")\n self.addValue(\"Total\")\n self.addValue(\"\")\n self.addValue(tot,Color=col)\n self.endRow()\n return tot\n\n def ZoomReceipt(self,param,value):\n from ReceiptWindow import ReceiptWindow\n from Receipt import Receipt\n r = Receipt()\n r.internalId = int(param)\n r.load()\n rw = ReceiptWindow()\n rw.setRecord(r)\n rw.open()\n\n def ZoomCheque(self,param,value):\n from ChequeWindow import ChequeWindow\n from Cheque import Cheque\n ch = Cheque()\n ch.SerNr = param\n ch.load()\n chw = ChequeWindow()\n 
chw.setRecord(ch)\n chw.open()\n","sub_path":"standard/reports/LabelHistory.py","file_name":"LabelHistory.py","file_ext":"py","file_size_in_byte":14974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"164669002","text":"import random\ndef genMatrix(l1, l2):\n j = 0\n l3 = list(zip(l1, l2))\n l4 = [[None for i in range(4)] for i in range(4)]\n for (i, ix) in l3:\n j = int((i-1) % 4)\n a = int(((i-1) / 4))\n \n #if i<5:\n # j = i\n #elif i<9:\n # j = i - 4 \n #elif i<13:\n # j = i - 8\n #else:\n # j = i - 12\n #print(\"a==\")\n #print(a)\n l4[j][a] = ix -1\n \n print(l4)\n #print(\"[[1, 2, 3, 4], \")\n #print(\"[None, None, None, None], \")\n #print(\"[None, None, None, None], \")\n #print(\"[None, None, None, None]]\")\n \ndef genCommands(l1, l2):\n j = 0\n l3 = list(zip(l1, l2))\n \n for (i, ix) in l3:\n j = int((i-1) % 4) + 1\n a = int(((i-1) / 4) + 1)\n \n #if i<5:\n # j = i\n #elif i<9:\n # j = i - 4 \n #elif i<13:\n # j = i - 8\n #else:\n # j = i - 12\n #print(\"a==\")\n #print(a)\n print(ix)\n if a == 1:\n print(str(j) + \"A\")\n elif a == 2:\n print(str(j) + \"B\")\n elif a == 3:\n print(str(j) + \"C\")\n elif a == 4:\n print(str(j) + \"D\")\n\n \n \n\n #print(\"Alfred\")\n #print(\"Martha\")\n #print(\"1\")\n #print(\"1A\")\n #print(\"2\")\n #print(\"1B\")\n #print(\"3\")\n # print(\"1C\")\n # print(\"4\")\n #print(\"1D\")\n \ndef piece_give_order():\n l = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]\n random.shuffle(l)\n return l\n\ndef prompt_square_order():\n l = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]\n random.shuffle(l)\n return l\n\ndef main():\n p_g_o = piece_give_order()\n p_s_o = prompt_square_order()\n #print(p_g_o)\n #print(p_s_o)\n #print(\"2\")\n #print(\"n\")\n #print(\"Alfred\")\n \n \n genMatrix(p_g_o, p_s_o)\n\n print(\"7357\")\n #print(\"Martha\")\n genCommands(p_g_o, p_s_o)\n \nif __name__ == \"__main__\":\n main()\n","sub_path":"tests/integration/generate_int.py","file_name":"generate_int.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"11286754","text":"\"\"\"\nAll data is provided to divided group by time (Like per-month or per-week)\nto see change of data (total weight, positive, or negative).\nTo satisfy this demand, We use custom data structure, called `TimeDivision`.\n\"\"\"\nfrom datetime import date, timedelta\nfrom flask import request\n\nclass TimeDivision:\n\n\t@staticmethod\n\tdef get_timeunit():\n\t\treturn request.args.get('timeunit', 'month')\n\n\t# TIMELABEL_UNIT = 'month'\n\t_all_labels_cache = {}\n\n\t@staticmethod\n\tdef all_labels():\n\t\tcache_id = '%s/%s' % (date.today(), TimeDivision.get_timeunit())\n\t\tif cache_id in TimeDivision._all_labels_cache:\n\t\t\treturn TimeDivision._all_labels_cache[cache_id]\n\n\t\td = timedelta(days=1)\n\t\tcur = date(2017, 6, 1) # Base\n\t\tend = date(2018, 10, 17) #date.today()\n\n\t\tret = [TimeDivision.timelabel(cur)]\n\n\t\twhile True:\n\t\t\tif (end - cur).total_seconds() == 0:\n\t\t\t\t# Loop until today\n\t\t\t\tbreak\n\n\t\t\tcur += d\n\t\t\tlabel = TimeDivision.timelabel(cur)\n\t\t\tif ret[-1] != label:\n\t\t\t\tret.append(label)\n\n\t\tret = ret[-12:]\n\t\tTimeDivision._all_labels_cache[cache_id] = ret\n\t\treturn ret\n\n\t@staticmethod\n\tdef timelabel(datetime):\n\t\t\"\"\" Convert datetime string to timelabel\n\t\t\tDefault conversion is YY.mm. (ex. 
18.06.)\n\t\t:param datetime: String or Date instance\n\t\t:return: String\n\t\t\"\"\"\n\t\tif type(datetime) == str and len(datetime) > 10 and datetime[4] == '-' and datetime[7] == '-':\n\t\t\t# Parse str to date if it has correct format\n\t\t\tdatetime = date(int(datetime[0:4]), int(datetime[5:7]), int(datetime[8:10]))\n\n\t\tif type(datetime) == date:\n\t\t\tif TimeDivision.get_timeunit() == 'month':\n\t\t\t\treturn datetime.strftime('%y.%m.')\n\n\t\t\telif TimeDivision.get_timeunit() == 'week':\n\t\t\t\tsaturday = datetime + timedelta(days=6-datetime.isoweekday()%7)\n\t\t\t\t# day under 7 -> week 1, day under 14 -> week 2, ...\n\t\t\t\tweek = (saturday.day-1) / 7 + 1\n\t\t\t\treturn saturday.strftime('%y.%m.') + (' %d\\'' % week)\n\n\t\t\telse:\n\t\t\t\traise Exception('Unsupported TIMELABEL_UNIT')\n\n\t\t# Use raw str if no format matched\n\t\treturn datetime\n\n\tdef __init__(self, default_value=None):\n\t\tself._data = {}\n\t\tself._default_value = default_value\n\n\tdef get(self, datetime):\n\t\t\"\"\" Get value in specific timelabel division\n\t\t:param datetime: String\n\t\t:return: Value set by `set`, `append`, or `increase` method\n\t\t\"\"\"\n\t\treturn self._data.get(TimeDivision.timelabel(datetime), self._default_value)\n\n\tdef set(self, datetime, value):\n\t\t\"\"\" Set value of division in specific timelabel division\n\t\t:param datetime: String\n\t\t:param value: Anything\n\t\t\"\"\"\n\t\tself._data[TimeDivision.timelabel(datetime)] = value\n\n\tdef append_list(self, datetime, value):\n\t\t\"\"\" Append value to the list in a specific timelabel division\n\t\t:param datetime: String\n\t\t:param value: Item to be appended\n\t\t\"\"\"\n\t\ttimelabel = TimeDivision.timelabel(datetime)\n\n\t\tif timelabel not in self._data:\n\t\t\tself._data[timelabel] = []\n\n\t\tself._data[timelabel].append(value)\n\n\tdef increase(self, datetime, value=1):\n\t\t\"\"\" Increase the value in a specific timelabel division\n\t\tIf the division is not set, init to 0 first\n\t\t:param datetime: String\n\t\t:param value: Value to increase\n\t\t\"\"\"\n\t\ttimelabel = TimeDivision.timelabel(datetime)\n\t\tif timelabel not in self._data:\n\t\t\tself._data[timelabel] = 0\n\t\tself._data[timelabel] += value\n\n\tdef items(self):\n\t\t\"\"\" Get list of (timelabel, value) pairs sorted by timelabel\n\t\t:return: List of (timelabel, value) tuples\n\t\t\"\"\"\n\t\treturn [\n\t\t\t(timelabel, self.get(timelabel))\n\t\t\tfor timelabel in TimeDivision.all_labels()\n\t\t]\n\n\tdef values(self):\n\t\t\"\"\" Get list of values sorted by timelabel\n\t\t:return: List of values\n\t\t\"\"\"\n\t\treturn [self.get(timelabel) for timelabel in TimeDivision.all_labels()]\n","sub_path":"app/app/model/timedivision.py","file_name":"timedivision.py","file_ext":"py","file_size_in_byte":3576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"497224943","text":"# Merge sort didn't time out but the built-in sort was 30x faster \n\nclass Solution:\n def merge(self, intervals: List[List[int]]) -> List[List[int]]:\n if not intervals or not intervals[0]:\n return intervals\n intervals = sorted(intervals, key=lambda x: x[0]) #self.part(intervals)\n ls = []\n for i,(x,y) in enumerate(intervals):\n if i == 0:\n ls.append([x,y])\n else:\n if x<=ls[-1][1]:\n ls[-1][1] = max(ls[-1][1],y)\n else:\n ls.append([x,y])\n return ls\n \n def part(self, I: List[List[int]]):\n if len(I)<=1:\n return I\n n = len(I)//2\n return self.mergeInterval(self.part(I[0:n]),self.part(I[n:]))\n \n def mergeInterval(self, I1: List[List[int]], I2: List[List[int]]):\n if not I1:\n return I2\n 
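# Two-way merge used by part() above: handle the empty-list base cases, then take the smaller head interval and recurse on the remainder.\n 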
if not I2:\n return I1\n if I1[0][0]<I2[0][0]:\n return [I1[0]]+self.mergeInterval(I1[1:], I2)\n elif I1[0][0]>I2[0][0]:\n return [I2[0]]+self.mergeInterval(I1, I2[1:])\n else:\n if I1[0][1]<I2[0][1]:\n return [I1[0]]+self.mergeInterval(I1[1:], I2)\n else:\n return [I2[0]]+self.mergeInterval(I1, I2[1:])\n # front\n if i-2 >= 0 and clothes[i-2] == 1:\n clothes[i-2] = 0\n answer += 1\n # back\n elif i < n and clothes[i] == 1:\n clothes[i] = 0\n answer += 1\n\n return answer","sub_path":"Python/Algorithm/programmers/peclass.py","file_name":"peclass.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"292813597","text":"# Task 1:\n# Given a list filled with arbitrary integers, build a new list\n# whose elements are the square roots of the elements of the source list,\n# but only if the result of taking the root has no decimal part and\n# if such a root can be taken at all\n# Example: Given: [2, -5, 8, 9, -25, 25, 4] Result: [3, 5, 2]\n\nimport random\nimport math\n\n# range of possible integers for the generator\nMIN = -5\nMAX = 25\n\n# length of the list\nLEN_LIST = 20\n\nlist_number = []\n\n# Fill the source list with integers\nfor _ in range(0, LEN_LIST):\n list_number.append(random.randint(MIN, MAX))\n\nprint('Source list of integers: ', list_number)\n\nnew_list = []\n\nfor item in list_number:\n if item >= 0:\n square_root = math.sqrt(item)\n if square_root - int(square_root) == 0:\n new_list.append(int(square_root))\n\nprint('Result: ', new_list)\n","sub_path":"lesson02/normal/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"372869837","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.select import Select\nimport time\nurl='http://10.115.161.203/oaweb/oaconsole'\ndriver = webdriver.Ie()\ndriver.maximize_window()\ndriver.get(url)\n#Department\n# js=\"document.getElementById('unit').click()\"\n# driver.execute_script(js)\n# time.sleep(5)\n# #Department second-level title box\n# table=driver.find_element_by_id('loginTreeDialog')\n# #Headquarters button\n# js1=\"document.getElementsByClassName('x-tree-node-icon')[0].click()\"\n# driver.execute_script(js1)\n# time.sleep(3)\n# table.find_element_by_link_text('办公室').click()\ndriver.find_element_by_name('password').send_keys(1)\njs=\"document.getElementsByClassName('btnLogin')[0].click()\" #Login button\ndriver.execute_script(js)\ntime.sleep(30)\na=driver.find_element_by_xpath('/html/body/iframe')\ndriver.switch_to_frame(a)\ndriver.find_element_by_id('dbTotalNum').click()#Workbench\ntime.sleep(3)\ndriver.switch_to_default_content()\ndriver.switch_to_frame(a)\ntime.sleep(3)\nb=driver.find_element_by_xpath('/html/body/div[5]/div[2]/div/div[1]/div[2]/div/div[2]/div[1]/div[2]/iframe')\ndriver.switch_to_frame(b)\ndriver.find_element_by_id('groupBtn').click()\ndriver.find_elements_by_class_name('ico-group')[3].click()\n# s=driver.find_element_by_id('groupTip')\n# s.find_element_by_link_text('按时间分组').click()\n# Select(s).select_by_index(0)\n# time.sleep(2)\n# Select(s).select_by_index(1)\n#/html/body/div[5]/div[2]/div/div[1]/div[2]/div/div[2]/div[1]/div[2]/iframe\n#Test\n","sub_path":"1/Select.py","file_name":"Select.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"525044909","text":"import time\r\nimport webbrowser\r\nfrom tkinter import *\r\nfrom tkinter import ttk\r\nfrom tkinter import messagebox\r\nimport PIL.Image\r\nimport PIL.ImageTk\r\nimport video_capture as vid\r\nimport hand as 
h\r\n\r\nstart = False\r\nbase = []\r\n\r\n\r\nclass HandyBrowser(Tk):\r\n\r\n def __init__(self, *args, **kwargs):\r\n Tk.__init__(self, *args, **kwargs)\r\n # logo = PhotoImage(file='favicon.ico')\r\n # Tk.iconbitmap(self, logo)\r\n img = Image(\"photo\", file=\"favicon.gif\")\r\n self.tk.call('wm', 'iconphoto', Tk._w, img)\r\n # Tk.iconbitmap(self, img)\r\n Tk.wm_title(self, \"Handy browser\")\r\n\r\n self.view = StringVar(value=\"BasePage\")\r\n self.menu_bar_init()\r\n\r\n container = ttk.Frame(self)\r\n container.grid(row=0, column=0, sticky=(N, S, E, W))\r\n container.grid_rowconfigure(0, weight=1)\r\n container.grid_columnconfigure(0, weight=1)\r\n\r\n self.frames = {}\r\n\r\n for F in (BasePage, CameraPage):\r\n frame = F(container, self)\r\n self.frames[F] = frame\r\n frame.grid(row=0, column=0, sticky=(N, S, E, W))\r\n frame.grid_rowconfigure(0, weight=1)\r\n frame.grid_columnconfigure(0, weight=1)\r\n\r\n self.show_frame(BasePage)\r\n\r\n def show_frame(self, cont):\r\n frame = self.frames[cont]\r\n self.view.set(cont.__name__)\r\n frame.tkraise()\r\n\r\n def menu_bar_init(self):\r\n self.option_add('*tearOff', FALSE)\r\n menu_bar = Menu(self)\r\n self['menu'] = menu_bar\r\n menu_view = Menu(menu_bar)\r\n menu_help = Menu(menu_bar)\r\n menu_bar.add_cascade(menu=menu_view, label='View')\r\n menu_bar.add_cascade(menu=menu_help, label='Help')\r\n\r\n menu_theme = Menu(menu_view)\r\n menu_view.add_cascade(menu=menu_theme, label='Change theme')\r\n\r\n # Why it works? Why self.style not needed to have pre selected?\r\n style = StringVar(value=ttk.Style().theme_use())\r\n for theme in ttk.Style().theme_names():\r\n menu_theme.add_radiobutton(label=theme, value=theme,\r\n variable=style,\r\n command=lambda: ttk.Style().theme_use(\r\n style.get()))\r\n\r\n menu_view.add_separator()\r\n\r\n # Why it works? 
Why does self.style need to be pre-selected?\r\n menu_view.add_radiobutton(label='Basic view', value=\"BasePage\",\r\n variable=self.view,\r\n command=lambda: self.show_frame(BasePage))\r\n\r\n # menu_view.add_radiobutton(label='Camera view', value=\"CameraPage\",\r\n # variable=self.view,\r\n # command=lambda: self.show_frame(CameraPage))\r\n\r\n menu_help.add_command(label='Manual',\r\n command=lambda: webbrowser.open_new_tab(\r\n \"https://github.com/mikiisz/Handy-Browser\"))\r\n\r\n menu_help.add_separator()\r\n\r\n menu_help.add_command(label='About', command=lambda: self.show_about())\r\n\r\n @staticmethod\r\n def show_about():\r\n messagebox.showinfo(\"About\", \"Handy Browser 2019\\n\"\r\n \"Dominik Mondzik\\nMichał Szkarłat\")\r\n\r\n\r\nclass Browser:\r\n def __init__(self):\r\n self.state = \"Not found\"\r\n\r\n def set(self, set_to):\r\n self.state = set_to\r\n\r\n\r\nclass Pages:\r\n def __init__(self):\r\n self.state = []\r\n\r\n def set(self, set1, set2, set3, cam):\r\n global start\r\n if not start:\r\n if browser.state != \"Not found\":\r\n self.state.append(set1)\r\n self.state.append(set2)\r\n self.state.append(set3)\r\n start = True\r\n global base\r\n base = vid.start(self.state, browser.state)\r\n # time.sleep(1)\r\n cam.__del__()\r\n global app\r\n app.destroy()\r\n h.init(base)\r\n else:\r\n messagebox.showinfo(\"Error\",\r\n \"Before starting, choose a browser\")\r\n\r\n\r\nbrowser = Browser()\r\npages = Pages()\r\n\r\n\r\nclass BasePage(Frame):\r\n # vid = vid.MyVideoCapture(0)\r\n\r\n def __init__(self, parent, controller):\r\n Frame.__init__(self, parent)\r\n\r\n padding_container = ttk.Frame(self, padding=(40, 20, 40, 20))\r\n padding_container.grid(row=0, column=0)\r\n\r\n left_frame = ttk.Frame(padding_container, padding=(0, 20, 20, 20))\r\n left_frame.grid(row=0, column=0, sticky=(N, S, E, W))\r\n\r\n def press(event):\r\n browser.set(camera_choice.get())\r\n print(browser.state)\r\n # camera_status = StringVar(value=\"\")\r\n\r\n camera_choice = StringVar(value=\"Select browser\")\r\n camera_combobox = ttk.Combobox(left_frame, textvariable=camera_choice,\r\n width=30,\r\n values=[\"Firefox\", \"Chrome\", \"Opera\"])\r\n camera_combobox.grid(row=0, column=0, columnspan=2, pady=10)\r\n camera_combobox.bind('<<ComboboxSelected>>', press)\r\n\r\n camera_status = StringVar(\r\n value=\"Choose the browser you would \\n\"\r\n \"like to operate.\")\r\n camera_label = ttk.Label(left_frame, text=camera_status.get())\r\n camera_label.grid(row=1, column=0, columnspan=2, pady=10, sticky=W)\r\n\r\n # separator\r\n left_separator = ttk.Separator(left_frame, orient=HORIZONTAL)\r\n left_separator.grid(row=2, column=0, columnspan=2, sticky=(E, W),\r\n pady=20)\r\n\r\n web_page = StringVar(value=\"Identify fingers with webpages\")\r\n web_page_label = ttk.Label(left_frame, text=web_page.get())\r\n web_page_label.grid(row=4, column=0, columnspan=2, pady=10, sticky=N)\r\n\r\n ttk.Label(left_frame, text=\"Homepage:\").grid(row=3, sticky=W)\r\n ttk.Label(left_frame, text=\"Two Fingers, right:\").grid(row=5, sticky=W)\r\n ttk.Label(left_frame, text=\"Two Fingers, left:\").grid(row=6, sticky=W)\r\n\r\n e1 = ttk.Entry(left_frame)\r\n e2 = ttk.Entry(left_frame)\r\n e3 = ttk.Entry(left_frame)\r\n e1.insert(10, \"http://google.com\")\r\n e2.insert(10, \"http://youtube.com\")\r\n e3.insert(10, \"http://gmail.com\")\r\n\r\n e1.grid(row=3, column=1)\r\n e2.grid(row=5, column=1)\r\n e3.grid(row=6, column=1)\r\n\r\n left_separator = ttk.Separator(left_frame, orient=HORIZONTAL)\r\n left_separator.grid(row=7, column=0, 
columnspan=2, sticky=(E, W),\r\n pady=20)\r\n\r\n # browser_choice = StringVar(value=\"Select browser\")\r\n # browser_combox = ttk.Combobox(left_frame, textvariable=browser_choice,\r\n # width=30)\r\n # browser_combox.grid(row=3, column=0, columnspan=2, pady=10)\r\n\r\n # browser_status = StringVar(value=browser.state)\r\n # browser_label = ttk.Label(left_frame, text=browser_status.get())\r\n # browser_label.grid(row=4, column=0, columnspan=2, pady=10, sticky=E)\r\n self.vid = vid.MyVideoCapture(0)\r\n\r\n label = ttk.Label(left_frame, text=\"For more information \\n\"\r\n \"check manual: \\n\"\r\n \"Help -> Manual\", padding=(20, 10))\r\n button = ttk.Button(left_frame, text=\"Start\",\r\n command=lambda: pages.set(e1.get(), e2.get(),\r\n e3.get(),\r\n self.vid))\r\n label.grid(row=8, column=0, sticky=W)\r\n button.grid(row=9, column=1, sticky=E)\r\n\r\n # separator\r\n central_separator = ttk.Separator(padding_container, orient=VERTICAL)\r\n central_separator.grid(row=0, column=1, sticky=(N, S), padx=20)\r\n\r\n # right part:\r\n right_frame = ttk.Frame(padding_container)\r\n right_frame.grid(row=0, column=2)\r\n\r\n # open video source (by default this will try to open the computer webcam)\r\n # self.vid = MyVideoCapture(\"big_buck_bunny_480p_stereo.avi\")\r\n\r\n self.cam1 = Canvas(right_frame, width=320, height=240)\r\n self.cam1.grid(row=0, column=0, pady=20)\r\n\r\n self.cam2 = Canvas(right_frame, width=320, height=240)\r\n self.cam2.grid(row=0, column=1, pady=20)\r\n\r\n # names sX to change\r\n scale_frame = ttk.Frame(right_frame)\r\n self.s1_variable = DoubleVar(value=100000)\r\n self.s2_variable = DoubleVar(value=5000)\r\n self.s3_variable = DoubleVar(value=150)\r\n self.s4_variable = DoubleVar(value=150)\r\n\r\n ttk.Label(scale_frame, text=\"Size of pixel detection\",\r\n padding=(20, 10)).grid(\r\n row=0, column=0, sticky=W)\r\n ttk.Label(scale_frame, text=\"Number of motion pixels\",\r\n padding=(20, 10)).grid(\r\n row=1, column=0, sticky=W)\r\n ttk.Label(scale_frame, text=\"x offset\",\r\n padding=(20, 10)).grid(\r\n row=2, column=0, sticky=W)\r\n ttk.Label(scale_frame, text=\"y offset\",\r\n padding=(20, 10)).grid(\r\n row=3, column=0, sticky=W)\r\n\r\n s1 = ttk.Scale(scale_frame, orient=HORIZONTAL,\r\n variable=self.s1_variable,\r\n length=200, from_=0, to=500000)\r\n s2 = ttk.Scale(scale_frame, orient=HORIZONTAL,\r\n variable=self.s2_variable,\r\n length=200, from_=0, to=10000)\r\n s3 = ttk.Scale(scale_frame, orient=HORIZONTAL,\r\n variable=self.s3_variable,\r\n length=200, from_=0, to=500)\r\n s4 = ttk.Scale(scale_frame, orient=HORIZONTAL,\r\n variable=self.s4_variable,\r\n length=200, from_=0, to=500)\r\n scale_frame.grid(row=1, column=0, pady=20)\r\n s1.grid(row=0, column=1, pady=2)\r\n s2.grid(row=1, column=1, pady=2)\r\n s3.grid(row=2, column=1, pady=2)\r\n s4.grid(row=3, column=1, pady=2)\r\n\r\n self.delay = 15\r\n\r\n self.update()\r\n\r\n def update(self):\r\n # Get a frame from the video source\r\n ret, fg_mask, mask = self.vid.get_frame(int(self.s1_variable.get()),\r\n int(self.s2_variable.get()),\r\n int(self.s3_variable.get()),\r\n int(self.s4_variable.get()))\r\n\r\n if ret:\r\n self.photo1 = PIL.ImageTk.PhotoImage(\r\n image=PIL.Image.fromarray(fg_mask).resize((320, 240)))\r\n self.cam1.create_image(0, 0, image=self.photo1, anchor=NW)\r\n\r\n self.photo2 = PIL.ImageTk.PhotoImage(\r\n image=PIL.Image.fromarray(mask))\r\n self.cam2.create_image(0, 0, image=self.photo2, anchor=NW)\r\n\r\n self.cam1.after(self.delay, self.update)\r\n\r\n\r\nclass 
CameraPage(Frame):\r\n # vid = BasePage.vid\r\n def __init__(self, parent, controller):\r\n Frame.__init__(self, parent)\r\n # self.vid = vid.MyVideoCapture(\"big_buck_bunny_480p_stereo.avi\")\r\n # self.vid = MyVideoCapture(0)\r\n self.camera_canvas = Canvas(self, width=640, height=480)\r\n self.camera_canvas.grid(row=0, column=0)\r\n self.camera_canvas.bind(\"\",\r\n lambda e: controller.show_frame(BasePage))\r\n\r\n\r\n#\r\n# self.delay = 15\r\n# self.update()\r\n#\r\n# def update(self):\r\n# # Get a frame from the video source\r\n# ret, frame, s = self.vid.get_frame()\r\n#\r\n# if ret:\r\n# self.photo = PIL.ImageTk.PhotoImage(\r\n# image=PIL.Image.fromarray(frame).resize((640, 480)))\r\n# self.camera_canvas.create_image(0, 0, image=self.photo, anchor=NW)\r\n#\r\n# self.camera_canvas.after(self.delay, self.update)\r\n\r\n\r\napp = HandyBrowser()\r\napp.mainloop()\r\n","sub_path":"my_gui.py","file_name":"my_gui.py","file_ext":"py","file_size_in_byte":12023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"277821093","text":"from bs4 import BeautifulSoup\r\nimport datetime\r\nimport maya\r\nimport re \r\nimport requests\r\n\r\nclass Citation:\r\n def __init__(self):\r\n self.url = None\r\n self.title = None\r\n self.author = None\r\n self.date = None\r\n self.date_cite_made = None\r\n self.soup = None\r\n self.response_error = False\r\n\r\n tag_lst= (\"div\",\"a\",\"p\",\"span\",\"h4\")\r\n\r\n # takes a url and returns the text for scraping.\r\n def get_web_soup(self, url_to_cite):\r\n try:\r\n response = requests.get(url_to_cite).text #!!! remove html replace with comment!!!\r\n self.soup = BeautifulSoup(response, \"html.parser\")\r\n self.url = url_to_cite\r\n\r\n except:\r\n print(\"No response from website.\")\r\n self.url = f\"NO RESPONSE FROM {url_to_cite}.\"\r\n self.response_error = True\r\n \r\n # collects title from html. stores title in title instance variable.\r\n def get_title(self):\r\n self.title = self.soup.find(\"h1\").get_text()\r\n\r\n # collect author from html. passes list of authors to format method stores returned value in the author instance variable.\r\n def get_author(self):\r\n key_words = (\"name\", \"author\", \"byline\")\r\n exclude = (\",\",\"@\", \"staff\", \"busi\", \"tech\", \"&\", \"and\", \"sign\", \"bio\", \"name\", \"edit\")\r\n author_lst = []\r\n\r\n for tag in self.tag_lst:\r\n for word in key_words:\r\n if self.soup.find_all(tag, re.compile(word)):\r\n soup_of_author = self.soup.find_all(tag, re.compile(word))\r\n for j in soup_of_author:\r\n possible_name = self.name_check(j.get_text())\r\n if possible_name not in author_lst:\r\n author_lst.append(possible_name)\r\n\r\n for name in author_lst:\r\n name_lower = name.lower()\r\n for items in exclude:\r\n if items in name_lower:\r\n name_lower, author_lst[author_lst.index(name)] = '', ''\r\n author_lst = [name for name in author_lst if name != '']\r\n self.author = author_lst\r\n\r\n def name_check(self, name):\r\n if len(name) < 8 or len(name) >= 18:\r\n name = ''\r\n if name != '':\r\n temp_split = name.split()\r\n if len(temp_split) > 1:\r\n for i in temp_split:\r\n if i.lower() == 'by':\r\n temp_split.remove(i) \r\n name = ' '.join(temp_split)\r\n else:\r\n name = ''\r\n return name\r\n\r\n # formats names in names list so that it reads last name comma first initial. returns list. 
\r\n def author_format(self, author_lst):\r\n if author_lst == None or len(author_lst) == 0:\r\n return author_lst\r\n for author in author_lst:\r\n split_author = author.split(\" \")\r\n first_initial = split_author[0][0] + \".\"\r\n lastname = split_author[-1]\r\n author_lst[author_lst.index(author)] = lastname +\", \"+ first_initial\r\n self.author = author_lst \r\n\r\n # collects date from html. passes date to convert string date to a list \r\n # converts month to number if needed and makes all values integers. stores value in the date instance variable. \r\n def get_date(self):\r\n exclude = (\"pub\")\r\n class_date_lst = (\"date\", \"mod date\", \"published\", \"content-date\", \"time\", \"jeg_meta_date\")\r\n date_as_lst = []\r\n\r\n if self.soup.find_all(\"time\"):\r\n try:\r\n date = self.soup.find(\"time\").contents[0]\r\n temp_split = date.split()\r\n for i in temp_split:\r\n lower = i.lower()\r\n if exclude in lower:\r\n temp_split.remove(i)\r\n date = \" \".join(elem for elem in temp_split)\r\n date_as_lst = self.date_to_sterile_lst(str(self.maya_convert(date)))\r\n except:\r\n try:\r\n get_text_date = self.soup.find(\"time\").get_text()\r\n date_as_lst = self.date_to_sterile_lst(self.maya_convert(get_text_date))\r\n except:\r\n date_as_lst = []\r\n else:\r\n for tag in self.tag_lst:\r\n for i in class_date_lst:\r\n if self.soup.find_all(tag, {\"class\": re.compile(i)}):\r\n try:\r\n date = self.soup.find(class_ = i).contents[0]\r\n temp_split = date.split()\r\n for i in temp_split:\r\n lower = i.lower()\r\n if exclude in lower:\r\n temp_split.remove(i)\r\n date = \" \".join(elem for elem in temp_split)\r\n date_as_lst = self.date_to_sterile_lst(str(self.maya_convert(date)))\r\n break\r\n except:\r\n date_as_lst = [] \r\n\r\n for string in date_as_lst:\r\n if string.isalpha() == True:\r\n date_as_lst[date_as_lst.index(string)] = self.month_str_to_number(string)\r\n self.date = self.str_to_num(date_as_lst)\r\n\r\n def maya_convert(self,str):\r\n dt = maya.parse(str).datetime()\r\n return dt.date()\r\n\r\n # formats date stored in current instance variable. moves year to the fist element in list \r\n # returns a string in the order of year, month, day. If only the year is present then that is the only value returned. 
\r\n def date_format(self):\r\n date_lst = self.date\r\n if date_lst == None or len(date_lst) == 0:\r\n self.date = \"n.a.\"\r\n else: \r\n # for num in date_lst:\r\n # if len(str(num)) == 4:\r\n # year = date_lst.pop(date_lst.index(num))\r\n # date_lst.insert(0, year)\r\n if len(date_lst) == 3:\r\n self.date = f\"{date_lst[0]}, {date_lst[1]} {date_lst[2]}\"\r\n else:\r\n self.date = f\"{date_lst[0]}\"\r\n\r\n # separates date into a list of three strings.\r\n def date_to_sterile_lst(self, str):\r\n return str.split('-')\r\n\r\n # converts spelled out month into the corresponding number.\r\n def month_str_to_number(self, str):\r\n months = {\r\n 'jan': 1,\r\n 'feb': 2,\r\n 'mar': 3,\r\n 'apr': 4,\r\n 'may': 5,\r\n 'jun': 6,\r\n 'jul': 7,\r\n 'aug': 8,\r\n 'sep': 9,\r\n 'oct': 10,\r\n 'nov': 11,\r\n 'dec': 12\r\n }\r\n s = str.strip()[:3].lower()\r\n\r\n try:\r\n out = months[s]\r\n return out\r\n except:\r\n raise ValueError('Not a month')\r\n\r\n # converts number month into the corresponding string \r\n def number_to_month_str(self, lst):\r\n months = {\r\n 1: 'January',\r\n 2: 'February',\r\n 3: 'March',\r\n 4: 'April',\r\n 5: 'May',\r\n 6: 'June',\r\n 7: 'July',\r\n 8: 'August',\r\n 9: 'September',\r\n 10: 'October',\r\n 11: 'November',\r\n 12: 'December'\r\n }\r\n\r\n try:\r\n lst[1] = months[lst[1]]\r\n return lst\r\n except:\r\n return []\r\n \r\n # converts each date string in the list to an integer.\r\n def str_to_num(self, lst):\r\n try:\r\n if len(lst) == 0:\r\n return lst\r\n for str in lst:\r\n lst[lst.index(str)] = int(str)\r\n return lst\r\n except:\r\n return []\r\n \r\n # method that generates an APA citation based on instance variables. \r\n def APA_cite_generator(self):\r\n title = self.title\r\n url = self.url\r\n date = f\"({self.date})\"\r\n\r\n if self.author == None or self.author == []:\r\n author_string = \"n.a.\"\r\n else:\r\n author_string = \", \".join(self.author)\r\n \r\n return f\"{author_string} {date}. {title}. 
Retrieved from {url}\"\r\n\r\n def populate_cite_obj(self, url):\r\n self.get_web_soup(url)\r\n if self.response_error == False:\r\n self.get_title()\r\n self.get_author()\r\n self.get_date()\r\n\r\n def clear_cite_obj(self):\r\n self.url = None\r\n self.title = None\r\n self.author = None\r\n self.date = None\r\n self.date_cite_made = None\r\n self.soup = None\r\n\r\nif __name__ == \"__main__\": \r\n loop = True\r\n while loop == True:\r\n input_url = input(\"Enter the URL of the website you want to cite: \")\r\n new_cite = Citation()\r\n new_cite.populate_cite_obj(input_url)\r\n str_month_date = new_cite.number_to_month_str(new_cite.date)\r\n\r\n print(f\"The URL is: {new_cite.url}\")\r\n print(f\"The title of the article is: {new_cite.title}\")\r\n print(f\"The author(s) of the article is/are: {new_cite.author}\")\r\n print(f\"The date the article was created is: {str_month_date}\")\r\n\r\n new_cite.date_format()\r\n new_cite.author_format(new_cite.author)\r\n\r\n print(f\"\\n\\nReference\\n{new_cite.APA_cite_generator()}\")\r\n new_cite.clear_cite_obj()\r\n for i in range(3):\r\n user_input = input(\"\\nDo you want to create another citation?(y/n): \")\r\n if user_input.lower() == \"n\":\r\n print(\"Goodbye\")\r\n loop = False\r\n break\r\n elif user_input.lower() == \"y\":\r\n break\r\n else:\r\n print(\"Input not valid\")\r\n if i == 2:\r\n print(\"Too many invalid inputs, goodbye.\")\r\n loop = False\r\n break\r\n ","sub_path":"CitationMachine.py","file_name":"CitationMachine.py","file_ext":"py","file_size_in_byte":9836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"518875759","text":"import requests\nfrom requests_oauthlib import OAuth1, OAuth1Session\nimport urllib\nimport json\n\nCONSUMER_KEY = \"b35RuLwCLxLSjYLzlmNy57ePq\"\nCONSUMER_SECRET = \"PKxMJUgv2Gk0pZb1oB1G5GwLhjOuVc7OXspuqlfz8LlTF3sSHU\"\nCALLBACK_URI = \"http://127.0.0.1:5000/login\"\n\nREQUEST_TOKEN_URL = \"https://api.twitter.com/oauth/request_token\"\nACCESS_TOKEN_URL = \"https://api.twitter.com/oauth/access_token\"\nAUTHORIZATION_URL = \"https://api.twitter.com/oauth/authorize\"\nSEARCH_URL = \"https://api.twitter.com/1.1/search/tweets.json\"\nCREDENTIALS_URL = \"https://api.twitter.com/1.1/account/verify_credentials.json\"\nFOLLOWED_BY_USER_URL = \"https://api.twitter.com/1.1/friends/list.json\"\nFOLLOWING_USER_URL = \"https://api.twitter.com/1.1/followers/list.json\"\n\nclass Twitter:\n def __init__(self):\n self._OAUTH_REQUEST_TOKEN = None\n self._OAUTH_REQUEST_TOKEN_SECRET = None\n self._OAUTH_ACCESS_TOKEN = None\n self._OAUTH_ACCESS_TOKEN_SECRET = None\n self._auth = None\n self._username = None\n\n def get_redirect_url(self):\n \"\"\"\n Generate the redirect URL that the client is sent to on login.\n :return: Redirect URL\n \"\"\"\n oauth_client = OAuth1Session(CONSUMER_KEY, client_secret=CONSUMER_SECRET, callback_uri=CALLBACK_URI)\n try:\n resp = oauth_client.fetch_request_token(REQUEST_TOKEN_URL)\n except ValueError as e:\n raise ValueError('Invalid response from Twitter requesting temp token: {0}'.format(e))\n\n self._OAUTH_REQUEST_TOKEN = resp['oauth_token']\n self._OAUTH_REQUEST_TOKEN_SECRET = resp['oauth_token_secret']\n return oauth_client.authorization_url(AUTHORIZATION_URL)\n\n def gen_access_token(self, oauth_token, oauth_verifier):\n \"\"\"\n Following a successful callback from the Twitter API, generate the authentication key for the session - also\n get the screen name of the user for future use.\n :param oauth_token: Oauth token received from Twitter.\n 
:param oauth_verifier: Oauth verifier received from Twitter.\n \"\"\"\n assert(oauth_token == self._OAUTH_REQUEST_TOKEN)\n oauth_client = OAuth1Session(CONSUMER_KEY, CONSUMER_SECRET,\n self._OAUTH_REQUEST_TOKEN, self._OAUTH_REQUEST_TOKEN_SECRET,\n verifier = oauth_verifier)\n resp = oauth_client.fetch_access_token(ACCESS_TOKEN_URL)\n self._OAUTH_ACCESS_TOKEN = resp['oauth_token']\n self._OAUTH_ACCESS_TOKEN_SECRET = resp['oauth_token_secret']\n\n self._auth = OAuth1(CONSUMER_KEY, CONSUMER_SECRET, self._OAUTH_ACCESS_TOKEN, self._OAUTH_ACCESS_TOKEN_SECRET)\n\n # Get screen_name once on login as getting credentials is highly rate limited\n r = requests.get(CREDENTIALS_URL, auth = self._auth)\n credentials = json.loads(r.text)\n self._username = credentials['screen_name']\n\n def get_tweet_list(self):\n \"\"\"\n Get a list of all tweets sent by the user.\n :return: List of dictionaries containing tweet data.\n \"\"\"\n # @@@MC do something if we get too many tweets back\n query = { 'q' : 'from:'+self._username }\n query_encoded = urllib.parse.urlencode(query)\n r = requests.get(\"{url}?{query}\".format(url=SEARCH_URL,query=query_encoded), auth = self._auth)\n assert(r.status_code == requests.codes.ok)\n return json.loads(r.text)['statuses']\n\n def get_following_user(self):\n \"\"\"\n Get a list of all Twitter users following the logged in user.\n :return: List of twitter users.\n \"\"\"\n query = { 'screen_name' : self._username }\n query_encoded = urllib.parse.urlencode(query)\n r = requests.get(\"{url}?{query}\".format(url=FOLLOWING_USER_URL,query=query_encoded), auth = self._auth)\n assert(r.status_code == requests.codes.ok)\n return [follower['screen_name'] for follower in json.loads(r.text)['users']]\n\n def get_followed_by_user(self):\n \"\"\"\n Get a list of all Twitter users followed by the logged in user.\n :return: List of twitter users.\n \"\"\"\n query = { 'screen_name' : self._username }\n query_encoded = urllib.parse.urlencode(query)\n r = requests.get(\"{url}?{query}\".format(url=FOLLOWED_BY_USER_URL,query=query_encoded), auth = self._auth)\n assert(r.status_code == requests.codes.ok)\n return [followed['screen_name'] for followed in json.loads(r.text)['users']]","sub_path":"twitter.py","file_name":"twitter.py","file_ext":"py","file_size_in_byte":4472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"47379061","text":"import os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom data import CLEAN_DATA_DIR\n\nDIAGRAM_OUTPUT_MESSAGE = '{} saved to {}'\n\n\ndef view_data_overview(df):\n save_filepath = os.path.abspath('images/data.png')\n\n plt.figure()\n sns.set(style='whitegrid', context='notebook')\n sns.pairplot(df)\n plt.savefig(save_filepath)\n\n print(DIAGRAM_OUTPUT_MESSAGE.format('Data overview', save_filepath))\n\n\ndef view_correlation_matrix(df):\n columns = list(df.columns)\n save_filepath = os.path.abspath('images/correlation_matrix.png')\n\n plt.figure()\n correlation_matrix = np.corrcoef(df[columns].values.T)\n sns.set(font_scale=1.)\n sns.heatmap(correlation_matrix, cbar=True, annot=True,\n square=True, fmt='.2f', yticklabels=columns, xticklabels=columns)\n plt.savefig(save_filepath)\n\n print(DIAGRAM_OUTPUT_MESSAGE.format('correlation matrix', save_filepath))\n\n\ndef view_feature_distribution(feature):\n save_filepath = os.path.abspath(\n 'images/{}_distribution.png'.format(feature.name))\n title = '{} Distribution'.format(feature.name)\n\n plt.figure()\n 
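# Draw the feature's distribution (histogram with a kernel-density overlay).\n 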
sns.distplot(feature)\n plt.title(title)\n plt.savefig(save_filepath)\n\n print(DIAGRAM_OUTPUT_MESSAGE.format(title, save_filepath))\n\n\ndef view_feature_boxplot(feature):\n save_filepath = os.path.abspath(\n 'images/{}_boxplot.png'.format(feature.name))\n title = '{} Boxplot'.format(feature.name)\n\n plt.figure()\n sns.boxplot(feature)\n plt.title(title)\n plt.savefig(save_filepath)\n\n print(DIAGRAM_OUTPUT_MESSAGE.format(title, save_filepath))\n\n\nif __name__ == '__main__':\n print('--- Visualizing Clean Data ---')\n\n # Get clean data\n clean_training_data_path = os.path.join(\n CLEAN_DATA_DIR, 'clean_training_data.csv')\n df = pd.read_csv(clean_training_data_path)\n\n # Overview of all features\n view_data_overview(df)\n view_correlation_matrix(df)\n\n # Create distribution and box plots\n for col in ['Income in EUR', 'Body Height [cm]', 'Age', 'Size of City']:\n view_feature_distribution(df[col])\n view_feature_boxplot(df[col])\n","sub_path":"visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":2156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"367189609","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon May 15 21:19:50 2017\r\n\r\n@author: Ilker\r\n\"\"\"\r\n\r\nimport random\r\nimport math\r\nimport time\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport scipy.optimize \r\n\r\ndef main(c):\r\n ############################################################\r\n # INPUT\r\n #\r\n # c : the scattering ratio of the media\r\n #\r\n #\r\n # OUTPUT\r\n # Flux SP123\r\n #\r\n #############################################################\r\n\r\n #Initialisation of the parameters of the simulation\r\n thickness = 100; #Thickness of the media\r\n sigmaabs = 1-c; #Absorption cross-section of the media\r\n sigmascat = c; #Scattering cross-section of the media\r\n sigmatot=sigmaabs+sigmascat; #Total cross-section of the media\r\n global lamb; #Lamb is the mean free path\r\n if (sigmatot !=0):\r\n lamb = 2/((3)**(0.5)*sigmatot); #mean-free path of SP1\r\n Q = 1 #Source of neutrons\r\n n = 100000; #Number of particle histories\r\n step = 0.00275; #Step for bins. Number of bins will be Length/step\r\n\r\n #Output file\r\n filename = 'realend';\r\n file = open(filename + \".txt\", \"w\") \r\n fluxPSP1 = open(\"fluxPSP1.txt\",\"w\")\r\n coordPSP1 = open(\"coordPSP1.txt\",\"w\")\r\n variancePSP1 = open(\"variancePSP1.txt\",\"w\");\r\n deviationPSP1 = open(\"deviationPSP1.txt\",\"w\");\r\n \r\n # Time starts\r\n\r\n initialtime = time.time();\r\n \r\n\r\n # All parameters to count\r\n \r\n global numlost; #Number of particles lost by the left side (z < 0)\r\n global numesc; #Number of particles lost by the right side (z > Thickness)\r\n global numdeath; #Number of particles absorbed in the media\r\n global numscattot; #Number of collisions for ALL particles\r\n global freepath; #The distance given by the sampling\r\n global flux_local; #List of an estimator of the flux in the media ; collision between 0 and 1 is in first place, between 1 and 2 is second ...\r\n global s; #List of the 6 first moments of s \r\n global variancepic; #List of the sum of the square of each scattering \r\n global deviationpic; #List of the sum of each scattering\r\n test = [0]*(int(thickness/step)); #List of the variance of each element in flux_local \r\n test2 = [0]*(int(thickness/step)); #List of the standard deviation of each element in flux_local\r\n fluxpic = [0]*(int(thickness/step));\r\n variancepic = 
\r\n    s =[0]*6;\r\n    flux_local = [0]*(int(thickness/step)); \r\n    numlost=0;\r\n    numesc=0;\r\n    numdeath =0;\r\n    numscattot = 0;\r\n    freepath =0;\r\n    \r\n    # Loop on each history\r\n    \r\n    for i in range(1,n+1):\r\n        # Interface with user\r\n        if (i%1000 == 0):\r\n            print(str(i/n*100) + \"% of the running code done\")\r\n        randnum = random.random();\r\n        z0=thickness/2+(2*randnum-1)/2;        #Initial random position of particles according to the regional source : Source = Q between -0.5 + thickness/2 and +0.5 + thickness/2\r\n        randnum = random.random()\r\n        theta = 180*randnum;                   #Initial random angle for the particle\r\n        w0=math.cos(math.pi/180*theta);        #Initial direction of the particle\r\n        z=z0;                                  # position z follows the particle\r\n        w=w0;                                  # direction w follows the particle \r\n        ilost=0;   #Event escape by left side: value 0 --> particle does not escape by the left side, value 1 --> particle has escaped by the left side\r\n        iesc=0;    #Event escape by right side: value 0 --> particle does not escape by the right side, value 1 --> particle has escaped by the right side \r\n        ideath=0;  #Event absorption by the media: value 0 --> particle is not absorbed, value 1 --> particle has been absorbed \r\n        iscat=0;   #Number of collisions by particle \r\n        ivariancepic = [0]*(int(thickness/step));    #Number of collisions squared by particle\r\n        iflux = [0]*(int(thickness/step));           #Number of collisions by particle \r\n        \r\n        #loop on the life of the particle \r\n        \r\n        while((ilost+iesc+ideath)==0):\r\n            randnum=random.random(); \r\n            if (sigmatot !=0):\r\n                freepath = scipy.optimize.brentq(fsp,0.0,100.0,args=(randnum))/((3**0.5)*sigmatot)    #Sampling of the distance travelled\r\n            s[0] = s[0] + freepath;      #First moment estimator\r\n            s[1] = s[1] + freepath**2;   #Second moment estimator\r\n            s[2] = s[2] + freepath**3;   #Third moment estimator \r\n            s[3] = s[3] + freepath**4;   #..\r\n            s[4] = s[4] + freepath**5;   #..\r\n            s[5] = s[5] + freepath**6;   #.\r\n            z1=z+w*freepath;             #Update of the position\r\n            if(z1 >= thickness or sigmatot==0):    #Check if the particle escapes by the right side (z > thickness)\r\n                iesc=1;\r\n            elif(z1 < 0.):                         #Check if the particle is lost by the left side (z < 0) \r\n                ilost=1; \r\n            else:\r\n                xi = random.random();\r\n                if (xi < sigmascat/sigmatot):      #Check if it's a scattering with a probability of Sigmascat/sigmatot\r\n                    index = int(z1/step)+1;        #Count in the bins\r\n                    flux_local[index-1] = flux_local[index-1] + 1;    #Increment event for flux\r\n                    iflux[index-1] = iflux[index-1] +1;               #Increment event for flux\r\n                    ivariancepic[index-1] = ivariancepic[index-1] + 1;    #Increment event for flux \r\n                    iscat = iscat + 1;             #Increment of the number of scatterings for the particle \r\n                    z=z1;                          #Save the new position\r\n                    randnum=random.random()        #Isotropic collision, direction random\r\n                    w=2*randnum-1;\r\n                if (sigmaabs !=0 and xi > sigmascat/sigmatot):   #Check if it's an absorption with a probability Sigmaabs/sigmatot\r\n                    index = int(z1/step) + 1;      #Count in the bins\r\n                    flux_local[index-1] = flux_local[index-1] + 1;    #Increment event for flux\r\n                    iflux[index-1] = iflux[index-1] +1;               #Increment event for flux\r\n                    ivariancepic[index-1] = ivariancepic[index-1] + 1;   #Increment event for flux \r\n                    ideath = 1;                    #Kill the particle\r\n                else:\r\n                    ideath = 0;                    #No absorption  \r\n        #Computation of the flux and the variance \r\n        for k in range(len(fluxpic)): \r\n            fluxpic[k] = fluxpic[k] + iflux[k];    #We add the collisions of each particle for flux \r\n            variancepic[k] = variancepic[k] + ivariancepic[k]**2;    #We add the square power of each particle for variance\r\n        
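# end of this history: fold the per-particle tallies into the run-wide counters;\r\n        # fluxpic accumulates the collision counts and variancepic their squares, so the\r\n        # per-bin variance can later be estimated as E[X^2] - E[X]^2\r\n        # (the free path above was sampled by inverting the CDF: brentq solves xi = (1+x)*exp(-x))\r\n        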
numlost=numlost+ilost; #Update of the number of lost in right side\r\n numesc=numesc+iesc; #Update of the number of lost in left side\r\n numdeath=numdeath+ideath; #Update of the number of qbsorption\r\n numscattot = numscattot + iscat; #Update of the number of scattering \r\n for i in range(len(fluxpic)):\r\n variancepic[i] = variancepic[i]/(n-1); #Average number of collision on the sample\r\n fluxpic[i] = fluxpic[i]/n; #Average number of collision square on the sample \r\n test[i] = variancepic[i]-(fluxpic[i])**2; #Variance\r\n test2[i] = test[i]**(0.5); #Standart deviation\r\n flux_local[:] = [(x/n)*(Q)/step/sigmatot for x in flux_local]; #Average flux with good normalisation The integral of the source appears Q*1 (because Q is constant in a distance = 1)\r\n s[:] = [x / (numscattot+numdeath+numesc+numlost) for x in s]; #Average of the moments \r\n finaltime = time.time();\r\n print(\"n =\",n);\r\n print(\"numscattot = \", numscattot);\r\n print(\"numlost =\",numlost);\r\n print(\"numabs =\",numdeath);\r\n print(\"numesc=\",numesc);\r\n print(\"flux(z) = \",flux_local);\r\n print(\"s = \",s); \r\n print(\"Time elapsed during the running of the code : \",finaltime - initialtime, \"seconds\");\r\n print(\" \");\r\n print(\"Variance of flux = \",test) \r\n print(\" \");\r\n print('max of the flux = ', max(flux_local))\r\n print(\"average on bins max = \", (flux_local[flux_local.index(max(flux_local))]+flux_local[flux_local.index(max(flux_local))+1])/2)\r\n #print(\"Standart deviation of flux = \", test2)\r\n print(\" \"); \r\n #print(\"Maximum for the standart deviation = \"+ str(max(test2)) + \" at the position \" + str(test2.index(max(test2))) )\r\n plt.plot(flux_local)\r\n plt.ylabel('Flux(z)')\r\n plt.show()\r\n file.write(\"\\n\")\r\n file.write(\"\\n\")\r\n file.write(\"\\n\")\r\n file.write(\" ------------------------------------------------------------\\n\")\r\n file.write(\" | |\\n\")\r\n file.write(\" | MONTE-CARLO CODE FOR |\\n\")\r\n file.write(\" | SP1 EQUATION (1D-Ponctual source) |\\n\")\r\n file.write(\" | Done at Berkeley by Ilker Makine |\\n\")\r\n file.write(\" | OUTPUT FILE |\\n\")\r\n file.write(\" | |\\n\")\r\n file.write(\" | |\\n\")\r\n file.write(\" ------------------------------------------------------------\\n\")\r\n file.write(\" ---------------------------INPUT PARAMETERS---------------------\\n\") \r\n file.write(\"\\n\")\r\n file.write(\"\\n\")\r\n file.write(\"\\n\")\r\n file.write(\" \" + \" Thickness of the media = \" + str(thickness) + \"\\n\")\r\n file.write(\" \" + \" Absorption cross-section = \" + str(sigmaabs)+ \"\\n\")\r\n file.write(\" \" + \" Scattering cross-section = \" + str(sigmascat)+ \"\\n\")\r\n file.write(\" \" + \" Total cross-section = \" + str(sigmatot)+ \"\\n\")\r\n file.write(\" \" + \" Source of neutrons = \" + str(Q)+ \"\\n\")\r\n file.write(\" \" + \" Number of particles = \" + str(n)+ \"\\n\")\r\n file.write(\" \" + \" Precision in the media (step) = \" + str(step)+ \"\\n\")\r\n file.write(\"\\n\")\r\n file.write(\"\\n\")\r\n file.write(\"\\n\")\r\n file.write(\" ---------------------------TIME ELAPSED---------------------\\n\") \r\n file.write(\"\\n\")\r\n file.write(\"\\n\")\r\n file.write(\"\\n\")\r\n file.write(\" \" + str(finaltime - initialtime) + \" seconds are elapsed during the running of the code\")\r\n file.write(\"\\n\")\r\n file.write(\"\\n\")\r\n file.write(\"\\n\")\r\n file.write(\" ---------------------------MOMENTS--------------------------\\n\") \r\n file.write(\"\\n\")\r\n file.write(\"\\n\")\r\n 
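# sanity reference (assuming the path length follows the Gamma(2) law implied by fsp,\r\n    # i.e. CDF 1-(1+x)*exp(-x) rescaled by sqrt(3)*sigmatot): the exact moments are\r\n    # E[s^m] = (m+1)!/(3**(m/2)*sigmatot**m), e.g. 1.1547/sigmatot for m=1 and 2/sigmatot**2 for m=2\r\n    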
file.write(\"\\n\")\r\n s1 = 1.1547/sigmatot;\r\n s2 = 2/sigmatot**2;\r\n s3 = 5.6188/sigmatot**3;\r\n s4 = 13.3333/sigmatot**4;\r\n s5 = 48.188/sigmatot**5;\r\n s6 = 186.66667/sigmatot**6;\r\n file.write(\" ---------------------------------------------------------------------------------------\\n\")\r\n file.write(\" | | | | |\\n\")\r\n file.write(\" | s^m | Theoretical values | Computational values | Errors |\\n\")\r\n file.write(\" | | | | |\\n\")\r\n file.write(\" ---------------------------------------------------------------------------------------\\n\")\r\n file.write(\" | | | | |\\n\")\r\n file.write(\" | s^1 | \"+str(s1)+\" \"*(22-len(str(s1))) + \" |\" + str(s[0])+\" \"*(22-len(str(s[0])))+\"|\" +str((s[0]- s1)/s1) + \" \"*(22-len(str((s[0]- s1)/s1)))+ \"|\\n\")\r\n file.write(\" | | | | |\\n\")\r\n file.write(\" ---------------------------------------------------------------------------------------\\n\")\r\n file.write(\" | | | | |\\n\")\r\n file.write(\" | s^2 | \"+str(s2)+\" \"*(22-len(str(s2))) + \" |\" + str(s[1])+\" \"*(22-len(str(s[1])))+\"|\" +str((s[1]- s2)/s2) +\" \"*(22-len(str((s[1]- s2)/s2))) + \"|\\n\")\r\n file.write(\" | | | | |\\n\")\r\n file.write(\" ---------------------------------------------------------------------------------------\\n\")\r\n file.write(\" | | | | |\\n\")\r\n file.write(\" | s^3 | \"+str(s3)+\" \"*(22-len(str(s3))) + \" |\" + str(s[2])+\" \"*(22-len(str(s[2])))+\"|\" +str((s[2]- s3)/s3) + \" \"*(22-len(str((s[2]- s3)/s3))) + \"|\\n\")\r\n file.write(\" | | | | |\\n\")\r\n file.write(\" ---------------------------------------------------------------------------------------\\n\")\r\n file.write(\" | | | | |\\n\")\r\n file.write(\" | s^4 | \"+str(s4)+\" \"*(22-len(str(s4))) + \" |\" + str(s[3])+\" \"*(22-len(str(s[3])))+\"|\" +str((s[3]- s4)/s4)+ \" \"*(22-len(str((s[3]- s4)/s4))) + \"|\\n\")\r\n file.write(\" | | | | |\\n\")\r\n file.write(\" ---------------------------------------------------------------------------------------\\n\")\r\n file.write(\" | | | | |\\n\")\r\n file.write(\" | s^5 | \"+str(s5)+\" \"*(22-len(str(s5))) + \" |\" + str(s[4])+\" \"*(22-len(str(s[4])))+\"|\" +str((s[4]- s5)/s5)+ \" \"*(22-len(str((s[4]- s5)/s5))) + \"|\\n\")\r\n file.write(\" | | | | |\\n\")\r\n file.write(\" ---------------------------------------------------------------------------------------\\n\")\r\n file.write(\" | | | | |\\n\")\r\n file.write(\" | s^6 | \"+str(s5)+\" \"*(22-len(str(s5))) + \" |\" + str(s[5])+\" \"*(22-len(str(s[5])))+\"|\" +str((s[5]- s6)/s6)+ \" \"*(22-len(str((s[5]- s6)/s6))) + \"|\\n\")\r\n file.write(\" | | | | |\\n\")\r\n file.write(\" ---------------------------------------------------------------------------------------\\n\")\r\n file.write(\"\\n\")\r\n file.write(\"\\n\")\r\n file.write(\"\\n\")\r\n file.write(\" ---------------------------EVENTS COUNTER---------------------\\n\") \r\n file.write(\"\\n\")\r\n file.write(\"\\n\")\r\n file.write(\"\\n\")\r\n file.write(\" \" + str(numscattot) + \" scatterings occurs during the simulation\\n\")\r\n file.write(\" \" + str(numlost+numesc) + \" escapes occurs during the simulation\\n\")\r\n file.write(\" \" + str(numdeath) + \" absorptions occurs during the simulation\\n\") \r\n file.write(\"\\n\")\r\n file.write(\"\\n\")\r\n file.write(\"\\n\")\r\n file.write(\" ---------------------------FLUX(Z)----------------------------\\n\") \r\n file.write(\" --- Z --- ---FLUX(Z)--- \\n\")\r\n z=0;\r\n for i in range(len(flux_local)):\r\n #file.write(\" \"+ str(round(z,4))+ \" 
\"*(6-len(str(round(z,2)))) + \" \" + str(round(flux_local[i],6))+\" \"*(12-len(str(round(z,4)))) +str(round(test2[i],6)) +\"\\n\");\r\n z = z + step\r\n file.close(); \r\n for i in range(len(flux_local)):\r\n fluxPSP1.write(\" \"+ str(flux_local[i])+\"\\n\");\r\n for i in range(len(flux_local)):\r\n coordPSP1.write(\" \"+ str(z)+\"\\n\");\r\n z = z + step\r\n for i in range(len(flux_local)):\r\n variancePSP1.write(\" \"+ str(variancepic[i])+\"\\n\");\r\n #for i in range(len(flux_local)):\r\n #deviationPSP1.write(\" \"+ str(variancepic[i])+\"\\n\");\r\n deviationPSP1.close() \r\n variancePSP1.close() \r\n fluxPSP1.close();\r\n coordPSP1.close(); \r\n return flux_local\r\n \r\ndef fsp(x,xi): # function xi - (1+z)exp(-z)\r\n return xi - (1+x)*np.exp(-x) \r\n\r\nlistC = [0.1,0.2,0.5,0.8,0.9,0.95,0.99,0.999]\r\nfor i in range(len(listC)):\r\n a = main(listC[i]);\r\n filename = str(listC[i])\r\n file = open(filename + '.txt', 'w') \r\n file.write('max of the flux = ' + str(max(a)) + '\\n')\r\n file.write(\"average on bins max = \"+ str((a[a.index(max(a))]+a[a.index(max(a))+1])/2))\r\n file.close()\r\n \r\nprint(\"---------------------------------Program-------Ends-----------------------------\")","sub_path":"LABHYD/ProjectNonclassical-master/ProjectNonclassical-master/Monte-carlo/PonctualSP1.py","file_name":"PonctualSP1.py","file_ext":"py","file_size_in_byte":18873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"333340780","text":"import Queue\nfrom threading import RLock\n\nfrom morne.sdk.perfcounters.Counter import ValueCounter\nfrom morne.sdk.perfcounters.PerfCounters import PerfCounters\n\n\nclass AppQueue(Queue.Queue):\n \"\"\"\n A queue class, wrapping the Python Queue implementation. This implementation adds the ability to add\n performance counters (i.e. queue size) to the queue implementation.\n \"\"\"\n\n def __init__(self, name):\n \"\"\"\n Constructor.\n Args:\n name: The queue name\n\n Returns:\n Nothing\n \"\"\"\n Queue.Queue.__init__(self)\n self._name = name\n self._size = 0\n\n self._size_lock = RLock()\n\n #\n # A performance counter to counter the number of events in the queue\n #\n self.PERF_VALUE_QUEUE_SIZE = ValueCounter(\\\n \"Queue:%s\" % self.name(),\\\n \"Queue size\",\\\n \"Number of events in queue\")\n PerfCounters.ApplicationCounters.register_counter(self.PERF_VALUE_QUEUE_SIZE)\n\n def name(self):\n \"\"\"\n Returns:\n The queue name\n \"\"\"\n return self._name\n\n def stop(self):\n self.put(None)\n\n def put(self, event):\n \"\"\"Put an item into the queue.\n\n If optional args 'block' is true and 'timeout' is None (the default),\n block if necessary until a free slot is available. If 'timeout' is\n a non-negative number, it blocks at most 'timeout' seconds and raises\n the Full exception if no free slot was available within that time.\n Otherwise ('block' is false), put an item on the queue if a free slot\n is immediately available, else raise the Full exception ('timeout'\n is ignored in that case).\n \"\"\"\n with self._size_lock:\n self._size += 1\n self.PERF_VALUE_QUEUE_SIZE.apply(self._size)\n\n Queue.Queue.put(self, event)\n\n def get(self, block=True, timeout=None):\n \"\"\"Remove and return an item from the queue.\n\n If optional args 'block' is true and 'timeout' is None (the default),\n block if necessary until an item is available. 
If 'timeout' is\n        a non-negative number, it blocks at most 'timeout' seconds and raises\n        the Empty exception if no item was available within that time.\n        Otherwise ('block' is false), return an item if one is immediately\n        available, else raise the Empty exception ('timeout' is ignored\n        in that case).\n        \"\"\"\n\n        r = Queue.Queue.get(self, block, timeout)\n        if not (r is None):\n            with self._size_lock:\n                self._size -= 1\n                self.PERF_VALUE_QUEUE_SIZE.apply(self._size)\n        else:\n            PerfCounters.ApplicationCounters.remove_counter(self.PERF_VALUE_QUEUE_SIZE)\n        return r\n","sub_path":"src/morne/sdk/app/AppQueue.py","file_name":"AppQueue.py","file_ext":"py","file_size_in_byte":2795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"203803215","text":"from collections import OrderedDict\nimport json\nimport os\n\nclass TestClass(object):\n\n    def __init__(self):\n\n        self.base_websource_Uri = 'http://www.actuaries.jp/lib/collection/books/'\n        self.base_staticsource_uri = 'static/pdf/'\n        self.japan_calendar_year = 'H28'\n        self.subject = 'A'\n        self.part = 1\n        self.question = 1\n        self.source_fullUri = ''\n\n    def generate_web_uri(self, **kwarg):\n\n        file_name = self.japan_calendar_year + self.subject + '.pdf'\n        source_full_uri = os.path.join(self.base_websource_Uri + self.japan_calendar_year, file_name)\n\n        counter = 0\n        for key in kwarg:\n            if counter == 0:\n                source_full_uri = source_full_uri + '#' + str(key) + '=' + str(kwarg[key])\n            else:\n                source_full_uri = source_full_uri + '&' + str(key) + '=' + str(kwarg[key])\n\n            counter += 1\n\n        return source_full_uri\n\n    def generate_pdf_uri(self, **kwarg):\n\n        file_name = self.japan_calendar_year + self.subject + '.pdf'\n        source_full_uri = '/static/pdf/' + file_name\n\n        counter = 0\n        for key in kwarg:\n            if counter == 0:\n                source_full_uri = source_full_uri + '#' + str(key) + '=' + str(kwarg[key])\n            else:\n                source_full_uri = source_full_uri + '&' + str(key) + '=' + str(kwarg[key])\n\n            counter += 1\n\n        return source_full_uri\n\n    def structure_json_user_history(self, japan_calendar_year='', subject='', debug=False):\n        '''\n        Create json data mirroring the structure of the tests, used to store each user's\n        answer history\n\n        Adds several attributes to the base test structure:\n        tried : the number of attempts at the question\n        '''\n\n        japan_calendar_year = japan_calendar_year if japan_calendar_year != '' \\\n            else self.japan_calendar_year\n\n        subject = subject if subject != '' else self.subject\n\n        file_path = os.path.join(os.path.join(os.path.dirname(__file__), 'json'), japan_calendar_year)\n        file_name = subject + '.json'\n\n        with open(os.path.join(file_path, file_name), 'r') as test_info_text:\n            test_info_json = json.load(test_info_text, object_pairs_hook=OrderedDict)\n\n        structure = {}\n        id = 1\n        for key, value in test_info_json.items():\n            index = 0\n            for question in value[\"questions\"]:\n                structure[str(id)] = {}\n                question_id = key + '_' + str(question)\n\n                structure[str(id)][\"question_id\"] = question_id\n\n                structure[str(id)][\"last_tried\"] = ''\n                structure[str(id)][\"tries\"] = 0\n                structure[str(id)][\"corrects\"] = 0\n\n                id += 1\n                index += 1\n\n        if debug:\n            with open(os.path.join(file_path, 'test', subject + '_debug.json'), 'w+') as fp:\n                json.dump(structure, fp, indent=4)\n\n        return structure\n\n    def structure_json_pages(self, japan_calendar_year='', subject='', debug=False):\n        '''\n        Create json data including the structure of the tests, used to display and move to\n        the specified question\n\n        Adds the page ranges (question and answer, from/to) for each question\n        to the base test structure\n        '''\n\n        japan_calendar_year = japan_calendar_year if japan_calendar_year != '' \\\n            else self.japan_calendar_year\n\n        subject = subject if subject != '' else self.subject\n\n        file_path = os.path.join(os.path.join(os.path.dirname(__file__), 'json'), japan_calendar_year)\n        file_name = subject + '.json'\n\n        with open(os.path.join(file_path, file_name), 'r') as test_info_text:\n            test_info_json = json.load(test_info_text, object_pairs_hook=OrderedDict)\n\n        structure = {}\n        id = 1\n        for key, value in test_info_json.items():\n            index = 0\n            for question in value[\"questions\"]:\n                structure[str(id)] = {}\n\n                question_id = key + '_' + str(question)\n                structure[str(id)][\"question_id\"] = question_id\n\n                pages = {}\n                pages[\"question\"] = {}\n                pages[\"answer\"] = {}\n                pages[\"question\"][\"from\"] = value[\"pages\"][\"question\"][index][0]\n                pages[\"question\"][\"to\"] = value[\"pages\"][\"question\"][index][1]\n                pages[\"answer\"][\"from\"] = value[\"pages\"][\"answer\"][index][0]\n                pages[\"answer\"][\"to\"] = value[\"pages\"][\"answer\"][index][1]\n\n                structure[str(id)][\"pages\"]={}\n                structure[str(id)][\"pages\"] = pages\n\n                id += 1\n                index += 1\n\n        if debug:\n            with open(os.path.join(file_path, 'test', subject + '_debug.json'), 'w+') as fp:\n                json.dump(structure, fp, indent=4)\n\n        return structure\n\n    def collect_whole_subject(self, japan_calendar_year):\n        '''\n        :param japan_calendar_year: e.g 'H28'\n        :return: json format data that collects the json data of all subjects\n        '''\n        all_subject = {}\n        subjects = [\"A\", \"B\", \"C\", \"D\", \"E\"]\n\n        for subject in subjects:\n            all_subject[subject] = self.structure_json_user_history(japan_calendar_year=japan_calendar_year,\\\n                                                                    subject=subject)\n\n        return all_subject\n\n    @staticmethod\n    def get_question_name_from_json(question_id, questions_json):\n        for key in questions_json:\n            if questions_json[key][\"question_id\"] == question_id:\n                return key\n\n\n    def forward_question(self):\n        self.question += 1\n\n    def backward_question(self):\n        self.question -= 1\n\n\nif __name__ == '__main__':\n    print('start---')\n    testClass = TestClass()\n    print(testClass.structure_json_user_history('H28', debug=True))\n    print('end-----')\n","sub_path":"core/testClass.py","file_name":"testClass.py","file_ext":"py","file_size_in_byte":6011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"273330207","text":"#!/usr/bin/python\n\n\"\"\"Utility to automatically create a Makefile for a programming\nproject using a template.\"\"\"\n\nimport argparse\nfrom string import Template  # pylint: disable=W0402\nfrom datetime import date\nimport os\n\nTEMPLATE_DIR = os.path.expanduser('~/git/utils/templates/')\nOUTFILE = 'Makefile'\n\nINFILE_CPP_NORMAL = 'Makefile-cpp-normal'\nINFILE_CPP_SIMPLE = 'Makefile-cpp-simple'\nINFILE_CPP_STATIC = 'Makefile-cpp-staticlib'\nINFILE_CPP_SHARED = 'Makefile-cpp-sharedlib'\nINFILE_C_NORMAL = 'Makefile-c-normal'\nINFILE_C_SIMPLE = 'Makefile-c-simple'\nINFILE_C_STATIC = 'Makefile-c-staticlib'\nINFILE_C_SHARED = 'Makefile-c-sharedlib'\n\n\ndef setup_parser():\n\n    \"\"\"Gets command line arguments.\"\"\"\n\n    parser = argparse.ArgumentParser(description=\"Automatically create \" +\n                                     \"a Makefile from template.\")\n    parser.add_argument(\"projectname\", help=\"Project name\")\n    parser.add_argument(\"-l\", \"--language\", help=\"Project language \" +\n                        \"(default: C++)\", default=\"c++\", metavar=\"LANG\",\n                        choices=['c', 'c++'])\n    
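# the three Makefile flavours below are mutually exclusive: at most one of\n    # --simple, --static or --shared may be given on the command line\n    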
megroup = parser.add_mutually_exclusive_group()\n megroup.add_argument(\"-s\", \"--simple\", help=\"Simple Makefile\",\n action=\"store_true\")\n megroup.add_argument(\"-a\", \"--static\", help=\"Static Library\",\n action=\"store_true\")\n megroup.add_argument(\"-o\", \"--shared\", help=\"Shared Library\",\n action=\"store_true\")\n args = parser.parse_args()\n return args\n\n\ndef make_templated_file(template_file, output_file, subs):\n\n \"\"\"Makes a new file from a template.\"\"\"\n\n # Open template file\n\n try:\n in_file = open(TEMPLATE_DIR + template_file, 'r')\n except IOError:\n print(\"Couldn't open file '\" + TEMPLATE_DIR +\n template_file + \"' for reading\")\n return False\n\n # Open output file\n\n try:\n out_file = open(output_file, 'w')\n except IOError:\n print(\"Couldn't open file '\" + OUTFILE + \"' for writing.\")\n return False\n\n for line_in in in_file.readlines():\n line_out = Template(line_in)\n out_file.write(line_out.substitute(subs))\n\n out_file.close()\n in_file.close()\n\n return True\n\n\ndef main():\n\n \"\"\"Main function.\"\"\"\n\n args = setup_parser()\n\n if args.language == \"c++\":\n if args.simple:\n TEMPLATEFILE = INFILE_CPP_SIMPLE\n elif args.static:\n TEMPLATEFILE = INFILE_CPP_STATIC\n elif args.shared:\n TEMPLATEFILE = INFILE_CPP_SHARED\n else:\n TEMPLATEFILE = INFILE_CPP_NORMAL\n elif args.language == \"c\":\n if args.simple:\n TEMPLATEFILE = INFILE_C_SIMPLE\n elif args.static:\n TEMPLATEFILE = INFILE_C_STATIC\n elif args.shared:\n TEMPLATEFILE = INFILE_C_SHARED\n else:\n TEMPLATEFILE = INFILE_C_NORMAL\n\n copyrightyear = date.today().year\n\n template_subs = {\"projname\": args.projectname,\n \"projnameunderline\": \"=\" * (len(args.projectname) +\n len(\" Makefile\")),\n \"language\": args.language.capitalize(),\n \"year\": copyrightyear}\n\n make_templated_file(TEMPLATEFILE, OUTFILE, template_subs)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"obsolete/makemakefile.py","file_name":"makemakefile.py","file_ext":"py","file_size_in_byte":3269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"45250336","text":"from flask import Blueprint, request, url_for, redirect\nfrom flask import render_template\nfrom sqlalchemy import or_, func\n\nfrom corpint.core import project, session\nfrom corpint.model.mapping import Mapping, Entity\n\nblueprint = Blueprint('base', __name__)\n\nSKIP_FIELDS = ['name', 'aliases', 'source_url', 'opencorporates_url',\n 'aleph_id']\nJUDGEMENTS = {\n 'TRUE': True,\n 'FALSE': False,\n 'NULL': None,\n}\n\n\ndef common_fields_mapping(entity, mapping):\n other = mapping.get_other(entity)\n keys = set()\n for obj in [entity, other]:\n for k, v in obj.data.items():\n if v is not None and k not in SKIP_FIELDS:\n keys.add(k)\n return list(sorted([k for k in keys]))\n\n\ndef mapping_height(entity, mapping):\n return len(common_fields_mapping(entity, mapping)) + 2\n\n\ndef mapping_compare(entity, mapping):\n other = mapping.get_other(entity)\n for field in common_fields_mapping(entity, mapping):\n label = field.replace('_', ' ').capitalize()\n yield (label, entity.data.get(field), other.data.get(field))\n\n\ndef mapping_key(entity, mapping):\n other = mapping.get_other(entity)\n return 'judgement:%s:%s' % (entity.uid, other.uid)\n\n\ndef mapping_match(mapping, judgement, decisions):\n if mapping.decided:\n return mapping.judgement == judgement\n pair = Mapping.sort_uids(mapping.left_uid, mapping.right_uid)\n return judgement is decisions.get(pair, 
False)\n\n\n@blueprint.app_context_processor\ndef template_context():\n    return {\n        'project': project.name.upper(),\n        'mapping_compare': mapping_compare,\n        'mapping_height': mapping_height,\n        'mapping_key': mapping_key,\n        'mapping_match': mapping_match,\n    }\n\n\n@blueprint.route('/', methods=['GET'])\ndef index():\n    return redirect(url_for('.entities'))\n\n\n@blueprint.route('/entities', methods=['GET'])\ndef entities():\n    text_query = request.args.get('q', '').strip()\n    offset = int(request.args.get('offset', '0'))\n    limit = 50\n    q = session.query(Entity)\n    q = q.filter(Entity.project == project.name)\n    q = q.filter(Entity.active == True)  # noqa\n    if len(text_query):\n        q = q.filter(Entity.data['name'].astext.ilike('%' + text_query + '%'))\n    total = q.count()\n    context = {\n        'total': total,\n        'has_prev': offset > 0,\n        'has_next': total >= (offset + limit),\n        'next': offset + limit,\n        'prev': max(0, offset - limit),\n        'text_query': text_query,\n    }\n    q = q.offset(offset).limit(limit)\n    return render_template('entities.html', entities=q, **context)\n\n\n@blueprint.route('/entity/<uid>', methods=['GET'])\ndef entity(uid):\n    entity = Entity.get(uid)\n    q = session.query(Mapping)\n    q = q.filter(Mapping.project == project.name)\n    q = q.filter(or_(\n        Mapping.left_uid == entity.uid,\n        Mapping.right_uid == entity.uid\n    ))\n    q = q.order_by(Mapping.score.desc())\n    decisions = Mapping.get_decisions()\n    undecided = q.filter(Mapping.decided == False)  # noqa\n    decided = q.filter(Mapping.decided == True)  # noqa\n    sections = (\n        ('Undecided', undecided),\n        ('Decided', decided)\n    )\n    return render_template('entity.html', entity=entity,\n                           sections=sections, decisions=decisions)\n\n\n@blueprint.route('/review', methods=['GET'])\ndef review_get(offset=None):\n    \"\"\"Retrieve two lists of possible equivalences to map.\"\"\"\n    limit = int(request.args.get('limit') or 3)\n    offset = int(request.args.get('offset') or 0)\n    candidates = Mapping.find_undecided(limit=limit, offset=offset)\n    decisions = Mapping.get_decisions()\n    return render_template('review.html', candidates=candidates,\n                           decisions=decisions)\n\n\n@blueprint.route('/review/entity', methods=['GET'])\ndef review_entity_get(offset=None):\n    \"\"\"Jump to the next entity that needs disambiguation.\"\"\"\n    qa = session.query(Mapping.left_uid.label('uid'),\n                       func.sum(Mapping.score).label('num'))\n    qa = qa.filter(Mapping.project == project.name)\n    qa = qa.filter(Mapping.decided == False)  # noqa\n    qa = qa.group_by(Mapping.left_uid)\n    qb = session.query(Mapping.right_uid.label('uid'),\n                       func.sum(Mapping.score).label('num'))\n    qb = qb.filter(Mapping.project == project.name)\n    qb = qb.filter(Mapping.decided == False)  # noqa\n    qb = qb.group_by(Mapping.right_uid)\n    sq = qa.union(qb).subquery()\n    q = session.query(sq.c.uid, func.sum(sq.c.num))\n    q = q.join(Entity, Entity.uid == sq.c.uid)\n    q = q.filter(Entity.active == True)  # noqa\n    q = q.group_by(sq.c.uid, Entity.tasked)\n    q = q.order_by(Entity.tasked.desc())\n    q = q.order_by(func.sum(sq.c.num).desc())\n    q = q.order_by(func.random())\n    if q.count() == 0:\n        return redirect(url_for('.entities'))\n    q = q.limit(1)\n    return redirect(url_for('.entity', uid=q.scalar()))\n\n\n@blueprint.route('/review', methods=['POST'])\ndef review_post():\n    \"\"\"Store the judgements submitted from the review form.\"\"\"\n    offset = int(request.args.get('offset') or 0)\n    for key, value in request.form.items():\n        if not key.startswith('judgement:'):\n            continue\n        _, left, right = key.split(':', 2)\n        
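# form values arrive as the strings 'TRUE'/'FALSE'/'NULL'; JUDGEMENTS maps them to True/False/None\n        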
value = JUDGEMENTS.get(value)\n        project.emit_judgement(left, right, value, decided=True)\n    action = request.form.get('action')\n    if action:\n        if action == 'next':\n            return redirect(url_for('.review_entity_get'))\n        return redirect(url_for('.entity', uid=action))\n    return redirect(url_for('.review_get', offset=offset))\n","sub_path":"corpint/webui/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"194379985","text":"from bs4 import BeautifulSoup as bs\nimport requests\nfrom splinter import Browser\nimport time\nimport pandas as pd\n\ndef init_browser():\n    # Replace the path with your actual path to the chromedriver\n    executable_path = {\"executable_path\": \"./chromedriver\"}\n    return Browser(\"chrome\", **executable_path, headless=False)\n\ndef scrape_info():\n    browser = init_browser()\n    # collect the latest news from the NASA news page\n    url='https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest'\n    time.sleep(1)\n    \n    browser.visit(url)\n    \n    # import ipdb; ipdb.set_trace()\n\n    soup = bs(browser.html, \"html.parser\")\n\n    # results = soup.find_all('div', class_='slide')\n    # titles = results.find('div', class_=\"content_title\" ).a.text\n    # parr =results.find_all('div', class_=\"rollover_description_inner\")[0].text\n\n    titles = soup.find('div', class_=\"content_title\" ).a.text\n    parr = soup.find_all('div', class_=\"rollover_description_inner\")[0].text\n    news_title = titles\n    news_p = parr\n    \n    #scrape the link for the Featured Image from https://www.jpl.nasa.gov/spaceimages\n    url= 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'\n    time.sleep(1)\n    browser.visit(url)\n    full_image_elem = browser.find_by_id('full_image')\n    full_image_elem.click()\n    time.sleep(2)\n    soup = bs(browser.html, 'html.parser')\n    img_element = soup.find('img', \"fancybox-image\")\n\n    featured_image_url = 'https://www.jpl.nasa.gov'+ img_element['src']\n    \n    #scrape the latest tweet with Mars weather\n    url= 'https://twitter.com/marswxreport?lang=en'\n\n    response = requests.get(url)\n    soup = bs(response.content, 'html.parser')\n    #access the data inside the descendants of the tweet class\n    tweet_elem = soup.find_all('div', \"tweet\")\n    mars_weather = tweet_elem[0].find('p', class_=\"TweetTextSize TweetTextSize--normal js-tweet-text tweet-text\").text\n\n    # use Pandas to scrape the table containing facts about the planet\n\n    url = 'https://space-facts.com/mars/'\n    tables = pd.read_html(url)\n    df=tables[2]\n    df.columns = ['description', 'value']\n    df.set_index('description', inplace=True)\n    html_table = df.to_html()\n    \n    # USGS Astrogeology: obtain high resolution images for each of Mars' hemispheres\n\n    url= 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'\n    browser.visit(url)\n    html = browser.html\n    soup = bs(html, 'html.parser')\n    elements = soup.find_all('div', class_=\"item\")\n    titles=[]\n    urls = []\n    for element in elements:\n        \n        title=element.h3.text\n        partial_url =element.a['href']\n        titles.append(title)\n        urls.append('https://astrogeology.usgs.gov'+partial_url)\n\n    #scraping the high resolution picture for the 4 hemispheres using the above URLs \n\n    url_hi_res =[]\n\n    for eachURL in urls:\n        browser.visit(eachURL)\n        time.sleep(2)\n        soup = bs(browser.html, 'html.parser')\n        img_url = soup.find('div', 'downloads')\n        pic=img_url.a['href']\n        
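# the first anchor inside the 'downloads' block links to the full-resolution image\n        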
url_hi_res.append(pic)\n\n    #dictionary with both title and image\n\n    # data = {'titles': titles, 'imag_HR': url_hi_res}\n    # hemi_image_urls= pd.DataFrame(data)\n    hemi_image_urls=[]\n    for key, val in zip(titles, url_hi_res):\n        hemi_image_urls.append({key: val})\n\n\n    # Store data in a dictionary\n    mars_data = {\n        \"news_title\": news_title,\n        \"news_p\": news_p,\n        \"featured_image_url\": featured_image_url,\n        \"mars_weather\": mars_weather,\n        \"hemi_image_urls\": hemi_image_urls,\n        \"html_table\": html_table\n    }\n    print()\n    # Close the browser after scraping\n    browser.quit()\n\n    # Return results\n    return mars_data\n\n\n\n\n","sub_path":"scrape_mars.py","file_name":"scrape_mars.py","file_ext":"py","file_size_in_byte":3792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"226992294","text":"#!/usr/bin/env python3\n\nimport os\nfrom flask import Flask, redirect, url_for, jsonify, request, render_template\nfrom flask_dance.contrib.google import make_google_blueprint, google\nfrom flask_cors import CORS\nimport json\nimport requests\n\nos.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'\nos.environ['OAUTHLIB_RELAX_TOKEN_SCOPE'] = '1'\n\napp = Flask(\n    __name__,\n    static_url_path='',\n    static_folder='display/static',\n    template_folder='display/templates')\napp.secret_key = os.environ.get(\"FLASK_SECRET_KEY\", \"supersekrit\")\napp.config[\"GOOGLE_OAUTH_CLIENT_ID\"] = os.environ.get(\"GOOGLE_OAUTH_CLIENT_ID\")\napp.config[\"GOOGLE_OAUTH_CLIENT_SECRET\"] = os.environ.get(\"GOOGLE_OAUTH_CLIENT_SECRET\")\n\ngoogle_bp = make_google_blueprint(\n    client_id=app.config[\"GOOGLE_OAUTH_CLIENT_ID\"],\n    client_secret=app.config[\"GOOGLE_OAUTH_CLIENT_SECRET\"],\n    scope=['profile', 'email'],\n    offline=True,\n    redirect_url='http://esdosmessaging.tk:9001/login/callback')\napp.register_blueprint(google_bp, url_prefix=\"/login\")\n\nfrom models import User\n\nCORS(app)\n\n# this index page is required to ensure that the user is logged in on Google\n# and to ensure that our app's OAuth Client has the access token to obtain the user's info from Google (checked by google.authorized)\n# else redirect the user to the Google login page and get the OAuth access token with flask-dance\n# if the user is logged in, the flask endpoint should serve the html file for the UI to the client.\n@app.route(\"/index\")\ndef index():\n    if not google.authorized:\n        print(\"no access token\")\n        return redirect(url_for(\"google.login\"))\n\n    try:\n        # insert code to serve html file for UI\n        resp = google.get(\"/oauth2/v1/userinfo\")\n        assert resp.ok, resp.text\n        user_id = resp.json()[\"id\"]\n        return render_template('index.html', user_id=user_id)\n\n    except Exception as e:\n        return \"There was an error retrieving your data from the Google API.
\" + str(e)\n\n\n@app.route('/login')\ndef login():\n return redirect(url_for(\"google.login\"))\n\n@app.route('/login/current-user')\ndef get_current_user():\n if not google.authorized:\n # return jsonify({\"message\": \"Access token has expired\"}), 500\n return redirect(url_for(\"google.login\"))\n try:\n resp = google.get(\"/oauth2/v1/userinfo\")\n assert resp.ok, resp.text\n except Exception as e:\n print(e)\n return jsonify({\"message\": \"Unable to retrieve user data\"}), 500\n return jsonify({ \n \"userID\": resp.json()[\"id\"], \n \"username\": resp.json()[\"email\"].split(\"@\")[0], \n \"fullname\": resp.json()[\"name\"], \n \"picture\": resp.json()[\"picture\"]})\n\n\n@app.route('/login/callback')\ndef callback():\n if not google.authorized:\n print(\"no access token\")\n return redirect(url_for(\"google.login\"))\n try:\n resp = google.get(\"/oauth2/v1/userinfo\")\n assert resp.ok, resp.text\n except Exception as e:\n return jsonify({\"message\": \"Unable to retrieve user data\", \"error\": str(e)}), 500\n\n usergoogleid = resp.json()[\"id\"]\n username = resp.json()[\"email\"].split(\"@\")[0]\n fullname = resp.json()[\"name\"]\n picture = resp.json()[\"picture\"]\n \n user_res = requests.get('http://esdosmessaging.tk:8000/api/user/' + usergoogleid)\n \n if user_res.status_code not in [200,404]:\n return jsonify({\"message\": \"Error accessing user database\"}), 500\n\n if not (user_res.ok):\n try:\n user_info = {\"userID\": usergoogleid, \"username\": username, \"fullname\": fullname, \"picture\": picture}\n res = requests.post('http://esdosmessaging.tk:8000/api/user' + usergoogleid, json=user_info)\n\n if not res.ok:\n return jsonify(json.loads(res.text))\n \n return \"ESDOS RegistrationESDOS account created! You may now close this tab.\"\n except Exception as e:\n return str(e) + \" 500\"\n else:\n user = json.loads(user_res.text)\n \n # Update user's profile picture if it has changed\n if user[\"picture\"] != picture:\n try:\n picture_res = requests.post('http://esdosmessaging.tk:8000/api/user/picture/' + usergoogleid, json={\"picture\": picture})\n if not picture_res.ok:\n return jsonify(json.loads(picture_res.text))\n except Exception as e:\n return str(e) + \" 500\"\n return redirect(url_for('index'))\n\nif __name__ == '__main__':\n app.run(host=\"0.0.0.0\", port=9001, debug=True)\n#rmb to change back to 9001 after local testing","sub_path":"services/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":4614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"327646288","text":"\"\"\"\nDefinition of views.\n\"\"\"\n\nfrom django.shortcuts import render\nfrom django.http import HttpRequest\nfrom django.template import RequestContext\nfrom datetime import datetime\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import JsonResponse\nfrom nltk.corpus import wordnet as wn\nfrom collections import Counter\nimport numpy as np #numpy.version.version\nimport urllib\nimport json\nimport cv2 #from cv2 import __version__ \nimport nltk\nimport requests\nimport re\nfrom .nl_model import CrmAdmin, Crm\n\n#r = CrmAdmin()\n\nclass JsonData:\n data = {}\n #def __init__(self):\n #self.data = {}\n\nclass NaturalLanguage(object):\n def __init__(self):\n self.corpus = []\n self.duplicates = []\n self.crm = Crm()\n\n @staticmethod\n def loadCorpora(self,filter):\n try:\n #crm_corpus = connectTOcrm(keyword)['value']\n crm_corpus = self.crm.connectTOcrm(filter)\n for i in range(0,len(crm_corpus)):\n 
corpora = {}\n if not crm_corpus[i]['knowledgearticleid'] in self.duplicates:\n #corpora[\"nlt_key\"] = keyword\n corpora['knowledgearticleid'] = crm_corpus[i]['knowledgearticleid']\n corpora[\"keywords\"] = str(crm_corpus[i]['keywords']).replace('\\n','')\n corpora[\"title\"] = str(crm_corpus[i]['title'])\n if crm_corpus[i]['description']:\n cleanDesc = re.sub('[^\\x00-\\x7F]+',' ', crm_corpus[i]['description'])\n else: cleanDesc = \"\"\n corpora[\"description\"] = cleanDesc\n self.duplicates.append(crm_corpus[i]['knowledgearticleid'])\n self.corpus.append(corpora)\n except Exception as ex:\n JsonData.data[\"sys_error\"] = ex\n return self.corpus\n\n @staticmethod\n def getSyn(sbj):\n #print sbj\n wordlist = []\n try:\n for word in sbj:\n for i,j in enumerate(wn.synsets(word)):\n for syn in j.lemma_names():\n #do not search again\n if not syn.lower() in sbj:\n if not syn.lower() in wordlist:\n wordlist.append(str(syn.lower()))\n except Exception as ex:\n JsonData.data[\"sys_error\"] = ex\n #print wordlist\n return wordlist\n\n @staticmethod\n def checkReserved(tagged):\n reservedValue = 3\n for word, entity in tagged:\n if word.lower() in [\"yes\", \"yeah\", \"correct\", \"yup\"]:\n return 0 #\"confirm\"\n elif word.lower() in [\"no\"]:\n return 1 #\"deny\"\n elif word.lower() in [\"hi\", \"hello\",\"hey\",\"greetings\"]:\n return 2 #\"greet\"\n return reservedValue\n\n @staticmethod\n def numSbj(tagged):\n total = 0\n counts = Counter(tag for word,tag in tagged)\n for key in counts.keys():\n if 'NN' in key:\n total += counts[key]\n return total\n\n @staticmethod\n def connect(tagged):\n r = CrmAdmin() \n #check for connection entry\n for word, entity in tagged:\n key = word.lower()\n if key == 'connect':\n r.status = True\n elif key == 'disconnect':\n r.status = False\n\n if key+'.onmicrosoft.com' in r.get_resource():\n r.type = key\n\n #check for connection type\n return str(r.type), r.status \n\n@csrf_exempt\ndef nlprocess(request):\n nl = NaturalLanguage();\n nl.corpus[:] = []\n nl.duplicates[:] = []\n\n #set defaults\n JsonData.data['success'] = False\n JsonData.data['value'] = []\n JsonData.data['syn'] = []\n JsonData.data['user_search'] = \"\"\n JsonData.data[\"sys_error\"] = \"\"\n JsonData.data['reserved_key'] = 3\n JsonData.data['number_of_nouns'] = 0\n JsonData.data['tagged_tokens'] = []\n JsonData.data['subject'] = []\n\n try:\n #psTag = ['NN','CD'] #NN inludes NNS NNP and NNPS\n #print request.session.get('chat_session_id', None)\n if request.method == \"POST\":\n \n sentence = request.POST.get(\"search_query\")\n tokens = nltk.word_tokenize(sentence.encode('ascii', 'ignore'))\n tagged = nltk.pos_tag(tokens)\n\n JsonData.data['reserved_key'] = nl.checkReserved(tagged)\n JsonData.data['number_of_nouns'] = nl.numSbj(tagged)\n #JsonData.data[\"request_uri\"] = str(request.META)#request.META.get(\"USERDOMAIN\")\n\n subject = []\n tokenized = []\n \n for word, entity in tagged:\n if 'NN' in entity:\n subject.append(str(word.lower()))\n tokenized.append(word+\"::\"+entity)\n\n #check if crm credentials is passed and is not reserved\n if nl.crm.checkCredentials(request) == True and JsonData.data['reserved_key'] == 3:\n nl.crm.crm_request[\"client_id\"] = request.POST.get(\"client_id\");\n nl.crm.crm_request[\"resource\"] = request.POST.get(\"resource\");\n nl.crm.crm_request[\"username\"] = request.POST.get(\"username\");\n nl.crm.crm_request[\"password\"] = request.POST.get(\"password\");\n nl.crm.crm_request[\"client_secret\"] = request.POST.get(\"client_secret\");\n 
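# endpoint URLs for the CRM knowledge-article service, also taken from the POST body\n                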
nl.crm.crm_request[\"url\"] = request.POST.get(\"url\");\n nl.crm.crm_request[\"knowledgearticlesUrl\"] = request.POST.get(\"knowledgearticlesUrl\");\n\n if subject:\n filter = nl.crm.getQueryFilter(subject)\n JsonData.data['value'] = nl.loadCorpora(nl,filter)\n JsonData.data['syn'] = []\n\n syn = []\n #get synonyms if first result is empty\n if len(JsonData.data['value']) == 0:\n if subject: \n syn = nl.getSyn(subject)\n JsonData.data['synonyms'] = syn\n if syn:\n filter = nl.crm.getQueryFilter(syn)\n JsonData.data['syn'] = nl.loadCorpora(nl,filter)\n JsonData.data['value'] = [] \n else:\n if subject:\n JsonData.data['synonyms'] = nl.getSyn(subject)\n\n JsonData.data['connection_status'] = nl.connect(tagged)\n JsonData.data['tagged_tokens'] = tokenized\n JsonData.data['subject'] = subject\n JsonData.data['user_search'] = str(sentence)\n JsonData.data[\"success\"] = True\n #print JsonData.data\n except Exception as ex:\n JsonData.data[\"sys_error\"] = ex\n return JsonResponse(JsonData.data)","sub_path":"nl_views.py","file_name":"nl_views.py","file_ext":"py","file_size_in_byte":6815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"484416749","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nThis module has the class used to control the Nest thermostat.\n\"\"\"\n\n# Imports #####################################################################\nfrom __future__ import absolute_import, print_function\n\nimport json\nimport os\nimport time\nfrom typing import List\n\nimport requests\n\nfrom dataclasses import dataclass\n\nfrom .models import Action\nfrom .settings import TOKEN_FOLDER, get_settings\n\n# Metadata ####################################################################\n__author__ = \"Timothy McFadden\"\n__creationDate__ = \"07-JUN-2017\"\n\n\n# Globals #####################################################################\nclass APIError(Exception):\n \"\"\"\n Exception if something went wrong in the API\n \"\"\"\n\n def __init__(self, response):\n super().__init__()\n self.response = response\n\n def __str__(self):\n return self.response.text\n\n def __repr__(self):\n return f\" Session\n electrode_group_name: varchar(80) # electrode group name from NWBFile\n ---\n -> BrainRegion\n -> Probe\n description: varchar(80) # description of electrode group\n target_hemisphere: enum('Right','Left')\n \"\"\"\n def make(self, key):\n nwb_file_name = key['nwb_file_name']\n nwb_file_abspath = Nwbfile.get_abs_path(nwb_file_name)\n with pynwb.NWBHDF5IO(path=nwb_file_abspath, mode='r') as io:\n nwbf = io.read()\n # fill in the groups\n egroups = list(nwbf.electrode_groups.keys())\n \n for eg_name in egroups:\n # for each electrode group, we get the group and add an electrode group entry.\n # as the ElectrodeGroup\n electrode_group = nwbf.get_electrode_group(eg_name)\n key['electrode_group_name'] = eg_name\n # check to see if the location is listed in the region.BrainRegion schema, and if not add it\n region_dict = dict()\n region_dict['region_name'] = electrode_group.location\n region_dict['subregion_name'] = ''\n region_dict['subsubregion_name'] = ''\n query = BrainRegion() & region_dict\n if len(query) == 0:\n # this region isn't in the list, so add it\n BrainRegion().insert1(region_dict)\n query = BrainRegion() & region_dict\n # we also need to get the region_id for this new region or find the right region_id\n region_id_dict = query.fetch1()\n key['region_id'] = region_id_dict['region_id']\n key['description'] = 
electrode_group.description\n # the following should probably be a function that returns the probe devices from the file\n probe_re = re.compile(\"probe\")\n for d in nwbf.devices:\n if probe_re.search(d):\n if nwbf.devices[d] == electrode_group.device:\n # this will match the entry in the device schema\n key['probe_type'] = electrode_group.device.probe_type\n break\n if 'probe_type' not in key:\n key['probe_type'] = 'unknown-probe-type'\n self.insert1(key, skip_duplicates=True)\n\n@schema\nclass Electrode(dj.Imported):\n definition = \"\"\"\n -> ElectrodeGroup\n electrode_id: int # the unique number for this electrode\n ---\n -> Probe.Electrode\n -> BrainRegion\n name='': varchar(80) # unique label for each contact\n original_reference_electrode=-1: int # the configured reference electrode for this electrode \n x=NULL: float # the x coordinate of the electrode position in the brain\n y=NULL: float # the y coordinate of the electrode position in the brain\n z=NULL: float # the z coordinate of the electrode position in the brain\n filtering: varchar(200) # description of the signal filtering\n impedance=null: float # electrode impedance\n bad_channel: enum(\"True\",\"False\") # if electrode is 'good' or 'bad' as observed during recording\n x_warped=NULL: float # x coordinate of electrode position warped to common template brain\n y_warped=NULL: float # y coordinate of electrode position warped to common template brain\n z_warped=NULL: float # z coordinate of electrode position warped to common template brain\n contacts: varchar(80) # label of electrode contacts used for a bipolar signal -- current workaround\n \"\"\"\n\n def make(self, key):\n nwb_file_name = key['nwb_file_name']\n nwb_file_abspath = Nwbfile.get_abs_path(nwb_file_name)\n with pynwb.NWBHDF5IO(path=nwb_file_abspath, mode='r') as io:\n nwbf = io.read()\n # create the table of electrodes\n electrodes = nwbf.electrodes.to_dataframe()\n\n # Below it would be better to find the mapping between nwbf.electrodes.colnames and the schema fields and\n # where possible, assign automatically. 
It would also help to be robust to missing fields and have them\n # assigned as empty if they don't exist in the nwb file in case people are not using our column names.\n\n for elect in electrodes.iterrows():\n key['electrode_group_name'] = elect[1].group_name\n key['electrode_id'] = elect[0]\n key['name'] = str(elect[0])\n key['probe_type'] = elect[1].group.device.probe_type\n key['probe_shank'] = elect[1].probe_shank\n key['probe_electrode'] = elect[1].probe_electrode \n key['bad_channel'] = 'True' if elect[1].bad_channel else 'False'\n # look up the region\n region_dict = dict()\n region_dict['region_name'] = elect[1].group.location\n region_dict['subregion_name'] = ''\n region_dict['subsubregion_name'] = ''\n key['region_id'] = (BrainRegion() & region_dict).fetch1('region_id')\n key['x'] = elect[1].x\n key['y'] = elect[1].y\n key['z'] = elect[1].z\n key['x_warped'] = 0\n key['y_warped'] = 0\n key['z_warped'] = 0\n key['contacts'] = ''\n key['filtering'] = elect[1].filtering\n key['impedance'] = elect[1].imp\n try:\n key['original_reference_electrode'] = elect[1].ref_elect\n except:\n key['original_reference_electrode'] = -1\n self.insert1(key, skip_duplicates=True)\n\n\n\n@schema\nclass Raw(dj.Imported):\n definition = \"\"\"\n # Raw voltage timeseries data, electricalSeries in NWB\n -> Session\n ---\n -> IntervalList\n raw_object_id: varchar(80) # the NWB object ID for loading this object from the file\n sampling_rate: float # Sampling rate calculated from data, in Hz\n comments: varchar(80)\n description: varchar(80)\n \"\"\"\n def __init__(self, *args):\n # do your custom stuff here\n super().__init__(*args) # call the base implementation\n\n def make(self, key):\n nwb_file_name = key['nwb_file_name']\n nwb_file_abspath = Nwbfile.get_abs_path(nwb_file_name)\n with pynwb.NWBHDF5IO(path=nwb_file_abspath, mode='r') as io:\n nwbf = io.read()\n raw_interval_name = \"raw data valid times\"\n # get the acquisition object\n try:\n rawdata = nwbf.get_acquisition()\n except:\n print(f'WARNING: Unable to get aquisition object in: {nwb_file_abspath}')\n return\n print('Estimating sampling rate...')\n # NOTE: Only use first 1e6 timepoints to save time\n sampling_rate = estimate_sampling_rate(np.asarray(rawdata.timestamps[:1000000]), 1.5)\n print(f'Estimated sampling rate: {sampling_rate}')\n key['sampling_rate'] = sampling_rate\n # get the list of valid times given the specified sampling rate.\n interval_dict = dict()\n interval_dict['nwb_file_name'] = key['nwb_file_name']\n interval_dict['interval_list_name'] = raw_interval_name\n interval_dict['valid_times'] = get_valid_intervals(np.asarray(rawdata.timestamps), key['sampling_rate'],\n 1.75, 0)\n IntervalList().insert1(interval_dict, skip_duplicates=True)\n\n # now insert each of the electrodes as an individual row, but with the same nwb_object_id\n key['raw_object_id'] = rawdata.object_id\n key['sampling_rate'] = sampling_rate\n print(f'Importing raw data: Estimated sampling rate:\\t{key[\"sampling_rate\"]} Hz')\n print(f' Number of valid intervals:\\t{len(interval_dict[\"valid_times\"])}')\n key['interval_list_name'] = raw_interval_name\n key['comments'] = rawdata.comments\n key['description'] = rawdata.description\n self.insert1(key, skip_duplicates='True')\n\n def nwb_object(self, key):\n # return the nwb_object; FIX: this should be replaced with a fetch call. Note that we're using the raw file\n # so we can modify the other one. \n # NOTE: This leaves the file open, which means that it cannot be appended to. 
This should be fine normally\n nwb_file_name = key['nwb_file_name']\n\n # TO DO: This likely leaves the io object in place and the file open. Fix\n nwb_file_abspath = Nwbfile.get_abs_path(nwb_file_name)\n io = pynwb.NWBHDF5IO(path=nwb_file_abspath, mode='r')\n nwbf = io.read()\n # get the object id\n raw_object_id = (self & {'nwb_file_name' : key['nwb_file_name']}).fetch1('raw_object_id')\n return nwbf.objects[raw_object_id]\n \n def fetch_nwb(self, *attrs, **kwargs):\n return fetch_nwb(self, (Nwbfile, 'nwb_file_abs_path'), *attrs, **kwargs)\n\n\n@schema\nclass SampleCount(dj.Imported):\n definition = \"\"\"\n # Sample count / timestamp timeseries\n -> Session\n ---\n sample_count_object_id: varchar(40) # the NWB object ID for loading this object from the file\n \"\"\"\n def make(self, key):\n nwb_file_name = key['nwb_file_name']\n nwb_file_abspath = Nwbfile.get_abs_path(nwb_file_name)\n with pynwb.NWBHDF5IO(path=nwb_file_abspath, mode='r') as io:\n nwbf = io.read()\n # get the sample count object\n #TODO: change name when nwb file is changed\n sample_count = get_data_interface(nwbf, 'CameraSampleFrameCounts')\n if sample_count is None:\n print(f'WARNING: Unable to get sample count object in: {nwb_file_abspath}')\n return\n key['sample_count_object_id'] = sample_count.object_id\n self.insert1(key)\n\n def fetch_nwb(self, *attrs, **kwargs):\n return fetch_nwb(self, (Nwbfile, 'nwb_file_abs_path'), *attrs, **kwargs)\n\n@schema\nclass LFPSelection(dj.Manual):\n definition = \"\"\"\n -> Session\n \"\"\"\n\n class LFPElectrode(dj.Part):\n definition = \"\"\"\n -> master\n -> Electrode\n \"\"\"\n\n def set_lfp_electrodes(self, nwb_file_name, electrode_list):\n '''\n Removes all electrodes for the specified nwb file and then adds back the electrodes in the list\n :param nwb_file_name: string - the name of the nwb file for the desired session\n :param electrode_list: list of electrodes to be used for LFP\n :return:\n '''\n # remove the session and then recreate the session and Electrode list\n (LFPSelection() & {'nwb_file_name' : nwb_file_name}).delete()\n # check to see if the user allowed the deletion\n if len((LFPSelection() & {'nwb_file_name' : nwb_file_name}).fetch()) == 0:\n LFPSelection().insert1({'nwb_file_name' : nwb_file_name})\n\n # TO DO: do this in a better way\n all_electrodes = Electrode.fetch(as_dict=True)\n primary_key = Electrode.primary_key\n for e in all_electrodes:\n # create a dictionary so we can insert new elects\n if e['electrode_id'] in electrode_list:\n lfpelectdict = {k: v for k, v in e.items() if k in primary_key}\n LFPSelection().LFPElectrode.insert1(lfpelectdict, replace='True')\n\n@schema\nclass LFP(dj.Imported):\n definition = \"\"\"\n -> LFPSelection\n ---\n -> IntervalList # the valid intervals for the data\n -> FirFilter # the filter used for the data\n -> AnalysisNwbfile # the name of the nwb file with the lfp data\n lfp_object_id: varchar(80) # the NWB object ID for loading this object from the file\n lfp_sampling_rate: float # the sampling rate, in HZ\n \"\"\"\n\n def make(self, key):\n # get the NWB object with the data; FIX: change to fetch with additional infrastructure\n rawdata = Raw().nwb_object(key)\n sampling_rate, interval_list_name = (Raw() & key).fetch1('sampling_rate', 'interval_list_name')\n sampling_rate = int(np.round(sampling_rate))\n\n #TEST\n #interval_list_name = '01_s1'\n key['interval_list_name'] = interval_list_name\n \n valid_times = (IntervalList() & {'nwb_file_name': key['nwb_file_name'] , 'interval_list_name': 
interval_list_name}).fetch1('valid_times')\n\n # target 1 KHz sampling rate\n decimation = sampling_rate // 1000\n\n # get the LFP filter that matches the raw data\n filter = (FirFilter() & {'filter_name' : 'LFP 0-400 Hz'} & {'filter_sampling_rate':\n sampling_rate}).fetch(as_dict=True)\n\n # there should only be one filter that matches, so we take the first of the dictionaries\n key['filter_name'] = filter[0]['filter_name']\n key['filter_sampling_rate'] = filter[0]['filter_sampling_rate']\n\n filter_coeff = filter[0]['filter_coeff']\n if len(filter_coeff) == 0:\n print(f'Error in LFP: no filter found with data sampling rate of {sampling_rate}')\n return None\n # get the list of selected LFP Channels from LFPElectrode\n electrode_keys = (LFPSelection.LFPElectrode & key).fetch('KEY')\n electrode_id_list = list(k['electrode_id'] for k in electrode_keys)\n\n lfp_file_name = AnalysisNwbfile().create(key['nwb_file_name'])\n\n lfp_file_abspath = AnalysisNwbfile().get_abs_path(lfp_file_name)\n # test:\n lfp_object_id = FirFilter().filter_data_nwb(lfp_file_abspath, rawdata,\n filter_coeff, valid_times, electrode_id_list, decimation)\n\n key['analysis_file_name'] = lfp_file_name\n key['lfp_object_id'] = lfp_object_id\n key['lfp_sampling_rate'] = sampling_rate // decimation\n self.insert1(key)\n \n def nwb_object(self, key):\n # return the nwb_object.\n lfp_file_name = (LFP() & {'nwb_file_name': key['nwb_file_name']}).fetch1('analysis_file_name')\n lfp_file_abspath = AnalysisNwbfile().get_abs_path(lfp_file_name)\n io = pynwb.NWBHDF5IO(path=lfp_file_abspath, mode='r')\n nwbf = io.read()\n # get the object id\n nwb_object_id = (self & {'analysis_file_name' : lfp_file_name}).fetch1('filtered_data_object_id')\n return nwbf.objects[nwb_object_id]\n\n def fetch_nwb(self, *attrs, **kwargs):\n return fetch_nwb(self, (AnalysisNwbfile, 'analysis_file_abs_path'), *attrs, **kwargs)\n\n@schema\nclass LFPBandSelection(dj.Manual):\n definition = \"\"\"\n -> LFP\n -> FirFilter # the filter to use for the data \n --- \n -> IntervalList # the set of times to be filtered\n lfp_band_sampling_rate: int # the sampling rate for this band\n \"\"\"\n\n class LFPBandElectrode(dj.Part):\n definition = \"\"\"\n -> master\n -> LFPSelection.LFPElectrode # the LFP electrode to be filtered LFP\n reference_elect_id = -1: int # the reference electrode to use; -1 for no reference\n ---\n \"\"\"\n\n def set_lfp_band_electrodes(self, nwb_file_name, electrode_list, filter_name, interval_list_name, reference_electrode_list, lfp_band_sampling_rate):\n '''\n Adds an entry for each electrode in the electrode_list with the specified filter, interval_list, and reference electrode.\n Also removes any entries that have the same filter, interval list and reference electrode but are not in the electrode_list.\n :param nwb_file_name: string - the name of the nwb file for the desired session\n :param electrode_list: list of LFP electrodes to be filtered\n :param filter_name: the name of the filter (from the FirFilter schema)\n :param interval_name: the name of the interval list (from the IntervalList schema)\n :param reference_electrode_list: A single electrode id corresponding to the reference to use for all electrodes or a list with one element per entry in the electrode_list\n :param lfp_band_sampling_rate: The output sampling rate to be used for the filtered data; must be an integer divisor of the LFP sampling rate\n :return: none\n '''\n # Error checks on parameters\n # electrode_list\n available_electrodes = (LFPSelection().LFPElectrode() & 
{'nwb_file_name' : nwb_file_name}).fetch('electrode_id')\n if not np.all(np.isin(electrode_list,available_electrodes)):\n raise ValueError('All elements in electrode_list must be valid electrode_ids in the LFPSelection table')\n #sampling rate\n lfp_sampling_rate = (LFP() & {'nwb_file_name' : nwb_file_name}).fetch1('lfp_sampling_rate')\n decimation = lfp_sampling_rate // lfp_band_sampling_rate\n if lfp_sampling_rate // decimation != lfp_band_sampling_rate:\n raise ValueError(f'lfp_band_sampling_rate {lfp_band_sampling_rate} is not an integer divisor of lfp sampling rate {lfp_sampling_rate}')\n #filter \n if not len((FirFilter() & {'filter_name' : filter_name, 'filter_sampling_rate' : lfp_sampling_rate}).fetch()):\n raise ValueError(f'filter {filter_name}, sampling rate {lfp_sampling_rate} is not in the FirFilter table')\n #interval_list\n if not len((IntervalList() & {'interval_list_name' : interval_list_name}).fetch()):\n raise ValueError(f'interval list {interval_list_name} is not in the IntervalList table; the list must be added before this function is called')\n # reference_electrode_list\n if len(reference_electrode_list) != 1 and len(reference_electrode_list) != len(electrode_list):\n raise ValueError('reference_electrode_list must contain either 1 or len(electrode_list) elements')\n # add a -1 element to the list to allow for the no reference option\n available_electrodes = np.append(available_electrodes, [-1])\n if not np.all(np.isin(reference_electrode_list,available_electrodes)):\n raise ValueError('All elements in reference_electrode_list must be valid electrode_ids in the LFPSelection table')\n \n # make a list of all the references\n ref_list = np.zeros((len(electrode_list),))\n ref_list[:] = reference_electrode_list \n\n key = dict()\n key['nwb_file_name'] = nwb_file_name\n key['filter_name'] = filter_name\n key['filter_sampling_rate'] = lfp_sampling_rate\n key['interval_list_name'] = interval_list_name\n key['lfp_band_sampling_rate'] = lfp_sampling_rate // decimation\n # insert an entry into the main LFPBandSelection table\n self.insert1(key, skip_duplicates=True)\n\n #remove the keys that are not used for the LFPBandElectrode table\n key.pop('interval_list_name')\n key.pop('lfp_band_sampling_rate')\n #get all of the current entries and delete any that are not in the list\n elect_id, ref_id = (self.LFPBandElectrode() & key).fetch('electrode_id', 'reference_elect_id')\n for e, r in zip(elect_id, ref_id): \n if not len(np.where((electrode_list == e) & (ref_list == r))[0]):\n key['electrode_id'] = e\n key['reference_elect_id'] = r\n (self.LFPBandElectrode() & key).delete()\n\n #iterate through all of the new elements and add them\n for e, r in zip(electrode_list, ref_list):\n key['electrode_id'] = e\n key['electrode_group_name'] = (Electrode & {'electrode_id' : e}).fetch1('electrode_group_name')\n key['reference_elect_id'] = r\n self.LFPBandElectrode().insert1(key, skip_duplicates=True)\n\n \n\n@schema\nclass LFPBand(dj.Computed):\n definition = \"\"\"\n -> LFPBandSelection\n ---\n -> AnalysisNwbfile\n filtered_data_object_id: varchar(80) # the NWB object ID for loading this object from the file\n \"\"\"\n def make(self, key):\n # get the NWB object with the lfp data; FIX: change to fetch with additional infrastructure\n lfp_object = (LFP() & {'nwb_file_name' : key['nwb_file_name']}).fetch_nwb()[0]['lfp']\n \n # load all the data to speed filtering\n lfp_data = np.asarray(lfp_object.data, dtype=type(lfp_object.data[0][0]))\n lfp_timestamps = np.asarray(lfp_object.timestamps, 
dtype=type(lfp_object.timestamps[0]))\n\n #get the electrodes to be filtered and their references\n lfp_band_elect_id, lfp_band_ref_id = (LFPBandSelection().LFPBandElectrode() & key).fetch('electrode_id', 'reference_elect_id')\n\n # get the indices of the electrodes to be filtered and the references \n lfp_band_elect_index = get_electrode_indeces(lfp_object, lfp_band_elect_id)\n lfp_band_ref_index = get_electrode_indeces(lfp_object, lfp_band_ref_id)\n\n # subtract off the references for the selected channels \n for index, elect_index in enumerate(lfp_band_elect_index):\n if lfp_band_ref_id[index] != -1:\n lfp_data[:,elect_index] = lfp_data[:,elect_index] - lfp_data[:,lfp_band_ref_index[index]] \n\n\n lfp_sampling_rate = (LFP() & {'nwb_file_name': key['nwb_file_name']}).fetch1('lfp_sampling_rate')\n interval_list_name, lfp_band_sampling_rate = (LFPBandSelection() & key).fetch1('interval_list_name', 'lfp_band_sampling_rate')\n valid_times = (IntervalList() & {'interval_list_name' : interval_list_name}).fetch1('valid_times')\n filter_name, filter_sampling_rate, lfp_band_sampling_rate = (LFPBandSelection() & key).fetch1('filter_name', 'filter_sampling_rate', 'lfp_band_sampling_rate')\n\n decimation = int(lfp_sampling_rate) // lfp_band_sampling_rate\n\n # get the LFP filter that matches the raw data\n filter = (FirFilter() & {'filter_name' : filter_name} & \n {'filter_sampling_rate': filter_sampling_rate}).fetch(as_dict=True)\n if len(filter) == 0:\n raise ValueError(f'Filter {filter_name} and sampling_rate {filter_sampling_rate} does not exist in the FirFilter table')\n \n filter_coeff = filter[0]['filter_coeff']\n if len(filter_coeff) == 0:\n print(f'Error in LFPBand: no filter found with data sampling rate of {filter_sampling_rate}')\n return None\n\n #create the analysis nwb file to store the results.\n lfp_band_file_name = AnalysisNwbfile().create(key['nwb_file_name'])\n lfp_band_file_abspath = AnalysisNwbfile().get_abs_path(lfp_band_file_name)\n # filter the data and write to the nwb file\n filtered_data_object_id = FirFilter().filter_data_nwb(lfp_band_file_abspath, lfp_object,\n filter_coeff, valid_times, lfp_band_elect_id, decimation)\n\n key['analysis_file_name'] = lfp_band_file_name\n key['filtered_data_object_id'] = filtered_data_object_id\n self.insert1(key)\n\n def fetch_nwb(self, *attrs, **kwargs):\n return fetch_nwb(self, (AnalysisNwbfile, 'analysis_file_abs_path'), *attrs, **kwargs)\n\n\n","sub_path":"nwb_datajoint/common/common_ephys.py","file_name":"common_ephys.py","file_ext":"py","file_size_in_byte":24266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"85717732","text":"import os\nfrom django.shortcuts import render\nfrom django.http import HttpResponse, JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.core import serializers\n\nimport ast\nimport re\nimport json\nimport time, datetime \nimport numpy as np\nfrom backendModels.models import UrlLog, User, QuantitativeLog\n\nimport pandas as pd\n\nhttp_response_obj = {}\nhttp_response_obj['code'] = 0\nhttp_response_obj['data'] = None \nhttp_response_obj['msg'] = ''\n\ndef index(request):\n return render(request, 'index.html')\n\n@csrf_exempt\ndef uploadUrlLog(request):\n if request.method == 'POST':\n fname = request.FILES.get('file')\n if fname:\n user, created = User.objects.get_or_create(\n userNo=fname.name.split('.')[0].split('-')[1])\n if (created):\n user.save()\n file = open('static/upload/' + fname.name, 'wb')\n for chunk in 
fname.chunks():\n file.write(chunk)\n file.close()\n return HttpResponse('OK')\n\n\n@csrf_exempt\ndef uploadUrlLogOld(request):\n if request.method == 'POST':\n fileName = request.FILES.get('file')\n invalidData = 0\n if fileName:\n for row in fileName:\n try:\n data = re.split(r',(?!(?:(?:(?!,)(?!\").)*,)*(?:(?!,)(?!\").)+\")', row.decode('gbk'))\n user, created = User.objects.get_or_create(\n userNo=data[0], ip=data[3])\n if (created):\n user.save()\n y, m, d, h, M = (time.strptime(\n data[7].strip('\\r\\n'), \"%Y/%m/%d %H:%M\"))[0: 5]\n urlLog = UrlLog(\n user_id=user.id,\n url=data[4],\n urlArgs=data[6],\n time=datetime.datetime(y, m, d, h, M, 0, 0)\n )\n urlLog.save()\n except Exception as e:\n print (e, row)\n invalidData += 1\n print ('Invalid data:', invalidData, 'records in total.')\n return HttpResponse('OK')\n\ndef chartShow(request):\n series = {}\n for item in QuantitativeLog.objects.order_by('-similarEuc')[:10]:\n series[item.url] = {\n 'x': json.loads(item.timeSeries),\n 'y': json.loads(item.urlSimilarOriginSeries)\n }\n \n return render(request, 'chart.html', {'series': series})\n\n@csrf_exempt\ndef fetchUrlList(request):\n data = {}\n series = QuantitativeLog.objects\n data['total'] = series.count()\n intParams = changeRequestIntoInt(request)\n start = ( intParams['page'] - 1 ) * intParams['size']\n end = intParams['page'] * intParams['size']\n data['list'] = list(series.all()[start:end].values('user', 'url', 'similarEuc', 'urlArgsEntropy', 'abnormalTimeProbability', 'sameArgsDiversity', 'webClassify'))\n http_response_obj['data'] = data\n return JsonResponse(http_response_obj)\n\ndef changeRequestIntoInt(request):\n result = {}\n params = request.GET\n for item in params:\n if params[item] is not None and params[item].isnumeric():\n result[item] = int(params[item])\n else:\n result[item] = params[item]\n return result\n\n@csrf_exempt\ndef fetchLabelList(request):\n # autoMark()\n # return\n data = {}\n series = UrlLog.objects.filter(mark=1)\n data['total'] = series.count() \n intParams = changeRequestIntoInt(request)\n start = ( intParams['page'] - 1 ) * intParams['size']\n end = intParams['page'] * intParams['size']\n data['list'] = list(series[start:end].values('url', 'user_id', 'times', 'urlArgs', 'id'))\n http_response_obj['data'] = data\n return JsonResponse(http_response_obj)\n\n# Auto-labeling function: by default, accesses with a total visit count < 10 are treated as benign and their mark is set to 0\ndef autoMark():\n allModel = UrlLog.objects.filter(mark=1)\n whitelist = ['qq.com', 'weixin.com', 'baidu.com', 'duba.net', 'sogou.com', 'ludashi', 'msg.71.am', 'cmcm.com', 'alibaba.com', '360.cn', 'sina', 'youdao.com', 'rising.com.cn', 'alicdn.com', 'aliyun.com', 'huajiao.com', 'ijinshan.com', '163.com', 'api.foxitreader.cn', 'api.mi.wifi.com', 'wx.qlogo.cn']\n for i in range(1, 31):\n model = allModel[((i - 1) * 10000) : (10000 * i)]\n # p = re.compile('^((25[0-5]|2[0-4]\\d|[01]?\\d\\d?)\\.){3}(25[0-5]|2[0-4]\\d|[01]?\\d\\d?)$')\n for item in model:\n # for wl in whitelist:\n # if (wl in item.url):\n # item.mark = 0\n # item.save()\n # print (item.id)\n # continue\n # else:\n # times = ast.literal_eval(item.times) \n # total = 0\n # for i in times:\n # if (i > 0):\n # total+=1\n # if (total < 20):\n # if p.match(item.url):\n if (item.url != 'stat.funshion.net'):\n item.mark = 0\n item.save()\n print (item.id)\n\n@csrf_exempt\ndef saveLabel(request):\n if request.method == 'POST':\n body = json.loads(request.body)\n for item in body['label']:\n QuantitativeLog.objects.filter(user_id=item['user_id'], url=item['url']).update(label=item['label'])\n 
UrlLog.objects.filter(id=item['id']).update(mark=0)\n http_response_obj['data'] = 'ok'\n return JsonResponse(http_response_obj)","sub_path":"backend/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"651177050","text":"import click\n\nfrom pathlib import Path\nfrom nanopath.beastling import BirthDeathSkylineContemporary\nfrom nanopath.utils import modify_model_priors\n\n@click.command()\n@click.option(\n \"--alignment\", \"-a\", required=True, type=Path,\n help=\"Variable site alignment of non-recombinant core-genome SNPs\",\n)\n@click.option(\n \"--data\", \"-d\", required=True, type=Path,\n help=\"Data file for samples in the alignment with headers: name, date\"\n)\n@click.option(\n \"--yaml\", \"-y\", required=False, type=Path, default=None,\n help=\"YAML configuration file including prior settings [None]\"\n)\n@click.option(\n \"--yaml_dir\", \"-yd\", required=False, type=Path, default=None,\n help=\"YAML configuration file directory to process all YAML files from [None]\"\n)\n@click.option(\n \"--yaml_glob\", \"-yg\", required=False, type=str, default=\"*.yaml\",\n help=\"Glob on the directory to subset config files matching alignment and data input ['*.yaml']\"\n)\n@click.option(\n \"--clock\", \"-c\", required=False, type=str, default='strict',\n help=\"Molecular clock model: strict, relaxed_exponential, relaxed_lognormal [strict]\"\n)\n@click.option(\n \"--mcmc\", \"-m\", required=False, type=str, default='default',\n help=\"Chain type: MCMC (default) or Coupled MCMC (coupled) [default]\"\n)\n@click.option(\n \"--length\", \"-l\", required=False, type=int, default=1e7,\n help=\"Number of steps in Monte Carlo chain [1e7]\"\n)\n@click.option(\n \"--hot\", \"-h\", required=False, type=int, default=3,\n help=\"Number of hot chains in Coupled Monte Carlo chain [3]\"\n)\n@click.option(\n \"--intervals\", \"-i\", is_flag=True,\n help=\"Use sampling proportion intervals from configuration YAML [false]\"\n)\n@click.option(\n \"--sample_prior\", \"-s\", is_flag=True,\n help=\"Sample from prior [false]\"\n)\n@click.option(\n \"--prefix\", \"-p\", required=False, type=str, default='bdss',\n help=\"Prefix for sample logs from BEAST [bdss]\"\n)\n@click.option(\n \"--outdir\", \"-o\", required=False, type=Path, default=Path('bdss'),\n help=\"Outdir for XML files [$PWD/bdss]\"\n)\n@click.option(\n \"--model_prior\", \"-pr\", required=False, type=str, default=None, multiple=True,\n help=\"One or multiple args setting the replacement prior value in the YAML file with keys in string [:]\"\n)\n@click.option(\n \"--tag\", \"-t\", is_flag=True,\n help=\"If modified on the fly attach the key path and setting to the output prefix [false]\"\n)\ndef xml_bdsc(alignment, data, outdir, yaml, yaml_dir, yaml_glob, clock, mcmc, length, hot, intervals, prefix, sample_prior, model_prior, tag):\n\n \"\"\" Pre-configured Birth-Death Skyline Contemporary XML \"\"\"\n\n if yaml_dir is not None:\n yaml_files = {f.stem: f for f in yaml_dir.glob(f\"{yaml_glob}\")}\n else:\n yaml_files = {prefix: yaml}\n\n outdir.mkdir(parents=True, exist_ok=True)\n\n for prefix, y in yaml_files.items():\n\n bdsc = BirthDeathSkylineContemporary(\n alignment=alignment,\n data=data,\n clock_model=clock,\n chain_type=mcmc,\n chain_length=length,\n chain_number=hot+1,\n prefix=prefix,\n sample_prior=sample_prior\n )\n\n bdsc.print_configuration()\n bdsc.check_configuration()\n\n config = 
bdsc.read_config(file=yaml)\n\n # Set model prior configuration\n model_priors = config.get('priors').get('model')\n # Modify the model prior configs if settings are passed\n if model_prior:\n model_priors = modify_model_priors(model_priors, model_prior, tag, prefix)\n\n bdsc.set_model_priors(prior_config=model_priors, distribution=True)\n\n # Set clock prior configuration\n clock_priors = config.get('priors').get('clock')\n bdsc.set_clock(prior_config=clock_priors)\n\n if intervals:\n # Set slice configurations and overwrite associated priors\n slice_config = config.get('priors').get('intervals')\n bdsc.set_slices(slice_config=slice_config)\n\n bdsc.construct_template(\n xml=outdir / f'{prefix}.xml'\n )\n","sub_path":"nanopath/terminal/beastling/xml_bdsc/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":4050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"493476532","text":"# Copyright 2015-2016, Google Inc.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following disclaimer\n# in the documentation and/or other materials provided with the\n# distribution.\n# * Neither the name of Google Inc. nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"Provides distutils command classes for the GRPC Python setup process.\"\"\"\n\nimport distutils\nimport os\nimport os.path\nimport re\nimport subprocess\nimport sys\n\nimport setuptools\nfrom setuptools.command import build_py\nfrom setuptools.command import test\nfrom setuptools.command import build_ext\n\nPYTHON_STEM = os.path.dirname(os.path.abspath(__file__))\n\nCONF_PY_ADDENDUM = \"\"\"\nextensions.append('sphinx.ext.napoleon')\nnapoleon_google_docstring = True\nnapoleon_numpy_docstring = True\n\nhtml_theme = 'sphinx_rtd_theme'\n\"\"\"\n\n\nclass CommandError(Exception):\n \"\"\"Simple exception class for GRPC custom commands.\"\"\"\n\n\nclass SphinxDocumentation(setuptools.Command):\n \"\"\"Command to generate documentation via sphinx.\"\"\"\n\n description = 'generate sphinx documentation'\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n # We import here to ensure that setup.py has had a chance to install the\n # relevant package eggs first.\n import sphinx\n import sphinx.apidoc\n metadata = self.distribution.metadata\n src_dir = os.path.join(\n PYTHON_STEM, self.distribution.package_dir[''], 'grpc')\n sys.path.append(src_dir)\n sphinx.apidoc.main([\n '', '--force', '--full', '-H', metadata.name, '-A', metadata.author,\n '-V', metadata.version, '-R', metadata.version,\n '-o', os.path.join('doc', 'src'), src_dir])\n conf_filepath = os.path.join('doc', 'src', 'conf.py')\n with open(conf_filepath, 'a') as conf_file:\n conf_file.write(CONF_PY_ADDENDUM)\n sphinx.main(['', os.path.join('doc', 'src'), os.path.join('doc', 'build')])\n\n\nclass BuildProtoModules(setuptools.Command):\n \"\"\"Command to generate project *_pb2.py modules from proto files.\"\"\"\n\n description = 'build protobuf modules'\n user_options = [\n ('include=', None, 'path patterns to include in protobuf generation'),\n ('exclude=', None, 'path patterns to exclude from protobuf generation')\n ]\n\n def initialize_options(self):\n self.exclude = None\n self.include = r'.*\\.proto$'\n self.protoc_command = None\n self.grpc_python_plugin_command = None\n\n def finalize_options(self):\n self.protoc_command = distutils.spawn.find_executable('protoc')\n self.grpc_python_plugin_command = distutils.spawn.find_executable(\n 'grpc_python_plugin')\n\n def run(self):\n if not self.protoc_command:\n raise CommandError('could not find protoc')\n if not self.grpc_python_plugin_command:\n raise CommandError('could not find grpc_python_plugin '\n '(protoc plugin for GRPC Python)')\n include_regex = re.compile(self.include)\n exclude_regex = re.compile(self.exclude) if self.exclude else None\n paths = []\n root_directory = PYTHON_STEM\n for walk_root, directories, filenames in os.walk(root_directory):\n for filename in filenames:\n path = os.path.join(walk_root, filename)\n if include_regex.match(path) and not (\n exclude_regex and exclude_regex.match(path)):\n paths.append(path)\n command = [\n self.protoc_command,\n 
'--plugin=protoc-gen-python-grpc={}'.format(\n self.grpc_python_plugin_command),\n '-I {}'.format(root_directory),\n '--python_out={}'.format(root_directory),\n '--python-grpc_out={}'.format(root_directory),\n ] + paths\n try:\n subprocess.check_output(' '.join(command), cwd=root_directory, shell=True,\n stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n raise CommandError('Command:\\n{}\\nMessage:\\n{}\\nOutput:\\n{}'.format(\n command, e.message, e.output))\n\n\nclass BuildProjectMetadata(setuptools.Command):\n \"\"\"Command to generate project metadata in a module.\"\"\"\n\n description = 'build grpcio project metadata files'\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n with open(os.path.join(PYTHON_STEM, 'grpc/_grpcio_metadata.py'), 'w') as module_file:\n module_file.write('__version__ = \"\"\"{}\"\"\"'.format(\n self.distribution.get_version()))\n\n\nclass BuildPy(build_py.build_py):\n \"\"\"Custom project build command.\"\"\"\n\n def run(self):\n try:\n self.run_command('build_proto_modules')\n except CommandError as error:\n sys.stderr.write('warning: %s\\n' % error.message)\n self.run_command('build_project_metadata')\n build_py.build_py.run(self)\n\n\nclass BuildExt(build_ext.build_ext):\n \"\"\"Custom build_ext command to enable compiler-specific flags.\"\"\"\n\n C_OPTIONS = {\n 'unix': ('-pthread', '-std=gnu99'),\n 'msvc': (),\n }\n LINK_OPTIONS = {}\n\n def build_extensions(self):\n compiler = self.compiler.compiler_type\n if compiler in BuildExt.C_OPTIONS:\n for extension in self.extensions:\n extension.extra_compile_args += list(BuildExt.C_OPTIONS[compiler])\n if compiler in BuildExt.LINK_OPTIONS:\n for extension in self.extensions:\n extension.extra_link_args += list(BuildExt.LINK_OPTIONS[compiler])\n build_ext.build_ext.build_extensions(self)\n\n\nclass Gather(setuptools.Command):\n \"\"\"Command to gather project dependencies.\"\"\"\n\n description = 'gather dependencies for grpcio'\n user_options = [\n ('test', 't', 'flag indicating to gather test dependencies'),\n ('install', 'i', 'flag indicating to gather install dependencies')\n ]\n\n def initialize_options(self):\n self.test = False\n self.install = False\n\n def finalize_options(self):\n # distutils requires this override.\n pass\n\n def run(self):\n if self.install and self.distribution.install_requires:\n self.distribution.fetch_build_eggs(self.distribution.install_requires)\n if self.test and self.distribution.tests_require:\n self.distribution.fetch_build_eggs(self.distribution.tests_require)\n\n\nclass RunInterop(test.test):\n\n description = 'run interop test client/server'\n user_options = [\n ('args=', 'a', 'pass-thru arguments for the client/server'),\n ('client', 'c', 'flag indicating to run the client'),\n ('server', 's', 'flag indicating to run the server')\n ]\n\n def initialize_options(self):\n self.args = ''\n self.client = False\n self.server = False\n\n def finalize_options(self):\n if self.client and self.server:\n raise DistutilsOptionError('you may only specify one of client or server')\n\n def run(self):\n if self.distribution.install_requires:\n self.distribution.fetch_build_eggs(self.distribution.install_requires)\n if self.distribution.tests_require:\n self.distribution.fetch_build_eggs(self.distribution.tests_require)\n if self.client:\n self.run_client()\n elif self.server:\n self.run_server()\n\n def run_server(self):\n # We import here to ensure that our setuptools parent has had a chance 
to\n # edit the Python system path.\n from tests.interop import server\n sys.argv[1:] = self.args.split()\n server.serve()\n\n def run_client(self):\n # We import here to ensure that our setuptools parent has had a chance to\n # edit the Python system path.\n from tests.interop import client\n sys.argv[1:] = self.args.split()\n client.test_interoperability()\n","sub_path":"src/python/grpcio/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":8714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"286778753","text":"from django.conf.urls import include, url\nfrom . import views\nfrom django.conf.urls.static import static\nfrom django.contrib.auth import views as auth_views\n\n\nurlpatterns = [\n url(r'^$', views.index_page, name=\"index_page\"),\n url(r'^institutions/$', views.all_institutions, name=\"all_institutions\"),\n url(r'^fonds/$', views.all_fonds, name=\"all_fonds\"),\n url(r'^institutions/(?P\\d+)/$', views.single_institution, name=\"single_institution\"),\n url(r'^fonds/(?P\\d+)/$', views.single_fond, name=\"single_fond\"),\n url(r'^series/(?P\\d+)/$', views.single_series, name=\"single_series\"),\n url(r'^add/$', views.add_remove, name=\"add_remove\"),\n url(r'^about/$', views.about, name=\"about\"),\n url(r'^contact/$', views.contact, name=\"contact\")\n]","sub_path":"archipoint/documents/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"163227188","text":"from conversorDeBases import ConversorDeBases\nfrom random import randint\n\nclass Chave:\n def __init__(self, coefs = None, base = None):\n self.coefs = coefs\n self.base = base\n\n def codLvl(self):\n return len(self.coefs)\n\n def eNumero(self, a):\n if a == \"0\" or a == \"1\" or a == \"2\" or a == \"3\" or a == \"4\" or a == \"5\" or a == \"6\" or a == \"7\" or a == \"8\" or a == \"9\":\n return True\n return False\n\n def geraString(self):\n if self.coefs == None or self.base == None:\n print(\"A chave não foi construida corretamente\")\n return None\n\n conversor = ConversorDeBases(self.base)\n\n def addLetra(string = \"\"):\n novaString = string\n novaString += conversor.dicionarioDecBase[randint(10, conversor.base - 3)]\n return novaString\n\n def addNumero(string, contador):\n novaString = string\n novaString += str(self.coefs[contador])\n return addLetra(novaString)\n\n string = addLetra()\n string += str(len(self.coefs))\n contadorCoefs = 0\n letrasEmSeq = 0\n n = randint(1, 3)\n while contadorCoefs < len(self.coefs):\n if (n == 1 and not(self.eNumero(string[-1]))) or letrasEmSeq > 3:\n string = addNumero(string, contadorCoefs)\n contadorCoefs += 1\n letrasEmSeq = 0\n else:\n string = addLetra(string)\n letrasEmSeq += 1\n n = randint(1, 3)\n string = addLetra(string)\n string += str(conversor.base)\n\n return string\n\n def extraiDados(self, msg):\n def pulaLetras(msg, contador):\n novoContador = contador\n while not self.eNumero(msg[novoContador]):\n novoContador += 1\n return novoContador\n\n nCoefs = \"\"\n coefs = []\n contador = 0\n\n contador = pulaLetras(msg, contador)\n while self.eNumero(msg[contador]):\n nCoefs += msg[contador]\n contador += 1\n while len(coefs) < int(nCoefs):\n if self.eNumero(msg[contador]):\n novoCoef = \"\"\n while self.eNumero(msg[contador]):\n novoCoef += msg[contador]\n contador += 1\n coefs.append(int(novoCoef))\n else:\n contador = pulaLetras(msg, contador)\n\n contador = 
pulaLetras(msg, contador)\n base = msg[contador] + msg[contador + 1] #Porque a base é entre 20 e 59\n contador += 3 #Pula a base e o espaço\n\n self.base = int(base)\n self.coefs = coefs\n\n return msg[contador:]\n","sub_path":"chave.py","file_name":"chave.py","file_ext":"py","file_size_in_byte":2686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"542853389","text":"#!/usr/bin/python\n\nimport sys\nfrom math import log\n\ne = 2.718281828\nPSEUDO = 0.00001\nmu = 0 #This can be tuned \n\n#sys.argv[1] is the CisBp PFM\n#sys.argv[2] is the Tab-Fasta \n#sys.argv[3] is the output name\n\n#Read in PFM And Convert to Energy Matrix\ndef PFM2ematrix(filename):\n\tematrix = []\n\twith open(filename, 'r') as pfm:\n\t\tfor line in pfm:\n\t\t\tif line[0] != 'P':\n\t\t\t\tmaxl = -1.0\n\t\t\t\tline = line.strip().split('\\t')[1:]\n\t\t\t\tline = map(float, line)\n\t\t\t\t\n\t\t\t\t#find max frequency\n\t\t\t\tif max(line) > maxl:\n\t\t\t\t\tmaxl = max(line) + PSEUDO\n\t\t\t\t\n\t\t\t\t#convert frequencies to energy scores\n\t\t\t\tA = -log(line[0]+ PSEUDO) + log(maxl)\n\t\t\t\tC = -log(line[1]+ PSEUDO) + log(maxl)\n\t\t\t\tG = -log(line[2]+ PSEUDO) + log(maxl)\n\t\t\t\tU = -log(line[3]+ PSEUDO) + log(maxl)\n\t\t\t\n\t\t\t\t#Add to matrix \n\t\t\t\tematrix.append([A,C,G,U])\n\treturn(ematrix)\n\ndef MaxAvEnergyScanner(energymatrix, fastafilehandle, outputname, Motifprefix):\n\twith open(fastafilehandle,'r') as infasta:\n\t\twith open(outputname,'w') as Output:\n\t\t\tHeader = ['Gene', (Motifprefix + '_MaxScore'),(Motifprefix + '_AverageScore'), (Motifprefix + '_SumScore')]\n\t\t\tOutput.write('\\t'.join(Header))\n\t\t\tOutput.write('\\n')\n\t\t\tBaseIndex = {'A': 0, 'C': 1, 'G': 2, 'U': 3}\n\t\t\tfor line in infasta:\n\t\t\t\tline = line.strip().split('\\t')\n\t\t\t\tname = line[0]\n\t\t\t\tseq = line[1].upper()\t\t\n\t\t\t\tpos = 0\n\t\t\t\t\n\t\t\t\tFwdScores = []\n\t\t\t\t\n\t\t\t\t#Score Forward Strand\n\t\t\t\twhile pos < (len(seq) - len(energymatrix) + 1):\n\t\t\t\t\tEi = 0\n\t\t\t\t\tsubseq = seq[pos:(pos + len(energymatrix))]\n\t\t\t\t\tfor subp in range(0, len(subseq)):\n\t\t\t\t\t\tbase = subseq[subp]\n\t\t\t\t\t\tEi += energymatrix[subp][BaseIndex[base]]\n\t\t\t\t\tsubseqscore = 1/(1+(e**(Ei-mu)))\n\t\t\t\t\tFwdScores.append(subseqscore)\n\t\t\t\t\tpos += 1\n\n\t\t\t\t#Output Info\n\t\t\t\t\n\t\t\t\toutline = [name, seq]\n\t\t\t\tOutput.write('\\t'.join(map(str, outline)))\n\t\t\t\tOutput.write('\\t')\n\t\t\t\tOutput.write(','.join(map(str, FwdScores)))\n\t\t\t\tOutput.write('\\n')\n\n\n#Run the whole thing\nMName = sys.argv[1].rstrip('.txt')\nematrix = PFM2ematrix(sys.argv[1])\nMaxAvEnergyScanner(ematrix, sys.argv[2], sys.argv[3], MName)\n","sub_path":"Lib/scan_ssms/MaxAvScanEnergy.py","file_name":"MaxAvScanEnergy.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"124123775","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport pygame\nfrom webbrowser import open as web_open\nfrom twitch import Twitch\nfrom old_client import Client\nfrom time import time\n\npygame.init()\n\nRESOLUTION = (600, 600)\n\nWINNING_ROWS = ((0, 1, 2), (3, 4, 5), (6, 7, 8),\n (0, 3, 6), (1, 4, 7), (2, 5, 8),\n (0, 4, 8), (2, 4, 6))\n\nARIAL100 = pygame.font.SysFont(\"Arial\", 100)\nARIAL80 = pygame.font.SysFont(\"Arial\", 80)\nARIAL50 = pygame.font.SysFont(\"Arial\", 50)\nARIAL40 = pygame.font.SysFont(\"Arial\", 40)\nARIAL30 = pygame.font.SysFont(\"Arial\", 
30)\n#colors\nBLACK = (0,0,0)\nWHITE = (255,255,255)\nRED = (200,0,0)\nLIGHT_RED = (255,51,0)\nGREEN = (0,204,0)\nLIME = (0,255,0)\nBLUE = (0,51,255)\nLIGHT_BLUE = (100,150,255)\nDARK_BLUE = (0,0,119)\nDARK_GRAY = (80,80,80)\nGRAY = (150,150,150)\nLIGHT_GRAY = (230,230,230)\nAQUA = (0,153,153)\nYELLOW = (255, 255, 0)\n\nVOID = \" \"\nPLAYER1 = \"X\"\nPLAYER2 = \"O\"\n\nEVALUATED = {}\n\nclass Game():\n def __init__(self, client = None, showAnimations = None, difficulty = None, optimiseAI = None):\n self.screen = pygame.display.set_mode(RESOLUTION, pygame.HWSURFACE)\n pygame.display.set_caption(\"Online mode\" if client is not None else \"Local mode\")\n self.clock = pygame.time.Clock()\n self.dontAskAgain = False\n self.showAnimations = showAnimations\n self.skipAnimations = False\n self.difficulty = difficulty\n self.optimiseAI = optimiseAI\n self.client = client\n if self.client is not None: self.start(self.client.turn()) #online mode\n else: self.start(True) #local mode, player starts\n \n def start(self, turn):\n self.screen.fill(BLACK)\n self.board = VOID * 9\n self.exit = False\n self.turn = turn\n if not self.dontAskAgain: self.replay = None\n self.loop()\n self.replay_menu()\n if self.replay:#rematch\n if self.client is not None: self.start(self.client.turn()) #online mode\n else: self.start(True) #local mode, player starts\n \n def winner(self, boardArg = None, check = True):\n board = boardArg if boardArg is not None else self.board\n if self.optimiseAI and check:\n if board not in EVALUATED: EVALUATED[board] = self.winner(board, False)\n return (EVALUATED[board])\n for row in WINNING_ROWS:\n if board[row[0]] is VOID: continue #ensuring its not a void only row\n if len(set(board[square] for square in row)) == 1: return board[row[0]]\n if VOID not in board: return 0\n return None\n \n def check(self):\n if self.winner() is not None: self.exit = True\n\n def minimax(self, board, playerTurn, alpha = (-31, None), beta = (31, None), depth = 5):\n if self.winner(board) == PLAYER2: return (+30 - depth, None) #pc wins\n elif self.winner(board) == PLAYER1: return (-30 - depth, None) #pc loses\n elif VOID not in board or depth < 1: return (0, None) #tie\n elif playerTurn:\n for a in range(9):\n if board[a] is VOID:\n value = self.minimax(board[:a] + PLAYER1 + board[a + 1:], not playerTurn, alpha, beta, depth - 1)[0]\n if value < beta[0]: beta = (value, a) #Player tries to get the less benefit for PC\n if beta[0] <= alpha[0]: break\n return beta\n else:\n for a in range(9):\n if board[a] is VOID:\n value = self.minimax(board[:a] + PLAYER2 + board[a + 1:], not playerTurn, alpha, beta, depth - 1)[0]\n if value > alpha[0]: alpha = (value, a) #PC tries to get the biggest benefit\n if beta[0] <= alpha[0]: break\n return alpha\n \n def ai(self):\n t0 = time()\n if self.board[4] == VOID: a = 4 #obvious movement\n elif self.difficulty < 1: #random movement\n a = set(i for i in range(9) if self.board[i] == VOID).pop()\n elif self.difficulty >= 1: #minimax algorithm with depth = difficulty (max 8)\n a = self.minimax(self.board, False, depth = self.difficulty)[1]\n print(\"The AI has spent {:.5f} ms\".format((time() - t0) * 1000))\n return a\n \n def replay_menu(self):\n winner = self.winner()\n if winner is None:\n self.replay = False\n return None\n self.turn = False\n self.exit = False\n self.skipAnimations = False\n pygame.display.flip()\n for i in range(91): #show winner and render_replay animation\n if self.exit or not self.showAnimations or self.skipAnimations: break\n self.clock.tick(24)\n 
self.update()\n #render\n self.screen.fill(DARK_GRAY)\n self.render_buttons()\n if i < 40:\n if i < 10: self.render_winner(winner, y = i * -5)\n elif i < 30: self.render_winner(winner, y = (i - 10) * 5 - 50)\n else: self.render_winner(winner, y = (i - 30) * -5 + 50)\n elif i < 91:\n self.render_winner(winner, x = (i - 40) * 10)\n if i >= 50 and not self.dontAskAgain: self.render_replay(x = (i - 60) * 5)\n pygame.display.flip()\n if not self.dontAskAgain:\n self.screen.fill(DARK_GRAY)\n self.render_buttons()\n self.render_replay()\n while not self.exit and self.replay is None: #ask for replay\n self.clock.tick(10)\n if self.update_replay_menu():\n self.render_replay()\n if self.client is not None:\n #wait until oponent answers\n self.screen.fill(WHITE)\n self.render_buttons()\n self.screen.blit(ARIAL50.render(\"Waiting for the oponent...\", True, BLACK, YELLOW), ((RESOLUTION[0] - ARIAL50.size(\"Waiting for the oponent...\")[0]) // 2, (RESOLUTION[1] - ARIAL50.size(\"Waiting for the oponent...\")[1]) // 2))\n pygame.display.flip()\n self.replay = self.client.replay(self.replay)\n \n self.exit = True\n self.screen.fill(WHITE)\n \n def update_replay_menu(self):\n render = False\n if self.client is not None:\n for i in self.client.server.receive():\n out = self.client.server.parse(i)\n if not out: continue\n if self.client.EXIT.match(out):\n raise SystemExit\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.exit = True\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n self.exit = True\n elif event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 1:\n x, y = event.pos\n if 280 <= y <= 340: #yes no buttons\n if 170 <= x <= 275: #yes\n self.replay = True\n elif 325 <= x <= 430: #no\n self.replay = False\n elif 370 <= y <= 390:\n if 170 <= x <= 190: #dont ask again\n self.dontAskAgain = not self.dontAskAgain\n render = True\n return render\n\n def update(self):\n render = False\n if self.client is None:\n if self.turn == False and self.winner() is None:\n n = self.ai()\n if self.board[n] is VOID:\n self.turn = True\n self.board = self.board[:n] + PLAYER2 + self.board[n + 1:]\n self.check()\n render = True\n else: raise Exception(\"The PC did an incorrect movement: {}\".format(n))\n else:\n for i in self.client.server.receive():\n out = self.client.server.parse(i)\n if not out: continue\n if not self.turn and self.client.CLICK.match(out):\n n = int(self.client.CLICK.match(out).group(1))\n if self.board[n] is VOID:\n self.turn = True\n self.board = self.board[:n] + PLAYER2 + self.board[n + 1:]\n self.check()\n if not self.exit: render = True\n if not pygame.display.get_active(): pygame.display.iconify()\n elif self.client.EXIT.match(out):\n raise SystemExit\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.exit = True\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n self.exit = True\n elif event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 1 and self.turn:\n n = event.pos[0] // 200 + event.pos[1] // 200 * 3\n if self.board[n] is VOID:\n self.turn = False\n if self.client is not None: self.client.server.msg(\"{} CLICK {}\".format(self.client.user, n))\n self.board = self.board[:n] + PLAYER1 + self.board[n + 1:]\n self.check()\n render = True\n elif event.button == 1:\n self.skipAnimations = True\n return render\n \n def render_winner(self, winner, x = 0, y = 0):\n if x > RESOLUTION[0] or y > RESOLUTION[1]: return None\n if winner == 0: text = \"It was a tie\"\n elif winner == PLAYER1: text = 
\"You won\"\n elif winner == PLAYER2: text = \"The oponent won\" if self.client else \"The AI won\"\n else: raise Exception(\"Unrecognised winner: {}\".format(winner))\n x = (RESOLUTION[0] - ARIAL80.size(text)[0]) // 2 + x\n y = (RESOLUTION[1] - ARIAL80.size(text)[1]) // 2 + y\n self.screen.blit(ARIAL80.render(text, True, BLACK, YELLOW), (x, y))\n \n def render_replay(self, x = 150, y = 200):\n pygame.draw.rect(self.screen, BLACK, (x - 5, y - 5, 310, 210))\n pygame.draw.rect(self.screen, WHITE, (x, y, 300, 200))\n self.screen.blit(ARIAL50.render(\"PLAY AGAIN?\", True, BLACK, WHITE), (x + (300 - ARIAL50.size(\"PLAY AGAIN?\")[0]) // 2, y + 20))\n pygame.draw.rect(self.screen, GREEN, (x + 20, y + 80, 105, 60))\n self.screen.blit(ARIAL50.render(\"YES\", True, BLACK, GREEN), (x + 30, y + 80))\n pygame.draw.rect(self.screen, RED, (x + 175, y + 80, 105, 60))\n self.screen.blit(ARIAL50.render(\"NO\", True, BLACK, RED), (x + 193, y + 80))\n pygame.draw.rect(self.screen, BLACK, (x + 20, y + 170, 20, 20), 2)\n if self.dontAskAgain: pygame.draw.rect(self.screen, BLUE, (x + 20, y + 170, 20, 20))\n self.screen.blit(ARIAL40.render(\"Don't ask again\", True, BLACK, WHITE), (x + 50, y + 150))\n pygame.display.flip()\n \n def render_buttons(self):\n for x in range(3):\n for y in range(3):\n if self.board[x + y * 3] is PLAYER1: color = BLUE\n elif self.board[x + y * 3] is PLAYER2: color = RED\n else: color = LIGHT_GRAY if self.turn else GRAY\n pygame.draw.rect(self.screen, color, (5 + x * 200, 5 + y * 200, 190, 190))\n \n def render(self):\n self.render_buttons()\n pygame.display.flip()\n \n def loop(self):\n self.render()\n while not self.exit:\n self.clock.tick(10) #fps\n if self.update(): self.render()\n\nclass Main():\n def __init__(self):\n self.screen = pygame.display.set_mode(RESOLUTION, pygame.HWSURFACE)\n pygame.display.set_caption(\"TIC TAC TOE\")\n self.link_icon = pygame.image.load(\"link-black-16.png\").convert_alpha()\n self.clock = pygame.time.Clock()\n self.showAnimations = True \n self.difficulty = 8 #maximum 8\n self.optimiseAI = True\n self.start()\n \n def start(self):\n self.screen.fill(WHITE)\n self.exit = False\n self.loop()\n \n def update(self):\n render = False\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.exit = True\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n self.exit = True\n elif event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 1:\n x, y = event.pos\n if y > 560 and x < 170:\n web_open(r\"https://github.com/PyBeans\")\n elif 100 <= x <= 500: #buttons\n if 200 <= y <= 280: #online game\n #wait until an opponent is found\n self.screen.fill(WHITE)\n self.screen.blit(ARIAL50.render(\"Waiting for an oponent...\", True, BLACK, YELLOW), ((RESOLUTION[0] - ARIAL50.size(\"Waiting for an oponent...\")[0]) // 2, (RESOLUTION[1] - ARIAL50.size(\"Waiting for an oponent...\")[1]) // 2))\n pygame.display.flip()\n game = Game(client = Client(), showAnimations = self.showAnimations)\n render = True\n pygame.display.set_caption(\"TIC TAC TOE\")\n elif 300 <= y <= 380: #local mode\n game = Game(showAnimations = self.showAnimations, difficulty = self.difficulty, optimiseAI = self.optimiseAI)\n render = True\n pygame.display.set_caption(\"TIC TAC TOE\")\n elif 400 <= y <= 480:\n self.settings_loop()\n return render\n \n def update_settings(self):\n render = False\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.exit_settings = True\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n 
self.exit_settings = True\n elif event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 1:\n x, y = event.pos\n if 100 <= x <= 500: #buttons\n if 200 <= y <= 280: #showAnimations\n self.showAnimations = not self.showAnimations\n render = True\n elif 300 <= y <= 380: #local mode\n self.optimiseAI = not self.optimiseAI\n render = True\n elif 400 <= y <= 480:\n self.difficulty = (self.difficulty + 1) % 9\n render = True\n return render\n \n def render(self):\n self.screen.fill(WHITE)\n buttonsColor = LIGHT_GRAY\n optionsColor = BLACK\n self.screen.blit(ARIAL100.render(\"TIC TAC TOE\", True, BLACK, WHITE), ((RESOLUTION[0] - ARIAL100.size(\"TIC TAC TOE\")[0]) // 2, 30))\n pygame.draw.rect(self.screen, buttonsColor, (100, 200, 400, 80))\n self.screen.blit(ARIAL40.render(\"PLAYER VS PLAYER\", True, optionsColor, buttonsColor), ((RESOLUTION[0] - ARIAL40.size(\"PLAYER VS PLAYER\")[0]) // 2, 220))\n pygame.draw.rect(self.screen, buttonsColor, (100, 300, 400, 80))\n self.screen.blit(ARIAL40.render(\"PLAYER VS AI\", True, optionsColor, buttonsColor), ((RESOLUTION[0] - ARIAL40.size(\"PLAYER VS AI\")[0]) // 2, 320))\n pygame.draw.rect(self.screen, buttonsColor, (100, 400, 400, 80))\n self.screen.blit(ARIAL50.render(\"SETTINGS\", True, optionsColor, buttonsColor), ((RESOLUTION[0] - ARIAL50.size(\"SETTINGS\")[0]) // 2, 410))\n self.screen.blit(ARIAL30.render(\"By PyBeans\", True, LIGHT_BLUE, WHITE), (10, 560)) #~140 pixels width\n self.screen.blit(self.link_icon, (150, 575))\n pygame.display.flip()\n \n def render_settings(self):\n self.screen.fill(WHITE)\n optionsColor = BLACK\n self.screen.blit(ARIAL100.render(\"SETTINGS\", True, BLACK, WHITE), ((RESOLUTION[0] - ARIAL100.size(\"SETTINGS\")[0]) // 2, 30))\n pygame.draw.rect(self.screen, LIGHT_BLUE if self.showAnimations else LIGHT_GRAY, (100, 200, 400, 80))\n self.screen.blit(ARIAL40.render(\"Animations: {}\".format(self.showAnimations), True, optionsColor,\n LIGHT_BLUE if self.showAnimations else LIGHT_GRAY),\n ((RESOLUTION[0] - ARIAL40.size(\"Animations: {}\".format(self.showAnimations))[0]) // 2, 220))\n pygame.draw.rect(self.screen, LIGHT_BLUE if self.optimiseAI else LIGHT_GRAY, (100, 300, 400, 80))\n self.screen.blit(ARIAL40.render(\"Optimised AI: {}\".format(self.optimiseAI), True, optionsColor,\n LIGHT_BLUE if self.optimiseAI else LIGHT_GRAY),\n ((RESOLUTION[0] - ARIAL40.size(\"Optimised AI: {}\".format(self.optimiseAI))[0]) // 2, 320))\n pygame.draw.rect(self.screen, LIGHT_GRAY, (100, 400, 400, 80))\n self.screen.blit(ARIAL40.render(\"Difficulty: {}\".format(self.difficulty), True, optionsColor, LIGHT_GRAY), ((RESOLUTION[0] - ARIAL40.size(\"Difficulty: {}\".format(self.difficulty))[0]) // 2, 420))\n self.screen.blit(ARIAL30.render(\"By PyBeans\", True, LIGHT_BLUE, WHITE), (10, 560)) #~140 pixels width\n self.screen.blit(self.link_icon, (150, 575))\n pygame.display.flip()\n \n def settings_loop(self):\n self.exit_settings = False\n pygame.display.set_caption(\"SETTINGS\")\n self.render_settings()\n while not self.exit_settings:\n self.clock.tick(10) #fps\n if self.update_settings(): self.render_settings()\n self.render()\n pygame.display.set_caption(\"TIC TAC TOE\")\n \n def loop(self):\n self.render()\n while not self.exit:\n self.clock.tick(10) #fps\n if self.update(): self.render()\n \nmain = Main()\n ","sub_path":"pygame_client.py","file_name":"pygame_client.py","file_ext":"py","file_size_in_byte":18086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"481270854","text":"import json, 
unittest\nfrom tests.utils import *\n\ndata = [\n\t{ # Valid insertion\n\t\t\"code\": 201,\n\t\t\"data\": {\"company_id\": \"tim\", \"product_id\": \"tim_10\", \"phone_number\": \"(011)940028922\"},\n\t\t\"response\": '{\"id\": 4, \"created_at\": \"20191104T050214.511381Z\", \"company_id\": \"tim\", \"product_id\": \"tim_10\", \"phone_number\": \"11940028922\", \"value\": 10.0}'\n\t},\n\t{ # Invalid company_id\n\t\t\"code\": 400,\n\t\t\"data\": {\"company_id\": 666},\n\t\t\"response\": '{\"code\": \"0002\", \"message\": \"The value \\'company_id\\' has a bad type.\"}'\n\t},\n\t{ # Invalid company_id\n\t\t\"code\": 400,\n\t\t\"data\": {\"company_id\": \"abraxas\", \"product_id\": \"tim_10\", \"phone_number\": \"(011)940028922\"},\n\t\t\"response\": '{\"code\": \"0100\", \"message\": \"The company of ID \\'abraxas\\' does not provide any phone recharging service. These are the available company IDs: \\'tim\\', \\'vivo\\', \\'claro\\', \\'oi\\'\"}'\n\t},\n\t{ # Invalid product_id\n\t\t\"code\": 400,\n\t\t\"data\": {\"company_id\": \"tim\", \"product_id\": 666},\n\t\t\"response\": '{\"code\": \"0002\", \"message\": \"The value \\'product_id\\' has a bad type.\"}'\n\t},\n\t{ # Invalid product_id\n\t\t\"code\": 400,\n\t\t\"data\": {\"company_id\": \"tim\", \"product_id\": \"tim_40\"},\n\t\t\"response\": '{\"code\": \"0101\", \"message\": \"The recharge of ID \\'tim_40\\' has not been found in the services provided by the company of ID \\'tim\\'. These are the available recharge IDs for the specified company: \\'tim_05\\', \\'tim_08\\', \\'tim_10\\', \\'tim_15\\', \\'tim_20\\'\"}'\n\t},\n\t{ # Invalid phone_number\n\t\t\"code\": 400,\n\t\t\"data\": {\"company_id\": \"tim\", \"product_id\": \"tim_10\", \"phone_number\": 696969},\n\t\t\"response\": '{\"code\": \"0002\", \"message\": \"The value \\'phone_number\\' has a bad type.\"}'\n\t},\n\t{ # Invalid phone_number\n\t\t\"code\": 400,\n\t\t\"data\": {\"company_id\": \"tim\", \"product_id\": \"tim_10\", \"phone_number\": \"119696969\"},\n\t\t\"response\": '{\"code\": \"0102\", \"message\": \"The phone number \\'119696969\\' is invalid.\"}'\n\t},\n\t{ # Formated phone_number\n\t\t\"code\": 201,\n\t\t\"data\": {\"company_id\": \"tim\", \"product_id\": \"tim_10\", \"phone_number\": \"(011) 9 4002-8922\"},\n\t\t\"response\": '{\"id\": 5, \"created_at\": \"20191104T051219.689995Z\", \"company_id\": \"tim\", \"product_id\": \"tim_10\", \"phone_number\": \"11940028922\", \"value\": 10.0}'\n\t}\n]\n\nclass TestPostRecharge(unittest.TestCase):\n\tdef setUp(self):\n\t\tpopulate_company_data()\n\t\tpopulate_recharges_data()\n\n\tdef tearDown(self):\n\t\tpass\n\n\tdef test_status(self):\n\t\t# No body\n\t\tself.assertEqual(http_request(\"POST\", \"phone/product\").status_code, 400)\n\n\t\t# Body\n\t\tfor t in data:\n\t\t\tself.assertEqual(http_request(\"POST\", \"phone/recharge\", data = t[\"data\"]).status_code, t[\"code\"])\n\n\tdef test_post_response(self):\n\t\t# No body\n\t\tresponse = '{\"message\": \"The browser (or proxy) sent a request that this server could not understand.\"}\\n'\n\t\tself.assertEqual(http_request(\"POST\", \"phone/recharge\").text, response)\n\n\t\t# Body\n\t\tfor t in data:\n\t\t\t# hack + a shame. 
Sorry;\n\t\t\tresponse = http_request(\"POST\", \"phone/recharge\", data = t[\"data\"])\n\t\t\tif (response.status_code == 201):\n\t\t\t\tt[\"response\"] = json.loads(t[\"response\"])\n\t\t\t\tt[\"response\"][\"created_at\"] = json.loads(response.text)[\"created_at\"]\n\t\t\t\tt[\"response\"] = json.dumps(t[\"response\"])\n\n\t\t\tself.assertEqual(response.text, t[\"response\"])","sub_path":"tests/test_phone_post_recharge.py","file_name":"test_phone_post_recharge.py","file_ext":"py","file_size_in_byte":3129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"627127762","text":"\n\nfrom xai.brain.wordbase.adjectives._dull import _DULL\n\n#class header\nclass _DULLEST(_DULL, ):\n\tdef __init__(self,): \n\t\t_DULL.__init__(self)\n\t\tself.name = \"DULLEST\"\n\t\tself.specie = 'adjectives'\n\t\tself.basic = \"dull\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/adjectives/_dullest.py","file_name":"_dullest.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"428492039","text":"import copy\r\n\r\n# graph = {'a': {'b':10, 'c':3}, 'b':{'c':1, 'd':2}, 'c':{'b':4, 'd':8, 'e':2}, 'd':{'e':7}, 'e':{'d':9}}\r\n\r\nINFINITY = 99999999\r\n\r\ndef shortestPath(graph, start, destination, costKey='weight'):\r\n shortest_distance = {}\r\n predecessor = {}\r\n unseenNodes = graph.copy() # shallow copy (entries are popped as nodes are visited)\r\n path = []\r\n for node in unseenNodes:\r\n shortest_distance[node] = INFINITY\r\n shortest_distance[start] = 0\r\n\r\n while unseenNodes: # until every node has been visited\r\n minNode = None\r\n for node in unseenNodes:\r\n if minNode is None:\r\n minNode = node\r\n elif shortest_distance[node] < shortest_distance[minNode]:\r\n minNode = node\r\n # we have a focus node,\r\n for childNode, link in unseenNodes[minNode].items():\r\n if link[costKey] + shortest_distance[minNode] < shortest_distance[childNode]:\r\n shortest_distance[childNode] = link[costKey] + shortest_distance[minNode]\r\n predecessor[childNode] = minNode\r\n unseenNodes.pop(minNode)\r\n # print(shortest_distance)\r\n \r\n currentNode = destination\r\n while currentNode != start:\r\n try:\r\n path.insert(0, currentNode)\r\n currentNode = predecessor[currentNode]\r\n except KeyError:\r\n # print('Path not reachable')\r\n break\r\n path.insert(0,start)\r\n if shortest_distance[destination] != INFINITY:\r\n # print('shortest distance: ' + str(shortest_distance[destination]))\r\n # print('Path: ' + str(path))\r\n return path, shortest_distance[destination]\r\n return None, None\r\n# shortestPath(graph, 'a', 'e')\r\n\r\ndef KshortestPath(graph, start, destination, costKey='weight'):\r\n agraph = copy.deepcopy(graph) # deep copy (will be modified)\r\n A = [] # Array of tuples\r\n while True:\r\n path, distance = shortestPath(agraph, start, destination, costKey)\r\n if path is None:\r\n break\r\n A.append((path, distance))\r\n # set cost of the selected path to INFINITY.\r\n for i in range(len(path) - 1):\r\n agraph[path[i]][path[i+1]][costKey] = INFINITY\r\n return A","sub_path":"dijkstra.py","file_name":"dijkstra.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"438336107","text":"__author__ = 'espin'\n\n#######################################################################################################################\n# 
Dependencies\n#######################################################################################################################\nfrom model.pragmatix import IcdChange\nimport operator\nfrom datetime import datetime\n\n#######################################################################################################################\n# CLASS: Ontologies (pragmatix)\n#######################################################################################################################\n\nclass Ontologies(object):\n # ONTOLOGIES = { 'bro':{'instance':'bro', 'users':6, 'actions':13, 'changes':2489},\n # 'icd':{'instance':'icd2013-08-29_04h02m', 'users':110, 'actions':33, 'changes':439192},\n # 'ictm':{'instance':'ictm2013-08-29_04h02m', 'users':27, 'actions':16, 'changes':66885},\n # 'opl':{'instance':'opl', 'users':4, 'actions':11, 'changes':2997},\n # 'ncit':{'instance':'ncit', 'users':15, 'actions':7, 'changes':37494},\n # 'nci':{'instance':'nci100400', 'users':0, 'actions':0, 'changes':0},\n # }\n ONTOLOGIES = { 'bro':{'instance':'bro', 'days':22+1, 'dini':'2010-02-12 00:00:00'},\n 'icd':{'instance':'icd2013-08-29_04h02m', 'days':1380+1, 'dini':'2009-11-18 00:00:00'},\n 'ictm':{'instance':'ictm2013-08-29_04h02m', 'days':896+1, 'dini':'2011-02-02 00:00:00'},\n 'opl':{'instance':'opl', 'days':106+1, 'dini':'2011-06-09 00:00:00'},\n 'ncit':{'instance':'ncit', 'days':1175+1, 'dini':'2010-06-01 00:00:00'},\n 'nci':{'instance':'nci100400', 'days':0+1, 'dini':'0000-00-00 00:00:00'},\n }\n SORTBY_TIME = 0\n SORTBY_AUTHORTIME = 1\n TIMESCALE = {'hours':24,\n 'weekdays':7,\n 'weekdayhours':168\n }\n\n def __init__(self, onto, ttype=None, granularity=None, R=None):\n self.onto = onto\n self.ttype = ttype\n self.granularity = granularity\n self.R = R\n self.users = None\n self.nusers = -1\n self.actionskind = None\n self.nactionskind = -1\n self.trails = None\n self.ntrails = -1\n\n def getUsersFromDB(self):\n clauses = self._getDefaultClauses()\n return IcdChange.select(IcdChange.author).distinct().where(\n reduce(operator.and_, clauses)\n )\n\n # return IcdChange.select(IcdChange.author).distinct().where(\n # (IcdChange._instance == self.ONTOLOGIES[self.onto]['instance']) &\n # (IcdChange._instance.contains('_')) &\n # (IcdChange.timestamp.month != 0) &\n # (IcdChange.action == 'Composite_Change') &\n # (IcdChange.kind != '')\n # )\n\n def getActionsFromDB(self):\n clauses = self._getDefaultClauses()\n return IcdChange.select(IcdChange.kind).distinct().where(\n reduce(operator.and_, clauses)\n )\n\n def getChangesSortedBy(self, sortedby):\n clauses = self._getDefaultClauses()\n return IcdChange.select().order_by(IcdChange.timestamp if sortedby == self.SORTBY_TIME else IcdChange.author, IcdChange.timestamp if sortedby == self.SORTBY_AUTHORTIME else None).where(\n reduce(operator.and_, clauses)\n )\n\n # return IcdChange.select().order_by(IcdChange.timestamp if sortedby == self.SORTBY_TIME else IcdChange.author, IcdChange.timestamp if sortedby == self.SORTBY_AUTHORTIME else None).where(\n # (IcdChange._instance == self.ONTOLOGIES[self.onto]['instance']) &\n # (IcdChange._instance.contains('_')) &\n # (IcdChange.timestamp.month != 0) &\n # (IcdChange.action == 'Composite_Change') &\n # (IcdChange.kind != '')\n # )\n\n def setUsers(self, obj):\n self.users = obj\n self.nusers = len(self.users)\n\n def setActions(self, obj):\n self.actionskind = obj\n self.nactionskind = len(self.actionskind)\n\n def setTrails(self, obj):\n self.trails = obj\n self.ntrails = len(self.trails)\n\n def 
getShape(self):\n if self.ttype == 'changes':\n # user, hour, action\n if self.granularity == 'lifetimedays':\n return (self.nusers, self.ONTOLOGIES[self.onto]['days'], self.nactionskind)\n if self.granularity == 'lifetimehours':\n return (self.nusers, self.ONTOLOGIES[self.onto]['days']*24, self.nactionskind)\n return (self.nusers, self.TIMESCALE[self.granularity], self.nactionskind)\n\n if self.ttype == 'steps':\n # user step1 step2\n return (self.nusers, self.nactionskind, self.nactionskind)\n\n def getDIni(self):\n return datetime.strptime(self.ONTOLOGIES[self.onto]['dini'], '%Y-%m-%d %H:%M:%S')\n\n def _getDefaultClauses(self):\n clauses = [\n (IcdChange._instance == self.ONTOLOGIES[self.onto]['instance']),\n (IcdChange.timestamp.month != 0),\n (IcdChange.kind != '')\n ]\n\n if self.onto == 'nci':\n clauses.append((IcdChange.action != 'Composite_Change'))\n clauses.append(~(IcdChange.context.startswith('BatchEdit')))\n\n elif self.onto == 'bro':\n clauses.append((IcdChange.action == 'Composite_Change'))\n\n elif self.onto == 'opl':\n clauses.append((IcdChange.action == 'Composite_Change'))\n clauses.append(~(IcdChange.context.startswith('Remove template slot from class')))\n\n else:\n clauses.append((IcdChange.action == 'Composite_Change'))\n clauses.append(~(IcdChange.context.startswith('Automatic')))\n\n return clauses\n\n # #relevant_filter = ~Q(action=\"Export\") & ~Q(context__startswith=\"Automatic\")\n","sub_path":"ontologies.py","file_name":"ontologies.py","file_ext":"py","file_size_in_byte":5927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"324688679","text":"import networkx as nx\nimport pandas as pd\n\ndata = pd.read_csv(r'D:/Project/pyCharmProjects/DSTGCN/data/NewYork_Edgelist_test.csv', header=0)\nedges_as_nodes = data.groupby('EDGE').agg({'XCoord': 'mean',\n 'YCoord': 'mean',\n 'START_NODE': 'nunique',\n 'END_NODE': 'nunique',\n 'LENGTH': 'mean'})\nedges_as_nodes['NUM_NODE'] = edges_as_nodes['START_NODE']\nedges_as_nodes.drop(['START_NODE', 'END_NODE'], axis=1, inplace=True)\n\ng = nx.DiGraph()\ng.add_nodes_from(edges_as_nodes.to_dict('index').items())\n\nadjacency_as_edges = set()\nedges = data.drop(['XCoord', 'YCoord', 'LENGTH'], axis=1)\nadjacency = pd.merge(edges, edges, left_on='START_NODE', right_on='END_NODE')[['EDGE_x', 'EDGE_y']]\nadjacency = adjacency[adjacency['EDGE_x'] != adjacency['EDGE_y']]\nadjacency_as_edges = adjacency_as_edges.union(\n set(map(lambda record: (record['EDGE_x'], record['EDGE_y']), adjacency.to_dict('records'))))\n\nadjacency = pd.merge(edges, edges, left_on='END_NODE', right_on='START_NODE')[['EDGE_x', 'EDGE_y']]\nadjacency = adjacency[adjacency['EDGE_x'] != adjacency['EDGE_y']]\nadjacency_as_edges = adjacency_as_edges.union(\n set(map(lambda record: (record['EDGE_x'], record['EDGE_y']), adjacency.to_dict('records'))))\n\ng.add_edges_from(adjacency_as_edges)\ng = nx.convert_node_labels_to_integers(g)\nnx.write_gpickle(g, 'D:/Project/pyCharmProjects/DSTGCN/data/newyork_roadnet_test.gpickle')\n\n# To read the road network in Beijing:\n# nx.read_gpickle('data/beijing_roadnet.gpickle')\n\n\n# poi_divide_num = get_attribute(\"poi_divide_num\")\n# self.poi_tree_nodes = spatial.KDTree(list(zip(self.poi['longitude'], self.poi['latitude'])))\n# # select the POIs within 1110m / poi_divide_num\n# _, nodes_id = self.poi_tree_nodes.query([n_lng, n_lat], k=None,\n# distance_upper_bound=0.01 / poi_divide_num)\n# selected_poi = self.poi.loc[nodes_id]\n# poi_features = 
# poi_features = selected_poi.groupby('poi_type').count()['longitude'] \\\n#     .reindex(list(range(1, 21)), fill_value=0).to_list()\n# spatial_features.append(poi_features + [node_number, road_len])\n","sub_path":"preprocessing_data/process_road.py","file_name":"process_road.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"444691319","text":"\n\n# class header\nclass _AFFECT():\n\tdef __init__(self):\n\t\tself.name = \"AFFECT\"\n\t\tself.definitions = [u'to have an influence on someone or something, or to cause a change in someone or something: ', u'to pretend to feel or think something: ', u'to start to wear or do something in order to make people admire or respect you: ']\n\n\t\tself.parents = []\n\t\tself.children = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_affect.py","file_name":"_affect.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"529342950","text":"from random import randrange\r\n\r\n\r\ndef insertion_sort_ascendente(lista):\r\n    for i in range(len(lista)):\r\n        h = i\r\n        while h > 0 and lista[h] < lista[h - 1]:\r\n            aux = lista[h]\r\n            lista[h] = lista[h - 1]\r\n            lista[h - 1] = aux\r\n            h -= 1\r\n\r\n\r\nn = int(input(\"enter n: \"))\r\nm = int(input(\"enter m: \"))\r\nmatriz = []\r\n\r\n\r\nfor i in range(1, m + 1):\r\n    lista = [randrange(1, n) for x in range(1, n + 1)]\r\n    insertion_sort_ascendente(lista)\r\n    matriz.append(lista)\r\n\r\nprint(matriz)","sub_path":"Ciclo 1/Examen 3/21listas_ordenadas.py","file_name":"21listas_ordenadas.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"341424686","text":"\"\"\"\nCopyright 2021 Data Science and ML Geosciences Group\n\nThis module includes plotting convenience functions for the competition.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nfrom matplotlib.pyplot import subplots\n\n\ndef plot_slice(x, y, data, figsize=(5, 5), dpi=200):\n    fig, ax = subplots(1, 1, figsize=figsize, dpi=dpi)\n\n    ax.pcolormesh(x, y, data.T, shading='auto')\n\n    ax.invert_yaxis()\n\n    return fig, ax\n","sub_path":"seismic_inversion_2021/openvds/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"129058380","text":"import sys  # required by sys.exc_info() in the error path\n\n\ndef simple_app(environ, start_response):\n    headers = [('Content-Type', 'text/plain')]\n    start_response('200 OK', headers)\n\n    def content():\n        # We start streaming data just fine.\n        yield 'The dwarves of yore made mighty spells,'\n        yield 'While hammers fell like ringing bells'\n\n        # Then the back-end fails!\n        try:\n            1/0\n        except:\n            start_response('500 Error', headers, sys.exc_info())\n            return\n\n        # So the rest of the response data is not available.\n        yield 'In places deep, where dark things sleep,'\n        yield 'In hollow halls beneath the fells.'\n\n    return content()\n","sub_path":"adapters/python_wsgi/test/fixtures/chunked.py","file_name":"chunked.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"592782445","text":"import numpy as np\r\nfrom configurations import *\r\n\r\nclass STLModel:\r\n    def __init__(self,filepath,term):\r\n        self.term=term\r\n        self.term.addProcessMessage('STL file recovery')\r\n        self.term.addSubProcessMessage('STL file opening')\r\n        file=open(filepath)\r\n        self.term.addSubProcessMessage('STL file reading')\r\n        self.file_content=file.read().split('\\n')\r\n        file.close()\r\n\r\n
        self.vertex,self.facet_normal=self.extract_facets_data()\r\n        self.facets_number=len(self.vertex)\r\n\r\n        if self.facets_number==0:\r\n            self.term.addErrorMessage('Invalid STL : the STL file must be in ascii format')\r\n        else:\r\n            self.vertex_flatten=self.vertex.reshape(3*self.facets_number,3)\r\n            X,Y,Z=self.vertex_flatten[:,0],self.vertex_flatten[:,1],self.vertex_flatten[:,2]\r\n            self.x_range=[min(X),max(X)]\r\n            self.y_range=[min(Y),max(Y)]\r\n            self.bottom_ref=min(Z)\r\n            self.height=max(Z)-self.bottom_ref\r\n\r\n        self.weight=DEFAULT_OBJECT_MASS*GRAVITY\r\n\r\n        self.sort_facets()\r\n\r\n
    def setMass(self,mass):\r\n        \"\"\"\r\n        Set the mass of the object, then compute its weight\r\n        :param mass: mass of the object\r\n        \"\"\"\r\n        self.term.addInformativeMessage('Mass set to '+str(mass)+' kg')\r\n        self.weight=mass*GRAVITY\r\n\r\n
    def extract_facets_data(self):\r\n        \"\"\"\r\n        Extract the essential facet information from the STL file\r\n        :return: numpy array of triangle vertex coordinates, numpy array of normal vectors\r\n        \"\"\"\r\n        self.term.addProcessMessage('Extraction of the different facets data')\r\n        self.term.addSubProcessMessage('Extraction of vertex coordinates')\r\n        self.term.addSubProcessMessage('Extraction of normal vectors')\r\n        data_types=['facet normal','vertex']\r\n\r\n
        vertex=[]\r\n        facet_normal=[]\r\n        for line in self.file_content:\r\n            if data_types[0] in line:\r\n                facet_normal.append([float(i) for i in line.split(' ')[-3:]])\r\n                vertex.append([])\r\n            elif data_types[1] in line:\r\n                vertex[-1].append([float(i) for i in line.split(' ')[-3:]])\r\n        self.term.addSuccessMessage('Extraction of the different facets data complete')\r\n        return np.array(vertex),np.array(facet_normal)\r\n    \r\n
    def translateZ(self,delta):\r\n        \"\"\"\r\n        Translate all the facets of the 3d model vertically\r\n        :param delta: translation value (>0 upwards, <0 downwards)\r\n        \"\"\"\r\n        for vertex in self.vertex:\r\n            vertex[:,2]+=delta\r\n        self.bottom_ref+=delta\r\n        self.term.addInformativeMessage('3d model vertically translated by '+str(delta)+' m')\r\n        self.sort_facets()\r\n\r\n
    def get_coordinates_intersection_point_fluid_leved_facet_side(self,A,B):\r\n        \"\"\"\r\n        A and B are two points on opposite sides of the water level.\r\n        Compute the coordinates of point I, the intersection of segment AB with the plane z=0\r\n        :param A: coordinates of point A\r\n        :param B: coordinates of point B\r\n        :return: coordinates of point I\r\n        \"\"\"\r\n        Ax,Ay,Az=A\r\n        Bx,By,Bz=B\r\n        k=-Az/(Bz-Az)\r\n        return np.array([k*(Bx-Ax)+Ax,k*(By-Ay)+Ay,0])\r\n\r\n
    def sort_facets(self):\r\n        \"\"\"\r\n        Sort the facets into 2 groups, emerged and submerged, according to the position of their vertices\r\n        :return: numpy arrays of the emerged facets, the submerged facets, their normals and their count\r\n        \"\"\"\r\n        self.term.addProcessMessage('Separation of emerged and submerged facets')\r\n        emerged_facets=[]\r\n\r\n        submerged_facets=[]\r\n        submerged_facet_normal=[]\r\n\r\n
        for i in range(self.facets_number):\r\n            facet=self.vertex[i]\r\n            z=facet[:,2]\r\n            true_values=np.count_nonzero(z>=0)\r\n            false_values=np.count_nonzero(z<=0)\r\n            if true_values==3:\r\n                emerged_facets.append(facet)\r\n            elif false_values==3:\r\n                submerged_facets.append(facet)\r\n                submerged_facet_normal.append(self.facet_normal[i])\r\n            else:\r\n                sup=np.array([v for v in facet if v[2]>0])\r\n                inf=np.array([v for v in facet if v[2]<0])\r\n\r\n                sup_number=len(sup)\r\n                inf_number=len(inf)\r\n\r\n
                if sup_number==1 and inf_number==1: # the plane z=0 passes through a facet vertex, so only 1 intersection with a side\r\n                    A,B=sup[0],inf[0]\r\n                    C=np.array([v for v in facet if v[2]==0])[0]\r\n                    I=self.get_coordinates_intersection_point_fluid_leved_facet_side(A,B)\r\n                    emerged_facets.append(np.array([I,C,A]))\r\n                    submerged_facets.append(np.array([I,C,B]))\r\n                    submerged_facet_normal.append(self.facet_normal[i])\r\n
                elif sup_number==2 and inf_number==1: # triangle with 2 vertices above the plane z=0 and 1 below\r\n                    A,B=sup\r\n                    C=inf[0]\r\n                    I=self.get_coordinates_intersection_point_fluid_leved_facet_side(A,C)\r\n                    J=self.get_coordinates_intersection_point_fluid_leved_facet_side(B,C)\r\n                    emerged_facets.append(np.array([I,J,A]))\r\n                    emerged_facets.append(np.array([A,B,J]))\r\n                    submerged_facets.append(np.array([I,J,C]))\r\n                    submerged_facet_normal.append(self.facet_normal[i])\r\n
                else: # facet with 2 vertices below z=0 and 1 above\r\n                    A, B = inf\r\n                    C = sup[0]\r\n                    I = self.get_coordinates_intersection_point_fluid_leved_facet_side(A,C)\r\n                    J = self.get_coordinates_intersection_point_fluid_leved_facet_side(B,C)\r\n                    emerged_facets.append(np.array([I,J,C]))\r\n                    submerged_facets.append(np.array([I,J,A]))\r\n                    submerged_facet_normal.append(self.facet_normal[i])\r\n                    submerged_facets.append(np.array([A, B, J]))\r\n                    submerged_facet_normal.append(self.facet_normal[i])\r\n
        self.emerged_facets,self.submerged_facets,self.submerged_facet_normal=np.array(emerged_facets),np.array(submerged_facets),np.array(submerged_facet_normal)\r\n        self.submerged_facets_number=len(submerged_facets)\r\n        self.term.addSuccessMessage('Separation of emerged and submerged facets complete')","sub_path":"code/STLModel.py","file_name":"STLModel.py","file_ext":"py","file_size_in_byte":6556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"417541511","text":"import argparse\nimport sys\nimport csv\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nfrom rnn_models import rnn_fig2\nfrom target_response_curves import adapt_pulse_double_exp as adapt_pulse\n\nFLAGS = None\nN_CASES = 3\nGs = 2 # num of dynamic nodes (genes)\nGin = 1 # num of input signal\nTIMESTEP = 0.2\ntime_points = 40\n\n\n
def main(_):\n\n    # folders for output\n    if not os.path.exists(repr(FLAGS.output_name)):\n        os.mkdir(repr(FLAGS.output_name))\n    np.savetxt(repr(FLAGS.output_name) +'/links.csv', np.ones([Gin+Gs,Gs]), fmt='%.4f', delimiter=',')\n    if not os.path.exists(repr(FLAGS.output_name)+'/train'):\n        os.mkdir(repr(FLAGS.output_name)+'/train')\n    if not os.path.exists(repr(FLAGS.output_name)+'/savenet'):\n        os.mkdir(repr(FLAGS.output_name)+'/savenet')\n\n    # Create session\n    sess = tf.InteractiveSession()\n\n    # read allowed regulation links\n    # links[i,j] = 0 or 1: blocked or allowed 
regulation link\n links = np.genfromtxt(repr(FLAGS.output_name)+'/links.csv', delimiter=',', dtype='float32') #[Gin+Gs,Gs]\n \n # Define Loss\n STIMULI = tf.placeholder(np.float32, [N_CASES,time_points,Gin])\n TARGET = tf.placeholder(np.float32, [N_CASES,time_points])\n LINKS = tf.placeholder(np.float32, [Gin+Gs,Gs])\n NN_traj = rnn_fig2(STIMULI, LINKS, TIMESTEP) #output shape:[N_CASES,time_points,Gs]\n \n loss0_0 = tf.reduce_sum((NN_traj[0,:,:]-NN_traj[0,0,:])**2)\n loss0_1 = tf.reduce_sum((NN_traj[0,:,0]-TARGET[0,:])**2) #Case-0\n loss1 = tf.reduce_sum((NN_traj[1,:,0]-TARGET[1,:])**2) #Case-1\n loss2 = tf.reduce_sum((NN_traj[2,:,0]-TARGET[2,:])**2) #Case-2\n loss = loss0_0 + loss0_1 + loss1 + loss2 # all\n\n # Define optimizer\n train_step = tf.train.RMSPropOptimizer(0.001).minimize(tf.sqrt(loss))\n\n tf.global_variables_initializer().run()\n # saving and loading networks\n saver = tf.train.Saver()\n checkpoint = tf.train.get_checkpoint_state(repr(FLAGS.output_name)+'/savenet')\n '''\n # restart from saved model if necessary\n if checkpoint and checkpoint.model_checkpoint_path:\n saver.restore(sess, checkpoint.model_checkpoint_path)\n '''\n\n # Train\n loss_writer = csv.writer(open(repr(FLAGS.output_name)+'/loss.csv', 'w'))\n\n # Case-0, fix-point without stimuli\n stimuli_0 = 0.1*np.ones([time_points,Gin],'float32')\n target_0 = 0.4*np.ones([time_points],'float32')\n\n # Case-1, high peak responce with high stimuli\n stimuli_1 = np.zeros([time_points,Gin],'float32')\n stimuli_1[:,0] = 1.0\n target_1 = adapt_pulse(time_points, height=1.0)+0.4\n\n for i in range(2001):\n # Case-2, random stimuli strength\n stimuli_level = np.random.rand()\n stimuli_2 = 0.1*np.ones([time_points,Gin],'float32')\n stimuli_2[:,0] = stimuli_level\n target_2 = adapt_pulse(time_points, height=stimuli_level)+0.4\n \n # stack all train cases\n stimuli_all = np.stack([stimuli_0,stimuli_1,stimuli_2],axis=0) #[N_CASES,time_points,Gin]\n target_all = np.stack([target_0,target_1,target_2],axis=0) #[N_CASES,time_points]\n \n # apply gradient\n sess.run(train_step, feed_dict={STIMULI:stimuli_all,LINKS:links, TARGET:target_all})\n\n # Test\n if i%100 == 0:\n monitor = sess.run([loss0_1, loss1, loss2], feed_dict={STIMULI:stimuli_all,LINKS:links,TARGET:target_all})\n print('step%g, loss0:%.4g, loss1:%.4g, loss2:%.4g,'%(i, monitor[0], monitor[1], monitor[2]))\n loss_writer.writerow([i, monitor[0], monitor[1], monitor[2]])\n\n if i%500 == 0:\n xs = np.linspace(0.0,(time_points-1), time_points)\n Xs_test = sess.run(NN_traj, feed_dict={STIMULI:stimuli_all,LINKS:links}) #[N_CASES,time_points,Gs] \n\n for n in range(N_CASES):\n plt.subplot(3,2,n+1)\n plt.plot(xs, Xs_test[n,:,:],'-', xs, target_all[n,:], ':k')\n plt.axis([0,time_points-1,-0.1,1])\n\n plt.savefig(repr(FLAGS.output_name)+'/train/step'+repr(i))\n plt.close()\n\n if i%2000 == 1999:\n saver.save(sess, repr(FLAGS.output_name)+'/savenet/dyn-network' , global_step = i)\n\n sess.close()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--output_name', type=int, default=1, help='***')\n FLAGS, unparsed = parser.parse_known_args()\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n","sub_path":"fig1-2_adaptation/fig2_1_train.py","file_name":"fig2_1_train.py","file_ext":"py","file_size_in_byte":4223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"516705329","text":"import time\nstart = time.time()\n\n# input the .osm data\nmapFile = 'brs_BS2_8DA.osm'\n#mapFile = 
'map.osm'\ninput_type = 'file'\n\n\nintersection_coordinates = []\nnode_coordinates = []\n\n# parse .xml file with element tree\nimport xml.etree.ElementTree as ET\nif input_type == 'file':\n\ttree = ET.parse(mapFile)\n\troot = tree.getroot()\n\tchildren = root.getchildren()\nelif input_type == 'str':\n\ttree = ET.fromstring(mapFile)\n\tchildren = tree.getchildren()\n\n
counter = {} # for intersections\nlat = {} # for ways\nlon = {} # for ways\ninters_num = 0\nroad_num = 0\n\nfor child in children:\n\tif child.tag == 'bounds':\n\t\tnode_coordinates.append('minlat:' + child.attrib['minlat'])\n\t\tnode_coordinates.append('minlon:' + child.attrib['minlon'])\n\t\tnode_coordinates.append('maxlat:' + child.attrib['maxlat'])\n\t\tnode_coordinates.append('maxlon:' + child.attrib['maxlon'])\n\n
\tif child.tag == 'way':\n\t\t# Check if the way represents a \"highway (road)\".\n\t\t# If the current way is not a road,\n\t\t# continue without checking any nodes\n\t\troad = False\n\t\troad_types = ('motorway', 'trunk', 'primary', 'secondary', 'tertiary', 'residential', 'service', 'unclassified', \n\t\t'road', 'footway', 'path', 'pedestrian', 'track', 'living_street')\n\t\t#'motorway_link', 'trunk_link', 'primary_link', 'secondary_link', 'tertiary_link')\n\t\tfor item in child:\n\t\t\tif item.tag == 'tag' and item.attrib['k'] == 'highway' and item.attrib['v'] in road_types:\n\t\t\t\troad = True\n\t\t\t\troad_num += 1\n\t\t\t\tnode_coordinates.append(' ') # blank line between ways\n\t\t\t\tnode_coordinates.append('way id: ' + child.attrib['id']) # delineate start of new \"way\"\n\n
\t\tif not road:\n\t\t\tcontinue\n\n\t\tfor item in child:\n\t\t\tif item.tag == 'nd':\n\t\t\t\tnd_ref = item.attrib['ref']\n\t\t\t\tcoordinate = lat[nd_ref] + ',' + lon[nd_ref]\n\t\t\t\tnode_coordinates.append(coordinate)\n\t\t\t\tif nd_ref not in counter:\n\t\t\t\t\tcounter[nd_ref] = 0\n\t\t\t\tcounter[nd_ref] += 1\n\n
\telif child.tag == 'node':\n\t\t# store lat and lon coordinates\n\t\tnd_ref = child.attrib['id']\n\t\tlat[nd_ref] = child.attrib['lat']\n\t\tlon[nd_ref] = child.attrib['lon']\n\n
# Find nodes that are shared with more than one way, which\n# might correspond to intersections\n# note: the filter function differs between python 2 and 3!\n# python 2:\n#intersections = filter(lambda x: counter[x] > 1, counter)\n# python 3:\nintersections_filter = filter(lambda x: counter[x] > 1, counter)\nintersections = list(intersections_filter)\n\n
# Extract intersection coordinates\n# You can plot the results using this url:\n# http://www.darrinward.com/lat-long/\nfor child in children:\n\tif child.tag == 'node' and child.attrib['id'] in intersections:\n\t\tcoordinate = child.attrib['lat'] + ',' + child.attrib['lon']\n\t\tinters_num += 1\n\t\tintersection_coordinates.append(coordinate)\n\n\n
# save and print the results\n# save intersections\nwriteFile = 'intersections.txt'\nf = open(writeFile, 'w')\nfor inters in intersection_coordinates:\n\tf.write(inters + '\\n')\nf.close()\n\n# save ways\nwriteFile = 'roads.txt'\nf = open(writeFile, 'w')\nfor roads in node_coordinates:\n\tf.write(roads + '\\n')\nf.close()\n\n
print('It took', time.time()-start, 'seconds to parse the OSM map.')\nprint('There are', road_num, 'roads in this area.')\nprint('There are', inters_num, 'intersections in this area.')\nprint('This area is bounded by:', node_coordinates[0], node_coordinates[1], node_coordinates[2], 
node_coordinates[3])\n\n","sub_path":"map_parsing_v2/parse_script.py","file_name":"parse_script.py","file_ext":"py","file_size_in_byte":3335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"103434179","text":"#!/usr/bin/env python\n'''\n Copyright 2012 Root the Box\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n'''\n\nimport sys\nimport logging\n\nfrom optparse import OptionParser\nfrom datetime import datetime\nfrom libs.ConsoleColors import *\n\n\n__version__ = 'Root the Box - v0.3.0'\ncurrent_time = lambda: str(datetime.now()).split(' ')[1].split('.')[0]\n\n\ndef serve(options, *args, **kwargs):\n ''' Starts the application '''\n from libs.ConfigManager import ConfigManager # Sets up logging\n from handlers import start_server\n print(INFO+'%s : Starting application ...' % current_time())\n start_server()\n\n\ndef create(options, *args, **kwargs):\n ''' Creates/bootstraps the database '''\n from libs.ConfigManager import ConfigManager # Sets up logging\n from models import create_tables, boot_strap\n print(INFO+'%s : Creating the database ...' % current_time())\n create_tables()\n print(INFO+'%s : Bootstrapping the database ...' % current_time())\n boot_strap()\n\n\ndef recovery(options, *args, **kwargs):\n ''' Starts the recovery console '''\n from libs.ConfigManager import ConfigManager # Sets up logging\n from setup.recovery import RecoveryConsole\n print(INFO+'%s : Starting recovery console ...' 
% current_time())\n console = RecoveryConsole()\n try:\n console.cmdloop()\n except KeyboardInterrupt:\n print(INFO + \"Have a nice day!\")\n\ndef setup(options, *args, **kwargs):\n ''' Imports a setup file '''\n from libs.ConfigManager import ConfigManager # Sets up logging\n print(INFO+\"%s : Running default setup file 'setup/game.py' ...\" % current_time())\n try:\n from setup import game\n except Exception as error:\n logging.exception(\"Game setup script raised an exception!\")\n print(WARN+\"Setup Error: Game script failed with \"+str(error))\n sys.exit()\n print(INFO+\"Setup file completed successfully.\")\n\n### Main\nif __name__ == '__main__':\n if not 1 < len(sys.argv):\n sys.argv.append('-h')\n parser = OptionParser(\n usage=bold+\"rootthebox.py\"+W+\" \",\n version=__version__,\n )\n parser.add_option(\n \"-c\", \"--create-tables\",\n action=\"callback\",\n callback=create,\n help=\"create and initialize database tables (run once)\"\n )\n parser.add_option(\n \"-s\", \"--start\",\n action=\"callback\",\n callback=serve,\n help=\"start the server\"\n )\n parser.add_option(\n \"-g\", \"--game-script\",\n action=\"callback\",\n callback=setup,\n help=\"run a game setup script (setup/game.py)\"\n )\n parser.add_option(\n \"-r\", \"--recovery\",\n action=\"callback\",\n callback=recovery,\n help=\"start the admin recovery console\"\n )\n parser.parse_args()\n","sub_path":"rootthebox.py","file_name":"rootthebox.py","file_ext":"py","file_size_in_byte":3285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"92141890","text":"import requests \nfrom bs4 import BeautifulSoup\nimport requests\nimport sys\nclass Facebook():\n def __init__(self,username = sys.argv[len(sys.argv)-1]):\n self.username = username \n def scrap(self):\n try:\n URL = f\"https://facebook.com/{self.username}\"\n respond = requests.get(URL)\n if respond.status_code == 404:\n print(\"Could Not Connect or User does not exist!\\n\")\n if respond.status_code == 200:\n soup = BeautifulSoup(respond.content,\"html.parser\")\n facebook_name = soup.find(\"span\",{\n \"id\":\"fb-timeline-cover-name\"\n })\n profile_image = soup.find(\"img\",{\n \"class\" : \"_11kf img\"\n })\n current_city = soup.find(\"li\",{\n \"id\" : \"current_city\"\n })\n clg = soup.find(\"div\",{\n \"class\" : \"_2lzr _50f5 _50f7\"\n })\n \n return {\n \"profile_image\" : profile_image['src'],\n \"current_city\" : current_city.text.strip(),\n \"Education\" : clg.text.strip()\n }\n except Exception as ex:\n print(ex) \n\nfb = Facebook(\"shaikhsajid1111\")\nprint(fb.scrap()) ","sub_path":"facebook.py","file_name":"facebook.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"606643039","text":"'''\nProduction Job class. 
Used to define new productions.\n\nMostly similar to :mod:`~ILCDIRAC.Interfaces.API.NewInterface.UserJob`, but\ncannot be (and should not be) used like the\n:mod:`~ILCDIRAC.Interfaces.API.NewInterface.UserJob` class.\n\n:author: Stephane Poss\n:author: Remi Ete\n:author: Ching Bon Lam\n\n'''\n\nfrom __future__ import print_function\nimport os\nimport shutil\n\nfrom collections import defaultdict\nfrom decimal import Decimal\n\nfrom DIRAC import S_OK, S_ERROR, gLogger\nfrom DIRAC.Core.Utilities.ReturnValues import returnSingleResult\nfrom DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations\nfrom DIRAC.Core.Security.ProxyInfo import getProxyInfo\nfrom DIRAC.Core.Workflow.Module import ModuleDefinition\nfrom DIRAC.Core.Workflow.Step import StepDefinition\nfrom DIRAC.Resources.Catalog.FileCatalogClient import FileCatalogClient\nfrom DIRAC.TransformationSystem.Client.TransformationClient import TransformationClient\n\nfrom ILCDIRAC.ILCTransformationSystem.Client.Transformation import Transformation\nfrom ILCDIRAC.Interfaces.API.NewInterface.Job import Job\nfrom ILCDIRAC.Interfaces.Utilities import JobHelpers\n\n__RCSID__ = \"$Id$\"\n\nLOG = gLogger.getSubLogger(__name__)\n\nclass ProductionJob(Job): #pylint: disable=too-many-public-methods, too-many-instance-attributes\n \"\"\" Production job class. Suitable for CLIC studies. Need to sub class and overload for other clients.\n \"\"\"\n def __init__(self, script = None):\n super(ProductionJob, self).__init__( script )\n self.prodVersion = __RCSID__\n self.dryrun = False\n self.created = False\n self.checked = False\n self.call_finalization = False\n self.finalsdict = {}\n self.transfid = 0\n self.type = 'Production'\n self.csSection = '/Production/Defaults'\n self.ops = Operations()\n self.fc = FileCatalogClient()\n self.trc = TransformationClient()\n self.defaultProdID = '12345'\n self.defaultProdJobID = '12345'\n self.jobFileGroupSize = 1\n self.nbtasks = 1\n self.slicesize =0\n self.basename = ''\n self.basepath = self.ops.getValue('/Production/CLIC/BasePath','/ilc/prod/clic/')\n self.evttype = ''\n self.datatype = ''\n self.energycat = ''\n self.detector = ''\n self.currtrans = None\n self.description = ''\n\n self.finalpaths = []\n self.finalMetaDict = defaultdict( dict )\n self.prodMetaDict = {}\n self.finalMetaDictNonSearch = {}\n self.metadict_external = {}\n self.outputStorage = ''\n\n self.proxyinfo = getProxyInfo()\n\n self.inputdataquery = False\n self.inputBKSelection = {}\n self.plugin = 'Standard'\n self.prodGroup = ''\n\n self.prodTypes = ['MCGeneration', 'MCSimulation', 'Test', 'MCReconstruction',\n 'MCReconstruction_Overlay', 'Merge', 'Split',\n 'MCGeneration_ILD',\n 'MCSimulation_ILD',\n 'MCReconstruction_ILD',\n 'MCReconstruction_Overlay_ILD',\n 'Split_ILD'\n ]\n self.prodparameters = {}\n self.prodparameters['NbInputFiles'] = 1\n self.prodparameters['nbevts'] = 0 \n #self.prodparameters[\"SWPackages\"] = ''\n self._addParameter(self.workflow, \"IS_PROD\", 'JDL', True, \"This job is a production job\")\n if not script:\n self.__setDefaults()\n\n self._recBasePaths = {}\n self.maxFCFoldersToCheck = 100000\n\n #############################################################################\n def __setDefaults(self):\n \"\"\"Sets some default parameters.\n \"\"\"\n self.setPlatform(self.ops.getValue('%s/Platform' % (self.csSection), 'x86_64-slc5-gcc43-opt'))\n self.setCPUTime('300000')\n self.setLogLevel('verbose')\n self.setJobGroup('@{PRODUCTION_ID}')\n\n #version control\n 
self._setParameter('productionVersion', 'string', self.prodVersion, 'ProdAPIVersion')\n\n #General workflow parameters\n self._setParameter('PRODUCTION_ID', 'string', self.defaultProdID.zfill(8), 'ProductionID')\n self._setParameter('JOB_ID', 'string', self.defaultProdJobID.zfill(8), 'ProductionJobID')\n self._setParameter('Priority', 'JDL', '1', 'Priority')\n self._setParameter('emailAddress', 'string', 'ilcdirac-support@cern.ch', 'CrashEmailAddress')\n\n def _setParameter(self, name, parameterType, parameterValue, description):\n \"\"\"Set parameters checking in CS in case some defaults need to be changed.\n \"\"\"\n if self.ops.getValue('%s/%s' % (self.csSection, name), ''):\n LOG.debug('Setting %s from CS defaults = %s' % (name, self.ops.getValue('%s/%s' % (self.csSection, name))))\n self._addParameter(self.workflow, name, parameterType, self.ops.getValue('%s/%s' % (self.csSection, name), \n 'default'), description)\n else:\n LOG.debug('Setting parameter %s = %s' % (name, parameterValue))\n self._addParameter(self.workflow, name, parameterType, parameterValue, description)\n \n def setConfig(self,version):\n \"\"\" Define the Configuration package to obtain\n \"\"\"\n appName = 'ILDConfig'\n self._addSoftware(appName.lower(), version)\n self.prodparameters['ILDConfigVersion'] = version\n self._addParameter( self.workflow, 'ILDConfigPackage', 'JDL', appName+version, 'ILDConfig package' )\n return S_OK() \n\n\n def setConfigPackage(self, appName, version):\n \"\"\"Define the config package to obtain.\n\n Adds Config package to workflow execution, and sets production parameter.\n See :func:`ILCDIRAC.Interfaces.API.NewInterface.Job.Job.setConfigPackage`\n\n :param str appName: name of the ConfigPackage, e.g. 'ClicConfig'\n :param str version: version of the ConfigPackage\n \"\"\"\n super(ProductionJob, self).setConfigPackage(appName, version)\n self.prodparameters[appName + 'Version'] = version\n return S_OK()\n\n def setDryRun(self, run):\n \"\"\" In case one wants to get all the info as if the prod was being submitted\n \"\"\"\n self.dryrun = run\n \n #############################################################################\n def setProdGroup(self, group):\n \"\"\" Sets a user defined tag for the production as appears on the monitoring page\n \"\"\"\n self.prodGroup = group\n #############################################################################\n def setProdPlugin(self, plugin):\n \"\"\" Sets the plugin to be used to creating the production jobs\n \"\"\"\n self.plugin = plugin\n \n #############################################################################\n def setJobFileGroupSize(self, files):\n \"\"\" Sets the number of files to be input to each job created.\n \"\"\"\n if self.checked:\n return self._reportError(\"This input is needed at the beginning of the production definition: it is \\\n needed for total number of evts.\")\n self.jobFileGroupSize = files\n self.prodparameters['NbInputFiles'] = files\n \n def setNbEvtsPerSlice(self,nbevts):\n \"\"\" Define the number of events in a slice.\n \"\"\"\n self.slicesize = nbevts\n \n #############################################################################\n def setProdType(self, prodType):\n \"\"\"Set prod type.\n \"\"\"\n if prodType not in self.prodTypes:\n raise TypeError('Prod must be one of %s' % (', '.join(self.prodTypes)))\n self.setType(prodType)\n #############################################################################\n def setWorkflowName(self, name):\n \"\"\"Set workflow name.\n \"\"\"\n 
self.workflow.setName(name)\n self.name = name\n\n #############################################################################\n def setWorkflowDescription(self, desc):\n \"\"\"Set workflow name.\n \"\"\"\n self.workflow.setDescription(desc)\n \n #############################################################################\n def createWorkflow(self):\n \"\"\" Create XML for local testing.\n \"\"\"\n name = '%s.xml' % self.name\n if os.path.exists(name):\n shutil.move(name,'%s.backup' % name)\n self.workflow.toXMLFile(name)\n \n #############################################################################\n def setOutputSE(self, outputse):\n \"\"\" Define where the output file(s) will go. \n \"\"\"\n self.outputStorage = outputse\n return S_OK()\n \n #############################################################################\n def setInputDataQuery(self, metadata):\n \"\"\" Define the input data query needed\n \"\"\"\n\n retMetaKey = self._checkMetaKeys( metadata.keys() )\n if not retMetaKey['OK']:\n return retMetaKey\n LOG.notice('InputMetaQuery: %s' % metadata)\n\n if \"ProdID\" not in metadata:\n return self._reportError(\"Input metadata dictionary must contain at least a key 'ProdID' as reference\")\n retDirs = self._checkFindDirectories( metadata )\n if not retDirs['OK']:\n return retDirs\n dirs = retDirs['Value'].values()\n for mdir in dirs[:self.maxFCFoldersToCheck]:\n LOG.notice(\"Directory: %s\" % mdir)\n res = self.fc.getDirectoryUserMetadata(mdir)\n if not res['OK']:\n return self._reportError(\"Error looking up the catalog for directory metadata\")\n compatmeta = res['Value']\n compatmeta.update(metadata)\n\n if 'EvtType' in compatmeta:\n self.evttype = JobHelpers.getValue( compatmeta['EvtType'], str, basestring )\n else:\n return self._reportError(\"EvtType is not in the metadata, it has to be!\")\n\n if 'NumberOfEvents' in compatmeta:\n self.nbevts = JobHelpers.getValue( compatmeta['NumberOfEvents'], int, None )\n\n self.basename = self.evttype\n LOG.notice(\"MetaData: %s\" % compatmeta)\n LOG.notice(\"MetaData: %s\" % metadata)\n if \"Energy\" in compatmeta:\n self.energycat = JobHelpers.getValue( compatmeta[\"Energy\"], str, (int, long, basestring) )\n \n if self.energycat.count(\"tev\"):\n self.energy = Decimal(\"1000.\") * Decimal(self.energycat.split(\"tev\")[0])\n elif self.energycat.count(\"gev\"):\n self.energy = Decimal(\"1.\") * Decimal(self.energycat.split(\"gev\")[0])\n else:\n self.energy = Decimal(\"1.\") * Decimal(self.energycat)\n gendata = False\n if 'Datatype' in compatmeta:\n self.datatype = JobHelpers.getValue( compatmeta['Datatype'], str, basestring )\n if self.datatype == 'gen':\n gendata = True\n if \"DetectorType\" in compatmeta and not gendata:\n self.detector = JobHelpers.getValue( compatmeta[\"DetectorType\"], str, basestring )\n self.inputBKSelection = metadata\n self.inputdataquery = True\n \n self.prodparameters['nbevts'] = self.nbevts \n self.prodparameters[\"FCInputQuery\"] = self.inputBKSelection\n\n return S_OK()\n\n def setDescription(self, desc):\n \"\"\" Set the production's description\n \n :param str desc: Description\n \"\"\"\n self.description = desc\n return S_OK()\n\n def getBasePath(self):\n \"\"\" Return the base path. 
Updated by :any:`setInputDataQuery`.\n \"\"\"\n return self.basepath\n \n def addFinalization(self, uploadData = False, registerData = False, uploadLog = False, sendFailover=False):\n \"\"\" Add finalization step\n\n :param bool uploadData: Upload or not the data to the storage\n :param bool uploadLog: Upload log file to storage (currently only available for admins, thus add them to OutputSandbox)\n :param bool sendFailover: Send Failover requests, and declare files as processed or unused in transfDB\n :param bool registerData: Register data in the file catalog\n \"\"\"\n #TODO: Do the registration only once, instead of once for each job\n \n self.call_finalization = True\n self.finalsdict['uploadData'] = uploadData\n self.finalsdict['registerData'] = registerData\n self.finalsdict['uploadLog'] = uploadLog\n self.finalsdict['sendFailover'] = sendFailover\n\n def _addRealFinalization(self): \n \"\"\" This is called at creation: now that the workflow is created at the last minute,\n we need to add this also at the last minute\n \"\"\"\n importLine = 'from ILCDIRAC.Workflow.Modules. import '\n \n dataUpload = ModuleDefinition('UploadOutputData')\n dataUpload.setDescription('Uploads the output data')\n self._addParameter(dataUpload, 'enable', 'bool', False, 'EnableFlag')\n body = importLine.replace('', 'UploadOutputData')\n dataUpload.setBody(body)\n\n failoverRequest = ModuleDefinition('FailoverRequest')\n failoverRequest.setDescription('Sends any failover requests')\n self._addParameter(failoverRequest, 'enable', 'bool', False, 'EnableFlag')\n body = importLine.replace('', 'FailoverRequest')\n failoverRequest.setBody(body)\n\n registerdata = ModuleDefinition('RegisterOutputData')\n registerdata.setDescription('Module to add in the metadata catalog the relevant info about the files')\n self._addParameter(registerdata, 'enable', 'bool', False, 'EnableFlag')\n body = importLine.replace('', 'RegisterOutputData')\n registerdata.setBody(body)\n\n logUpload = ModuleDefinition('UploadLogFile')\n logUpload.setDescription('Uploads the output log files')\n self._addParameter(logUpload, 'enable', 'bool', False, 'EnableFlag')\n body = importLine.replace('', 'UploadLogFile')\n logUpload.setBody(body)\n\n errorReport = ModuleDefinition('ReportErrors')\n errorReport.setDescription('Reports errors at the end')\n body = importLine.replace('', 'ReportErrors')\n errorReport.setBody(body)\n\n finalization = StepDefinition('Job_Finalization')\n finalization.addModule(dataUpload)\n up = finalization.createModuleInstance('UploadOutputData', 'dataUpload')\n up.setValue(\"enable\", self.finalsdict['uploadData'])\n\n finalization.addModule(registerdata)\n ro = finalization.createModuleInstance('RegisterOutputData', 'RegisterOutputData')\n ro.setValue(\"enable\", self.finalsdict['registerData'])\n\n finalization.addModule(logUpload)\n ul = finalization.createModuleInstance('UploadLogFile', 'logUpload')\n ul.setValue(\"enable\", self.finalsdict['uploadLog'])\n\n finalization.addModule(failoverRequest)\n fr = finalization.createModuleInstance('FailoverRequest', 'failoverRequest')\n fr.setValue(\"enable\", self.finalsdict['sendFailover'])\n\n finalization.addModule(errorReport)\n fr = finalization.createModuleInstance('ReportErrors', 'reportErrors')\n\n self.workflow.addStep(finalization)\n self.workflow.createStepInstance('Job_Finalization', 'finalization')\n\n return S_OK()\n \n def createProduction(self, name = None):\n \"\"\" Create production.\n \"\"\"\n if not self.dryrun:\n if not self.proxyinfo['OK']:\n return 
S_ERROR('Not allowed to create production, you need a production proxy.')\n if 'groupProperties' not in self.proxyinfo['Value']:\n return S_ERROR('Could not determine groupProperties, you do not have the right proxy.')\n groupProperties = self.proxyinfo['Value']['groupProperties']\n if 'ProductionManagement' not in groupProperties:\n return S_ERROR('Not allowed to create production, you need a production proxy.')\n\n if self.created:\n return S_ERROR(\"Production already created.\")\n\n ###We need to add the applications to the workflow\n res = self._addToWorkflow()\n if not res['OK']:\n return res\n if self.call_finalization:\n self._addRealFinalization()\n \n workflowName = self.workflow.getName()\n fileName = '%s.xml' % workflowName\n LOG.verbose('Workflow XML file name is:', '%s' % fileName)\n try:\n self.createWorkflow()\n except Exception as x:\n LOG.error(\"Exception creating workflow\", repr(x))\n return S_ERROR('Could not create workflow')\n with open(fileName, 'r') as oFile:\n workflowXML = oFile.read()\n if not name:\n name = workflowName\n\n res = self.trc.getTransformationStats(name)\n if res['OK']:\n return self._reportError(\"Transformation with name %s already exists! Cannot proceed.\" % name)\n \n ###Create Tranformation\n Trans = Transformation()\n Trans.setTransformationName(name)\n Trans.setDescription(self.description)\n Trans.setLongDescription(self.description)\n Trans.setType(self.type)\n self.prodparameters['JobType'] = self.type\n Trans.setPlugin(self.plugin)\n if self.inputdataquery:\n Trans.setGroupSize(self.jobFileGroupSize)\n Trans.setTransformationGroup(self.prodGroup)\n Trans.setBody(workflowXML)\n if not self.slicesize:\n Trans.setEventsPerTask(self.jobFileGroupSize * self.nbevts)\n else:\n Trans.setEventsPerTask(self.slicesize)\n self.currtrans = Trans\n if self.dryrun:\n LOG.notice('Would create prod called', name)\n self.transfid = 12345\n else: \n res = Trans.addTransformation()\n if not res['OK']:\n LOG.error(res['Message'])\n return res\n self.transfid = Trans.getTransformationID()['Value']\n\n if self.inputBKSelection:\n res = self.applyInputDataQuery()\n if not self.dryrun:\n Trans.setAgentType(\"Automatic\") \n Trans.setStatus(\"Active\")\n \n finals = []\n for finalpaths in self.finalpaths:\n finalpaths = finalpaths.rstrip(\"/\")\n finalpaths += \"/\"+str(self.transfid).zfill(8)\n finals.append(finalpaths)\n self.finalMetaDict[finalpaths].update( { \"ProdID\": self.transfid } )\n self.finalMetaDict[finalpaths].update( self.prodMetaDict )\n # if 'ILDConfigVersion' in self.prodparameters:\n # self.finalMetaDict[finalpaths].update({\"ILDConfig\":self.prodparameters['ILDConfigVersion']})\n \n if self.nbevts:\n self.finalMetaDict[finalpaths].update({'NumberOfEvents' : self.jobFileGroupSize * self.nbevts})\n self.finalpaths = finals\n self.created = True\n \n return S_OK()\n\n def setNbOfTasks(self, nbtasks):\n \"\"\" Define the number of tasks you want. 
Useful for generation jobs.\n    \"\"\"\n    if not self.currtrans:\n      LOG.error(\"No transformation defined earlier\")\n      return S_ERROR(\"No transformation defined\")\n    if self.inputBKSelection and self.plugin not in ['Limited', 'SlicedLimited']:\n      LOG.error('Metadata selection activated, should not specify the number of jobs')\n      return S_ERROR()\n    self.nbtasks = nbtasks\n    self.currtrans.setMaxNumberOfTasks(self.nbtasks) #pylint: disable=E1101\n    return S_OK()\n\n
  def applyInputDataQuery(self, metadata = None, prodid = None):\n    \"\"\" Tell the production to update itself using the metadata query specified, i.e. submit new jobs if new files\n    are added that correspond to the same query.\n    \"\"\"\n    if not self.transfid and self.currtrans:\n      self.transfid = self.currtrans.getTransformationID()['Value'] #pylint: disable=E1101\n    elif prodid:\n      self.transfid = prodid\n    if not self.transfid:\n      LOG.error(\"No transformation defined earlier\")\n      return S_ERROR(\"No transformation defined\")\n    if metadata:\n      self.inputBKSelection = metadata\n\n
    if not self.dryrun:\n      res = self.trc.createTransformationInputDataQuery(self.transfid, self.inputBKSelection)\n      if not res['OK']:\n        return res\n    else:\n      LOG.notice(\"Would use %s as metadata query for production\" % str(self.inputBKSelection))\n    return S_OK()\n\n
  def addMetadataToFinalFiles(self, metadict):\n    \"\"\" Add additional non-query metadata\n    \"\"\"\n    self.metadict_external = metadict\n\n    return S_OK()\n\n
  def finalizeProd(self, prodid = None, prodinfo = None):\n    \"\"\" Finalize definition: submit to Transformation service and register metadata\n    \"\"\"\n    currtrans = 0\n    if self.currtrans:\n      if not self.dryrun:\n        currtrans = self.currtrans.getTransformationID()['Value'] #pylint: disable=E1101\n      else:\n        currtrans = 12345\n    if prodid:\n      currtrans = prodid\n    if not currtrans:\n      LOG.error(\"No transformation defined earlier\")\n      return S_ERROR(\"No transformation defined\")\n    if prodinfo:\n      self.prodparameters = prodinfo\n\n
    info = []\n    info.append('%s Production %s has the following parameters:\\n' % (self.prodparameters['JobType'], currtrans))\n    if \"Process\" in self.prodparameters:\n      info.append('- Process %s' % self.prodparameters['Process'])\n    if \"Energy\" in self.prodparameters:\n      info.append('- Energy %s GeV' % self.prodparameters[\"Energy\"])\n\n
    if not self.slicesize:\n      self.prodparameters['nbevts'] = self.jobFileGroupSize * self.nbevts\n    else:\n      self.prodparameters['nbevts'] = self.slicesize\n    if self.prodparameters['nbevts']:\n      info.append(\"- %s events per job\" % (self.prodparameters['nbevts']))\n    if self.prodparameters.get('lumi', False):\n      info.append(' corresponding to a luminosity %s fb' % (self.prodparameters['lumi'] * \\\n                                                            self.prodparameters['NbInputFiles']))\n
    if 'FCInputQuery' in self.prodparameters:\n      info.append('Using InputDataQuery:')\n      for key, val in self.prodparameters['FCInputQuery'].iteritems():\n        info.append('    %s = %s' % (key, val))\n    if \"SWPackages\" in self.prodparameters:\n      info.append('- SW packages %s' % self.prodparameters[\"SWPackages\"])\n    if \"SoftwareTag\" in self.prodparameters:\n      info.append('- SW tags %s' % self.prodparameters[\"SoftwareTag\"])\n    if \"ILDConfigVersion\" in self.prodparameters:\n      info.append('- ILDConfig %s' % self.prodparameters['ILDConfigVersion'])\n\n
    if 'ClicConfigVersion' in self.prodparameters:\n      info.append('- ClicConfig %s' % self.prodparameters['ClicConfigVersion'])\n\n    if 'extraCLIArguments' in self.prodparameters:\n      info.append('- ExtraCLIArguments %s' % 
self.prodparameters['extraCLIArguments'] )\n\n # as this is the very last call all applications are registered, so all software packages are known\n #add them the the metadata registration\n for finalpath in self.finalpaths:\n if finalpath not in self.finalMetaDictNonSearch:\n self.finalMetaDictNonSearch[finalpath] = {}\n if \"SWPackages\" in self.prodparameters:\n self.finalMetaDictNonSearch[finalpath][\"SWPackages\"] = self.prodparameters[\"SWPackages\"]\n \n if self.metadict_external:\n self.finalMetaDictNonSearch[finalpath].update(self.metadict_external) \n \n info.append('- Registered metadata: ')\n for path, metadata in sorted( self.finalMetaDict.iteritems() ):\n info.append(' %s = %s' % (path, metadata))\n info.append('- Registered non searchable metadata: ')\n for path, metadata in sorted( self.finalMetaDictNonSearch.iteritems() ):\n info.append(' %s = %s' % (path, metadata))\n\n infoString = '\\n'.join(info)\n self.prodparameters['DetailedInfo'] = infoString\n \n for name, val in self.prodparameters.iteritems():\n result = self._setProdParameter(currtrans, name, val)\n if not result['OK']:\n LOG.error(result['Message'])\n\n res = self._registerMetadata()\n if not res['OK']:\n LOG.error('Could not register the following directories:', res['Message'])\n return res\n return S_OK()\n\n def _createDirectory(self, path, failed, mode=0o775):\n \"\"\"Create the directory at path if it does not exist.\n\n :param str path: path to check\n :param list failed: list of failed paths\n :param int mode: mode to set for directory\n \"\"\"\n exists = returnSingleResult(self.fc.isDirectory(path))\n if exists['OK'] and exists['Value']:\n LOG.verbose('Directory already exists:', path)\n return S_OK()\n result = returnSingleResult(self.fc.createDirectory(path))\n if not result['OK']:\n LOG.error('Failed to create directory:', '%s: %s' % (path, result['Message']))\n failed[path].append(result['Message'])\n return S_ERROR()\n LOG.verbose('Successfully created directory:', path)\n res = self.fc.changePathMode({path: mode}, False)\n if not res['OK']:\n LOG.error(res['Message'])\n failed[path].append(res['Message'])\n return S_ERROR()\n LOG.verbose('Successfully changed mode:', path)\n return S_OK()\n\n def _checkMetadata(self, path, metaCopy):\n \"\"\"Get existing metadata, if it is the same do not set it again, otherwise return error.\"\"\"\n existingMetadata = self.fc.getDirectoryUserMetadata(path.rstrip('/'))\n if not existingMetadata['OK']:\n return S_OK()\n failure = False\n for key, value in existingMetadata['Value'].iteritems():\n if key in metaCopy and metaCopy[key] != value:\n LOG.error('Metadata values for folder %s disagree for key %s: Existing(%r), new(%r)' %\n (path, key, value, metaCopy[key]))\n failure = True\n elif key in metaCopy and metaCopy[key] == value:\n LOG.verbose('Meta entry is unchanged', '%s = %s' % (key, value))\n metaCopy.pop(key, None)\n if failure:\n return S_ERROR('Error when setting new metadata, already existing metadata disagrees!')\n return S_OK()\n\n def _registerMetadata(self):\n \"\"\"Set metadata for given folders.\n\n Register path and metadata before the production actually runs. 
This allows for the definition\n of the full chain in 1 go.\n \"\"\"\n prevent_registration = self.ops.getValue('Production/PreventMetadataRegistration', False)\n\n if self.dryrun or prevent_registration:\n LOG.notice('Would have created and registered the following\\n',\n '\\n '.join([' * %s: %s' % (fPath, val) for fPath, val in sorted(self.finalMetaDict.iteritems())]))\n LOG.notice('Would have set this as non searchable metadata', str(self.finalMetaDictNonSearch))\n return S_OK()\n\n failed = defaultdict(list)\n for path, meta in sorted(self.finalMetaDict.items()):\n res = self._createDirectory(path, failed)\n if not res['OK']:\n continue\n LOG.verbose('Checking to set metadata:', meta)\n metaCopy = dict(meta)\n res = self._checkMetadata(path, metaCopy)\n if not res['OK']:\n return res\n if not metaCopy:\n LOG.verbose('No new metadata to set')\n continue\n\n LOG.notice('Setting metadata information: ', '%s: %s' % (path, metaCopy))\n result = self.fc.setMetadata(path.rstrip('/'), metaCopy)\n if not result['OK']:\n LOG.error('Could not preset metadata', str(metaCopy))\n LOG.error('Could not preset metadata', result['Message'])\n failed[path].append(result['Message'])\n\n for path, meta in sorted(self.finalMetaDictNonSearch.items()):\n res = self._createDirectory(path, failed)\n if not res['OK']:\n continue\n LOG.notice('Setting non searchable metadata information: ', '%s: %s' % (path, meta))\n result = self.fc.setMetadata(path.rstrip('/'), meta)\n if not result['OK']:\n LOG.error('Could not preset non searchable metadata', str(meta))\n LOG.error('Could not preset non searchable metadata', result['Message'])\n failed[path].append(result['Message'])\n\n if failed:\n return S_ERROR('Failed to register some metadata: %s' % dict(failed))\n return S_OK()\n\n def getMetadata(self):\n \"\"\" Return the corresponding metadata of the last step\n \"\"\"\n metadict = {}\n for meta in self.finalMetaDict.values():\n metadict.update(meta)\n if 'NumberOfEvents' in metadict:\n del metadict['NumberOfEvents'] #As this is not supposed to be a searchable thing\n return metadict\n \n def _setProdParameter(self, prodID, pname, pvalue):\n \"\"\" Set a production parameter.\n \"\"\"\n if isinstance( pvalue, list ):\n pvalue = '\\n'.join(pvalue)\n\n if isinstance( pvalue, (int, long) ):\n pvalue = str(pvalue)\n if not self.dryrun: \n result = self.trc.setTransformationParameter(int(prodID), str(pname), str(pvalue))\n if not result['OK']:\n LOG.error('Problem setting parameter %s for production %s and value:\\n%s' % (prodID, pname, pvalue))\n else:\n LOG.notice('Adding %s=%s to transformation' % (str(pname), str(pvalue)))\n result = S_OK()\n return result\n \n def _jobSpecificParams(self, application):\n \"\"\" For production additional checks are needed: ask the user\n \"\"\"\n\n if self.created:\n return S_ERROR(\"The production was created, you cannot add new applications to the job.\")\n\n if not application.logFile:\n logf = application.appname + \"_\" + application.version + \"_@{STEP_ID}.log\"\n res = application.setLogFile(logf)\n if not res['OK']:\n return res\n \n #in fact a bit more tricky as the log files have the prodID and jobID in them\n \n ### Retrieve from the application the essential info to build the prod info.\n if not self.nbevts and not self.slicesize:\n self.nbevts = application.numberOfEvents\n if not self.nbevts:\n return S_ERROR(\"Number of events to process is not defined.\")\n elif not application.numberOfEvents:\n if not self.slicesize:\n res = 
application.setNumberOfEvents(self.jobFileGroupSize * self.nbevts)\n else:\n res = application.setNumberOfEvents(self.slicesize)\n if not res['OK']:\n return res\n \n if application.numberOfEvents > 0 and (self.jobFileGroupSize * self.nbevts > application.numberOfEvents or self.slicesize > application.numberOfEvents):\n self.nbevts = application.numberOfEvents\n\n \n if not self.energy:\n if application.energy:\n self.energy = Decimal(((\"%1.2f\" % float(application.energy)).rstrip('0').rstrip('.')))\n else:\n return S_ERROR(\"Could not find the energy defined, it is needed for the production definition.\")\n elif not application.energy:\n res = application.setEnergy(float(self.energy))\n if not res['OK']:\n return res\n if self.energy:\n self._setParameter( \"Energy\", \"float\", float(self.energy), \"Energy used\") \n self.prodparameters[\"Energy\"] = float(self.energy)\n \n if not self.evttype:\n if hasattr(application, 'eventType'):\n self.evttype = application.eventType\n else:\n return S_ERROR(\"Event type not found nor specified, it's mandatory for the production paths.\") \n self.prodparameters['Process'] = self.evttype\n \n if not self.outputStorage:\n return S_ERROR(\"You need to specify the Output storage element\")\n \n curpackage = \"%s.%s\" % (application.appname, application.version)\n if \"SWPackages\" in self.prodparameters: \n if not self.prodparameters[\"SWPackages\"].count(curpackage):\n self.prodparameters[\"SWPackages\"] += \";%s\" % ( curpackage ) \n else :\n self.prodparameters[\"SWPackages\"] = \"%s\" % (curpackage)\n \n if not application.accountInProduction:\n res = self._updateProdParameters(application)\n if not res['OK']:\n return res \n self.checked = True\n\n return S_OK()\n \n res = application.setOutputSE(self.outputStorage)\n if not res['OK']:\n return res\n \n energypath = self.getEnergyPath()\n\n if not self.basename:\n self.basename = self.evttype\n\n evttypepath = ''\n if not self.evttype[-1] == '/':\n evttypepath = self.evttype + '/'\n \n path = self.basepath \n ###Need to resolve file names and paths\n if self.energy:\n self.finalMetaDict[self.basepath + energypath] = {\"Energy\":str(self.energy)}\n\n if hasattr(application, \"setOutputRecFile\") and not application.willBeCut:\n evtPath = self.basepath + energypath + evttypepath\n self.finalMetaDict[evtPath] = {'EvtType': self.evttype}\n detPath = evtPath + application.detectortype\n self.finalMetaDict[detPath] = {'DetectorType': application.detectortype}\n if application.keepRecFile:\n path = self.basepath + energypath + evttypepath + application.detectortype + '/REC'\n self.finalMetaDict[path] = {'Datatype': 'REC'}\n fname = self.basename + '_rec.slcio'\n application.setOutputRecFile(fname, path)\n LOG.notice('Will store the files under', path)\n self.finalpaths.append(path)\n path = self.basepath + energypath + evttypepath + application.detectortype + '/DST'\n self.finalMetaDict[path] = {'Datatype': 'DST'}\n fname = self.basename + '_dst.slcio'\n application.setOutputDstFile(fname, path) \n LOG.notice('Will store the files under', path)\n self.finalpaths.append(path)\n\n elif hasattr(application, \"outputFile\") and hasattr(application, 'datatype') and not application.outputFile and not application.willBeCut:\n LOG.notice('Adding output meta data for %s' % type(application))\n path = self.basepath + energypath + evttypepath\n self.finalMetaDict[path] = {\"EvtType\" : self.evttype}\n if hasattr(application, \"detectortype\"):\n if application.detectortype:\n path += application.detectortype\n 
self.finalMetaDict[path] = {\"DetectorType\" : application.detectortype}\n path += '/'\n elif self.detector:\n path += self.detector\n self.finalMetaDict[path] = {\"DetectorType\" : self.detector}\n path += '/'\n if not application.datatype and self.datatype:\n application.datatype = self.datatype\n path += application.datatype\n self.finalMetaDict[path] = {'Datatype' : application.datatype}\n LOG.notice('Will store the files under', '%s' % path)\n self.finalpaths.append(path)\n extension = 'stdhep'\n if application.datatype in ['SIM', 'REC']:\n extension = 'slcio'\n fname = self.basename + \"_%s\" % (application.datatype.lower()) + \".\" + extension\n application.setOutputFile(fname, path) \n \n self.basepath = path\n\n res = self._updateProdParameters(application)\n if not res['OK']:\n return res\n \n self.checked = True\n \n return S_OK()\n\n def _updateProdParameters(self, application):\n \"\"\" Update the prod parameters stored in the production parameters visible from the web\n \"\"\"\n try:\n self.prodparameters.update(application.prodparameters)\n except Exception as x:\n return S_ERROR(\"Exception: %r\" % x )\n\n if hasattr( application, 'extraCLIArguments' ) and application.extraCLIArguments:\n self.prodparameters['extraCLIArguments'] = repr(application.extraCLIArguments)\n\n return S_OK()\n\n def _jobSpecificModules(self, application, step):\n return application._prodjobmodules(step)\n\n def getEnergyPath(self):\n \"\"\"returns the energy path 250gev or 3tev or 1.4tev etc.\"\"\"\n energy = Decimal(str(self.energy))\n tD = Decimal('1000.0')\n unit = 'gev' if energy < tD else 'tev'\n energy = energy if energy < tD else energy/tD\n energyPath = (\"%1.2f\" % energy).rstrip('0').rstrip('.')\n energyPath = energyPath+unit+'/'\n\n LOG.notice('Energy path is: ', energyPath)\n return energyPath\n\n\n def _checkMetaKeys( self, metakeys, extendFileMeta=False ):\n \"\"\" check if metadata keys are allowed to be metadata\n\n :param list metakeys: metadata keys for production metadata\n :param bool extendFileMeta: also use FileMetaFields for checking meta keys\n :returns: S_OK, S_ERROR\n \"\"\"\n\n res = self.fc.getMetadataFields()\n if not res['OK']:\n LOG.error(\"Could not contact File Catalog\")\n return S_ERROR(\"Could not contact File Catalog\")\n metaFCkeys = res['Value']['DirectoryMetaFields'].keys()\n if extendFileMeta:\n metaFCkeys.extend( res['Value']['FileMetaFields'].keys() )\n\n for key in metakeys:\n for meta in metaFCkeys:\n if meta != key and meta.lower() == key.lower():\n return self._reportError(\"Key syntax error %r, should be %r\" % (key, meta), name = self.__class__.__name__)\n if key not in metaFCkeys:\n return self._reportError(\"Key %r not found in metadata keys, allowed are %r\" % (key, metaFCkeys))\n\n return S_OK()\n\n def _checkFindDirectories( self, metadata ):\n \"\"\" find directories by metadata and check that there are directories found\n\n :param dict metadata: metadata dictionary\n :returns: S_OK, S_ERROR\n \"\"\"\n\n metaQuery = ' '.join('%s=%s' % (k, v) for k, v in metadata.items())\n LOG.verbose('Looking for folder with', repr(metaQuery))\n res = self.fc.findDirectoriesByMetadata(metadata)\n if not res['OK']:\n return self._reportError(\"Error looking up the catalog for available directories\")\n elif len(res['Value']) < 1:\n return self._reportError('Could not find any directories corresponding to the query issued: %s' % metaQuery)\n for folderId, folder in res['Value'].items():\n if (folderId == 0 or folder == 'None') and not self.dryrun:\n return 
self._reportError('Could not find any directories corresponding to the query issued: %s' % metaQuery)\n return res\n\n def setReconstructionBasePaths( self, recPath, dstPath ):\n \"\"\" set the output Base paths for the reconstruction REC and DST files \"\"\"\n self._recBasePaths['REC'] = recPath\n self._recBasePaths['DST'] = dstPath\n","sub_path":"Interfaces/API/NewInterface/ProductionJob.py","file_name":"ProductionJob.py","file_ext":"py","file_size_in_byte":36711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"130498534","text":"class Nodo():\r\n def __init__( self, value, siguiente = None ):\r\n self.data = value #Falta Encapsulamiento\r\n self.siguiente = siguiente \r\n\r\nclass LinkedList:\r\n def __init__( self ):\r\n self.__head = None\r\n\r\n def is_empty( self ):\r\n return self.__head == None\r\n\r\n def append( self, value ):\r\n nuevo = Nodo( value )\r\n if self.__head == None: #self is empty\r\n self.__head = nuevo\r\n else:\r\n curr_node = self.__head\r\n while curr_node.siguiente != None:\r\n curr_node = curr_node.siguiente\r\n curr_node.siguiente = nuevo\r\n\r\n def remove( self, value ):\r\n curr_node=self.__head\r\n\r\n if self.__head.data == value:\r\n self.__head = self.__head.siguiente\r\n \r\n else:\r\n curr_anterior = None\r\n while curr_node.data != value and curr_node.siguiente != None:\r\n curr_anterior=curr_node\r\n curr_node= curr_node.siguiente\r\n \r\n if curr_node.data == value:\r\n curr_anterior.siguiente= curr_anterior.siguiente.siguiente \r\n else:\r\n print(\"El dato no existe en la lista\")\r\n \r\n \r\n def transversal ( self ):\r\n curr_node = self.__head\r\n while curr_node != None:\r\n print(f\"{curr_node.data} -> \", end=\"\")\r\n curr_node = curr_node.siguiente\r\n print(\"\")\r\n\r\n def preppend(self, value):\r\n nuevo = Nodo( value, self.__head)\r\n self.__head = nuevo\r\n \r\n\r\n def tail(self):\r\n curr_node = self.__head\r\n while curr_node.siguiente != None:\r\n curr_node= curr_node.siguiente\r\n\r\n return(curr_node.data)\r\n \r\n def get(self, posicion = None):\r\n contador = 0\r\n curr_node = self.__head\r\n dato = \"No existe dato en esta posición\"\r\n \r\n while(curr_node):\r\n if(contador == posicion):\r\n dato = curr_node.data\r\n contador += 1\r\n curr_node = curr_node.siguiente\r\n\r\n print(f\"El dato en la posición {posicion} es {dato}\")\r\n\r\n\r\nclass NodoDoble():\r\n\r\n def __init__( self, data, prev = None, next = None):\r\n self.next = next\r\n self.prev = prev\r\n self.data = data\r\n\r\nclass DoubleLinkedList:\r\n\r\n def __init__(self):\r\n self.__head = ( NodoDoble(None)) \r\n self.__tail = ( NodoDoble(None)) \r\n self.__size = 0\r\n \r\n def getSize(self):\r\n return self.__size\r\n \r\n def is_empty(self):\r\n return self.__size == 0\r\n\r\n def append( self, val ):\r\n\r\n if self.is_empty():\r\n nuevo = NodoDoble(val)\r\n self.__head = nuevo\r\n self.__tail = nuevo\r\n else:\r\n nuevo = NodoDoble(val, self.__tail, None)\r\n self.__tail.next = nuevo\r\n self.__tail = nuevo \r\n\r\n self.__size += 1\r\n \r\n def transversal( self ):\r\n curr_node = self.__head\r\n while curr_node != None:\r\n print(f\" <- {curr_node.data} -> \", end=\"\")\r\n curr_node = curr_node.next\r\n print(\"\")\r\n\r\n def reverse_transversal( self ):\r\n curr_node = self.__tail\r\n while curr_node != None:\r\n print(f\" <- {curr_node.data} -> \", end=\"\")\r\n curr_node = curr_node.prev\r\n print(\"\")\r\n\r\n def remove_from_head( self, value ):\r\n\r\n curr_node = self.__head 
\r\n\r\n        if self.__head.data == value:\r\n            self.__head = self.__head.next\r\n            self.__head.prev = None\r\n\r\n        elif self.__tail.data == value:\r\n            curr_node = self.__tail\r\n            self.__tail.prev.next = None  \r\n            self.__tail = self.__tail.prev\r\n\r\n        elif self.__head.data != value and self.__tail.data != value:\r\n            \r\n            while curr_node.data != value and curr_node.next != None:\r\n                \r\n                curr_node = curr_node.next\r\n            \r\n            if curr_node.data == value:\r\n                curr_node.prev.next = curr_node.next\r\n                curr_node.next.prev = curr_node.prev\r\n            else:\r\n                print(\"El dato no existe en la lista\")\r\n                return  # value not found: bail out before corrupting links or the size count\r\n        \r\n        curr_node.next = None\r\n        curr_node.prev = None\r\n        self.__size -= 1\r\n        \r\n        \r\n\r\n","sub_path":"24Noviembre2020/listas.py","file_name":"listas.py","file_ext":"py","file_size_in_byte":4286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"541447379","text":"import FWCore.ParameterSet.Config as cms\n\nfrom Configuration.Generator.PythiaUEZ2Settings_cfi import *\n\ngenerator = cms.EDFilter(\"Pythia6GeneratorFilter\",\n                         comEnergy = cms.double(5020.0),\n                         crossSection = cms.untracked.double(2.864e-08), \n                         filterEfficiency = cms.untracked.double(1.),\n                         maxEventsToPrint = cms.untracked.int32(-1),\n                         pythiaHepMCVerbosity = cms.untracked.bool(False),\n                         pythiaPylistVerbosity = cms.untracked.int32(0),\n                         PythiaParameters = cms.PSet(pythiaUESettingsBlock,\n                                                     processParameters = cms.vstring('MSEL=5 ! b-quark flavor creation processes',\n                                                                                     'CKIN(3)= 170 ! minimum pt hat for hard interactions',\n                                                                                     ),\n                                                     parameterSets = cms.vstring('pythiaUESettings',\n                                                                                 'processParameters',\n                                                                                 )\n                                                     )\n                         )\n\nconfigurationMetadata = cms.untracked.PSet(\n    annotation = cms.untracked.string('PYTHIA6, b-jets from flavor creation (MSEL=5), pt-hat > 170 GeV, at sqrt(s) = 5.02 TeV')\n    )\n\nProductionFilterSequence = cms.Sequence(generator)\n\n\n\n","sub_path":"python/HI/dijet_analysis/pp/Pythia6_bJetFCR170_pp_TuneZ2_5020GeV_cff.py","file_name":"Pythia6_bJetFCR170_pp_TuneZ2_5020GeV_cff.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"68965103","text":"### TESTING SPECIFIC VOLUMES\n## Web scraper for Journal of Machine Learning Research - https://www.jmlr.org/papers/\n# Can get from an html file or a web page - This will pull from webpage\n# Santosh Khadka skhadka.code@gmail.com\n\nfrom bs4 import BeautifulSoup\nimport requests\nimport csv\n\n## VOLUME X - Abstract URL Scraper ##\nfor n in range(0, 1):\n    current_url = \"https://www.jmlr.org/papers/v21/\" \n    print(\"#################  \", current_url) # !Testing \n    source_url_get = requests.get(current_url).text\n    soup_url = BeautifulSoup(source_url_get, 'lxml')\n    current_volume_abstract_list = []\n    abstractURL_list = []\n    jmlr_abstract_URL_list = []\n    \n    current_volume_table = soup_url.find(\"div\", id=\"content\")\n    current_volume_aTag = current_volume_table.find_all(\"a\")\n    #print(current_volume_aTag) # !Testing\n    for i in range(len(current_volume_aTag)):\n        if (\"[abs]\" in str(current_volume_aTag[i])) or (\">abs<\" in str(current_volume_aTag[i])):\n            string1 = str(current_volume_aTag[i])\n            string1 = string1.split('\"')\n            string1 = (current_url+string1[1]).replace(\" \", '')\n            #print(string1) # !Testing\n            abstractURL_list.append(string1)\n            jmlr_abstract_URL_list.append(abstractURL_list) \nprint(len(jmlr_abstract_URL_list[0]))\n    \n# txt = \"apple#banana#cherry#orange\"\n\n# # setting the maxsplit parameter to 1, will return a list with 2 elements!\n# x = txt.split(\"#\", 
1)\n\n# print(x)","sub_path":"JMLR - Code & CSVs/JMLR_scraper_VolumeX_abstractURL.py","file_name":"JMLR_scraper_VolumeX_abstractURL.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"176113539","text":"import math\r\nimport numpy as np\r\n\r\nvalues = [0.0, 0.0, 0.0, 0.0, 0.0]  # placeholder inputs: Sn, U1, ek, Pcu, U2\r\n\r\nSn: float\r\nU1: float\r\nek: float\r\nPcu: float\r\nU2: float\r\n\r\nclass Transformer():\r\n    def __init__(self, Sn, U1, ek, Pcu, U2):\r\n        self.Sn = Sn\r\n        self.U1 = U1\r\n        self.ek = ek\r\n        self.Pcu = Pcu\r\n        self.U2 = U2\r\n    \r\n    def resitance_primary(self):\r\n        R_primary = self.Pcu / (3 * self.current_primary()**2)\r\n        return R_primary\r\n\r\n    def reactance_primary(self):\r\n        XL_primary = math.sqrt(self.impedance_primary()**2 - self.resitance_primary()**2)\r\n        return XL_primary\r\n\r\n    def impedance_primary(self):\r\n        Z_primary = (self.U1**2 * self.ek) / (self.Sn * 100)\r\n        return Z_primary\r\n\r\n    def current_primary(self):\r\n        I1 = self.Sn / (self.U1 * math.sqrt(3))\r\n        return I1\r\n\r\n    def resitance_secondary(self):\r\n        R_secondary = self.Pcu / (3 * self.current_secondary()**2)\r\n        return R_secondary\r\n\r\n    def reactance_secondary(self):\r\n        XL_secondary = math.sqrt(self.impedance_secondary()**2 - self.resitance_secondary()**2)\r\n        return XL_secondary\r\n\r\n    def impedance_secondary(self):\r\n        Z_secondary = (self.U2**2 * self.ek) / (self.Sn * 100)\r\n        return Z_secondary\r\n\r\n    def current_secondary(self):\r\n        I2 = self.Sn / (self.U2 * math.sqrt(3))\r\n        return I2\r\n\r\n    def phase_angle(self):\r\n        phase_angle = math.degrees(math.atan(1 / (self.resitance_primary() / self.reactance_primary())))\r\n        return phase_angle\r\n\r\ndata = Transformer(values[0], values[1], values[2], values[3], values[4])\r\n\r\ndef print_calculation():\r\n    \r\n    current_primary_result = \"Ip current: %s A\" % data.current_primary()\r\n    print(current_primary_result)\r\n    current_secondary_result = \"Is current: %s A\" % data.current_secondary()\r\n    print(current_secondary_result)\r\n    resistance_primary_result = \"Rp: %s Ω\" % data.resitance_primary()\r\n    print(resistance_primary_result)\r\n    resistance_secondary_result = \"Rs: %s Ω\" % data.resitance_secondary()\r\n    print(resistance_secondary_result)\r\n    reactance_primary_result = \"XLp: %s Ω\" % data.reactance_primary()\r\n    print(reactance_primary_result)\r\n    reactance_secondary_result = \"XLs: %s Ω\" % data.reactance_secondary()\r\n    print(reactance_secondary_result)\r\n    impadance_primary_result = \"Zp: %s Ω\" % data.impedance_primary()\r\n    print(impadance_primary_result)\r\n    impadance_secondary_result = \"Zs: %s Ω\" % data.impedance_secondary()\r\n    print(impadance_secondary_result)\r\n    phase_angle_result = \"Φ: %s\" % data.phase_angle()\r\n    print(phase_angle_result)\r\n\r\n    return current_primary_result, current_secondary_result, resistance_primary_result, resistance_secondary_result, reactance_primary_result, reactance_secondary_result, impadance_primary_result, impadance_secondary_result, phase_angle_result","sub_path":"calculation.py","file_name":"calculation.py","file_ext":"py","file_size_in_byte":2810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"399654366","text":"import numpy as np\nimport tensorflow as tf\nimport csv\nimport matplotlib as mpl\nmpl.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport time\n\n# Hyperparameters\nLEARNING_RATE = 0.001\nEPOCHS = 20\nBATCH_SIZE = 100\nDATA_PORTION = 800 # 80,000 Maximum\nTRAIN_PORTION = int(4 * DATA_PORTION // 
5)\nTEST_PORTION = DATA_PORTION - TRAIN_PORTION\nsubmission_data_portion = 20000 # 20,000 Maximum\n\n# Do you want to build a submit file?\nSUBMIT = False\n\n\n# Retrieve data from csv files\n# Data kept in train_data and eval_data\n# Labels kept in train_labels and eval_labels\nprint(\"\\n\\n\\nFetching data...\\n\\n\")\nwith open('../train.csv', newline='') as csvfile:\n read = csv.reader(csvfile)\n i = 0\n train_data = []\n test_data = []\n for row in read:\n del row[0]\n if(i > 0 and i <= TRAIN_PORTION):\n train_data.append(row)\n if(i > 0 and i > TRAIN_PORTION):\n test_data.append(row)\n if i % DATA_PORTION == 0:\n if i > 0:\n break\n i += 1\n print(\"Finished reading training images.\")\n\n\n # Change type to numpy array for use with tensorflow\n train_data = np.array(train_data, dtype='f4')\n eval_data = np.array(test_data, dtype='f4')\n print(\"Training image shape:\", train_data.shape)\n print(\"Testing image shape:\", eval_data.shape)\n\n\nwith open('../train_labels.csv', newline='') as csvfile:\n read = csv.reader(csvfile)\n\n i = 0\n train_lab = []\n test_lab = []\n for row in read:\n if(i > 0 and i <= TRAIN_PORTION):\n train_lab.append(row[1])\n if(i > 0 and i > TRAIN_PORTION):\n test_lab.append(row[1])\n if i % DATA_PORTION == 0:\n if i > 0:\n break\n i += 1\n print(\"Finished reading training labels.\")\n\n # Change type to numpy array for use with tensorflow\n train_labels = np.asarray(train_lab, dtype=np.int32)\n eval_labels = np.asarray(test_lab, dtype=np.int32)\n # print(train_labels.shape)\n\n # Uncomment to view training image at index of your choice\n # train_data = train_data.reshape(train_data.shape[0], 24, 24)\n # plt.imshow(train_data[20], cmap='gray')\n # plt.title(train_labels[20])\n # plt.show()\n\nif SUBMIT:\n with open('../test.csv', newline='') as csvfile:\n read = csv.reader(csvfile)\n\n i = 0\n submission_data = []\n test_data = []\n for row in read:\n del row[0]\n if(i > 0 and i <= submission_data_portion):\n submission_data.append(row)\n if i % submission_data_portion == 0:\n if i > 0:\n break\n i += 1\n print(\"Finished reading submission images.\")\n\n\n submission_data = np.array(submission_data, dtype='f4')\n # print(\"Submission image shape:\", submission_data.shape)\n\n submission_data = submission_data.reshape(submission_data_portion, 24, 120)\n submission_data = submission_data.transpose([0,2,1])\n submission_data = submission_data.reshape(submission_data_portion, 5, 24, 24)\n submission_data = submission_data.transpose([0,1,3,2])\n submission_data = submission_data.reshape(submission_data_portion, 5, 576)\n # print(\"Submission image shape:\", submission_data[0].shape)\n\n\n# Build one-hot arrays for labels\nprint(\"\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\nTrain_labels.size:\",train_labels.size)\nprint(\"train_labels.shape:\",train_labels.shape)\ny_train = np.zeros((train_labels.size, 13))\nprint(\"y_train.shape:\",y_train.shape)\ny_train[range(train_labels.size), train_labels] = 1.0\ntrain_labels = y_train\n\ny_eval = np.zeros((eval_labels.size, 13))\ny_eval[range(eval_labels.size), eval_labels] = 1.0\neval_labels = y_eval\n\n# Input placeholder\nx = tf.placeholder(tf.float32, [None,576])\n# Reshape for convolution\nx_shaped = tf.reshape(x, [-1,24,24,1])\n\n# Output placeholder\ny = tf.placeholder(tf.float32, [None, 13])\n\ndef convolution_layer(input_data, num_input_channels, num_filters, filter_shape, pool_shape, name):\n conv_filter_shape = [filter_shape[0], filter_shape[1], num_input_channels, num_filters]\n\n # Initilization of 
filter weights and biases\n weights = tf.Variable(tf.truncated_normal(conv_filter_shape, stddev=0.03), name=name+'_W')\n biases = tf.Variable(tf.truncated_normal([num_filters]), name=name+'_b')\n\n out_layer = tf.nn.conv2d(input_data, weights, [1,1,1,1], padding='SAME')\n out_layer += biases\n out_layer = tf.nn.relu(out_layer)\n\n kernal_size = [1, pool_shape[0], pool_shape[1], 1]\n strides = [1,2,2,1]\n out_layer = tf.nn.max_pool(out_layer, ksize=kernal_size, strides=strides, padding='SAME')\n\n return out_layer\n\n\n# Model definition\nlayer1 = convolution_layer(x_shaped, 1, 32, [5,5], [2,2], name='layer1')\nlayer2 = convolution_layer(layer1, 32, 64, [5,5], [2,2], name='layer2')\n# Ouput shape is 6 x 6\n\n# Reshaping for dense layers\nflattened = tf.reshape(layer2, [-1,6*6*64])\n\n# Dense layer 1 definition\nwd1 = tf.Variable(tf.truncated_normal([6*6*64, 1000], stddev=0.03), name='wd1')\nbd1 = tf.Variable(tf.truncated_normal([1000], stddev=0.01), name='bd1')\ndense_layer1 = tf.matmul(flattened, wd1) + bd1\ndense_layer1 = tf.nn.relu(dense_layer1)\n# Output shape: 1 x 1000\n\n# Dense layer 2 definition\nwd2 = tf.Variable(tf.truncated_normal([1000, 13], stddev=0.03), name='wd2')\nbd2 = tf.Variable(tf.truncated_normal([13], stddev=0.01), name='bd2')\ndense_layer2 = tf.matmul(dense_layer1, wd2) + bd2\ny_ = tf.nn.softmax(dense_layer2)\n\n# Cost function declaration (cross-entropy)\ncross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=dense_layer2, labels=y))\n\n# Set up optimizer\noptimizer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE).minimize(cross_entropy)\n\n# Define accuracy assessment\ncorrect_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n# Initialization operator\ninit = tf.global_variables_initializer()\n\nwith tf.Session() as sess:\n sess.run(init)\n\n total_batch_num = int(TRAIN_PORTION / BATCH_SIZE)\n\n\n for epoch in range(EPOCHS):\n print(\"Starting epoch\", epoch)\n print(\"\\nTotal number of batches to be run:\", total_batch_num)\n print(\"\\n\")\n avg_cost = 0\n batch_low_ind = 0\n batch_high_ind = batch_low_ind + BATCH_SIZE\n\n # Shuffle training data\n shuff = list(range(TRAIN_PORTION))\n np.random.shuffle(shuff)\n train_data = train_data[shuff]\n train_labels = train_labels[shuff]\n\n for i in range(total_batch_num):\n if((i + 1) % 20 == 0 or i == 0):\n print(\"Current Batch Number:\", i+1)\n # Extract batch interval from total dataset\n batch_x = train_data[batch_low_ind:batch_high_ind]\n batch_y = train_labels[batch_low_ind:batch_high_ind]\n batch_low_ind += BATCH_SIZE\n batch_high_ind += BATCH_SIZE\n\n _, c = sess.run([optimizer, cross_entropy], feed_dict={x: batch_x, y: batch_y})\n avg_cost += c / total_batch_num\n # Uncomment below for example on first image\n # if i % (total_batch_num // 5) == 0:\n # network_out = sess.run(y_, feed_dict={x: eval_data[0:1], y: eval_labels[0:1]})\n # label_ex = eval_labels[0]\n # print(\"Ex. 
network output:\", network_out)\n            #     print(\"Corresponding label:\", label_ex)\n        print(\"Testing accuracy...\")\n        test_acc = sess.run(accuracy, feed_dict={x: eval_data, y: eval_labels})\n\n\n        print(\"Epoch\", (epoch+1), \"cost =\", \"{:.3f}\".format(avg_cost), \"test accuracy: \", \"{:.3f}\".format(test_acc))\n        print(\"\\n\\n\\n\\n\")\n\n    print(\"\\nTraining Complete.\")\n    print(\"Final Accuracy:\",sess.run(accuracy, feed_dict={x: eval_data, y: eval_labels}))\n\n\n\n    if SUBMIT:\n        print(\"Running submission file through network...\")\n\n        # Out variable will be output to file\n        out = []\n\n        # Iterate through submission data to test the validity of each equation and store each evaluation in a variable (out)\n        # for submission_data_single in submission_data:\n        check_tick = time.clock()\n        x1 = sess.run(tf.argmax(y_,1), feed_dict={x: submission_data[:,0]})\n        x2 = sess.run(tf.argmax(y_,1), feed_dict={x: submission_data[:,2]})\n        x3 = sess.run(tf.argmax(y_,1), feed_dict={x: submission_data[:,4]})\n        op1 = sess.run(tf.argmax(y_,1), feed_dict={x: submission_data[:,1]})\n        op2 = sess.run(tf.argmax(y_,1), feed_dict={x: submission_data[:,3]})\n        check_toc = time.clock()\n        print(\"Forward pass of submission data (s):\", check_toc-check_tick)\n        # Determine whether equation is true or false\n        row = []\n        row.append(0)\n        row.append(0)\n        for i in range(submission_data.shape[0]):\n            row[0] = i\n            if op1[i] == 12:\n                if op2[i] == 11:\n                    row[1] = int(x1[i] == x2[i] - x3[i])\n                else:\n                    row[1] = int(x1[i] == x2[i] + x3[i])\n            else:\n                if op1[i] == 11:\n                    row[1] = int(x1[i] - x2[i] == x3[i])\n                else:\n                    row[1] = int(x1[i] + x2[i] == x3[i])\n            out.append([row[0], row[1]])\n\n\n# Only write the submission file when SUBMIT is set; otherwise 'out' was never created\nif SUBMIT:\n    with open('submission.csv', 'w', newline='') as csvfile:\n        write = csv.writer(csvfile)\n        write.writerow(['index', 'label'])\n        for r in out:\n            write.writerow(r)\n","sub_path":"V1/CompCNN_V1.py","file_name":"CompCNN_V1.py","file_ext":"py","file_size_in_byte":9413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"313371081","text":"from config.dbconfig import pg_config\nimport psycopg2\n\n\nclass RequestDAO:\n\n    def __init__(self):\n\n        connection_url = \"dbname=%s user=%s password=%s host=127.0.0.1\" % (pg_config['dbname'],\n                                                                            pg_config['user'],\n                                                                            pg_config['passwd'])\n        self.conn = psycopg2.connect(connection_url)\n\n    def getAllRequests(self):\n        cursor = self.conn.cursor()\n        query = \"select rq_id, rq_date from request;\"\n        cursor.execute(query)\n        result = []\n        for row in cursor:\n            result.append(row)\n        return result\n\n    def getRequestById(self, rq_id):\n        cursor = self.conn.cursor()\n        query = \"select rq_id, rq_date from request where rq_id = %s;\"\n        cursor.execute(query, (rq_id,))\n        result = cursor.fetchone()\n        return result\n\n    def getRequestByUsrId(self, usr_id):\n        cursor = self.conn.cursor()\n        query = \"select rq_id, rq_date \" \\\n                \"from request natural inner join Places natural inner join client \" \\\n                \"where usr_id = %s;\"\n        cursor.execute(query, (usr_id,))\n        result = cursor.fetchone()\n        return result\n\n    def getRequestByDate(self, rq_date):\n        cursor = self.conn.cursor()\n        query = \"select * from request where rq_date = %s;\"\n        cursor.execute(query, (rq_date,))\n        result = []\n        for row in cursor:\n            result.append(row)\n        return result\n\n    def getRequestByClient(self, c_usr):\n        cursor = self.conn.cursor()\n        query = \"select * from request natural inner join Places natural inner join client where c_usr = %s;\"\n        cursor.execute(query, (c_usr,))\n        result = []\n        for row in cursor:\n            result.append(row)\n        return result\n\n    def getRequestByTransaction(self, transaction_num):\n        cursor = self.conn.cursor()\n        query = \"select * from request natural inner join Transaction where transaction_num = %s;\"\n        cursor.execute(query, (transaction_num,))\n        result = []\n        for row in cursor:\n            result.append(row)\n        return result\n\n    def insert(self, rq_date):\n        cursor = self.conn.cursor()\n        query = \"insert into request(rq_date) values (%s) returning rq_id;\"\n        cursor.execute(query, (rq_date,))\n        rqid = cursor.fetchone()[0]\n        self.conn.commit()\n        return rqid\n\n    def delete(self, rq_id):\n        cursor = self.conn.cursor()\n        query = \"delete from request where rq_id = %s;\"\n        cursor.execute(query, (rq_id,))\n        self.conn.commit()\n        return rq_id\n\n    def update(self, rq_id, rq_date):\n        cursor = self.conn.cursor()\n        query = \"update request set rq_date = %s where rq_id = %s;\"\n        cursor.execute(query, (rq_date, rq_id,))\n        self.conn.commit()\n        return rq_id","sub_path":"dao/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":2913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"483993115","text":"import random\r\nA=random.randint(1,20)\r\nI=0\r\nwhile True:\r\n    B=input('請輸入1-20中的一個數字') \r\n    B=int(B)  \r\n    if A<B:\r\n        print('太大 只能猜五次喔') \r\n    elif A>B:\r\n        print('太小 只能猜五次喔') \r\n    elif B<1 or B>20:\r\n        print('error type in again')\r\n    \r\n    I=I+1\r\n    if I==5:\r\n        print(\"正確答案是\"+str(A))\r\n        break\r\n    \r\n    ","sub_path":"day2.1.py","file_name":"day2.1.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"201700594","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /usr/local/lib/python3.6/dist-packages/irk/util/storage/database.py\n# Compiled at: 2018-06-21 18:58:03\n# Size of source mod 2**32: 1541 bytes\nfrom bisect import bisect_left\nfrom . import etcfile\ninstalled_database = []\npackages = []\nDB_FILE = etcfile.EtcFile('instdb')\n\ndef search_entry(package):\n    global installed_database\n    global packages\n    i = bisect_left(packages, package)\n    if i != len(packages):\n        if packages[i] == package:\n            return (\n             installed_database[i], i)\n\n\ndef delete_entry(package):\n    i = bisect_left(packages, package)\n    if i != len(packages):\n        if packages[i] == package:\n            del installed_database[i]\n            del packages[i]\n    else:\n        raise ValueError()\n\n\ndef insert_entry(e):\n    i = bisect_left(packages, e[0])\n    packages.insert(i, e[0])\n    installed_database.insert(i, e)\n\n\ndef load_installed_database():\n    global installed_database\n    global packages\n    packages = []\n    installed_database = []\n    if not DB_FILE.fullpath.exists():\n        write_database()\n    print('(Reading database... ', end='')\n    with DB_FILE.open('r') as (f):\n        i = 0\n        while True:\n            line = f.readline()\n            if line == '':\n                break\n            i += 1\n            line = line[:-1]\n            line = line.split(' ')\n            packages.append(line[0])\n            installed_database.append(line)\n\n    print(f\" {i} packages installed.)\")\n\n\ndef write_database():\n    print('Writing package database... 
', end='')\n    with DB_FILE.open('w') as (f):\n        for i in installed_database:\n            f.write(' '.join(i))\n            f.write('\\n')\n\n    print('done.')","sub_path":"pycfiles/irk-0.2.linux-x86_64.tar/database.cpython-36.py","file_name":"database.cpython-36.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"577929670","text":"from plugins import plugin\n\ndef brainfuck(input_bytes, program, max_steps):\n    input_bytes_pos = 0\n\n    tape = {}\n    ptr = 0\n\n    def parse():\n        loops = {}\n        i = 0\n        stack = []\n        while i < len(program):\n            if program[i] == '[':\n                stack.append(i)\n            if program[i] == ']':\n                if len(stack) == 0:\n                    raise Exception(\"Mismatched square brackets.\")\n                loops[stack[-1]] = i\n                del stack[-1]\n            if program[i] not in '[]<>+-,.':\n                raise Exception(\"Illegal character \" + program[i])\n            i += 1\n        if len(stack) > 0:\n            raise Exception(\"Mismatched square brackets.\")\n        return loops\n\n    ends = parse()\n\n    steps = 0\n    pc = 0\n    stack = []\n    output = \"\"\n    while pc < len(program):\n        c = program[pc]\n        if c == '>':\n            ptr += 1\n        elif c == '<':\n            ptr -= 1\n        elif c == '+':\n            if ptr not in tape:\n                tape[ptr] = 0\n            tape[ptr] += 1\n        elif c == '-':\n            if ptr not in tape:\n                tape[ptr] = 0\n            tape[ptr] -= 1\n        elif c == '.':\n            if ptr not in tape:\n                tape[ptr] = 0\n            \n            if tape[ptr] < ord(' ') or tape[ptr] > ord('~'):\n                a = '\\\\' + hex(tape[ptr])\n            else:\n                a = chr(tape[ptr])\n            \n            output += a\n        elif c == ',':\n            if ptr not in tape:\n                tape[ptr] = 0\n            if input_bytes_pos >= len(input_bytes):\n                tape[ptr] = 0\n            else:\n                tape[ptr] = ord(input_bytes[input_bytes_pos])\n                input_bytes_pos += 1\n        elif c == '[':\n            if ptr not in tape:\n                tape[ptr] = 0\n            if tape[ptr]:\n                stack.append(pc)\n            else:\n                pc = ends[pc]\n        elif c == ']':\n            pc = stack[-1]-1\n            del stack[-1]\n        pc += 1\n        steps += 1\n        if steps >= max_steps:\n            raise Exception(\"Program timeout (PC: %d)\" % pc)\n    return output\n\nclass pluginClass(plugin):\n    def gettype(self):\n        return \"command\"\n    \n    def action(self,complete):\n        msg = complete.message()\n\n        tokens = msg.split()\n        if len(tokens) == 1:\n            program = tokens[0]\n            input_bytes = ''\n        elif len(tokens) > 1:\n            program = tokens[-1]\n            input_bytes = ' '.join(tokens[:-1])\n        else:\n            return self.describe(0)\n\n        try:\n            output = brainfuck(input_bytes, program, 2000000)\n            return [\"PRIVMSG $C$ :\" + (output if len(output) > 0 else \"[No output]\")]\n        except Exception as e:\n            return [\"PRIVMSG $C$ :Fatal error - \" + str(e)]\n\n    def describe(self, complete):\n        return [\"PRIVMSG $C$ :I am the brainfuck plugin!\",\n                \"PRIVMSG $C$ :Usage: !brainfuck <input> <program>\",\n                \"PRIVMSG $C$ :The tape consists of 1024 integers. 
Good luck!\"]\n\n","sub_path":"plugins/command/brainfuck.py","file_name":"brainfuck.py","file_ext":"py","file_size_in_byte":3103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"649909801","text":"#-*- coding:utf-8 -*-\nfrom Tkinter import *\nimport tkFileDialog \nimport sys\nimport os\nfrom huizong import *\nreload(sys)\nsys.setdefaultencoding('gbk')\n\ntk = Tk()\ntk.geometry('600x500')\ntk.resizable(False,True)\n\nclass shuru(Frame):\n \n def __init__(self, master=None): \n Frame.__init__(self, master)\n self.pack()\n self.createwidge()\n self.value = 'null' \n self.f1 = 'null'\n self.f2 = 'null' \n \n\n\n def createwidge(self):\n \n # self.prompt = Message(width = 200) \n # self.prompt.pack()\n # self.prompt[\"text\"] = \"潘工的表\"\n # self.prompt[\"font\"] = \"20\" \n \n self.buttonp = Button(text = '潘工的表 浏览',font = 20,borderwidth = 5)\n self.buttonp.pack()\n\n self.entrythingy = Entry(width = 100)\n self.entrythingy.pack(side = TOP)\n\n self.buttonz = Button(text = '质量管理系统表 浏览',font = 20,borderwidth = 5)\n self.buttonz.pack() \n \n self.entrythingy2 = Entry(width = 100) \n self.entrythingy2.pack(side = TOP)\n\n self.button = Button(text = '确定',width = 10,font = 30,borderwidth = 5)\n self.button.pack() \n ###########part1#############\n self.part1 = PanedWindow(width = 200,height = 200)\n self.part1.pack(side = LEFT)\n\n\n self.prompt2 = Message(self.part1,width = 200) \n self.prompt2.pack() \n self.prompt2[\"font\"] = \"20\" \n\n self.prompt3 = Message(self.part1,width = 200) \n self.prompt3.pack() \n self.prompt3[\"font\"] = \"20\" \n ###########part2#############\n self.part2 = PanedWindow(width = 200,height = 200) \n self.part2.pack(side = LEFT)\n\n self.prompt4 = Message(self.part2,width = 200) \n self.prompt4.pack()\n \n self.prompt4[\"font\"] = \"20\" \n\n self.prompt5 = Message(self.part2,width = 200) \n self.prompt5.pack() \n self.prompt5[\"font\"] = \"20\" \n ###########part3#############\n self.part3 = PanedWindow(width = 200,height = 200) \n self.part3.pack(side = LEFT)\n\n self.prompt6 = Message(self.part3,width = 200) \n self.prompt6.pack()\n \n self.prompt6[\"font\"] = \"20\" \n\n self.prompt7 = Message(self.part3,width = 200) \n self.prompt7.pack() \n self.prompt7[\"font\"] = \"20\" \n\n\n # here is the application variable\n self.contents = StringVar()\n self.contents2 = StringVar()\n # set it to some value\n #self.contents.set(\"this is a variable\")\n # tell the entry widget to watch this variable\n self.entrythingy[\"textvariable\"] = self.contents\n self.entrythingy2[\"textvariable\"] = self.contents2\n # and here we get a callback when the user hits return.\n # we will have the program print out the value of the\n # application variable when the user hits return\n self.buttonp.bind('',self.openfilepath1)\n self.buttonz.bind('',self.openfilepath2)\n self.button.bind('',self.print_contents)\n \n\n def print_contents(self, event):\n self.f1 = str(self.contents.get()).strip()\n self.f2 = str(self.contents2.get()).strip() \n if (self.f1.endswith('.xls') | self.f1.endswith('.xlsx')) & (self.f2.endswith('.xls') | self.f2.endswith('.xlsx')):\n self.f1 = str.replace(self.f1,'\\\\','/')\n self.f2 = str.replace(self.f2,'\\\\','/')\n self.prompt3[\"text\"] = \"汇总计算中\"\n jstr = huizong(self.f1,self.f2)\n self.prompt2[\"text\"] = \"潘工的表汇总统计结果\"\n self.prompt3[\"text\"] = jstr[0]\n self.prompt4[\"text\"] = \"质量管理表汇总统计结果\"\n self.prompt5[\"text\"] = jstr[1]\n self.prompt6[\"text\"] = \"两表对比结果\"\n self.prompt7[\"text\"] = 
jstr[3]\n else: \n self.prompt3[\"text\"] = \"请输入正确的路径\"\n def openfilepath1(self,event):\n path = tkFileDialog.askopenfilename()\n self.contents.set(path)\n def openfilepath2(self,event):\n path = tkFileDialog.askopenfilename()\n self.contents2.set(path) \n\n\n\nshuru().mainloop() \n\n \n\n\n\n\n\n\n ","sub_path":"tongji/huizong_shuru.py","file_name":"huizong_shuru.py","file_ext":"py","file_size_in_byte":4460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"412751960","text":"#!python\nimport sys\n\nif len(sys.argv) == 1:\n \n import fixbib_gui\n fixbib_gui.main()\n \nelse:\n import argparse\n import fixbib\n \n msg = \"\"\"\\\n This program can be used to fix .bib files by looking each entry up on CrossRef by its DOI. Metadata found in CrossRef will be merged into the existing entries. If the DOI cannot be successfully queried, the entry will be left as is. Extra tags in your bib entries that are not in CrossRef will also be left alone. A new file will be formed in input directory named \"input-file-name_fixed.bib\"\n \"\"\"\n epilog = \"\\n\\nExample:\\n\\n./BibFixer.py inputfile.bib -o outputfile.bib\"\n \n parser = argparse.ArgumentParser(description = msg, epilog = epilog)\n parser.add_argument(\"input\")\n parser.add_argument(\"--output\", \"-o\", default = None)\n args = parser.parse_args()\n fixbib.main(args)\n ","sub_path":"bin/BibFixer.py","file_name":"BibFixer.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"2139436","text":"# -*- coding:utf-8 -*-\nfrom pyramid.view import view_config\nfrom ..services.image_manager import ImageManager\nfrom .concerns.failure_response import BadRequest, NotFound\nfrom ..models.picture import Picture\nfrom ..models import DBSession\n\n\n@view_config(route_name=\"picture_upload\", renderer=\"json\")\ndef upload(request):\n file = request.POST[\"picture\"]\n picture = ImageManager.create(file.file, file.filename)\n if picture is None:\n raise BadRequest(\"failed to save picture\")\n return {\n \"result\": True,\n \"picture\": picture.to_dict(Picture.ExposeKeys)\n }\n\n\n@view_config(route_name=\"picture_delete\", renderer=\"json\")\ndef delete(request):\n picture_id = request.matchdict[\"id\"]\n picture = DBSession.query(Picture).get(picture_id)\n if picture is None:\n raise NotFound\n ImageManager.delete_image(picture)\n return {\"result\": True}\n","sub_path":"shoyu/views/picture.py","file_name":"picture.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"7239277","text":"\nfrom tkinter import *\nimport tkinter as tk # proper way to import tkinter\nimport serial\nimport matplotlib\nmatplotlib.use(\"TkAgg\")\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nfrom matplotlib.figure import Figure\nimport matplotlib.animation as animation\nfrom matplotlib import style\nstyle.use(\"ggplot\")\nimport threading\n\nclass Dee(tk.Frame):\n def __init__(self, master=None, title='', ylabel='', label='', color='c', ylim=1, **kwargs):\n tk.Frame.__init__(self, master, **kwargs)\n self.data = []\n fig = Figure(figsize = (7,6))\n self.plot = fig.add_subplot(111)\n self.plot.set_title(title)\n self.plot.set_ylabel(ylabel)\n self.plot.set_ylim(0, ylim)\n self.line, = self.plot.plot([], [], color, marker = 'o',label = label)\n self.plot.legend(loc='upper left')\n\n label = Label(self, text = ylabel, 
relief = \"solid\", font = \"Times 22 bold\")\n        label.grid(row = 0, column = 3)\n        button_1 = Button(self, text = \"Back To Homepage\", command = F1.tkraise)\n        button_1.grid(row = 1, column = 2)\n        label_1 = Label(self, text = \"Current Value: \", relief = \"solid\", font = \"Verdana 10 bold\")\n        label_1.grid(row = 2, column = 2)\n        self.label_data = Label(self, font = \"Verdana 10\")\n        self.label_data.grid(row = 2, column = 3)\n        canvas = FigureCanvasTkAgg(fig, master=self)\n        canvas.get_tk_widget().grid(row = 3, column = 3)\n\n        # keep a reference on self, or the animation is garbage-collected and stops updating\n        self.ani = animation.FuncAnimation(fig, self.update_graph, interval = 1000, blit = False)\n        canvas.draw()\n\n    def update_graph(self, i):\n        if self.data:\n            self.line.set_data(range(len(self.data)), self.data)\n            self.plot.set_xlim(0, len(self.data))\n\n    def set(self, value):\n        self.data.append(value)\n        self.label_data.config(text=value)\n\nmy_window = Tk()\nmy_window.title(\"Graphical User Interface Demo#1\")\nmy_window.geometry(\"720x720\")\n\nF1 = Frame(my_window, relief = RAISED)\nF2 = Dee(my_window, title='Temperature Graph', ylabel='Temperature', color='c', label='Degrees C', ylim=40, relief = RAISED)\nF3 = Dee(my_window, title='Humidity Graph', ylabel='Humidity', color='g', label='Percentage %', ylim=100, relief = RAISED)\nF4 = Dee(my_window, title='Solved Water Graph', ylabel='Water Volume', color='b', label='mL', ylim=55, relief = RAISED)\n\n#For Frame One\nlabel_1 = Label(F1, text = \"Homepage of GUI\", relief = \"solid\", font = \"Times 22 bold\")\nlabel_1.grid(row = 0, column = 3)\nbutton_1 = Button(F1, text = \"Page of Humidity\", relief = GROOVE, bd = 8, command = F3.tkraise)\nbutton_1.grid(row = 1, column = 2)\nbutton_2 = Button(F1, text = \"Page of Temperature\", relief = GROOVE, bd = 8, command = F2.tkraise)\nbutton_2.grid(row = 1, column = 3)\nbutton_3 = Button(F1, text = \"Page of Water\", relief = GROOVE, bd = 8, command = F4.tkraise)\nbutton_3.grid(row = 1, column = 4)\n\nfor frame in(F1, F2, F3, F4):\n    frame.grid(row = 0, column = 0, sticky = \"NSEW\")\n\nF1.tkraise()\n\ndef get_data():\n    #Initialization of Serial Comm\n    ser = serial.Serial('/dev/ttyS0', 9600)\n    while True:\n        pulldata = ser.readline().decode('ascii')\n        get_data = pulldata.split(',')\n        F2.set(int(get_data[0]))\n        F3.set(int(get_data[1]))\n        F4.set(int(get_data[3]))\n\n# start the thread that will poll the arduino\nt = threading.Thread(target=get_data)\nt.daemon = True\nt.start()\n\nmy_window.mainloop()","sub_path":"Testing bench part1/tk_test.py","file_name":"tk_test.py","file_ext":"py","file_size_in_byte":3406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"351462155","text":"# list of products\ndef index():\n    form=SQLFORM.factory(Field('search',requires=IS_NOT_EMPTY())).process()\n    if form.accepts(request):\n        keywords = form.vars.search.split()\n        query = reduce(lambda a,b:a|b,[db.product.name.like('%'+key+'%') for key in keywords])\n        products = db(query).select(orderby=db.product.price)\n    else:\n        products=None\n    if products and len(products)==1:\n        redirect(URL('show',args=products.first().id))\n    p=db().select(db.wallpapers.ALL)\n    return locals()\ndef clothings():\n    clothes=db(db.product.sortable == 'Clothings').select()\n    menu1=[['Men clothings',False,'menclothes',[['Topwear',False,'topmen'],['Bottomwear',False,'bottommen']]]]\n    menu2=[['Women Clothings',False,'womenclothes',[['Topwear',False,'topwomen'],['Bottomwear',False,'bottomwomen']]]]\n    return locals()\ndef accessories():\n    acc=db(db.product.sortable == 
'Accessories').select()\n menu1=[['Laptops',False,'laptopss']]\n menu2=[['Mobiles',False,'mobiless']]\n return locals()\ndef menclothes():\n men = db((db.product.sortable == 'Clothings') & (db.product.Gender == 'Male')).select()\n menu1=[['Topwear',False,'topmen']]\n menu2=[['Bottomwear',False,'bottommen']]\n return locals()\ndef womenclothes():\n women = db((db.product.sortable == 'Clothings') & (db.product.Gender == 'Female')).select()\n menu1=[['Topwear',False,'topwomen']]\n menu2=[['Bottomwear',False,'bottomwomen']]\n return locals()\ndef topmen():\n tmen = db((db.product.sortable == 'Clothings') & (db.product.Gender == 'Male') & (db.product.Typecloth == 'Topwear')).select()\n return locals()\ndef bottommen():\n bmen = db((db.product.sortable == 'Clothings') & (db.product.Gender == 'Male') & (db.product.Typecloth == 'Bottomwear')).select()\n return locals()\ndef topwomen():\n twomen = db((db.product.sortable == 'Clothings') & (db.product.Gender == 'Female') & (db.product.Typecloth == 'Topwear')).select()\n return locals()\ndef bottomwomen():\n bwomen = db((db.product.sortable == 'Clothings') & (db.product.Gender == 'Female') & (db.product.Typecloth == 'Bottomwear')).select()\n return locals()\ndef laptopss():\n laptops = db((db.product.sortable == 'Accessories') & (db.product.Typeacc == 'Laptop')).select()\n return locals()\ndef mobiless():\n mobiles = db((db.product.sortable == 'Accessories') & (db.product.Typeacc == 'Mobile')).select()\n return locals()\ndef show():\n p=db.product(request.args(0,cast=int))\n return dict(p=p)\n\n# login, registration, etcetera\ndef user():\n return dict(form=auth())\n\n# an action to download uploaded images\ndef download():\n return response.download(request,db)\n\n# an action to expose web services\ndef call():\n session.forget()\n return service()\n\n\n# an action to see and process a shopping cart\ndef cart():\n if not session.cart:\n session.flash = 'Add something to shopping cart'\n redirect(URL('index'))\n total = sum(db.product(id).price*qty for id,qty in session.cart.items())\n return dict(cart=session.cart,total=total)\n\n# time to pay ... 
now for real\n@auth.requires_login()\ndef buy():\n if not session.cart:\n session.flash = 'Add something to shopping cart'\n redirect(URL('index'))\n import uuid\n invoice = str(uuid.uuid4())\n total = sum(db.product(id).price*qty for id,qty in session.cart.items())\n form = SQLFORM.factory(\n Field('creditcard',default='4427802641004797',requires=IS_NOT_EMPTY()),\n Field('expiration',default='12/2012',requires=IS_MATCH('\\d{2}/\\d{4}')),\n Field('cvv',default='123',requires=IS_MATCH('\\d{3}')),\n Field('shipping_address',requires=IS_NOT_EMPTY()),\n Field('shipping_city',requires=IS_NOT_EMPTY()),\n Field('shipping_state',requires=IS_NOT_EMPTY()),\n Field('shipping_zip_code',requires=IS_MATCH('\\d{5}(\\-\\d{4})?')))\n if form.accepts(request,session):\n for key, value in session.cart.items():\n db.sale.insert(invoice=invoice,\n buyer=auth.user.id,\n product = key,\n quantity = value,\n price = db.product(key).price,\n creditcard = form.vars.creditcard,\n shipping_address = form.vars.shipping_address,\n shipping_city = form.vars.shipping_city,\n shipping_state = form.vars.shipping_state,\n shipping_zip_code = form.vars.shipping_zip_code)\n session.cart.clear()\n session.flash = 'Thank you for your order'\n redirect(URL('invoice',args=invoice))\n else:\n response.flash = \"Do your payment carefully\"\n return dict(cart=session.cart,form=form,total=total)\n\n@auth.requires_login()\ndef invoice():\n return dict(invoice=request.args(0))\n\n# an action to add and remove items from the shopping cart\ndef cart_callback():\n id = int(request.vars.id)\n if request.vars.action == 'add':\n session.cart[id]=session.cart.get(id,0)+1\n if request.vars.url == 'cart':\n redirect(request.vars.url)\n else:\n redirect(URL(request.vars.url,args=id))\n if request.vars.action == 'sub':\n session.cart[id]=max(0,session.cart.get(id,0)-1)\n redirect(URL(request.vars.url))\n return str(session.cart[id])\n\n@auth.requires_login()\ndef myorders():\n orders = db(db.sale.buyer==auth.user.id).select(orderby=~db.sale.created_on)\n form=SQLFORM.factory(\n Field('Invoice_code'))\n if form.accepts(request,session):\n db((db.sale.buyer==auth.user.id) & (db.sale.invoice==form.vars.Invoice_code)).delete()\n redirect(URL('myorders'))\n return dict(orders=orders,form=form)\n\n@auth.requires_membership('manager')\ndef products():\n grid=SQLFORM.grid(db.product)\n return locals()\n@auth.requires_membership('manager')\ndef edit_product():\n form = crud.update(db.product,request.args(0))\n return dict(form=form)\n@auth.requires_membership('manager')\ndef users():\n db.auth_user.id.represent=lambda id:A('info',_href=URL('info_user',args=id))\n form,items = crud.search(db.auth_user)\n return dict(form=form,users=items)\n\n@auth.requires_membership('manager')\ndef info_user():\n form = crud.read(db.auth_user,request.args(0))\n orders = db(db.sale.buyer==request.args(0)).select(orderby=~db.sale.created_on)\n return dict(form=form,orders=orders)\n\n@auth.requires_membership('manager')\ndef edit_order():\n db.sale.invoice.writable=False\n db.sale.buyer.writable=False\n db.sale.creditcard.writable=False\n db.sale.product.writable=False\n form = crud.update(db.sale,request.args(0))\n return dict(form=form)\n","sub_path":"MyOnlineShop/controllers/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":6812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"66149198","text":"\"\"\"\n2. Для списка реализовать обмен значений соседних элементов, т.е. 
Значениями обмениваются элементы с индексами 0 и 1,\n2 и 3 и т.д. При нечетном количестве элементов последний сохранить на своем месте. Для заполнения списка элементов\nнеобходимо использовать функцию input().\n\"\"\"\n\ni_stop = 0\nwhile True:\n user_tmp = input('Введите желаемое количество элементов:\\n')\n if user_tmp.isdigit():\n i_stop = int(user_tmp)\n break\n print('Ошибка при вводе, введите число!')\n\nmy_list = []\nwhile len(my_list) < i_stop:\n user_tmp = input(f'Введите элемент списка № {len(my_list)}\\n')\n my_list.append(user_tmp)\n\n# my_list = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n# i_stop = len(my_list)\n\ni_stop = (i_stop - 1, i_stop - 2)[i_stop % 2]\n\nprint(f'Список до перестановки {my_list}')\nfor i in range(0, i_stop, 2):\n my_list[i], my_list[i+1] = my_list[i+1], my_list[i]\n\nprint(f'Список после перестановки {my_list}')\n","sub_path":"lesson_2/quest_2.py","file_name":"quest_2.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"297106959","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 3 13:59:21 2018\n\n@author: Abhilash\n\"\"\"\n#Assignment 0 file to calculate N Rooks and N Queens\n\nimport sys\n\n# Count # of pieces in given row\ndef count_on_row(board, row):\n return sum( board[row] ) \n\n# Count # of pieces in given column\ndef count_on_col(board, col):\n return sum( [ row[col] for row in board ] ) \n\n# Count total # of pieces on board\ndef count_pieces(board):\n return sum([ sum(row) for row in board ] )\n\n# Return a string with the board rendered in a human-friendly format for n queens\ndef printable_board(board):\n piece = \"Q \" if Problem_Type == \"nqueen\" else \"R \"\n printable_board = \"\"\n for r in range(0, len(board)):\n for c in range(0, len(board)):\n if board[r][c] == 1:\n printable_board+=piece\n if board[r][c] == 0:\n printable_board+=\"_ \"\n if board[r][c] == -1:\n printable_board+=\"X \"\n printable_board+= \"\\n\"\n \n return printable_board\n\n# Add a piece to the board at the given position, and return a new board (doesn't change original)\ndef add_piece(board, row, col):\n return board[0:row] + [board[row][0:col] + [1,] + board[row][col+1:]] + board[row+1:]\n\n#Check if the coordinates are not restricted\ndef isNotRestricted(row, col):\n for i in range(0,len(RestrictedCoordinates)):\n if row == RestrictedCoordinates[i][0] and col == RestrictedCoordinates[i][1]:\n return False\n return True\n \n\n#Get list of successors of given board : optimized code : Adding\n # pieces only from the topmost empty rows and adding only to the empty column\ndef successorsNRooks(board):\n if(count_pieces(board) >= N):\n return []\n emptyRowNum = getFirstEmptyRowIndex(board)\n if emptyRowNum == 0:\n return [ add_piece(board, emptyRowNum, N - col - 1) for col in range(0,N) if isNotRestricted(emptyRowNum, N - col - 1)]\n sol = []\n for a in range(0, N):\n if count_on_col(board, N - a - 1) < 1 and board[emptyRowNum][N - a - 1] == 0 and \\\n isNotRestricted(emptyRowNum,N - a - 1):\n sol = sol + [add_piece(board, emptyRowNum, N - a - 1)]\n return sol\n\n\n#Gives the index of the first empty row from start\ndef getFirstEmptyRowIndex(board):\n for i in range(0,N):\n if sum(board[i]) == 0:\n return i\n return 0\n \n#Returns the count on diagonals : Optimized code in linear time\ndef count_diag(board, row, col):\n if N == 1 and board[row][col] == 1:\n return 1\n n = N - abs(row - col)\n if(row <= col):\n sum_diag1 = sum([board[i][col - row + 
i] for i in range(0,n)])\n    \n    if(row>col):\n        sum_diag1 = sum([board[row - col + i][i] for i in range(0,n)])\n\n    i = 0\n    j = 0\n    sum_diag2 = 0\n    while(i < N - row and j <= col):\n        sum_diag2 = sum_diag2 + board[row+i][col-j]\n        i = i + 1\n        j = j + 1\n\n    i = 1\n    j = 1\n    while(i <= row and j < N - col):\n        sum_diag2 = sum_diag2 + board[row-i][col+j]\n        i = i + 1\n        j = j + 1\n    \n    if board[row][col] == 1:\n        return sum_diag1 + sum_diag2 - 1\n    return sum_diag1 + sum_diag2 \n\n#Returns the position of the piece in the row\ndef checkPiecePositionInRow(board, row):\n    pos = -1\n    for i in range(0,N):\n        if board[row][i] == 1:\n            pos = i\n    return pos\n\n#successor function for nQueens problem\ndef successorsNQueens(board):\n    if(count_pieces(board) >= N):\n        return []\n    emptyRowNum = getFirstEmptyRowIndex(board)\n    if emptyRowNum == 0:\n        return [ add_piece(board, emptyRowNum, col) for col in range(0,N) if isNotRestricted(emptyRowNum, col)]\n    prevRowQueen = checkPiecePositionInRow(board, emptyRowNum-1)\n    sol = []\n    for a in range(0, prevRowQueen - 1):\n        if count_on_col(board, a) < 1 and count_diag(board,emptyRowNum,a) < 1 and \\\n        board[emptyRowNum][a] == 0 and isNotRestricted(emptyRowNum,a):\n            sol = sol + [add_piece(board, emptyRowNum, a)]\n    for b in range(prevRowQueen+2, N):\n        if count_on_col(board, b) < 1 and count_diag(board,emptyRowNum,b) < 1 and \\\n        board[emptyRowNum][b] == 0 and isNotRestricted(emptyRowNum,b):\n            sol = sol + [add_piece(board, emptyRowNum, b)]\n    return sol\n\n#Check if board is a goal state for n Queens\ndef is_goal_NQueens(board):\n    if(count_pieces(board) != N):\n        return False\n    for i in range(0,N):\n        if(sum(board[i]) > 1):\n            return False\n        col = checkPiecePositionInRow(board, i)\n        if(count_on_col(board, col) > 1):\n            return False\n        if(count_diag(board, i, col) > 1):\n            return False\n    return True\n\n#Check if board is a goal state for n Rooks\ndef is_goal_NRooks(board):\n    if(count_pieces(board) != N):\n        return False\n    for i in range(0,N):\n        if(sum(board[i]) > 1):\n            return False\n        col = checkPiecePositionInRow(board, i)\n        if(count_on_col(board, col) > 1):\n            return False\n    return True\n\n# Solve n-rooks!\ndef solveRooks(initial_board):\n    fringe = [initial_board]\n    while len(fringe) > 0:\n        for s in successorsNRooks( fringe.pop() ):\n            if is_goal_NRooks(s):\n                return(s)\n            fringe.append(s)\n    return False\n\n# Solve n-queens!\ndef solveQueens(initial_board):\n    fringe = [initial_board]\n    while len(fringe) > 0:\n        for s in successorsNQueens( fringe.pop() ):\n            if is_goal_NQueens(s):\n                return(s)\n            fringe.append(s)\n    return False\n\n#mark restricted positions with -1 in a solution board\ndef markRestrictedPositions(board):\n    for r in range(0, len(RestrictedCoordinates)):\n        board[RestrictedCoordinates[r][0]][RestrictedCoordinates[r][1]] = -1\n    return board\n\n#Problem type is defined by nrook or nqueen\nProblem_Type = str(sys.argv[1])\n\n# This is N, the size of the board. It is passed through command line arguments.\nN = int(sys.argv[2])\n\n#N_Restricted is the number of restricted positions\nN_Restricted = int(sys.argv[3])\n\n#All the arguments after 2nd are the restricted positions\nRestrictedCoordinates = [[int(sys.argv.pop(4)) - 1, int(sys.argv.pop(4)) - 1] for r in range(0,N_Restricted)]\n\n# The board is stored as a list-of-lists. 
Each inner list is a row of the board.\n# A zero in a given square indicates no piece, and a 1 indicates a piece.\ninitial_board = [[0 for x in range(N)] for y in range(N)] \n\nif Problem_Type == \"nqueen\":\n solution = solveQueens(initial_board)\nif Problem_Type == \"nrook\":\n solution = solveRooks(initial_board)\nif solution:\n solution = markRestrictedPositions(solution)\nprint (printable_board(solution) if solution else \"Sorry, no solution found. :(\")\n\n","sub_path":"AI_Algos_GameAIs/NQueens/a0Working.py","file_name":"a0Working.py","file_ext":"py","file_size_in_byte":6737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"642366167","text":"# coding: utf-8\n\n\"\"\"\n LOCKSS Metadata Extraction Service REST API\n\n API of the LOCKSS Metadata Extraction REST Service # noqa: E501\n\n OpenAPI spec version: 1.0.0\n Contact: lockss-support@lockss.org\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom lockss_metadata_extractor.models.au import Au # noqa: F401,E501\nfrom lockss_metadata_extractor.models.status import Status # noqa: F401,E501\n\n\nclass Job(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'au': 'Au',\n 'id': 'str',\n 'description': 'str',\n 'creation_date': 'date',\n 'start_date': 'date',\n 'end_date': 'date',\n 'status': 'Status'\n }\n\n attribute_map = {\n 'au': 'au',\n 'id': 'id',\n 'description': 'description',\n 'creation_date': 'creationDate',\n 'start_date': 'startDate',\n 'end_date': 'endDate',\n 'status': 'status'\n }\n\n def __init__(self, au=None, id=None, description=None, creation_date=None, start_date=None, end_date=None, status=None): # noqa: E501\n \"\"\"Job - a model defined in Swagger\"\"\" # noqa: E501\n\n self._au = None\n self._id = None\n self._description = None\n self._creation_date = None\n self._start_date = None\n self._end_date = None\n self._status = None\n self.discriminator = None\n\n self.au = au\n self.id = id\n if description is not None:\n self.description = description\n self.creation_date = creation_date\n if start_date is not None:\n self.start_date = start_date\n if end_date is not None:\n self.end_date = end_date\n self.status = status\n\n @property\n def au(self):\n \"\"\"Gets the au of this Job. # noqa: E501\n\n\n :return: The au of this Job. # noqa: E501\n :rtype: Au\n \"\"\"\n return self._au\n\n @au.setter\n def au(self, au):\n \"\"\"Sets the au of this Job.\n\n\n :param au: The au of this Job. # noqa: E501\n :type: Au\n \"\"\"\n if au is None:\n raise ValueError(\"Invalid value for `au`, must not be `None`\") # noqa: E501\n\n self._au = au\n\n @property\n def id(self):\n \"\"\"Gets the id of this Job. # noqa: E501\n\n The identifier of this job # noqa: E501\n\n :return: The id of this Job. # noqa: E501\n :rtype: str\n \"\"\"\n return self._id\n\n @id.setter\n def id(self, id):\n \"\"\"Sets the id of this Job.\n\n The identifier of this job # noqa: E501\n\n :param id: The id of this Job. 
# noqa: E501\n :type: str\n \"\"\"\n if id is None:\n raise ValueError(\"Invalid value for `id`, must not be `None`\") # noqa: E501\n\n self._id = id\n\n @property\n def description(self):\n \"\"\"Gets the description of this Job. # noqa: E501\n\n A description of the task being performed by this job # noqa: E501\n\n :return: The description of this Job. # noqa: E501\n :rtype: str\n \"\"\"\n return self._description\n\n @description.setter\n def description(self, description):\n \"\"\"Sets the description of this Job.\n\n A description of the task being performed by this job # noqa: E501\n\n :param description: The description of this Job. # noqa: E501\n :type: str\n \"\"\"\n\n self._description = description\n\n @property\n def creation_date(self):\n \"\"\"Gets the creation_date of this Job. # noqa: E501\n\n The timestamp when this job was created # noqa: E501\n\n :return: The creation_date of this Job. # noqa: E501\n :rtype: date\n \"\"\"\n return self._creation_date\n\n @creation_date.setter\n def creation_date(self, creation_date):\n \"\"\"Sets the creation_date of this Job.\n\n The timestamp when this job was created # noqa: E501\n\n :param creation_date: The creation_date of this Job. # noqa: E501\n :type: date\n \"\"\"\n if creation_date is None:\n raise ValueError(\"Invalid value for `creation_date`, must not be `None`\") # noqa: E501\n\n self._creation_date = creation_date\n\n @property\n def start_date(self):\n \"\"\"Gets the start_date of this Job. # noqa: E501\n\n The timestamp when this job processing started # noqa: E501\n\n :return: The start_date of this Job. # noqa: E501\n :rtype: date\n \"\"\"\n return self._start_date\n\n @start_date.setter\n def start_date(self, start_date):\n \"\"\"Sets the start_date of this Job.\n\n The timestamp when this job processing started # noqa: E501\n\n :param start_date: The start_date of this Job. # noqa: E501\n :type: date\n \"\"\"\n\n self._start_date = start_date\n\n @property\n def end_date(self):\n \"\"\"Gets the end_date of this Job. # noqa: E501\n\n The timestamp when this job processing ended # noqa: E501\n\n :return: The end_date of this Job. # noqa: E501\n :rtype: date\n \"\"\"\n return self._end_date\n\n @end_date.setter\n def end_date(self, end_date):\n \"\"\"Sets the end_date of this Job.\n\n The timestamp when this job processing ended # noqa: E501\n\n :param end_date: The end_date of this Job. # noqa: E501\n :type: date\n \"\"\"\n\n self._end_date = end_date\n\n @property\n def status(self):\n \"\"\"Gets the status of this Job. # noqa: E501\n\n\n :return: The status of this Job. # noqa: E501\n :rtype: Status\n \"\"\"\n return self._status\n\n @status.setter\n def status(self, status):\n \"\"\"Sets the status of this Job.\n\n\n :param status: The status of this Job. 
# noqa: E501\n :type: Status\n \"\"\"\n if status is None:\n raise ValueError(\"Invalid value for `status`, must not be `None`\") # noqa: E501\n\n self._status = status\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(Job, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, Job):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","sub_path":"lockss_metadata_extractor/models/job.py","file_name":"job.py","file_ext":"py","file_size_in_byte":7836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"63763620","text":"# https://www.codewars.com/kata/5899dc03bc95b1bf1b0000ad\n\n# Given a set of numbers, return the additive inverse of each. Each positive becomes negatives, and the negatives become positives.\n\n# invert([1,2,3,4,5]) == [-1,-2,-3,-4,-5]\n# invert([1,-2,3,-4,5]) == [-1,2,-3,4,-5]\n# invert([]) == []\n# You can assume that all values are integers. 
Do not mutate the input array/list.\n\ndef invert(lst):\n newLst = []\n if len(lst) > 0:\n for x in lst:\n newLst.append(-x)\n return newLst","sub_path":"python/invert_values.py","file_name":"invert_values.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"522106429","text":"import tkinter as tk\nimport os\nimport random\nimport string\nimport re\nfrom tkinter import filedialog\nfrom tkinter import *\nfrom PIL import Image, ImageTk, ImageOps\nfrom guest import Guest\nfrom guest_manager import *\n\n\nclass CapabilityFive:\n def __init__(self, frame, guest_id):\n self.frame = frame\n self.guests = get_guests()\n self.current_guest = find_guest(guest_id)\n self.current_guestid = guest_id\n self.filename = self.current_guest.get_img_path()\n\n self.RESOURCE_FOLDER = './res/'\n self.DEFAULT_IMAGE = 'placeholder.png'\n\n profile_title = tk.Label(\n self.frame, text=\"Guest Profile\", font=(\"Times\", 20, \"bold\"))\n profile_title.grid(row=0, column=0)\n btn = Button(self.frame, text='replace image',\n command=self.replace_img).grid(row=2, column=0)\n save_btn = tk.Button(self.frame, text=\"Save All Changes\", command=self.save_changes, anchor=tk.W,\n width=20).grid(row=10, column=1, pady=5)\n\n # create the labels for each field\n fname_label = tk.Label(self.frame, text=\"First Name\",\n anchor='w', width=20).grid(row=3, column=0, padx=15, pady=5)\n lname_label = tk.Label(self.frame, text=\"Last Name\", anchor='w',\n width=20).grid(row=4, column=0, padx=15, pady=5)\n phone_label = tk.Label(self.frame, text=\"Phone Number\",\n anchor='w', width=20).grid(row=5, column=0, padx=15, pady=5)\n address_label = tk.Label(self.frame, text=\"Address\",\n anchor='w', width=20).grid(row=6, column=0, padx=15, pady=5)\n email_label = tk.Label(self.frame, text=\"E-mail\", anchor='w',\n width=20).grid(row=7, column=0, padx=15, pady=5)\n id_label = tk.Label(self.frame, text=\"ID\", anchor='w',\n width=20).grid(row=8, column=0, padx=15, pady=5)\n vehicle_label = tk.Label(self.frame, text=\"Vehicle License Plate\",\n anchor='w', width=20).grid(row=9, column=0, padx=15, pady=5)\n\n # create the fields\n self.fname_field = tk.Entry(self.frame, name=\"fname\")\n self.fname_field.grid(row=3, column=1, padx=15, pady=5)\n self.lname_field = tk.Entry(self.frame, name=\"lname\")\n self.lname_field.grid(row=4, column=1, padx=15, pady=5)\n self.phone_field = tk.Entry(self.frame, name=\"phone\")\n self.phone_field.grid(row=5, column=1, padx=15, pady=5)\n self.address_field = tk.Entry(self.frame, name=\"address\")\n self.address_field.grid(row=6, column=1, padx=15, pady=5)\n self.email_field = tk.Entry(self.frame, name=\"email\")\n self.email_field.grid(row=7, column=1, padx=15, pady=5)\n self.id_field = tk.Entry(self.frame, name=\"id\")\n self.id_field.grid(row=8, column=1, padx=15, pady=5)\n self.vehicle_field = tk.Entry(self.frame, name=\"license\")\n self.vehicle_field.grid(row=9, column=1, padx=15, pady=5)\n self.populate_fields()\n\n def open_fn(self):\n # choose file from computer and save it to the projects res folder.\n self.filename = filedialog.askopenfilename(title='open')\n if self.filename == \"\":\n self.filename = self.current_guest.get_img_path()\n return self.filename\n\n def save_img(self):\n # if file not in folder save the file to the res folder\n fn = os.path.basename(self.filename)\n file_path = os.path.join(self.RESOURCE_FOLDER, fn)\n self.img = Image.open(self.filename)\n self.img = self.img.resize((250, 
250), Image.ANTIALIAS)\n try:\n self.img.save(file_path, 'JPEG')\n except OSError:\n try:\n self.img = self.img.convert('RGB')\n self.img.save(file_path, 'JPEG')\n except OSError:\n print(\"Could not convert image to JPEG or RGB to JPEG\")\n exit(-1)\n\n saved_fn = self.RESOURCE_FOLDER + fn\n return saved_fn\n\n def replace_img(self):\n # replaces old image with new image\n self.x = self.open_fn()\n self.new_img = Image.open(self.x)\n self.new_img = self.new_img.resize((250, 250), Image.ANTIALIAS)\n self.new_img = ImageTk.PhotoImage(self.new_img)\n self.panel.configure(image=self.new_img)\n self.panel.image = self.new_img\n\n def populate_img(self, path):\n try:\n self.img = Image.open(path)\n except FileNotFoundError:\n self.img = Image.open(self.RESOURCE_FOLDER + self.DEFAULT_IMAGE)\n self.img = self.img.resize((250, 250), Image.ANTIALIAS)\n self.img = ImageTk.PhotoImage(self.img)\n self.panel = Label(self.frame, image=self.img)\n self.panel.image = self.img\n self.panel.grid(row=1, column=0, padx=15, pady=5)\n\n # EXAMPLE: pass in self.guest[0]\n def populate_fields(self):\n self.fname_field.insert(0, self.current_guest.get_fname())\n self.lname_field.insert(0, self.current_guest.get_lname())\n self.phone_field.insert(0, self.current_guest.get_phone())\n self.address_field.insert(0, self.current_guest.get_address())\n self.email_field.insert(0, self.current_guest.get_email())\n self.id_field.insert(0, self.current_guest.get_id())\n self.vehicle_field.insert(0, self.current_guest.get_vehicle())\n self.populate_img(self.current_guest.get_img_path())\n\n def valid_number(self, phone_number):\n if len(phone_number) != 12:\n return False\n for i in range(12):\n if i in [3, 7]:\n if phone_number[i] != '-':\n return False\n elif not phone_number[i].isalnum():\n return False\n return True\n\n def valid_email(self, email):\n regex = '^(\\w|\\.|\\_|\\-)+[@](\\w|\\_|\\-|\\.)+[.]\\w{2,3}$'\n if(re.search(regex, email)):\n return True\n else:\n return False\n\n def popup_msg(self, msg):\n popup = tk.Tk()\n popup.wm_title(\"Error: Unable to save changes\")\n label = tk.Label(popup, text=msg)\n label.pack(side=\"top\", fill=\"x\", padx=20, pady=10)\n B1 = tk.Button(popup, text=\"Okay\", command=popup.destroy)\n B1.pack()\n popup.mainloop()\n\n def save_changes(self):\n fn = os.path.basename(self.filename)\n file_path = os.path.join(self.RESOURCE_FOLDER, fn)\n phone_number = self.phone_field.get()\n email = self.email_field.get()\n if not self.valid_number(phone_number) or not self.valid_email(email):\n error_msg = \"Phone number must be in the format: ###-###-####\\nand Email must be in the format: example@email.com\"\n self.popup_msg(error_msg)\n else:\n if not os.path.exists(file_path):\n self.filename = self.save_img()\n else:\n self.filename = file_path\n changed_fields = [self.fname_field.get(), self.lname_field.get(), self.phone_field.get(), self.address_field.get(),\n self.email_field.get(), self.id_field.get(), self.vehicle_field.get(), self.filename]\n message_label = tk.Label(\n self.frame, text=\"Changes saved to database.\").grid(row=11, column=0, padx=15, pady=5)\n if self.current_guestid != self.id_field.get():\n self.current_guestid = self.id_field.get()\n \n update_guest(self.current_guestid, changed_fields)\n\n","sub_path":"capabilities/capability_five.py","file_name":"capability_five.py","file_ext":"py","file_size_in_byte":7571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"160752465","text":"from django.views.generic import 
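capability_five.py resizes with Image.ANTIALIAS, which Pillow deprecated in 9.1 and removed in 10.0; a hedged sketch of the modern spelling (load_thumbnail is an illustrative name, not from the record):

from PIL import Image

def load_thumbnail(path, size=(250, 250)):
    # Image.Resampling.LANCZOS is the Pillow >= 9.1 replacement for the
    # removed Image.ANTIALIAS constant; the resampling result is the same.
    return Image.open(path).resize(size, Image.Resampling.LANCZOS)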
TemplateView\n\nfrom rest_framework import generics, viewsets\nfrom rest_framework.renderers import JSONRenderer\n\nfrom manager.models import Show, Episode\nfrom player.serializers import EpisodeSerializer\n\nclass FeedView(TemplateView):\n template_name = 'player/index.html'\n\n def get_context_data(self, **kwargs):\n context_data = super(FeedView, self).get_context_data(**kwargs)\n episodes = Episode.objects.all().order_by('-released')[:10]\n serializer = EpisodeSerializer(episodes, many = True)\n jsoned = JSONRenderer().render(serializer.data)\n context_data['episodes'] = jsoned\n return context_data\n\nclass EpisodeViewSet(viewsets.ModelViewSet):\n serializer_class = EpisodeSerializer\n queryset = Episode.objects.all().order_by('-released')\n","sub_path":"player/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"506431420","text":"# https://atcoder.jp/contests/arc081/tasks/arc081_a\nimport sys\nsys.setrecursionlimit(2147483647)\nINF=float(\"inf\")\nMOD=10**9+7\ninput=lambda :sys.stdin.readline().rstrip()\ndef resolve():\n n=int(input())\n A=map(int,input().split())\n from collections import Counter\n c=Counter(A)\n ans=0\n first=0\n second=0\n for k,v in c.items():\n if v>=4: ans=max(ans,k*k)\n if v>=2:\n if k>first:\n first,second=k,first\n elif k>second:\n second=k\n print(max(ans,first*second))\nresolve()\n","sub_path":"ARC081/c_make_a_rectangle.py","file_name":"c_make_a_rectangle.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"277527992","text":"#!/usr/bin/env python\nimport math\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as tck\nfrom matplotlib.widgets import Slider\nimport spada\n\n\n\ndef find_mass():\n Teff = 6591\n Vmag = 0.40\n FeH = -0.05\n parallax = 284.56\n BC = 0.012 # Flower, 1996, ApJ, 469, 355\n mass = 1.0\n param = [mass,FeH]\n\n Mbol = Vmag + BC - 15. 
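The c_make_a_rectangle.py record keeps the two largest pairable stick lengths, plus any length with four copies for a square; a self-contained restatement with quick checks (max_rectangle_area is my name for it):

from collections import Counter

def max_rectangle_area(sticks):
    # Sides come in pairs, so only lengths with count >= 2 matter; a
    # length with count >= 4 can supply both sides of a square.
    counts = Counter(sticks)
    pairs = sorted((k for k, v in counts.items() if v >= 2), reverse=True)
    best = pairs[0] * pairs[0] if pairs and counts[pairs[0]] >= 4 else 0
    if len(pairs) >= 2:
        best = max(best, pairs[0] * pairs[1])
    return best

assert max_rectangle_area([1, 2, 2, 3, 3]) == 6
assert max_rectangle_area([4, 4, 4, 4]) == 16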
+ 5.*math.log10(parallax) + 5.\n logL = 0.4*(4.75-Mbol)\n logTeff = math.log10(Teff)\n\n\n fig = plt.figure(figsize=(8,6))\n ax = fig.add_axes([0.15,0.2,0.75,0.70])\n axs = fig.add_axes([0.70,0.25,0.15,0.15])\n ax1 = fig.add_axes([0.15,0.09,0.75,0.03])\n ax2 = fig.add_axes([0.15,0.05,0.75,0.03])\n mbar = Slider(ax1,'$M_*/M_\\odot$',0.5,2.5,valinit=mass)\n fbar = Slider(ax2,'$\\mathrm{[Fe/H]}$',-2.0,1.0,valinit=FeH)\n\n def update_mass(val):\n param[0] = val\n replot()\n def update_feh(val):\n param[1] = val\n replot()\n def replot():\n ax.cla()\n track = spada.track.Y2track(mass=param[0],FeH=param[1])\n ax.plot(track.logTeff_lst,track.logL_lst,'b-')\n ax.plot(logTeff,logL,'ro',alpha=0.8)\n ax.set_xlim(4.1+1e-4,3.4-1e-4)\n ax.set_ylim(-1.0,4.0)\n ax.set_xlabel('$\\log{T_\\mathrm{eff}}$')\n ax.set_ylabel('$\\log_{10}(L/L_\\odot)$')\n ax.text(4.05,3.6,'M=%4.2f [Fe/H]=%+4.2f'%(param[0],param[1]),family='serif')\n ax.xaxis.set_major_locator(tck.MultipleLocator(0.1))\n ax.xaxis.set_minor_locator(tck.MultipleLocator(0.01))\n ax.yaxis.set_major_locator(tck.MultipleLocator(1))\n ax.yaxis.set_minor_locator(tck.MultipleLocator(0.1))\n\n axs.cla()\n axs.plot(track.logTeff_lst,track.logL_lst,'b-')\n axs.plot(logTeff,logL,'ro',alpha=0.8)\n axs.set_xlim(logTeff+0.01,logTeff-0.01)\n axs.set_ylim(logL-0.05,logL+0.05)\n axs.xaxis.set_major_formatter(tck.FormatStrFormatter('%g'))\n axs.xaxis.set_major_locator(tck.MultipleLocator(0.01))\n axs.xaxis.set_minor_locator(tck.MultipleLocator(0.001))\n axs.yaxis.set_major_locator(tck.MultipleLocator(0.05))\n axs.yaxis.set_minor_locator(tck.MultipleLocator(0.01))\n\n mbar.on_changed(update_mass)\n fbar.on_changed(update_feh)\n\n replot()\n\n plt.show()\n plt.close()\n\nif __name__=='__main__':\n find_mass()\n","sub_path":"demo/find_mass/find_mass2.py","file_name":"find_mass2.py","file_ext":"py","file_size_in_byte":2318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"15939929","text":"import numpy as np\nimport random\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Input, Dense\nfrom tensorflow.keras import Model\nfrom utils import test_time\n\nfrom GlobalConstants import hidden_layers, activations, optimizer\n\n\nclass NeuralNet:\n def __init__(self, input_shape):\n input_layer = Input(shape=input_shape)\n # Define architecture\n x = input_layer\n #assert len(hidden_layers) == len(\n # activations), 'Different number of hidden layers and activations'\n for layer, activation in zip(hidden_layers, activations):\n x = Dense(layer, activation=activation)(x)\n self.anet = Model(input_layer, x)\n self.anet._name = 'ANET'\n self.history = []\n\n # Compile model\n self.anet.compile(optimizer=optimizer,\n loss='mean_squared_error', metrics=['accuracy'])\n\n def random_possible_action(self, possible_actions):\n random_index = np.random.randint(len(possible_actions))\n return possible_actions[random_index]\n\n def best_action(self, possible_actions, state, player):\n \"\"\"\n Returns the greedy best action\n \"\"\"\n features = np.append(state, player).reshape((1, len(state)+1))\n action_probabilities = self.anet(features)\n # If there are only zeros\n if not np.any(action_probabilities):\n print('......only zeros in predictions, returning random action')\n return self.random_possible_action(possible_actions)\n action_probabilities = self.scale_actions(state, action_probabilities)\n return np.argmax(action_probabilities)\n\n def default_policy(self, possible_actions, state, player):\n \"\"\"\n NOTE: predict 
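find_mass2.py converts apparent magnitude and parallax to a log luminosity; a worked version of that arithmetic, hedged (the constants are copied from the record, and the -15 term is the milliarcsecond-to-arcsecond conversion 5*log10(1000)):

import math

def log_luminosity(vmag, bc, parallax_mas):
    # M_bol = V + BC - 15 + 5*log10(parallax_mas) + 5, then
    # log10(L/L_sun) = 0.4 * (4.75 - M_bol), taking M_bol,sun = 4.75.
    mbol = vmag + bc - 15.0 + 5.0 * math.log10(parallax_mas) + 5.0
    return 0.4 * (4.75 - mbol)

print(round(log_luminosity(0.40, 0.012, 284.56), 3))  # -> 0.827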
takes 0.038 seconds, total runtime without assert is 0.038 (predict uses all)\n possible_actions: list of tuples\n state: ndarray\n player: tuple\n\n :returns: tuple, action\n \"\"\"\n features = np.append(state, player).reshape((1, len(state)+1))\n action_probabilities = self.anet(features)\n # If there are only zeros\n if not np.any(action_probabilities):\n print('......only zeros in predictions, returning random action')\n return self.random_possible_action(possible_actions)\n action_probabilities = self.scale_actions(state, action_probabilities)\n return self.get_action(action_probabilities)\n\n def scale_actions(self, state, action_probabilities):\n # Make impossible actions have probability 0\n # If the board is not 0, set action_probabilities to 0\n # Predict gives (1,len(features)), take first to get (len(features), )\n action_probabilities = np.where(state, 0, action_probabilities)[0]\n # Normalize\n return action_probabilities/np.sum(action_probabilities)\n\n def get_action(self, scaled_predictions):\n \"\"\"\n Randomly pick action, weighted by scale (illegal actions scaled to 0 by scaled_predictions)\n \"\"\"\n return np.random.choice(len(scaled_predictions), p=scaled_predictions)\n\n def train_on_rbuf(self, train_X, train_y, batch_size):\n \"\"\"\n :param train_X: training features, state+player\n :param train_y: training labels, D (distributions over actions from states)\n :param batch_size: batch size, int\n \"\"\"\n history = self.anet.fit(train_X, train_y, epochs=3,\n verbose=0, batch_size=batch_size)\n self.history.append(history)\n\n def save_params(self, path):\n \"\"\"\n Saves weights and biases of network to file\n\n :param path: str, grid_size and round the params have been saved \n\n https://www.tensorflow.org/tutorials/keras/save_and_load\n \"\"\"\n self.anet.save_weights(path)\n print('...parameters have been saved to {}'.format(path))\n\n def load_params(self, path):\n \"\"\"\n Load weights and biases from file to the network\n\n :param i: str, grid_size and round the params have been saved \n \"\"\"\n self.anet.load_weights(path)\n","sub_path":"NeuralNet.py","file_name":"NeuralNet.py","file_ext":"py","file_size_in_byte":4090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"407830598","text":"# -*- coding: utf-8 -*-\n\nimport logging\nimport os\nimport subprocess\n# TODO: Remove ME\nimport random\n\nfrom polaris_health import Error, MonitorFailed\nfrom . 
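NeuralNet.py masks occupied cells before renormalizing the network's action probabilities; a standalone sketch of that masking step (mask_and_normalize is my name, and I work on 1-D arrays rather than indexing [0]):

import numpy as np

def mask_and_normalize(state, probs):
    # Cells already occupied (state != 0) get probability 0; the rest
    # are rescaled so the legal actions sum to 1 again.
    masked = np.where(np.asarray(state) != 0, 0.0, np.asarray(probs, dtype=float))
    total = masked.sum()
    return masked / total if total > 0 else masked

print(mask_and_normalize([0, 1, 0, 0], [0.4, 0.3, 0.2, 0.1]))
# -> [0.57142857 0.         0.28571429 0.14285714]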
import BaseMonitor\n\n\n__all__ = [ 'External' ]\n\nLOG = logging.getLogger(__name__)\nLOG.addHandler(logging.NullHandler())\n\n\nclass External(BaseMonitor):\n\n \"\"\"External script monitor base\"\"\"\n\n def __init__(self, port, file_path, result='', args=None, dynamic_weight=False,\n interval=10, timeout=5, retries=2):\n \"\"\"\n args:\n port: int, port number\n file_path: string, the full file path to the external check,\n starting at /, must be executable\n args: list, additional command line arguments to be passed to\n the external check\n dynamic_weight: boolean\n result: a string to check against the result of the executed script\n any other response will mean a failure.\n Other args as per BaseMonitor() spec\n \"\"\"\n super(External, self).__init__(interval=interval, timeout=timeout,\n retries=retries)\n\n # name to show in generic state export\n self.name = 'external'\n\n ### port ###\n self.port = port\n if not isinstance(port, int) or port < 1 or port > 65535:\n log_msg = ('port \"{}\" must be an integer between 1 and 65535'.\n format(port))\n LOG.error(log_msg)\n raise Error(log_msg)\n ### file path ###\n if os.path.isfile(file_path):\n ## check if file is executable\n if not os.access(file_path, os.X_OK):\n log_msg = ('file_path \"{}\" file path is not executable'.\n format(file_path))\n LOG.error(log_msg)\n raise Error(log_msg)\n self.file_path = file_path\n else:\n log_msg = ('file_path \"{}\" cannot be found on the system'.\n format(file_path))\n LOG.error(log_msg)\n raise Error(log_msg)\n ### args ###\n if isinstance(args, list):\n self.args = args\n else:\n self.args = []\n ### result ###\n self.result = result\n if type(result) is not str:\n log_msg = 'result is not a string'\n LOG.error(log_msg)\n raise Error(log_msg)\n ### dynamic weight ###\n # dynamically set the member weight based on the returned value from the\n # external script.\n self.dynamic_weight = dynamic_weight\n if type(dynamic_weight) is not bool:\n log_msg = 'dynamic_weight is not a boolean'\n LOG.error(log_msg)\n raise Error(log_msg)\n self.weight = None\n\n def run(self, dst_ip):\n \"\"\"\n Execute a shell command script\n Check response matches the result string\n args:\n dst_ip: string, IP address to connect to\n returns:\n None\n raises:\n MonitorFailed() on process timeout or if output does not match result string\n or command returns a non 0 response.\n \"\"\"\n # force what ever args into strings or subprocess.run will crash\n command = list(map(str, [self.file_path, dst_ip, self.port] + self.args))\n try:\n cmd = subprocess.run(command, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n timeout=self.timeout)\n except subprocess.TimeoutExpired as e:\n log_msg = ('command timeout reached: {error}'.format(error=e))\n raise MonitorFailed(log_msg)\n except subprocess.SubprocessError as e:\n raise MonitorFailed(e)\n\n if cmd.returncode != 0:\n log_msg = ('External Check Failed: child returned {}, stderr: {}'.format(cmd.returncode, cmd.stderr.rstrip()))\n raise MonitorFailed(log_msg)\n\n stdout = cmd.stdout.rstrip ()\n if self.dynamic_weight:\n try:\n weight = int(stdout)\n except ValueError:\n log_msg = ('External Check Failed:{} cannot set weight is not an integer'.format(stdout))\n raise MonitorFailed(log_msg)\n if weight < 0 or weight > 10:\n log_msg = 'External Check Failed: weight is out of bounds 0-10'\n raise MonitorFailed(log_msg)\n else:\n self.weight = weight\n return\n # check the return string matches if we are not updating the weight\n elif stdout == 
self.result:\n return\n else:\n log_msg = ('External Check Failed: returned result:{}'.format(stdout))\n raise MonitorFailed(log_msg)\n","sub_path":"polaris_health/monitors/external.py","file_name":"external.py","file_ext":"py","file_size_in_byte":4771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"78609599","text":"# author: David Gessner \n\nimport pytest\nfrom ft4fttsim.networking import Message\nfrom ft4fttsim.exceptions import FT4FTTSimException\nfrom unittest.mock import sentinel, Mock\nfrom ft4fttsim.ethernet import Ethernet\n\n\nMINIMUM_ETHERNET_FRAME_SIZE = 64\nMAXIMUM_ETHERNET_FRAME_SIZE = 1518\n\n\n@pytest.mark.parametrize(\n \"size_in_bytes\",\n [\n # 64 is the minimum valid size in bytes\n MINIMUM_ETHERNET_FRAME_SIZE,\n # 1518 is the maximum valid size in bytes\n MAXIMUM_ETHERNET_FRAME_SIZE\n ] +\n # also test with a couple of values between the minimum and the\n # maximum size in bytes\n list(range(65, 1519, 404))\n)\ndef test_message_constructor_does_not_raise_exception(\n env, size_in_bytes):\n try:\n Message(env, sentinel.dummy_source, sentinel.dummy_destination,\n size_in_bytes, sentinel.dummy_type)\n except:\n assert False, \"Message constructor should not raise exception.\"\n\n\n@pytest.mark.parametrize(\n \"size_in_bytes\",\n [\n -1000, -1, -0.9, 0, 0.5,\n MINIMUM_ETHERNET_FRAME_SIZE - 1,\n MINIMUM_ETHERNET_FRAME_SIZE + 0.5,\n MAXIMUM_ETHERNET_FRAME_SIZE - 9.1,\n MAXIMUM_ETHERNET_FRAME_SIZE + 1,\n 10000\n ]\n)\ndef test_message_constructor_raises_exception(env, size_in_bytes):\n with pytest.raises(FT4FTTSimException):\n Message(env, sentinel.dummy_source, sentinel.dummy_destination,\n size_in_bytes, sentinel.dummy_type)\n\n\ndef test_message_created__returns_expected_destination(env):\n message = Message(env, sentinel.source, sentinel.destinations,\n Ethernet.MAX_FRAME_SIZE_BYTES, sentinel.message_type)\n assert message.destination == sentinel.destinations\n\n\ndef test_message_created__returns_expected_source(env):\n message = Message(env, sentinel.source, sentinel.destinations,\n Ethernet.MAX_FRAME_SIZE_BYTES, sentinel.message_type)\n assert message.source == sentinel.source\n","sub_path":"ft4fttsim/tests/test_message.py","file_name":"test_message.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"139587069","text":"# coding=utf-8\r\n\r\nimport requests\r\nimport time\r\nfrom lxml import etree\r\nfrom matplotlib import font_manager\r\nfrom matplotlib import pyplot as plt\r\nfrom math import ceil\r\n\r\n\r\nclass Douyu_Spider:\r\n def __init__(self, area, name, interval):\r\n self.area = area\r\n self.name = name\r\n self.interval = interval\r\n self.headers = {\r\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.3\", }\r\n self.hot_list = []\r\n self.time_list = []\r\n response = requests.get(\"https://www.douyu.com/directory/all\", headers=self.headers)\r\n ret = response.content.decode()\r\n html = etree.HTML(ret)\r\n self.url_part = html.xpath('''//a[@title=\"{}\"]/@href'''.format(self.area))[0]\r\n self.url = \"https://www.douyu.com/{}\".format(self.url_part)\r\n\r\n def parse_url(self,url):\r\n time_list = list(time.localtime())[3:6]\r\n time_str = str(time_list[0]) + \":\" + str(time_list[1]) + \":\" + str(time_list[2])\r\n self.time_list.append(time_str)\r\n print(time_str)\r\n response = requests.get(url, 
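The polaris_health External monitor boils down to subprocess.run with a timeout plus returncode/stdout checks; a trimmed, self-contained sketch of that pattern (run_check and its (ok, detail) return shape are mine):

import subprocess

def run_check(command, timeout=5, expected=""):
    # Mirrors the monitor's timeout, returncode and expected-stdout
    # handling without the Polaris-specific classes and exceptions.
    try:
        cmd = subprocess.run(command, universal_newlines=True,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             timeout=timeout)
    except subprocess.TimeoutExpired as exc:
        return False, "timeout: {}".format(exc)
    if cmd.returncode != 0:
        return False, "exit {}: {}".format(cmd.returncode, cmd.stderr.rstrip())
    out = cmd.stdout.rstrip()
    return out == expected, out

print(run_check(["echo", ""]))  # (True, '') on POSIX systems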
headers=self.headers)\r\n return response.content.decode()\r\n\r\n def get_info(self,html_str):\r\n html = etree.HTML(html_str)\r\n hot = html.xpath('''//h2[text()=\"{}\"]/..//span[@class=\"DyListCover-hot is-template\"]/text()'''.format(self.name))\r\n if len(hot)>0:\r\n hot = hot[0]\r\n if hot.count(\"万\"):\r\n hot = float(hot[0:-1])*10000\r\n else:\r\n hot = float(hot[0:-1])\r\n self.hot_list.append(hot)\r\n print(hot)\r\n return False\r\n else:\r\n self.time_list.pop()\r\n return True\r\n\r\n def plot_hot(self):\r\n my_font1 = font_manager.FontProperties(fname='C:\\Windows\\Fonts\\msyh.ttc', size=18)\r\n my_font2 = font_manager.FontProperties(fname='C:\\Windows\\Fonts\\msyh.ttc', size=10)\r\n plt.figure(figsize=(20,8), dpi=80)\r\n x = range(len(self.time_list))\r\n plt.plot(x, self.hot_list)\r\n # 横坐标理想数为40\r\n if len(self.time_list)>40:\r\n x_interval = ceil(len(self.time_list)//40)\r\n else:\r\n x_interval = len(self.time_list)\r\n plt.xticks(x[::x_interval], self.time_list[::x_interval], fontproperties=my_font2, rotation=45)\r\n plt.xlabel('时间轴', fontproperties=my_font1)\r\n plt.ylabel('主播热度', fontproperties=my_font1)\r\n plt.title(\"斗鱼主播《{}》的热度变化图{}-{}\".format(self.name, self.time_list[0], self.time_list[-1]), fontproperties=my_font1)\r\n plt.grid(alpha=0.3)\r\n file_name = \"./Yangshu/斗鱼主播《{}》的热度变化图{}-{}.png\".format(self.name, self.time_list[0].replace(\":\",\"_\"), self.time_list[-1].replace(\":\",\"_\"))\r\n plt.savefig(file_name)\r\n\r\n\r\n def run(self):\r\n while True:\r\n time_list = list(time.localtime())[3:6]\r\n time_str = str(time_list[0]) + \":\" + str(time_list[1]) + \":\" + str(time_list[2])\r\n print(\"跟踪:{}\".format(time_str))\r\n while True:\r\n # 1.发送请求,接受响应\r\n html_str = self.parse_url(self.url)\r\n # 2.每隔1min提取一次热度数据\r\n if self.get_info(html_str):\r\n if len(self.hot_list)>0:\r\n self.interval = len(self.time_list)\r\n print(\"主播已下播\")\r\n else:\r\n print(\"主播未上线\")\r\n break\r\n time.sleep(self.interval)\r\n # 3.绘制热度变化图并保存\r\n if len(self.hot_list)>0:\r\n self.plot_hot()\r\n self.hot_list = []\r\n self.time_list = []\r\n time.sleep(3600)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n a = Douyu_Spider(\"DOTA2\", \"yyfyyf\", 60)\r\n a.run()\r\n","sub_path":"斗鱼主播.py","file_name":"斗鱼主播.py","file_ext":"py","file_size_in_byte":3974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"150970285","text":"from django.contrib import messages\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.shortcuts import render, redirect\nfrom django.views.generic import TemplateView\nfrom django.utils.translation import gettext as _\n\nfrom .forms import UserForm, DiningProfileForm, AssociationLinkForm\n\n\nclass SettingsProfileView(LoginRequiredMixin, TemplateView):\n template_name = \"account/settings/settings_account.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update({\n 'user_form': UserForm(instance=self.request.user),\n 'dining_form': DiningProfileForm(instance=self.request.user.userdiningsettings),\n 'association_links_form': AssociationLinkForm(self.request.user),\n })\n return context\n\n def post(self, request, *args, **kwargs):\n context = self.get_context_data()\n\n context.update({\n 'user_form': UserForm(request.POST, instance=self.request.user),\n 'dining_form': DiningProfileForm(request.POST, instance=self.request.user.userdiningsettings),\n 'association_links_form': AssociationLinkForm(self.request.user, request.POST),\n })\n\n if 
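In the Douyu spider's plot_hot, ceil(len(...)//40) floors before ceil() ever runs, and the else branch sets the stride to the full list length so only the first x label survives; a corrected stride helper (tick_step is my name):

from math import ceil

def tick_step(n_points, target=40):
    # True division before ceil(); max(1, ...) keeps every label when
    # there are fewer than `target` points instead of only the first.
    return max(1, ceil(n_points / target))

assert tick_step(10) == 1   # all ten labels shown
assert tick_step(41) == 2   # integer // would give stride 1 here
assert tick_step(81) == 3   # ...and stride 2 here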
context['user_form'].is_valid() and \\\n context['dining_form'].is_valid() and \\\n context['association_links_form'].is_valid():\n context['user_form'].save()\n context['dining_form'].save()\n context['association_links_form'].save()\n messages.success(request, _(\"Account saved\"))\n\n return redirect('settings_account')\n\n return self.render_to_response(context)\n","sub_path":"UserDetails/views_user_settings.py","file_name":"views_user_settings.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"39469446","text":"#!/usr/bin/env python3\nimport sys\nimport psycopg2\n\n\"\"\"Module to analyse the log and print desired results.\n\nIt prints the results of the following problems from the 'news'\ndatabase:\n 1. What are the most popular three articles of all time?\n 2. Who are the most popular article authors of all time?\n 3. On which days did more than 1% of requests lead to errors?\n\nAll the queries are written for PostgreSQL DBMS. More details can\nbe found in the README.md file, which is included with this project.\n\"\"\"\n\n__author__ = 'Subhadeep Dey'\n\n\ndef execute_query(query):\n \"\"\"Establish connection with database and execute the query passed to it.\n\n Argument:\n query (string) -- The SQL query to execute.\n\n Returns:\n rows (list) -- A list of resultant rows.\n\n \"\"\"\n try:\n conn = psycopg2.connect('dbname=news')\n c = conn.cursor()\n c.execute(query)\n rows = c.fetchall()\n conn.close()\n return rows\n except psycopg2.Error as e:\n print(e)\n sys.exit(1)\n\n\n# --------------------------------------------------------------\n# 1. TOP THREE ARTICLES OF ALL TIME\n# --------------------------------------------------------------\ndef get_popular_articles():\n \"\"\"Return top three popular articles of all time.\"\"\"\n query = \"\"\"\n SELECT articles.title, COUNT(*) AS article_views\n FROM articles JOIN log\n ON log.path = concat('/article/', articles.slug)\n GROUP BY articles.title\n ORDER BY article_views DESC\n LIMIT 3;\n \"\"\"\n\n # Execute the above query.\n results = execute_query(query)\n\n # Print results.\n print(\"--------------------------------------------------------------\")\n print(\"\\t\\tI. TOP THREE ARTICLES OF ALL TIME\")\n print(\"--------------------------------------------------------------\")\n rank = 1\n for row in results:\n print(u\" {0}. \\\"{1}\\\" — {2:,} views.\".format(rank, row[0], row[1]))\n rank += 1\n\n\n# --------------------------------------------------------------\n# 2. POPULAR AUTHORS OF ALL TIME\n# --------------------------------------------------------------\ndef get_popular_authors():\n \"\"\"Return authors sorted by page views.\"\"\"\n query = \"\"\"\n SELECT authors.name, COUNT(*) AS views\n FROM authors JOIN articles\n ON authors.id = articles.author\n JOIN log\n ON log.path = concat('/article/', articles.slug)\n GROUP BY authors.name\n ORDER BY views DESC\n \"\"\"\n\n # Run above query.\n results = execute_query(query)\n\n # Print results.\n print(\"\\n\\n--------------------------------------------------------------\")\n print(\"\\t\\tII. POPULAR AUTHORS OF ALL TIME\")\n print(\"--------------------------------------------------------------\")\n rank = 1\n for row in results:\n print(u\" {0}. {1} — {2:,} views.\".format(rank, row[0], row[1]))\n rank += 1\n\n\n# --------------------------------------------------------------\n# 3. 
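log-analyser.py's execute_query opens a fresh connection per query and closes it by hand; a sketch using contextlib.closing, since psycopg2's own `with conn:` only manages the transaction and does not close the connection (the 'news' dbname comes from the record):

from contextlib import closing
import psycopg2

def execute_query(query):
    # closing() guarantees conn.close() even when execute() raises;
    # the cursor's own context manager closes the cursor likewise.
    with closing(psycopg2.connect('dbname=news')) as conn:
        with conn.cursor() as cur:
            cur.execute(query)
            return cur.fetchall()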
DAYS IN WHICH MORE THAN 1% OF REQUESTS LEAD TO ERRORS\n# --------------------------------------------------------------\ndef get_days_with_errors():\n \"\"\"Return days in which more than 1% requests lead to errors.\"\"\"\n query = \"\"\"\n SELECT total.day,\n ROUND(((errors.err_requests * 100.0) / total.requests), 5) AS percent\n FROM (\n SELECT date_trunc('day', time) AS day, count(*) AS err_requests\n FROM log\n WHERE status LIKE '404%'\n GROUP BY day\n ) AS errors\n JOIN (\n SELECT date_trunc('day', time) AS day, count(*) AS requests\n FROM log\n GROUP BY day\n ) AS total\n ON total.day = errors.day\n WHERE (ROUND(((errors.err_requests * 100.0)/total.requests), 5) > 1.0)\n ORDER BY percent DESC;\n \"\"\"\n\n # Execute the above query.\n results = execute_query(query)\n\n # Print results.\n print(\"\\n\\n--------------------------------------------------------------\")\n print(\" III. DAYS IN WHICH MORE THAN 1% OF REQUESTS LEAD TO ERRORS\")\n print(\"--------------------------------------------------------------\")\n for row in results:\n date = row[0].strftime('%B %d, %Y') # Pretty-formatting date.\n errors = str(round(row[1], 2)) + \"%\" + \" errors\"\n print(\" \" + date + u\" — \" + errors)\n\n\n# Print all the results.\nif __name__ == '__main__':\n get_popular_articles()\n get_popular_authors()\n get_days_with_errors()\n","sub_path":"log-analyser.py","file_name":"log-analyser.py","file_ext":"py","file_size_in_byte":4534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"444204298","text":"\"\"\"\n设计一个函数返回传入的列表最大和第二最大元素的值\n\"\"\"\n\n\ndef max(x):\n m1, m2 = (x[0], x[1]) if x[0] > x[1] else (x[1], x[0])\n for index in range(2, x):\n if x[index] > m1:\n m2 = m1\n m1 = x[index]\n elif x[index] > m2:\n m2 = x[index]\n return m1, m2\n\n\n# max(list[1, 2, 4, 6, 7, 8])\n","sub_path":"day7/函数返回传入列表最大和第二最大的元素值.py","file_name":"函数返回传入列表最大和第二最大的元素值.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"561661268","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('oPlayer', '0003_auto_20150920_1919'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Track',\n fields=[\n ('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),\n ('songName', models.CharField(max_length=200)),\n ('trackNumber', models.CharField(max_length=200)),\n ('length', models.IntegerField(default=0)),\n ('pathToFile', models.CharField(max_length=200)),\n ('album', models.ForeignKey(to='oPlayer.Album')),\n ('performer', models.ForeignKey(to='oPlayer.Performer')),\n ],\n ),\n ]\n","sub_path":"odroidDjangoProject/oPlayer/migrations/0004_track.py","file_name":"0004_track.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"23915290","text":"import pandas as pd\ndf = pd.read_csv('data/survey_results_public.csv')\n# print(df)\ndf.shape #Number of rows and columns in tuple form\ndf.info() #Number of rows and columns and data types of them\n\npd.set_option('display.max_columns', 85) #Display number of columns of data frame\n\npd.set_option('display.max_rows', 85) #Display number of rows of data frame\n\ndf.head(10) #show only specific number of rows -here 10\n\n# df.head(10) #To display first 10 rows\n# 
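Record 444204298 above has two genuine bugs: range(2, x) iterates "up to" the list object itself and raises a TypeError, and the commented call max(list[1, 2, 4, 6, 7, 8]) is not valid Python. A corrected sketch, renamed max_two so the builtin max is not shadowed:

def max_two(x):
    # One pass tracking the largest (m1) and second-largest (m2) values.
    m1, m2 = (x[0], x[1]) if x[0] > x[1] else (x[1], x[0])
    for index in range(2, len(x)):   # was range(2, x): a TypeError
        if x[index] > m1:
            m1, m2 = x[index], m1
        elif x[index] > m2:
            m2 = x[index]
    return m1, m2

assert max_two([1, 2, 4, 6, 7, 8]) == (8, 7)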
df.tail(10) #To display last 10 rows","sub_path":"Pandas/PD Intro.py","file_name":"PD Intro.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"86114650","text":"import math\r\ndef isPrime(n):\r\n    if n==1:\r\n        return False\r\n    elif n<=0:\r\n        return False\r\n    \r\n    else:\r\n        temp=True\r\n        for i in range(2,int(math.sqrt(n))+1):\r\n            if n%i==0:\r\n                temp=False\r\n                break\r\n        return temp\r\n\r\n    \r\ndef myFunc(a,b):\r\n    n=0\r\n    while isPrime(n**2+n*a+b):\r\n        n+=1\r\n    return n-1\r\n\r\nlistA=[]\r\nlistB=[]\r\n\r\ni=-999\r\nwhile i<999 :\r\n    i+=2\r\n    listA.append(i)\r\n\r\nfor i in range(3,1000):\r\n    if isPrime(i):\r\n        listB.append(i)\r\n\r\ndef main():\r\n    largest=0\r\n    pair=[0]\r\n    for i in listA:\r\n        for j in listB:\r\n            if j>i and myFunc(i,j)>largest:\r\n                largest=myFunc(i,j)\r\n                pair.clear()\r\n                pair.append(i)\r\n                pair.append(j)\r\n\r\n    return pair[0]*pair[1]\r\n    \r\nprint(main()) \r\n    \r\n    \r\n","sub_path":"EulerProblem26.py","file_name":"EulerProblem26.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"353973254","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport numpy as np\nimport pandas as pd\nimport datetime as dt\nfrom scipy.interpolate import interp1d\n\ndef QQadjust(Obs,Mod):\n    \"\"\"\n    Quantile-to-quantile mapping adjustment\n    INPUTS\n    Obs : observation data\n    Mod : Modeled Data\n    OUTPUTS\n    Cor : Modeled data adjusted by quantiles to observation data\n    \"\"\"\n    perc = np.arange(0,101)\n\n    P_obs = np.percentile(Obs, perc)\n    P_mod = np.percentile(Mod, perc)\n\n    Cor = interp1d(perc, P_obs)(interp1d(P_mod, perc)(Mod))\n    return Cor\n\ndef BiasParametric(Obs, Xref, X):\n    \"\"\"\n    Parametric bias correction for modeled data based on the observations in a single point\n    INPUTS\n    Obs : array of observed Data\n    Xref : array of modeled data that correspond to observed data, i.e. 
the same point\n X : point of modeled data to correct, no necesarly in the same point of the observations\n OUTPUTS\n Xcor : array of modeled data with bias correction\n \"\"\"\n # basic parameters\n O_m = np.nanmean(Obs)\n Xr_m = np.nanmean(Xref)\n X_m = np.nanmean(X)\n O_s = np.nanstd(Obs)\n Xr_s = np.nanstd(Xref)\n X_s = np.nanstd(X)\n\n N_m = X_m -(Xr_m-O_m) # new mean\n N_s = X_s*(O_s/Xr_s) # new sta\n\n Xcor = N_m + (N_s/O_s)*(X-X_m) # bias correction of mean an scale\n\n return Xcor\n","sub_path":"Modules/Quality.py","file_name":"Quality.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"613148316","text":"#!/usr/bin/python\nfrom operator import itemgetter\nimport sys\n\ndict_ip_count = {}\ntop_ip = {}\ntop_ip_three = {}\n\nfile_object = open(\"log-format.txt\",\"r\")\n\n\nfor line in file_object:\n line = line.strip()\n ip, num = line.split(' ')\n try:\n num = int(num)\n ip\n dict_ip_count[ip] = dict_ip_count.get(ip, 0) + num\n\n except ValueError:\n pass\n\n\n\nsorted_dict_ip_count = sorted(dict_ip_count.items(), key=itemgetter(0))\n# print(sorted_dict_ip_count)\nfor ip, count in sorted_dict_ip_count:\n hour_ip = ip.strip().split('\\t')\n hour = hour_ip[0][1:3]\n ip = hour_ip[0][10:]\n hour = int(hour)\n count = int(count)\n if hour not in top_ip.keys():\n top_ip[hour] = [[ip, count]]\n else:\n top_ip[hour].append([ip, count])\n\n\n# top_ip_three = sorted(top_ip.items(), key=itemgetter(1), reverse=True)[0:3]\nfor hour in top_ip:\n top_ip_three[hour] = sorted(top_ip[hour], key=itemgetter(1), reverse=True)[0:3]\n\n\n# for hour, ip, count in top_ip_three:\n# print (hour, ip, count)\nfor hour in top_ip_three:\n print (\"Hour = \" + str(hour) + \", Top 3 IP's and Count = \" + str(top_ip_three[hour][0])+ str(top_ip_three[hour][1])+ str(top_ip_three[hour][2]))\n# print(top_ip_three)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# --------------------------------------\n\n\n#!/usr/bin/python\n\n# from operator import itemgetter\n# import sys\n#\n# # file_object = open(\"log-format.txt\",\"r\")\n#\n# top_ip = {}\n#\n#\n# # print(file_object.readline())\n#\n# for line in sys.stdin:\n# # print(line)\n# line = line.strip().split('\\t')\n# hour_ip, count = line\n# hour = hour_ip[1:3]\n# ip = hour_ip[7:]\n# hour = int(hour)\n# count = int(count)\n# if hour not in top_ip.keys():\n# top_ip[hour] = [[ip, count]]\n# else:\n# top_ip[hour].append([ip, count])\n#\n#\n# for index in range(24):\n# if index not in top_ip.keys():\n# print(\"no key = \" + str(index))\n# else:\n# top_ip_three = sorted(top_ip[index], key=itemgetter(1), reverse=True)[0:3]\n# print (index, top_ip_three)\n","sub_path":"mapreduce-test-python/logstat3/reducer.py","file_name":"reducer.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"136820697","text":"#coding=utf-8\r\nimport re\r\nstring = re.compile(r'[\\u4e00-\\u9fa5]+')\r\nname_list = set()\r\nwhile(1):\r\n name = input(\"请输入需要统计取快递的人员姓名:\")\r\n if string.match(name) == None:\r\n print(\"请输入中文!\")\r\n else:\r\n chinese_string = string.match(name)\r\n name_list.add(chinese_string.group())\r\n print(name_list)\r\n\r\n","sub_path":"Python/进阶/统计取快递人员名单.py","file_name":"统计取快递人员名单.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"627488353","text":"# encoding: utf-8\nimport util\nfrom util import *\nimport 
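Quality.py's QQadjust maps model values onto observed quantiles via two interp1d hops; a runnable demonstration on synthetic data (the distributions are illustrative):

import numpy as np
from scipy.interpolate import interp1d

rng = np.random.default_rng(0)
obs = rng.normal(10.0, 2.0, 1000)   # "observed" series
mod = rng.normal(12.0, 3.0, 1000)   # biased "model" series

perc = np.arange(0, 101)
p_obs = np.percentile(obs, perc)
p_mod = np.percentile(mod, perc)
# Model value -> its percentile in the model CDF -> the observed quantile.
corrected = interp1d(perc, p_obs)(interp1d(p_mod, perc)(mod))
print(round(mod.mean(), 2), round(corrected.mean(), 2), round(obs.mean(), 2))
# The corrected mean lands near the observed mean rather than the model's.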
urllib2\nimport re\nimport logging\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nclass Analyze_Model:\n def __init__(self, Items):\n self.Items = Items\n\n # analyze all of the items after since_time\n def anylyzeItems(self, isTest):\n Items = self.Items\n for Item in Items:\n title = re.findall('title\":\"(.*?)\",\"type', Item, re.S)[0]\n #print \"title:\" + title \n self.analyze(title, isTest)\n\n # classify page according to title and store result in db\n def analyze(self, title, isTest):\n type = None\n value = None\n # 分析该Item,包含id,status,title,只要分析title即可,但保留其他的信息,以便以后拓展\n newDate = util.dateProcess.getLastDayOfLastMonth()\n jsonDict = util.common.Tool.readConfig()\n keyWords = jsonDict['category']\n \n if keyWords[0][\"cpi\"] in title and \"中国\" in title and \"%\" in title:\n [value, cpiDate] = self.cpiAnalyze(title)\n newDate = cpiDate\n type = \"MR:CPI\"\n elif keyWords[0][\"pmi\"] in title and \"中国\" in title:\n [value, pmiDate] = self.pmiAnalyze(title)\n newDate = pmiDate\n type = \"MR:PMI\"\n elif keyWords[0][\"fai\"] in title and \"中国\" in title and \"%\" in title:\n [value, faiDate] = self.faiAnalyze(title)\n newDate = faiDate\n type = \"MR:FAI\"\n elif keyWords[0][\"iav\"] in title and \"中国\" in title and \"%\" in title:\n [value, iavDate] = self.iavAnalyze(title)\n newDate = iavDate\n type = \"MR:IAV\"\n if(value != None and type != None):\n util.dbHelper.Redis_Helper.updateDb(value, type, newDate, title, isTest)\n # ppi 和 cpi 可能在一个标题里,匹配了cpi,匹配不到ppi,所以单独列出来匹配\n if keyWords[0][\"ppi\"] in title and \"中国\" in title and \"%\" in title:\n [value,ppiDate] = self.ppiAnalyze(title)\n newDate = ppiDate\n type = \"MR:PPI\"\n if(value != None and type != None):\n util.dbHelper.Redis_Helper.updateDb(value, type, newDate, title, isTest)\n \n #cpi 中国7月CPI同比1.6%,创年内新高,预期1.5%,前值1.4%。\\n中国7月CPI环比0.3%,前值0.0%。 把 CPI同比 到 %的第一个匹配值取出来即可\n def cpiAnalyze(self,title):\n values = None # \\u4e2d\\u56fd 中国\n values = re.compile(u\"CPI[\\u540c][\\u6bd4](.*?)%\").findall(title.decode('utf-8')) #同比 % 数字已带符号\n month = re.compile(u\"[\\u4e2d][\\u56fd]([\\d]{1,})[\\u6708]CPI\").findall(title.decode('utf-8')) #匹配 中国xx月CPI,1-12\n if(month == None or len(month) == 0):\n return [None, None]\n cpiDate = util.dateProcess.getDate(month[0])\n #print \"title=\", title\n #print \"values=\", values\n #print \"cpiDate=\", cpiDate\n if(values == None or len(values) == 0):\n return [None, None]\n return [values[0], cpiDate]\n\n #pmi 中国12月官方制造业PMI为51.0,预期51.2,前值51.4 PMI51.0 PMI 51.0\n def pmiAnalyze(self,title):\n values = None \n values = re.compile(u\"[\\u4e3a]*[\\s]*([\\d]{1,}.[\\d]{1,})\").findall(title.decode('utf-8')) #匹配 xx.x 数据\n month = re.compile(u\"[\\u4e2d][\\u56fd]([\\d]{1,})[\\u6708]\").findall(title.decode('utf-8')) #匹配 中国xx月,1-12\n if(month == None or len(month) == 0):\n return [None, None]\n pmiDate = util.dateProcess.getDate(month[0])\n #print \"title=\", title\n #print \"values=\", values\n #print \"pmiDate=\", pmiDate\n if(values == None or len(values) == 0):\n return [None, None]\n return [values[0], pmiDate]\n\n #fai 中国1-11月城镇固定资产投资同比15.8%,近13年来最低,预期15.8%,前值15.9% 中国1-12月城镇固定资产投资同比+15.7%\n def faiAnalyze(self,title):\n values = None \n values = re.compile(u\"[\\u540c][\\u6bd4](.*?)%\").findall(title.decode('utf-8')) #同比 % 数字已带符号,可能带正号\n month = re.compile(u\"^[\\u4e2d][\\u56fd]1-([\\d]{1,})[\\u6708]\").findall(title.decode('utf-8')) #匹配 中国1-xx月,1-12\n if(month == None or len(month) == 0):\n return [None, None]\n faiDate = util.dateProcess.getDate(month[0])\n #print \"title=\", 
title\n #print \"values=\", values\n #print \"faiDate=\", faiDate\n if(values == None or len(values) == 0):\n return [None, None]\n return [values[0], faiDate]\n \n #iav 中国11月规模以上工业增加值同比6.2%,增速创五个月新高;预期5.7%,前值5.6%。\\n中国1-11月规模以上工业增加值同比6.1%,预期6.1%,前值6.1%\n # wallstreet 从15年4月开始有iav数据\n def iavAnalyze(self,title):\n values = None # \\u4e2d\\u56fd 中国\n values = re.compile(u\"[\\u540c][\\u6bd4](.*?)%\").findall(title.decode('utf-8')) #同比 % 数字已带符号,注意一定要取第一个值\n month = re.compile(u\"^[\\u4e2d][\\u56fd]([\\d]{1,})[\\u6708]\").findall(title.decode('utf-8')) #匹配 中国xx月,1-12\n if(month == None or len(month) == 0):\n return [None, None]\n iavDate = util.dateProcess.getDate(month[0])\n #print \"title=\", title\n #print \"values=\", values\n #print \"iavDate=\", iavDate\n if(values == None or len(values) == 0):\n return [None, None]\n return [values[0], iavDate]\n \n #ppi 1 中国9月CPI同比1.6%,预期1.8%,前值2.0%。\\n中国9月PPI同比-5.9%,连续第43个月下滑,预期-5.9%,前值-5.9% 把 PPI同比 到 %的第一个匹配值取出来即可\n # 2 \"中国12月PPI同比-3.3%,���2012年9月来最\"\n def ppiAnalyze(self,title):\n values = None \n values = re.compile(u\"PPI[\\u540c][\\u6bd4](.*?)%\").findall(title.decode('utf-8')) #PPI同比 % 数字已带符号\n month = re.compile(u\"[\\u4e2d][\\u56fd]([\\d]{1,})[\\u6708]PPI\").findall(title.decode('utf-8')) #匹配 中国xx月PPI,1-12\n if(month == None or len(month) == 0):\n return [None, None]\n ppiDate = util.dateProcess.getDate(month[0])\n #print \"title=\", title\n #print \"values=\", values\n #print \"ppiDate=\", ppiDate\n if(values == None or len(values) == 0):\n return [None, None]\n return [values[0], ppiDate]","sub_path":"util/urlAnalyze.py","file_name":"urlAnalyze.py","file_ext":"py","file_size_in_byte":6510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"613801280","text":"from collections import OrderedDict\nimport uuid\n\nclass Workspace:\n def __init__(self):\n self.resources=OrderedDict()\n\n def addResource(self,resource):\n self.resources[resource.uid]=resource;\n #print(\"addResource\",resource.uid)\n #resource.dump()\n\n def dump(self,type=None):\n print(\"WORKSPACE DUMP\")\n for uid,resource in self.resources.items():\n if type is None or resource.getType()==type:\n resource.dump()\n \n def getResources(self,bank=None,name=None,id=None,type=None):\n \"\"\"Search and returns resources\"\"\"\n resources=[]\n for uid,resource in self.resources.items():\n if bank and resource.getBank()!=bank:\n continue\n if id and resource.getId()!=id:\n continue\n if name and resource.getName()!=name:\n continue\n if type and resource.getType()!=type:\n continue\n # match\n resources.append(resource)\n return resources\n \n def inheritProperties(self,resource=None,reset=False):\n \"\"\"Resolve basis and inherit properties. 
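urlAnalyze.py matches headlines with \uXXXX-escaped regexes; the same CPI extraction reads much more clearly with literal characters under Python 3 (the headline is an illustrative example):

import re

title = "中国7月CPI同比1.6%,预期1.5%,前值1.4%"
value = re.findall(r"CPI同比(.*?)%", title)      # signed percentage string
month = re.findall(r"中国(\d{1,2})月CPI", title)  # 1-12
print(value[0], month[0])  # 1.6 7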
Applies to all resource if none specified\"\"\"\n if resource is None:\n # All resources\n if reset:\n for resource in self.resources.values():\n resource.inheritanceCompleted=False\n for resource in self.resources.values():\n self.inheritProperties(resource)\n else:\n # Individual resource\n if reset:\n resource.inheritanceCompleted=False\n if not resource.inheritanceCompleted:\n if resource.getBasis() is not None:\n parent = self.resolve(resource.getBasis())\n if parent:\n if not parent.inheritanceCompleted and parent.hasProperty(\"basis\"):\n # recurse\n self.inheritProperties(parent,reset=reset)\n else:\n pass # parent has no basis or is inheritance readily completed\n # add parent inheritable properties on this resource as needed\n for property,value in parent.getProperties().items():\n if not resource.hasProperty(property):\n if property not in [\"basis\",\"id\",\"bank\"]:\n resource.addProperty(property,value,inherited=True)\n else:\n pass # not inheritable\n else:\n pass\n else:\n resource.dump()\n raise RuntimeError(\"Unable to resolve basis for \"+str(resource.getId()))\n else:\n pass # resource has no basis\n else:\n pass # inheritance already performed\n ### \n \n def resolve(self,reference,type=None):\n \"\"\"Finds a resource by reference.\n A runtime error will be thrown if more than one match if found\n \"\"\"\n # get reference components\n resolved=None\n if '.' in reference:\n tokens = reference.split('.',1)\n bank=tokens[0]\n id=tokens[1]\n else:\n bank=None\n id=reference\n # resolve\n for uid,resource in self.resources.items():\n if type and resource.getType() != type:\n continue # skip: type mismatch\n if resource.getId() == id:\n # id matches\n if bank and resource.getBank() != bank:\n continue # skip: bank mismatch\n if resolved is None:\n resolved=resource\n else:\n print(\"Resolved\")\n resolved.dump()\n print(\"Duplicate\")\n resource.dump()\n raise RuntimeError(\"Multiple macthes found while resolving \"+reference+\"[\"+str(type)+\"]\")\n else:\n pass\n return resolved\n\n\nclass Resource:\n \n def __init__(self,type,source):\n self._properties=OrderedDict()\n self.source=source\n self.uid = uuid.uuid4()\n self._type=type\n self._inheritedProperties=set()\n self.inheritanceCompleted=False\n\n def addProperty(self,property,value,inherited=False):\n if not self.validateProperty(property,value):\n raise RuntimeError(self.getSource(),\"- Invalid property or value specified\")\n \n self._properties[property]=value\n # set name as id is not readily set\n if property==\"name\" and not self.hasProperty(\"id\"):\n self.addProperty(\"id\",value)\n if inherited:\n self._inheritedProperties.add(property)\n else:\n self._inheritedProperties.discard(property);\n\n def dump(self):\n print(self._type,self.uid)\n for property in self._properties:\n if property in self.getInheritedPropertiesKeys():\n print(\"...\",property,\"*\",str(self._properties.get(property)))\n else:\n print(\"...\",property,str(self._properties.get(property)))\n \n def getBank(self):\n return self._properties.get(\"bank\")\n\n def getBasis(self):\n return self._properties.get(\"basis\")\n\n def getId(self):\n return self._properties.get(\"id\")\n\n def getName(self):\n return self._properties.get(\"name\")\n\n def getLabel(self):\n return self._properties.get(\"name[label]\")\n\n def getProperties(self):\n return self._properties\n\n def getPropertiesKeys(self):\n return self._properties.keys()\n \n def getInheritedPropertiesKeys(self):\n \"\"\"Set of inherited properties\"\"\"\n return 
self._inheritedProperties;\n \n def getLocalPropertiesKeys(self):\n \"\"\"Set of locally defined properties\"\"\"\n return set(self._properties.keys()) - self._inheritedProperties\n \n def getPropertiesValues(self):\n return self._properties.values()\n \n def getProperty(self,prop):\n return self._properties.get(prop)\n\n def getFacetedProperty(self, prop):\n \"Return the property or it's unfaceted value\"\n value = self._properties.get(prop)\n if value is None:\n value = self.properties.get(prop[0:prop.find('[')])\n return value\n\n def getType(self):\n return self._type\n\n def hasProperties(self):\n \"\"\"Returns true if resource has at least one property\"\"\"\n return len(self._properties)>0\n\n def hasProperty(self,prop):\n return prop in self._properties\n\n def validateProperty(self,property,value):\n isValid=True\n # positive integers\n if property in [\"end\",\"start\",\"width\"] and not isinstance(value,int) and (isinstance(value,str) and not value.isdigit()):\n print(self.source,\"Invalid: non-numeric value specified for \"+property)\n return isValid\n \n","sub_path":"metasheet/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":7163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"211437640","text":"############################################################\r\n# CIS 521: Homework 5\r\n############################################################\r\n\r\nstudent_name = \"Yu-Ning Huang\"\r\n\r\n############################################################\r\n# Imports\r\n############################################################\r\n\r\nimport email\r\nimport collections\r\nimport math\r\nimport os\r\nimport time\r\n\r\n############################################################\r\n# Section 1: Spam Filter\r\n############################################################\r\n\r\ndef load_tokens(email_path):\r\n tokens = []\r\n fp = open(email_path)\r\n message = email.message_from_file(fp)\r\n\r\n for line in email.iterators.body_line_iterator(message):\r\n tokens += line.split()\r\n\r\n fp.close()\r\n\r\n return tokens\r\n\r\n\r\ndef log_probs(email_paths, smoothing):\r\n count = collections.defaultdict(int)\r\n\r\n for email_path in email_paths:\r\n tokens = load_tokens(email_path)\r\n tokens_counter = collections.Counter(tokens)\r\n visited = set()\r\n\r\n for token in tokens:\r\n if token not in visited:\r\n count[token] += tokens_counter[token]\r\n visited.add(token)\r\n\r\n V_count = len(count.keys())\r\n total_count = sum(count.values())\r\n count[\"\"] = 0\r\n word_prob = {k: math.log((v+smoothing)/(total_count+smoothing*(V_count+1))) for k, v in count.iteritems()}\r\n word_prob = collections.defaultdict(float, word_prob)\r\n\r\n return word_prob\r\n\r\nclass SpamFilter(object):\r\n\r\n def __init__(self, spam_dir, ham_dir, smoothing):\r\n spam_paths = [os.path.join(spam_dir, name) for name in os.listdir(spam_dir)]\r\n ham_paths = [os.path.join(ham_dir, name) for name in os.listdir(ham_dir)]\r\n spam_count = float(len(spam_paths))\r\n ham_count = float(len(ham_paths))\r\n self.spam_prob = spam_count/(spam_count+ham_count)\r\n self.ham_prob = ham_count/(spam_count+ham_count)\r\n self.spam_log_probs = log_probs(spam_paths, smoothing)\r\n self.ham_log_probs = log_probs(ham_paths, smoothing)\r\n\r\n def is_spam(self, email_path):\r\n tokens = load_tokens(email_path)\r\n tokens_counter = collections.Counter(tokens)\r\n spam_category = math.log(self.spam_prob)\r\n ham_category = 
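classes.py's validateProperty prints a warning for non-numeric start/end/width values but never sets isValid to False, so addProperty's RuntimeError can never fire; a sketch of the evident intent (standalone function, my naming):

def validate_property(prop, value):
    # Reject non-numeric values for positional properties instead of
    # only logging them, so the caller can actually refuse them.
    if prop in ("end", "start", "width"):
        if not isinstance(value, int) and not (isinstance(value, str) and value.isdigit()):
            return False
    return True

assert validate_property("width", 12)
assert validate_property("width", "12")
assert not validate_property("width", "wide")
assert validate_property("name", "anything")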
math.log(self.ham_prob)\r\n\r\n visited = set()\r\n for token in tokens:\r\n if token not in visited:\r\n token_count = tokens_counter[token]\r\n # P(spam|email)\r\n if token in self.spam_log_probs:\r\n spam_category += token_count*(self.spam_log_probs[token])\r\n else:\r\n spam_category += token_count*(self.spam_log_probs[\"\"])\r\n\r\n # P(not spam|email)\r\n if token in self.ham_log_probs:\r\n ham_category += token_count*(self.ham_log_probs[token])\r\n else:\r\n ham_category += token_count*(self.ham_log_probs[\"\"])\r\n visited.add(token)\r\n\r\n return spam_category > ham_category\r\n\r\n def most_indicative_spam(self, n):\r\n indicative = {k: v-math.log(math.exp(v)*self.spam_prob+math.exp(self.ham_log_probs[k])*self.ham_prob) for k, v in self.spam_log_probs.iteritems() if k != \"\"}\r\n indicative_list = sorted(indicative, key=indicative.__getitem__, reverse=True)\r\n return indicative_list[:n]\r\n\r\n def most_indicative_ham(self, n):\r\n indicative = {k: v-math.log(math.exp(v)*self.ham_prob+math.exp(self.spam_log_probs[k])*self.spam_prob) for k, v in self.ham_log_probs.iteritems() if k != \"\"}\r\n indicative_list = sorted(indicative, key=indicative.__getitem__, reverse=True)\r\n return indicative_list[:n]\r\n\r\n############################################################\r\n# Section 2: Feedback\r\n############################################################\r\n\r\nfeedback_question_1 = \"\"\"\r\n7 hours\r\n\"\"\"\r\n\r\nfeedback_question_2 = \"\"\"\r\nimproving the efficiency of log_probs\r\n\"\"\"\r\n\r\nfeedback_question_3 = \"\"\"\r\nThe assignment gives us the chance to exercise the formulas leraned in lecutes,\r\nso that we can really understand the meanings of the mathemtical symobls.\r\n\"\"\"\r\n\r\n# 2\r\n# paths = [\"data/train/ham/ham%d\" % i for i in range(1, 11)]\r\n# a = time.time()\r\n# p = log_probs(paths, 1e-5)\r\n# b = time.time()\r\n# print p[\"the\"]\r\n# print p[\"line\"]\r\n# print \"time\", b-a\r\n#\r\n# print \"\"\r\n# paths = [\"data/train/spam/spam%d\" % i for i in range(1, 11)]\r\n# a = time.time()\r\n# p = log_probs(paths, 1e-5)\r\n# b = time.time()\r\n# print p[\"Credit\"]\r\n# print p[\"\"]\r\n# print \"time\", b-a\r\n\r\n\r\n# 4\r\n# sf = SpamFilter(\"data/train/spam\",\"data/train/ham\", 1e-5)\r\n# print sf.is_spam(\"data/train/spam/spam1\")\r\n# print sf.is_spam(\"data/train/spam/spam2\")\r\n#\r\n# sf = SpamFilter(\"data/train/spam\",\"data/train/ham\", 1e-5)\r\n# print sf.is_spam(\"data/train/ham/ham1\")\r\n# print sf.is_spam(\"data/train/ham/ham2\")\r\n\r\n\r\n# 5\r\n# a = time.time()\r\n# sf = SpamFilter(\"data/train/spam\", \"data/train/ham\", 1e-5)\r\n# b = time.time()\r\n# print b-a\r\n# print sf.most_indicative_spam(20)\r\n#\r\n# sf = SpamFilter(\"data/train/spam\", \"data/train/ham\", 1e-5)\r\n# print sf.most_indicative_ham(20)\r\n\r\n# testing\r\n# sf = SpamFilter(\"data/train/spam\", \"data/train/ham\", 1e-5)\r\n# ham_paths = [\"data/dev/ham/dev%d\" % i for i in range(1, 201)]\r\n# spam_paths = [\"data/dev/spam/dev%d\" % i for i in range(201, 401)]\r\n#\r\n# ham_correct = 0.0\r\n# ham_total = 0.0\r\n#\r\n# for ham in ham_paths:\r\n# ham_total += 1.0\r\n# if not sf.is_spam(ham):\r\n# ham_correct += 1.0\r\n#\r\n# print \"ham total\", ham_correct\r\n# print \"ham accuracy:\", ham_correct/ham_total\r\n#\r\n# spam_correct = 0.0\r\n# spam_total = 0.0\r\n#\r\n# for spam in spam_paths:\r\n# spam_total += 1.0\r\n# if sf.is_spam(spam):\r\n# spam_correct += 1.0\r\n#\r\n# print \"spam total\", spam_correct\r\n# print \"spam accuracy:\", 
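homework5's log_probs implements add-alpha smoothing with one extra slot for unseen words (its "" key); a tiny self-contained check of that formula (toy tokens, <UNK> stands in for the record's empty-string bucket):

import math
from collections import Counter

def smoothed_log_probs(tokens, alpha=1e-5):
    # P(w) = (count(w) + alpha) / (total + alpha * (V + 1)); the +1
    # reserves probability mass for a single unseen-word bucket.
    counts = Counter(tokens)
    total = sum(counts.values())
    denom = total + alpha * (len(counts) + 1)
    probs = {w: math.log((c + alpha) / denom) for w, c in counts.items()}
    probs["<UNK>"] = math.log(alpha / denom)
    return probs

p = smoothed_log_probs("the cat sat on the mat".split())
assert math.isclose(sum(math.exp(v) for v in p.values()), 1.0)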
spam_correct/spam_total\r\n","sub_path":"hw5/homework5.py","file_name":"homework5.py","file_ext":"py","file_size_in_byte":5882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"262937021","text":"import pandas as pd\r\nimport numpy as np\r\nimport matplotlib\r\nimport math as m\r\nimport sklearn\r\nfrom matplotlib import pyplot as plt\r\nmatplotlib.style.use('ggplot')\r\nfrom sklearn.metrics import roc_curve, roc_auc_score\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.model_selection import GridSearchCV\r\nfrom sklearn import tree, ensemble\r\nimport sklearn.datasets\r\nfrom sklearn.model_selection import cross_val_score\r\nimport xgboost\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nfrom sklearn.metrics import roc_curve, roc_auc_score\r\n\r\nfrom sklearn.metrics import mean_squared_error\r\n\r\n\r\n\r\n(X, y) = sklearn.datasets.load_boston(return_X_y=True)\r\n\r\n\r\n\r\nprint(X.shape)\r\nprint(y.shape)\r\n\r\n#(X_train, X_test, y_train, y_test) = train_test_split(X, y, test_size=0.25, random_state=0)\r\n\r\n\r\nX_train = X[0:379,]\r\nX_test = X[379:506,]\r\ny_train = y[0:379]\r\ny_test = y[379:506]\r\n\r\nfrom sklearn.preprocessing import StandardScaler\r\nscaler = StandardScaler()\r\nX_train_scaled = scaler.fit_transform(X_train)\r\nX_test_scaled = scaler.transform(X_test)\r\n\r\n\r\nprint(X_train.shape)\r\nprint(y_train.shape)\r\nprint(X_test.shape)\r\nprint(y_test.shape)\r\n\r\n\r\nalg = xgboost.XGBRegressor()\r\nalg.fit(X_train, y_train)\r\npred = alg.predict(X_test)\r\n\r\n\r\n\r\n\r\nprint(pred.shape)\r\n\r\nprint(type(pred))\r\n\r\nprint(type(y_test))\r\n\r\nprint(pred)\r\nprint(y_test)\r\n\r\nprint (np.sqrt(mean_squared_error(y_test,pred)))\r\n\r\n\r\nlin = sklearn.linear_model.LinearRegression()\r\nlin.fit(X_train, y_train)\r\npred_lin = lin.predict(X_test)\r\n\r\nprint (np.sqrt(mean_squared_error(y_test,pred_lin)))\r\n\r\n\r\n\r\nrf = sklearn.ensemble.RandomForestRegressor(n_estimators=100, random_state=0)\r\nrf.fit(X_train, y_train)\r\npred_rf = rf.predict(X_test)\r\n\r\nprint (np.sqrt(mean_squared_error(y_test,pred_rf)))\r\n\r\n\r\nrf.fit(X_train_scaled, y_train)\r\npred_rf = rf.predict(X_test_scaled)\r\n\r\nprint (np.sqrt(mean_squared_error(y_test,pred_rf)))\r\n\r\n\r\nlin.fit(X_train_scaled, y_train)\r\npred_lin = lin.predict(X_test_scaled)\r\n\r\nprint (np.sqrt(mean_squared_error(y_test,pred_lin)))\r\n\r\n\r\n\r\nfrom sklearn.preprocessing import PolynomialFeatures\r\ntransform = PolynomialFeatures(3)\r\nX_train_poly = transform.fit_transform(X_train)\r\nX_test_poly = transform.transform(X_test)\r\n\r\nX_train_scaled = scaler.fit_transform(X_train_poly)\r\nX_test_scaled = scaler.transform(X_test_poly)\r\n\r\nlin.fit(X_train_scaled, y_train)\r\npred_lin = lin.predict(X_test_scaled)\r\n\r\nprint (np.sqrt(mean_squared_error(y_test,pred_lin)))\r\n\r\n\r\n\r\n#rc = roc_curve(y_test, pred[:, 1])\r\n\r\n#print(rc)\r\n\r\n\r\n","sub_path":"xgboost.py","file_name":"xgboost.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"602524628","text":"import meraki\n\n# Defining your API key as a variable in source code is not recommended\nAPI_KEY = '6bec40cf957de430a6f1f2baa056b99a4fac9ea0'\n# Instead, use an environment variable as shown under the Usage section\n# @ https://github.com/meraki/dashboard-api-python/\n\ndashboard = meraki.DashboardAPI(API_KEY)\n\nnetwork_id = 
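The xgboost record slices the Boston rows positionally (0:379 / 379:506); the dataset is not shuffled, so the tail rows differ systematically from the head, and the commented-out randomized split is usually the fairer benchmark. (Note also that load_boston was removed in scikit-learn 1.2, so the record needs an older sklearn.) A sketch with stand-in data:

import numpy as np
from sklearn.model_selection import train_test_split

X = np.arange(20).reshape(10, 2)    # stand-in features
y = np.arange(10, dtype=float)      # stand-in targets
# shuffle=True (the default) avoids evaluating only on the tail rows.
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.25, random_state=0)
print(X_tr.shape, X_te.shape)       # (7, 2) (3, 2)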
'L_646829496481105433'\n\nresponse = dashboard.networks.createNetworkPiiRequest(\n network_id, \n type='delete', \n datasets=['usage', 'events'], \n mac='00:77:00:77:00:77'\n)\n\nprint(response)","sub_path":"meraki/sdk/python/createNetworkPiiRequest.py","file_name":"createNetworkPiiRequest.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"297721369","text":"'''A few miscellaneous tools. '''\nfrom __future__ import division # Division of integers with / should never round!\nimport collections\nimport itertools\nimport subprocess\n\n__author__ = \"dpark@broadinstitute.org\"\n\n\ndef unique(items):\n ''' Return unique items in the same order as seen in the input. '''\n seen = set()\n for i in items:\n if i not in seen:\n seen.add(i)\n yield i\n\n\ndef histogram(items):\n ''' I count the number of times I see stuff and return a dict of counts. '''\n out = {}\n for i in items:\n out.setdefault(i, 0)\n out[i] += 1\n return out\n\n\ndef freqs(items, zero_checks=None):\n ''' Given a list of comparable, non-unique items, produce an iterator of\n (item, count, freq) tuples.\n item is a unique instance of one of the items seen on input\n count is a positive integer describing the number of times this item was observed\n freq is count / sum(count) for all outputs.\n If zero_checks is specified, then the output iterator will emit tuples for the\n items in zero_checks even if they do not appear in the input. If they are not in\n the input, they will be emitted with a zero count and freq.\n See histogram(items)\n '''\n zero_checks = zero_checks or set()\n \n tot = 0\n out = {}\n for i in items:\n out.setdefault(i, 0)\n out[i] += 1\n tot += 1\n for k, v in out.items():\n yield (k, v, float(v) / tot)\n for i in zero_checks:\n if i not in out:\n yield (i, 0, 0.0)\n\n\ndef intervals(i, n, l):\n ''' Divide something of length l into n equally sized parts and return the\n start-stop values of the i'th part. Values are 1-based. Each part\n will be adjacent and non-overlapping with the next part. i must be a\n number from 1 to n.\n '''\n assert 1 <= i <= n and l >= n\n part_size = l // n\n start = 1 + part_size * (i - 1)\n stop = part_size * i\n if i == n:\n stop = l\n return (start, stop)\n\n# from http://stackoverflow.com/a/312467\n\n\ndef batch_iterator(iterator, batch_size):\n \"\"\"Returns lists of length batch_size.\n\n This can be used on any iterator, for example to batch up\n SeqRecord objects from Bio.SeqIO.parse(...), or to batch\n Alignment objects from Bio.AlignIO.parse(...), or simply\n lines from a file handle.\n\n This is a generator function, and it returns lists of the\n entries from the supplied iterator. 
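util/misc.py's intervals() divides a length-l range into n adjacent 1-based parts, folding the remainder into the last part; a compact restatement with a worked check:

def intervals(i, n, l):
    # 1-based (start, stop) of part i of n; the last part absorbs
    # whatever the integer division leaves over.
    part = l // n
    start = 1 + part * (i - 1)
    stop = l if i == n else part * i
    return (start, stop)

assert [intervals(i, 3, 10) for i in (1, 2, 3)] == [(1, 3), (4, 6), (7, 10)]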
Each list will have\n batch_size entries, although the final list may be shorter.\n \"\"\"\n it = iter(iterator)\n item = list(itertools.islice(it, batch_size))\n while item:\n yield item\n item = list(itertools.islice(it, batch_size))\n\n\ntry:\n from subprocess import run\nexcept ImportError:\n CompletedProcess = collections.namedtuple(\n 'CompletedProcess', ['args', 'returncode', 'stdout', 'stderr'])\n\n def run(args, stdin=None, stdout=None, stderr=None, shell=False,\n env=None, cwd=None, timeout=None):\n '''A poor man's substitute of python 3.5's subprocess.run().\n\n Definitely a poor man's substitute because stdout and stderr are\n forcibly merged into stdout and capturing always takes place even when\n they should require subprocess.PIPE assignments, but the interface is\n fairly similar.\n '''\n try:\n output = subprocess.check_output(\n args, stdin=stdin, stderr=subprocess.STDOUT, shell=shell,\n env=env, cwd=cwd)\n returncode = 0\n except subprocess.CalledProcessError as e:\n output = e.output\n returncode = e.returncode\n\n return CompletedProcess(args, returncode, output, '')\n\n\ndef run_and_print(args, stdin=None, shell=False, env=None, cwd=None,\n timeout=None, silent=False):\n '''Capture stdout+stderr and print.\n\n This is useful for nose, which has difficulty capturing stdout of\n subprocess invocations.\n '''\n result = run(args, stdin=stdin, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT, env=env, cwd=cwd, timeout=timeout)\n if not silent:\n print(result.stdout.decode('utf-8'))\n return result\n","sub_path":"util/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":4238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"167858103","text":"from bs4 import BeautifulSoup\nimport requests\nfrom collections import Counter\nimport re\nimport threading\nimport time\nimport csv\n\nglobal all_case\nall_case=0\n\nwc = Counter() # local variable 'wc' referenced before assignment 要注意區域變數問題!!! 
不能放在迴圈\nwc[\"C\"] = 0 # 自行建立字典過濾非必要的單字\nwc[\"C++\"] = 0\nwc[\"C#\"] = 0\nwc[\"PYTHON\"] = 0\nwc[\"JAVA\"] = 0\nwc[\"JAVASCRIPT\"] = 0\nwc[\"PHP\"] = 0\nwc[\"HTML\"] = 0\nwc[\"SQL\"] = 0\nwc[\"CSS\"] = 0\nwc[\"CSS\"] = 0\nwc[\"R\"] = 0\nwc[\"BASH\"] = 0\nwc[\"RUBY\"] = 0\nwc[\"PERL\"] = 0\nwc[\"SCALA\"] = 0\nwc[\"SWIFT\"] = 0\nwc[\"GO\"] = 0\nwc[\"DELPHI\"] = 0\nwc[\"TYPESCRIPT\"] = 0\nwc[\"MYSQL\"] = 0\nwc[\"FTP\"] = 0\nwc[\"DNS\"] = 0\n\n\n\ndef getLink(page):\n befh = \"https://www.1111.com.tw/job-bank/job-index.asp?si=4&sk=100400,100600,100300&fs=0&page=\"\n Host = befh+str(page)#換頁\n res = requests.get(Host)\n soup = BeautifulSoup(res.text,\"lxml\")\n choose = soup.select('div.jbInfoin')#為了選標題頁連結\n link = [\"https:\"+choose1.select_one('a')['href']for choose1 in choose]\n global links #將內頁連結裝在一起\n links += link\n print(\"success\")\n\n\n\nclass getLinkThread (threading.Thread):#跑主頁的thread\n def __init__(self,page):\n threading.Thread.__init__(self)\n self.page=page\n def run(self):\n getLink(self.page)\n\n\n\n\ndef getWord(link):\n res = requests.get(link)\n global all_case\n all_case=all_case+1\n print(link)\n soup = BeautifulSoup(res.text,\"lxml\")\n text = []#建空白list為了放內文upper後的值\n text1 = soup.select(\"dl.dataList\")#求才條件\n for word in text1:\n k = word.text.upper()\n text.append(k)\n a = re.findall('[A-Z]+[+#]*',\"%s\"%text)#取出我們要的值但會有重複\n text2 = []#建空白list為了放過濾好的值\n for language in a:#過濾重複的值\n if language not in text2:\n text2.append(language)\n from collections import Counter\n for target in text2:#計次\n if target in wc:\n wc[target]+=1\n return wc\n\n\nclass getWordThread (threading.Thread):#跑內頁的thread\n def __init__(self,link):\n threading.Thread.__init__(self)\n self.link=link\n def run(self):\n getWord(self.link)\n\n\n\nlinks = []\nthreads=[]\nfor page in range(1,151):\n Thread=getLinkThread(page)\n threads.append(Thread)\nfor i in threads:\n i.start()\nfor i in threads:\n i.join()\nthreadsword = []\nfor link in links:\n Thread=getWordThread(link)\n threadsword.append(Thread)\nfor i in threadsword:\n i.start()\n time.sleep(0.1)\nfor i in threadsword:\n i.join()\n\n\n#將結果製成長條圖\nfrom collections import OrderedDict\nimport numpy as np\nimport matplotlib.pyplot as plt\nlanguage = OrderedDict(wc.most_common())\nxticks = np.arange(len(language))\nplt.bar(xticks, language.values(), align='center')\nplt.xticks(xticks, list(language.keys()),rotation=75)\nplt.title(\"The most popular programming language\")\nplt.show()\n\n\nimport json\nwith open ('../data/1111_crawler.json','w') as f:#建json檔\n json.dump(wc, f)\nprint(wc.most_common())\n\n# with open ('../data/1111_crawler.csv','w') as fw: # 寫入檔案\n# for lang,counts in wc.most_common():\n# fw.write('{},{}\\n'.format(lang,counts))\n\n\nprint('case:'+str(all_case))\n\n\n","sub_path":"1111main_DUAN/1111_crawler.py","file_name":"1111_crawler.py","file_ext":"py","file_size_in_byte":3330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"303986358","text":"import turtle as t\n\ndef 사각형(x, y) :\n t.goto(x,y)\n t.pendown()\n for i in range (4) :\n t.forward(100)\n t.left(90)\n t.penup()\n return 0\n\n\nt.shape(\"turtle\")\nt.penup() \n\n사각형(0,0)\n사각형(150,0)\n사각형(300,0)\n\n사각형(0,150)\n사각형(150,150)\n사각형(300,150)\n\n\n","sub_path":"chap10_p200_code1.py","file_name":"chap10_p200_code1.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"332253445","text":"from math import*\nprint(\" For_ 
20\\n\\n\")\nn = float(input(\" _N_ Nechagacha bo'lgan sonlarning factoriali yig'indisi kerak: \"))\n\nwhile ceil(n) > n or n <= 0:\n n = float(input(\" XATOLIK!!!\\n Iltimos n>0 qilib qayta kiriritng: n = \"))\n\nsuma1 = 0\n\nn = int(n)\n\nfor i in range(1,n+1):\n c = factorial(i)\n print(i,\" ) \" , i ,\"! = \", c )\n suma1 += c\nprint(\" Hammasini yig'indisi: Summa = \" , suma1)\n\ninput()\n","sub_path":"for_40/for_20.py","file_name":"for_20.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"510562467","text":"#!/usr/bin/python\n# encoding: utf-8\n\nimport sys\nfrom workflow import web, Workflow, ICON_ERROR, ICON_WARNING\nimport HTMLParser\nimport subprocess\n\nBASE_URL = \"http://www.myinstants.com\"\nDAY = 86400\n\ndef main(wf):\n # Change instant dict to arg string\n def instant_to_arg(instant):\n return instant[\"url\"] + \" \" + instant[\"name\"]\n\n # Change arg string to instant dict\n def arg_to_instant(arg):\n split = wf.args[1].split(\" \", 1)\n return { \"name\": split[1], \"url\": split[0] }\n\n # Plays sound at given url\n def play_instant(instant):\n subprocess.call(\"curl -s \" + instant[\"url\"] + \" > /tmp/alfred_myinstants.mp3 && afplay /tmp/alfred_myinstants.mp3\", shell=True)\n\n # Get instants from Myinstants\n def get_instants(get_url, cache_name, max_age):\n instants = None\n\n # Parser to scrape instants from search webpage\n class Parser(HTMLParser.HTMLParser):\n instant = None\n\n def handle_starttag(self, tag, attributes):\n if len(instants) < 25 and (tag == \"div\" or tag == \"a\"):\n for name, value in attributes:\n if name == \"class\" and value == \"instant\":\n Parser.instant = {}\n elif Parser.instant != None:\n if name == \"onclick\":\n Parser.instant[\"url\"] = wf.decode(BASE_URL + value[6:-2])\n elif name == \"href\":\n Parser.instant[\"name\"] = \"\"\n\n def handle_data(self, data):\n if Parser.instant != None and \"name\" in Parser.instant:\n Parser.instant[\"name\"] = wf.decode(data)\n instants.append(Parser.instant)\n Parser.instant = None\n\n # Get and cache instants\n instants = wf.cached_data(cache_name, max_age=max_age)\n if instants == None:\n # Get instants\n try:\n instants = []\n response = web.get(get_url)\n if response.status_code == 200: Parser().feed(response.content)\n else: instants = None\n except:\n instants = None\n # Cache on success\n if instants != None and len(instants) > 0: wf.cached_data(cache_name, lambda: instants, max_age=max_age)\n\n # Build results\n if instants == None:\n wf.add_item(\"Error getting instants\", \"Unable to retrieve instants\", valid=False, icon=ICON_ERROR)\n elif len(instants) == 0:\n wf.add_item(\"None found\", \"No sounds found for choice\", valid=False, icon=ICON_WARNING)\n else:\n for instant in instants: wf.add_item(instant[\"name\"], instant[\"url\"], arg=instant_to_arg(instant), valid=True)\n\n # Return results\n wf.send_feedback()\n\n # Gets favorite instants\n def get_favorites():\n instants = wf.stored_data(\"favorites\")\n if instants == None or len(instants) == 0:\n wf.add_item(\"None found\", \"No favorite sounds found (add to favorites with cmd+enter)\", valid=False, icon=ICON_WARNING)\n else:\n for instant in instants: wf.add_item(instant[\"name\"], instant[\"url\"], arg=instant_to_arg(instant), valid=True)\n\n # Store a favorite\n def add_favorite(instant):\n instants = wf.stored_data(\"favorites\")\n if instants == None: instants = []\n if instant in instants: instants.remove(instant)\n 
instants.insert(0, instant)\n wf.store_data(\"favorites\", instants)\n\n # Remove a favorite\n def remove_favorite(instant):\n instants = wf.stored_data(\"favorites\")\n instants.remove(instant)\n wf.store_data(\"favorites\", instants)\n\n # Calculate get url, cache name and max age\n type = wf.args[0]\n if type == \"play\":\n play_instant(arg_to_instant(wf.args[1]))\n elif type == \"search\":\n query = wf.args[1]\n get_instants(BASE_URL + \"/search/?name=\" + query.replace(\" \", \"+\"), \"search:\" + query, 7 * DAY)\n elif type == \"best\":\n get_instants(BASE_URL, \"best\", DAY)\n elif type == \"trending\":\n get_instants(BASE_URL + \"/trending\", \"trending\", DAY)\n elif type == \"recent\":\n get_instants(BASE_URL + \"/recent\", \"recent\", DAY)\n elif type == \"favorites\":\n get_favorites()\n wf.send_feedback()\n elif type == \"favorite\":\n add_favorite(arg_to_instant(wf.args[1]))\n elif type == \"unfavorite\":\n remove_favorite(arg_to_instant(wf.args[1]))\n\nif __name__ == \"__main__\":\n wf = Workflow(\n update_settings={ \"github_slug\": \"flipxfx/alfred-myinstants\" },\n help_url=\"https://github.com/flipxfx/alfred-myinstants#help\"\n )\n sys.exit(wf.run(main))\n","sub_path":"src/instants.py","file_name":"instants.py","file_ext":"py","file_size_in_byte":4757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"49188920","text":"import ast\n\nimport click\n\nfrom codegrapher.graph import FunctionGrapher\nfrom codegrapher.parser import FileVisitor\n\n\n@click.command()\n@click.argument('code', type=click.File('rb'))\n@click.option('--printed', default=False, is_flag=True, help='Pretty prints the call tree for each class in the file')\n@click.option('--remove-builtins', default=False, is_flag=True, help='Removes builtin functions from call trees')\n@click.option('--output', help='Graphviz output file name')\n@click.option('--output-format', default='pdf', help='File type for graphviz output file')\ndef cli(code, printed, remove_builtins, output, output_format):\n \"\"\"\n Parses a file.\n codegrapher [file_name]\n \"\"\"\n parsed_code = ast.parse(code.read(), filename='code.py')\n visitor = FileVisitor()\n visitor.visit(parsed_code)\n if remove_builtins:\n visitor.remove_builtins()\n if printed:\n click.echo('Classes in file:')\n for class_object in visitor.classes:\n click.echo('=' * 80)\n click.echo(class_object.name)\n click.echo(class_object.pprint())\n click.echo('')\n if output:\n graph = FunctionGrapher()\n graph.add_visitor_to_graph(visitor)\n graph.name = output\n graph.format = output_format\n graph.render()\n","sub_path":"cli/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"119801713","text":"from unittest import TestCase\n\nfrom p0001 import computeSum\n\nclass Test0001(TestCase):\n\n def testComputeSum(self):\n for data in self.__createTestData():\n limit = data[\"limit\"]\n divisors = data[\"divisors\"]\n expected = data[\"expected\"]\n\n with self.subTest(\n limit = limit\n , divisors = divisors\n , expected = expected):\n\n self.assertEqual(expected, computeSum(limit, divisors))\n\n def testComputeSumIndifferentToDivisorOrder(self):\n for data in self.__createTestData():\n limit = data[\"limit\"]\n divisors = data[\"divisors\"]\n expected = data[\"expected\"]\n\n with self.subTest(\n limit = limit\n , divisors = divisors\n , expected = expected):\n\n self.assertEqual(\n 
computeSum(limit, divisors)\n , computeSum(limit, divisors[::-1]))\n\n def __createTestData(self):\n\n return [\n {\"limit\" : 10, \"divisors\" : [3, 5], \"expected\" : 23}\n , {\"limit\" : 1000, \"divisors\" : [3, 5], \"expected\" : 233168}\n , {\"limit\" : 1000, \"divisors\" : [3, 5, 15, 30, 45], \"expected\" : 233168}]\n","sub_path":"python/test/testP0001.py","file_name":"testP0001.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"225826854","text":"from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.shortcuts import render\n\nfrom core.utils import get_context\nfrom public_interface.models import Genes\nfrom overview_table.models import OverviewTable\n\n\ndef index(request):\n context = get_context(request)\n\n genes = Genes.objects.all().order_by('gene_code')\n vouchers = OverviewTable.objects.all()\n\n paginator = Paginator(vouchers, 100)\n\n page = request.GET.get('page')\n try:\n vouchers_for_page = paginator.page(page)\n except PageNotAnInteger:\n vouchers_for_page = paginator.page(1)\n except EmptyPage:\n vouchers_for_page = paginator.page(paginator.num_pages)\n\n context[\"data\"] = vouchers_for_page\n context[\"genes\"] = genes\n context[\"page_range\"] = paginator.page_range\n return render(request, 'overview_table/index.html', context)\n","sub_path":"overview_table/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"486921267","text":"import os\nimport re\nimport json\nimport requests\nfrom bs4 import BeautifulSoup\n\n\ndef get_city_code():\n url = 'https://www.fang.com/SoufunFamily.htm'\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.109 Safari/537.36'\n }\n url_compile = re.compile('http://(.*?)\\.fang.*?')\n\n response = requests.get(url, headers=headers)\n if response.status_code == 200:\n city_url_dict = {}\n save_path = os.path.join(os.getcwd(), 'city_code.json')\n\n soup = BeautifulSoup(response.text, 'html5lib')\n city_infos = soup.select('#senfe > tbody > tr')\n\n last_province = ''\n for line in city_infos:\n try:\n province = line.select('td:nth-of-type(2) > strong')[0].text\n if province == ' ':\n province = last_province\n except:\n province = last_province\n last_province = province\n\n if province == '其它':\n continue\n\n for info in line.select('td:nth-of-type(3) > a'):\n city_name = info.text\n city_url = info.get('href')\n city_code = re.search(url_compile, city_url).group(1)\n city_url_dict[city_name] = city_code\n\n with open(save_path, 'w') as f:\n f.write(json.dumps(city_url_dict))\n\n return None\n\n\nif __name__ == '__main__':\n get_city_code()\n","sub_path":"Scrapy/fangtianxia_house/fangtianxia_house/spiders/get_city_code.py","file_name":"get_city_code.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"438522065","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 18 17:01:20 2016\n\n@author: Chao\n\"\"\"\nimport pandas as pd\nfrom ReadData import getData\nfrom ReadData import getTableS1\nimport time\nimport pulp\n#import csv\n\ndef addAct(schedule,act):\n for i in range(act.startTime,act.endTime):\n schedule[i].append(act)\n \ndef makeContentionGroup(parkingSchedule,problem):\n groupSetListParkDic={}\n for 
parkname in parkingSchedule.keys():\n groupSetListParkDic[parkname]=[]\n schedule=parkingSchedule[parkname]\n for i in range(problem.earliestTime,problem.latestTime+1):\n if len(schedule[i])<=1:\n continue\n groupSet=set()\n for act in schedule[i]:\n groupSet.add(act.airId)\n if len(groupSet)<=1:\n continue\n if groupSet not in groupSetListParkDic[parkname]:\n IsSubSet=False\n for groupSetAdded in groupSetListParkDic[parkname]:\n if not (groupSet-groupSetAdded):\n IsSubSet=True\n break\n if not (groupSetAdded-groupSet):\n groupSetListParkDic[parkname].remove(groupSetAdded)\n break\n if not IsSubSet:\n groupSetListParkDic[parkname].append(groupSet)\n return groupSetListParkDic\n \n#def DicToResult(dic,tb1,filename):\n# lines=[]\n# with open(\"%s\" %filename, \"w\") as f:\n# writer = csv.writer(f)\n# for index,row in tb1.iterrows():\n# airId=row[\"进港航班号\"]\n# if airId in dic and dic[airId]:\n# line=[str(row[\"进港航班号\"]),row[\"进机位时间\"],row[\"出机位时间\"],dic[airId]]\n# lines.append(line) \n# writer.writerows(lines) \n \ndef DicToResult(dic,tb1,filename):\n lines=[]\n #with open(\"%s\" %filename, \"w\") as f:\n #writer = csv.writer(f)\n for index,row in tb1.iterrows():\n airId=row[\"进港航班号\"]\n if airId in dic and dic[airId]:\n line=[str(row[\"进港航班号\"]),row[\"进机位时间\"],row[\"出机位时间\"],dic[airId]]\n lines.append(line) \n #writer.writerows(lines)\n re=pd.DataFrame(lines)\n re.to_csv(filename,index=False,header=False)\n\n\n\ndef AirportSolver(Solver,tb1,tb2,filename=\"../output/result.csv\"):\n start = time.clock()\n print(\"Read data...\")\n problem=getData(tb1,tb2)\n print(\"Preprocess...\")\n maxClashNum=50\n parkingSchedule={}\n for name in problem.allParkings.keys():\n parkingSchedule[name]={}\n for i in range(problem.earliestTime,problem.latestTime+1):\n parkingSchedule[name][i]=[]\n \n taxiwaySchedule={}\n for name in problem.allTaxiways:\n taxiwaySchedule[name]={}\n for i in range(problem.earliestTime,problem.latestTime+1):\n taxiwaySchedule[name][i]=[]\n \n \n for air in list(problem.allAirplanes.values()):\n for parkname in problem.air2park[air.name]:\n park=problem.allParkings[parkname]\n inAct,stayAct,outAct=problem.allActivities[air.name]\n addAct(parkingSchedule[parkname],stayAct)\n if park.tid!=\"无限制\":\n addAct(taxiwaySchedule[park.tid],outAct)\n addAct(taxiwaySchedule[park.tid],inAct)\n \n #记录有意义的时间\n #竞争组合\n groupSetListParkDic=makeContentionGroup(parkingSchedule,problem)\n groupSetListTaxiDic=makeContentionGroup(taxiwaySchedule,problem)\n\n\n airplanes=list(problem.allAirplanes.keys())\n parkings=list(tb2[\"停机位\"].unique())\n taxiways=list(tb2[\"滑行道\"].unique())\n taxiways.remove(\"无限制\")\n air2park=problem.air2park\n air_park=[]\n c_air_park={}\n \n air_taxi=set()\n air_taxi2park={}\n for air in airplanes:\n for park in air2park[air]:\n air_park.append((air,park))\n c_air_park[(air,park)]=problem.allParkings[park].getScore()*2+3\n tid=problem.allParkings[park].tid\n if tid==\"无限制\":\n continue\n air_taxi.add((air,tid))\n if (air,tid) not in air_taxi2park:\n air_taxi2park[(air,tid)]=[park]\n else:\n air_taxi2park[(air,tid)].append(park)\n air_taxi=list(air_taxi)\n #每个taxiGroup对应一个clash\n tid_gidx=[]\n for tid in groupSetListTaxiDic.keys():\n for i in range(len(groupSetListTaxiDic[tid])):\n tid_gidx.append((tid,i)) \n \n \n end = time.clock()\n print(\"Done\")\n print (\"[Time: %.3f s]\" % (end - start))\n print(\"Write LP\")\n \n prob = pulp.LpProblem('Airport', pulp.LpMaximize) \n \n v_air_park= pulp.LpVariable.dicts(\"air_park\",air_park,0,1,pulp.LpInteger)\n v_air_taxi= 
pulp.LpVariable.dicts(\"air_taxi\",air_taxi,0,1,pulp.LpInteger)\n v_air_taxi_c= pulp.LpVariable.dicts(\"air_taxi_c\",air_taxi,0,1,pulp.LpInteger) \n v_clash_taxi_gidx= pulp.LpVariable.dicts(\"Clash\",tid_gidx,0,1,pulp.LpInteger)\n \n #OBJ\n print(\"->Obj\")\n prob+=pulp.lpSum([c_air_park[ap]*v_air_park[ap] for ap in air_park])\\\n -pulp.lpSum([v_air_taxi_c[at] for at in air_taxi]) \n \n# prob+=v_air_park[air_park[0]] \n #s.t\n #一架飞机只能停在一个机位\n print(\"->Constraint 1\")\n for air in airplanes:\n prob+=pulp.lpSum([v_air_park[(air,park)] for park in air2park[air]])<=1,\"\"\n \n# 一架飞机是否用了滑行道取与是否用了对应的机位之一\n print(\"->Constraint 2\")\n for at in air_taxi:\n prob+=pulp.lpSum([v_air_park[(at[0],park)] for park in air_taxi2park[at]])\\\n -v_air_taxi[at]-v_air_taxi_c[at]==0,\"\"\n \n #冲突组中只能有一架飞机\n print(\"->Constraint 3\")\n for park in parkings:\n for groupSet in groupSetListParkDic[park]:\n if groupSet:\n prob+=pulp.lpSum([v_air_park[(air,park)] for air in groupSet])<=1,\"\"\n \n #若不冲突,冲突组只能有一架飞机\n #若冲突,在冲突机位停放\n print(\"->Constraint 4\")\n for taxi in taxiways:\n if taxi==\"无限制\":\n continue\n for gidx in range(len(groupSetListTaxiDic[taxi])):\n groupSet=groupSetListTaxiDic[taxi][gidx]\n if groupSet:\n prob+=pulp.lpSum([v_air_taxi[(air,taxi)] for air in groupSet])\\\n +v_clash_taxi_gidx[(taxi,gidx)]<=1,\"\"\n \n prob+=pulp.lpSum([v_air_taxi_c[(air,taxi)] for air in groupSet])\\\n -maxClashNum*v_clash_taxi_gidx[(taxi,gidx)]<=0,\"\"\n \n end = time.clock()\n print (\"[Time: %.3f s]\" % (end - start))\n\n print(\"Solving...\")\n Solver(prob)\n\n print(\"Solved.\")\n print(\"Write Result\")\n dic={}\n for ap in air_park:\n if v_air_park[ap].value()==1:\n dic[ap[0]]=ap[1]\n \n \n DicToResult(dic,tb1,filename)\n \n score=pulp.value(prob.objective)/problem.p0+1\n print(\"Score:\",score)\n\n\n\nif __name__==\"__main__\":\n start = time.clock()\n tb1,tb2=getTableS1()\n \n #Solver=pulp.GUROBI_CMD().solve\n Solver=pulp.CPLEX_CMD().solve \n #Solver=pulp.COIN_CMD(path=\"F:/Program Files (x86)/COIN-OR/1.7.4/win32-msvc10/bin/cbc.exe\").solve\n #Solver=pulp.GLPK_CMD(path=\"F:/ToolBox/glpk-4.60/w64/glpsol.exe\").solve\n \n AirportSolver(Solver,tb1,tb2)\n \n \n end = time.clock()\n print(\"Done\")\n print (\"[Time: %.3f s]\" % (end - start))","sub_path":"src/MIPSolver.py","file_name":"MIPSolver.py","file_ext":"py","file_size_in_byte":7518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"41615847","text":"from person import Person\nfrom manager import Manager\n\nbob = Person(name='Bob Smith', age=42, pay=10000)\nsue = Person(name='Sue Jones', age=45, pay=20000)\ntom = Manager(name='Tom Due', age=55, pay=30000)\ndb = [bob, sue, tom]\n\nfor obj in db:\n obj.give_raise(.10) # the default or special method\n\nfor obj in db:\n print(obj.last_name(), '=>', round(obj.pay, 2))\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"200500974","text":"import numpy as np\n\nfrom collections import Counter\nfrom math import log\nimport operator\nimport pickle\n\n\ndef calc_shannon_ent(classes):\n classes_counter = Counter(classes)\n shannon_ent = 0.0\n for key, value in classes_counter.items():\n prob = float(value) / len(classes)\n shannon_ent -= prob * log(prob, 2)\n return shannon_ent\n\n\n# for test only\ndef create_dataset():\n data = [[1, 1, 'yes'], [0, 1, 'yes'], [1, 0, 'no'], [0, 0, 'no'], [0, 0, 'no'], ]\n labels = ['no 
surfacing', 'flippers']\n classes = [item[-1] for item in data]\n return data, labels, classes\n\n\ndef split_dataset(data, axis, value):\n return [item[:axis] + item[axis + 1:] for item in data if item[axis] == value]\n\n\ndef dataset_filter(data, filter):\n return [item for item in data if filter(item)]\n\n\ndef load_data():\n data = np.loadtxt('page-blocks.data', delimiter=',')\n labels = [\"height\", \"lenght\", \"area\", \"eccen\", \"p_black\",\n \"p_and\", \"mean_tr\", \"blackpix\", \"blackand\", \"wb_trans\"]\n classes = [item[-1] for item in data]\n return data, labels, classes\n\n\ndef choose_feature_to_split(data, labels, classes):\n base_ent = calc_shannon_ent(classes)\n base_info_gain = 0.0\n best_feature = -1\n for i in range(len(labels)):\n uni_features = set([item[i] for item in data])\n new_ent = 0.0\n for val in uni_features:\n sub = split_dataset(data, i, val)\n prob = len(sub) / float(len(data))\n new_ent += prob * calc_shannon_ent(sub)\n info_gain = base_ent - new_ent\n if info_gain > base_info_gain:\n base_info_gain = info_gain\n best_feature = i\n return best_feature\n\n\ndef reg_feature_to_split(data, labels, classes):\n base_env = calc_shannon_ent(classes)\n base_info_gain = 0.0\n best_feature = -1\n for i in range(len(labels)):\n # get unique & sorted features\n features = sorted(list(set([item[i] for item in data])))\n new_ent = 0.0\n for i in range(len(features) - 1):\n boundary = (features[i] + features[i + 1]) / 2\n sub_g = dataset_filter(data, lambda item: item[i] > boundary)\n sub_l = dataset_filter(data, lambda item: item[i] <= boundary)\n prob_g = len(sub_g) / float(len(data))\n prob_l = len(sub_l) / float(len(data))\n new_ent = prob_g * calc_shannon_ent(sub_g) + prob_l * calc_shannon_ent(sub_l)\n info_gain = base_env - new_ent\n if info_gain > base_info_gain:\n base_info_gain = info_gain\n best_feature = i\n return best_feature\n\n\ndef tree(data, labels, classes, features):\n if classes.count(classes[0]) == len(classes):\n return classes[0]\n if len(data[0]) == 1:\n return Counter(classes).most_common(1)[0][0]\n\n best_feature = choose_feature_to_split(data, labels, classes)\n label = labels[best_feature]\n\n features.append(label)\n my_tree = {label: {}}\n del (labels[best_feature])\n uniq_vals = set([item[best_feature] for item in data])\n for v in uniq_vals:\n my_tree[label][v] = tree(data, labels, classes, features)\n return my_tree\n\n\ndef classify(tree, features, test):\n label = ''\n first_str = next(iter(tree))\n second_dic = tree[first_str]\n feature_i = features.index(first_str)\n for key, value in second_dic.items():\n if test[feature_i] == key:\n if type(value).__name__ == 'dict':\n label = classify(value, features, test)\n else:\n label = value\n return label\n\n\nif __name__ == '__main__':\n print(calc_shannon_ent([1, 1, 1, 2, 2, 2]))\n print(calc_shannon_ent([1, 1]))\n print(calc_shannon_ent([1, 2, 2, 2]))\n # data, labels, classes = create_dataset()\n # features = []\n # t = tree(data, labels, classes, features)\n #\n # test = []\n # result = classify(tree, features, test)\n #\n # print(split_dataset(data, 1, 1))\n # print(data)\n","sub_path":"03-decision-tree/hw2-decision-tree-class.py","file_name":"hw2-decision-tree-class.py","file_ext":"py","file_size_in_byte":3960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"496002811","text":"#!/usr/bin/python\n# Solved by Bogdan Trif @ Completed on Wed, 30 Nov 2016, 23:46\n#The Euler Project https://projecteuler.net\n'''\nGoldbach's other 
conjecture - Problem 46\nIt was proposed by Christian Goldbach that every odd composite number can be written as the sum of a prime and twice a square.\n9 = 7 + 2×12\n15 = 7 + 2×22\n21 = 3 + 2×32\n25 = 7 + 2×32\n27 = 19 + 2×22\n33 = 31 + 2×12\nIt turns out that the conjecture was false.\nWhat is the smallest odd composite that cannot be written as the sum of a prime and twice a square?\n'''\nimport time\nimport gmpy2\n\n\ndef prime_generator(n): # HIghly Efficient !!!! THE FASTEST, The BEST , The ONE\n \"\"\" Sieve of Eratosthenes !!!!!!!!! THE FASTEST SIEVE. It won the battle with sieve\n Create a candidate list within which non-primes will be marked as None.\n \"\"\"\n cand = [i for i in range(3, n + 1, 2)]\n end = int(n ** 0.5) // 2\n\n # Loop over candidates (cand), marking out each multiple.\n for i in range(end):\n if cand[i]:\n cand[cand[i] + i::cand[i]] = [None] * ( (n // cand[i]) - (n // (2 * cand[i])) - 1 )\n\n # Filter out non-primes and return the list.\n return [2] + [i for i in cand if i]\n\ndef factorise( n):\n ''' Decompose a factor in its prime factors. This function uses the pyprimes module. THE FASTEST '''\n from pyprimes import factorise\n return [val for sublist in [[i[0]]*i[1] for i in factorise(n)] for val in sublist]\n\nprimes = prime_generator(10**4)\n\ndef detect_Goldbach(n):\n j=0\n while primes[j] < n :\n i = primes[j]\n a = (n - i)/2\n # print(a, i)\n if a % 1 == 0 :\n a = int(a)\n if gmpy2.is_square( a ) == True:\n return True\n j+=1\n return False\n\nprint('\\n--------------------------TESTS------------------------------')\n\nprint('\\nTest Function factorise : ', factorise(14))\nprint('\\nTest Function detect_Goldbach : ', detect_Goldbach(24))\n\n\nprint('\\n================ My FIRST SOLUTION, ===============\\n')\nt1 = time.time()\n\ndef solve():\n for i in range(27,10**7, 2):\n if gmpy2.is_prime(i) == False :\n if detect_Goldbach(i) == False:\n return print('\\nAnswer : ',i)\n\nsolve() # Answer : 5777\n\nt2 = time.time()\nprint('\\nCompleted in :', round((t2-t1)*1000,6), 'ms\\n\\n') # Completed in : 230.013132 ms\n\n\nprint('\\n===============OTHER SOLUTIONS FROM THE EULER FORUM ==============')\nprint('\\n--------------------------SOLUTION 1, aolea, Spain --------------------------')\nt1 = time.time()\n\nfrom math import sqrt\n\ndef primos (num):\n iRoot = int(sqrt(num))\n primos = [2,3]\n for j in range(5,num,2):\n if j > num :\n break\n primos.append(j)\n for k in primos[1:]:\n if k > iRoot:\n break\n for l in primos[k+1:]:\n if l > k:\n if l % k == 0:\n # print(k,l)\n primos.remove(l)\n return primos\n\nn = 10000\nlistPrimes = primos(n)\n\nfor i in range(101,n,2):\n flag = False\n for j in listPrimes :\n if j > i :\n break\n if int(sqrt((i-j)/2)) == sqrt((i-j)/2):\n flag = True\n # print (i,j,sqrt((i-j)/2))\n break\n if flag == False :\n result = i\n break\nprint(result)\n\nt2 = time.time()\nprint('\\nCompleted in :', round((t2-t1)*1000,6), 'ms\\n\\n')\n\n\n\nprint('\\n--------------------------SOLUTION 2, shwetalm, USA --------------------------')\nt1 = time.time()\n\nodd_nums = []\nfor i in range(1,10**4,2):\n odd_nums.append(i)\n\nprime_nums = []\nfor i in odd_nums:\n for j in range(3,int(i**0.5)+1,2):\n if i%j == 0:\n break\n else:\n prime_nums.append(i)\n\n\nodd_nums = set(odd_nums) ^ set(prime_nums)\n\nsq_dub = []\nfor i in range(1,500):\n sq_dub.append(2*i**2)\n\n\nfor i in odd_nums:\n for j in sq_dub:\n if (i - j) in prime_nums:\n break\n elif j > i:\n print(i)\n break\n else:\n print(i)\n\nt2 = time.time()\nprint('\\nCompleted in :', 
round((t2-t1)*1000,6), 'ms\\n\\n')\n\nprint('\\n--------------------------SOLUTION 3, svamja, India, VERY NICE --------------------------')\nt1 = time.time()\n\n# Another nice problem to play with!!\n# maintain an array of double of squares and odd primes.\n# Walk though odd numbers. You can collect primes by checking existing primes,\n# or if it is a composite, then check if its difference with double of squares is a prime.\n\nsquare_doubles = []\nodd_primes = [ 3, 5, 7 ]\n\nfor i in range(1, 100):\n square_doubles.append(2*i*i)\n\nsqroot_index = 0\nsqroot = 3\n\nfor num in range(9, 100001, 2):\n is_prime = True\n\n # check if prime / composite\n\n for odd_prime in odd_primes:\n if odd_prime > sqroot: break\n if num % odd_prime == 0:\n is_prime = False\n break\n\n # prime found, save it safely\n\n if is_prime:\n odd_primes.append(num)\n if odd_primes[sqroot_index+1]*odd_primes[sqroot_index+1] < num:\n sqroot_index += 1\n sqroot = odd_primes[sqroot_index+1]\n continue\n\n # composite number -> check conjecture\n match_found = False\n for square_double in square_doubles:\n num_diff = num - square_double\n if num_diff in odd_primes:\n match_found = True\n break\n\n if not match_found:\n print (\"invalid conjecture for \", num)\n break\n\nt2 = time.time()\nprint('\\nCompleted in :', round((t2-t1)*1000,6), 'ms\\n\\n')\n\n# print('\\n--------------------------SOLUTION 4, --------------------------')\n# t1 = time.time()\n#\n#\n#\n# t2 = time.time()\n# print('\\nCompleted in :', round((t2-t1)*1000,6), 'ms\\n\\n')\n#\n# print('\\n--------------------------SOLUTION 5, --------------------------')\n# t1 = time.time()\n#\n#\n#\n# t2 = time.time()\n# print('\\nCompleted in :', round((t2-t1)*1000,6), 'ms\\n\\n')\n#\n#\n# print('\\n--------------------------SOLUTION 6, --------------------------')\n# t1 = time.time()\n#\n#\n#\n# t2 = time.time()\n# print('\\nCompleted in :', round((t2-t1)*1000,6), 'ms\\n\\n')\n#\n","sub_path":"Project EULER/pb046 Goldbach's other conjecture.py","file_name":"pb046 Goldbach's other conjecture.py","file_ext":"py","file_size_in_byte":6112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"200362799","text":"from django.core.management.base import BaseCommand, CommandError\n\nclass Command(BaseCommand):\n help = 'Updates data on age distribution from SCB.se'\n\n def handle(self, *args, **options):\n from stats.models import menAgesData\n from stats.models import womenAgesData\n import requests\n import pandas as pd\n import numpy as np\n from io import StringIO\n\n ####### API data pull for ages ########################################################################\n url = \"http://api.scb.se/OV0104/v1/doris/sv/ssd/START/BE/BE0101/BE0101A/BefolkningR1860\"\n\n json = {\n \"query\": [\n {\n \"code\": \"Alder\",\n \"selection\": {\n \"filter\": \"vs:Ålder1årA\",\n \"values\": [\n \"0\",\n \"1\",\n \"2\",\n \"3\",\n \"4\",\n \"5\",\n \"6\",\n \"7\",\n \"8\",\n \"9\",\n \"10\",\n \"11\",\n \"12\",\n \"13\",\n \"14\",\n \"15\",\n \"16\",\n \"17\",\n \"18\",\n \"19\",\n \"20\",\n \"21\",\n \"22\",\n \"23\",\n \"24\",\n \"25\",\n \"26\",\n \"27\",\n \"28\",\n \"29\",\n \"30\",\n \"31\",\n \"32\",\n \"33\",\n \"34\",\n \"35\",\n \"36\",\n \"37\",\n \"38\",\n \"39\",\n \"40\",\n \"41\",\n \"42\",\n \"43\",\n \"44\",\n \"45\",\n \"46\",\n \"47\",\n \"48\",\n \"49\",\n \"50\",\n \"51\",\n \"52\",\n \"53\",\n \"54\",\n \"55\",\n \"56\",\n \"57\",\n \"58\",\n \"59\",\n \"60\",\n \"61\",\n \"62\",\n \"63\",\n \"64\",\n \"65\",\n 
\"66\",\n \"67\",\n \"68\",\n \"69\",\n \"70\",\n \"71\",\n \"72\",\n \"73\",\n \"74\",\n \"75\",\n \"76\",\n \"77\",\n \"78\",\n \"79\",\n \"80\",\n \"81\",\n \"82\",\n \"83\",\n \"84\",\n \"85\",\n \"86\",\n \"87\",\n \"88\",\n \"89\",\n \"90\",\n \"91\",\n \"92\",\n \"93\",\n \"94\",\n \"95\",\n \"96\",\n \"97\",\n \"98\",\n \"99\",\n \"100+\"\n ]\n }\n },\n {\n \"code\": \"Kon\",\n \"selection\": {\n \"filter\": \"item\",\n \"values\": [\n \"1\",\n \"2\"\n ]\n }\n }\n ],\n \"response\": {\n \"format\": \"px\"\n }\n }\n\n r = requests.post(url=url, json=json)\n\n Agestext = StringIO(r.text)\n\n Agesdata = pd.read_csv(Agestext, sep=\"\\n\")\n\n Agesdata.columns = ['var1']\n\n year = Agesdata[Agesdata['var1'].str.contains('TIMEVAL')]\n dataindex = Agesdata.index[Agesdata['var1'] == 'DATA=']\n\n #Get the year data\n year = year.values.tolist()\n year = str(year)\n year = year[27:]\n year = year[:-4]\n year = year.replace('\"', '')\n year = year.split(',')\n year = list(map(int, year))\n\n agestext =[\"zero\",\n \"one\",\n \"two\",\n \"three\",\n \"four\",\n \"five\",\n \"six\",\n \"seven\",\n \"eight\",\n \"nine\",\n \"ten\",\n \"eleven\",\n \"twelve\",\n \"thirteen\",\n \"fourteen\",\n \"fifteen\",\n \"sixteen\",\n \"seventeen\",\n \"eighteen\",\n \"nineteen\",\n \"twenty\",\n \"twentyone\",\n \"twentytwo\",\n \"twentythree\",\n \"twentyfour\",\n \"twentyfive\",\n \"twentysix\",\n \"twentyseven\",\n \"twentyeight\",\n \"twentynine\",\n \"thirty\",\n \"thirtyone\",\n \"thirtytwo\",\n \"thirtythree\",\n \"thirtyfour\",\n \"thirtyfive\",\n \"thirtysix\",\n \"thirtyseven\",\n \"thirtyeight\",\n \"thirtynine\",\n \"forty\",\n \"fortyone\",\n \"fortytwo\",\n \"fortythree\",\n \"fortyfour\",\n \"fortyfive\",\n \"fortysix\",\n \"fortyseven\",\n \"fortyeight\",\n \"fortynine\",\n \"fifty\",\n \"fiftyone\",\n \"fiftytwo\",\n \"fiftythree\",\n \"fiftyfour\",\n \"fiftyfive\",\n \"fiftysix\",\n \"fiftyseven\",\n \"fiftyeight\",\n \"fiftynine\",\n \"sixty\",\n \"sixtyone\",\n \"sixtytwo\",\n \"sixtythree\",\n \"sixtyfour\",\n \"sixtyfive\",\n \"sixtysix\",\n \"sixtyseven\",\n \"sixtyeight\",\n \"sixtynine\",\n \"seventy\",\n \"seventyone\",\n \"seventytwo\",\n \"seventythree\",\n \"seventyfour\",\n \"seventyfive\",\n \"seventysix\",\n \"seventyseven\",\n \"seventyeight\",\n \"seventynine\",\n \"eighty\",\n \"eightyone\",\n \"eightytwo\",\n \"eightythree\",\n \"eightyfour\",\n \"eightyfive\",\n \"eightysix\",\n \"eightyseven\",\n \"eightyeight\",\n \"eightynine\",\n \"ninety\",\n \"ninetyone\",\n \"ninetytwo\",\n \"ninetythree\",\n \"ninetyfour\",\n \"ninetyfive\",\n \"ninetysix\",\n \"ninetyseven\",\n \"ninetyeight\",\n \"ninetynine\",\n \"hundred\"]\n\n #Create ages variable\n ages = []\n for i in range(0, 101, 1):\n ages.append(i)\n\n #ages.append(None)\n\n # DATA FOR MEN #######################################################\n #Create empty data frame for men\n menlist = []\n\n #For-loop to get all the data for men in each age group\n for i in range(1, 202, 2):\n menindex = dataindex + i\n men = Agesdata.iloc[menindex]\n men = men.values.tolist()\n men = str(men)\n men = men[3:]\n men = men[:-4]\n men = men.split(\" \")\n men = list(map(int, men))\n menlist.append(men)\n\n mendf = pd.DataFrame(menlist, columns=year)\n mendf['ages'] = mendf.index\n men_melted = pd.melt(mendf, id_vars=['ages'], var_name='year')\n men_melted.columns = ['ages', 'year', 'men']\n\n\n # DATA FOR WOMEN #######################################################\n #Create empty data frame for men\n womenlist = []\n\n #For-loop 
to get all the data for men in each age group\n for i in range(2, 203, 2):\n womenindex = dataindex + i\n women = Agesdata.iloc[womenindex]\n women = women.values.tolist()\n women = str(women)\n women = women[3:]\n women = women[:-4]\n women = women.split(\" \")\n women = list(map(int, women))\n womenlist.append(women)\n\n womendf = pd.DataFrame(womenlist, columns=year)\n womendf['ages'] = womendf.index\n women_melted = pd.melt(womendf, id_vars=['ages'], var_name='year')\n women_melted.columns = ['ages', 'year', 'women']\n\n # DATA UPDATE SCRIPT ###################################################################################\n #Pull data from the current database into a data frame, to check against the data frame created through the API pull.\n menagesdata_from_db = pd.DataFrame.from_records(menAgesData.objects.all().values())\n\n womenagesdata_from_db = pd.DataFrame.from_records(womenAgesData.objects.all().values())\n\n # CHECK DATA FOR THE AGE DISTRIBUTION OF MEN ########################################################################\n #Check if there is data in the website database. If there is no data, upload the API-data.\n if len(menagesdata_from_db) == 0:\n #Convert the data frame to a dictionary, for upload to Django data base\n menagesdict = men_melted.to_dict('records')\n\n #Upload-script\n menAgesData.objects.bulk_create(menAgesData(**vals) for vals in menagesdict)\n print(\"New data on the age distribution of men have been added\")\n\n #If there is data in the database, prepare that data for comparison with the API-data.\n else:\n del menagesdata_from_db['id']\n names = list(menagesdata_from_db)\n men_melted_arranged = men_melted[names]\n\n #Compare the data from the API-pull to the data from the database. If they are the same, print that the data are already up to date.\n if men_melted_arranged.equals(menagesdata_from_db) == True:\n print(\"The data on the age distribution of men are already up to date.\")\n\n #If the two dataframes are different, delete the database-data and import the API-data.\n else:\n #Delete all entries in the model\n menAgesData.objects.all().delete()\n #Convert the API data frame to a dictionary, for upload to Django data base\n menagesdict = men_melted.to_dict('records')\n #Upload-script\n menAgesData.objects.bulk_create(menAgesData(**vals) for vals in menagesdict)\n print(\"The data on the age distribution of men have been updated\")\n\n # CHECK DATA FOR THE AGE DISTRIBUTION OF WOMEN ######################################################################\n if len(womenagesdata_from_db) == 0:\n #Convert the data frame to a dictionary, for upload to Django data base\n women_melteddict = women_melted.to_dict('records')\n\n #Upload-script\n womenAgesData.objects.bulk_create(womenAgesData(**vals) for vals in women_melteddict)\n print(\"New data on the age distribution of women have been added\")\n\n #If there is data in the database, prepare that data for comparison with the API-data.\n else:\n del womenagesdata_from_db['id']\n names = list(womenagesdata_from_db)\n women_melted_arranged = women_melted[names]\n\n #Compare the data from the API-pull to the data from the database. 
If they are the same, print that the data are already up to date.\n if women_melted_arranged.equals(womenagesdata_from_db) == True:\n print(\"The data on the age distribution of women are already up to date.\")\n\n #If the two dataframes are different, delete the database-data and import the API-data.\n else:\n #Delete all entries in the model\n womenAgesData.objects.all().delete()\n #Convert the API data frame to a dictionary, for upload to Django data base\n women_melteddict = women_melted.to_dict('records')\n\n #Upload-script\n womenAgesData.objects.bulk_create(womenAgesData(**vals) for vals in women_melteddict)\n print(\"New data on the age distribution of women have been added\")\n","sub_path":"stats/management/commands/update_agesdata.py","file_name":"update_agesdata.py","file_ext":"py","file_size_in_byte":11864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"584808016","text":"\"\"\"empty message\n\nRevision ID: dd66f04233a5\nRevises: 13ef50762226\nCreate Date: 2017-08-07 13:45:56.995770\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'dd66f04233a5'\ndown_revision = '13ef50762226'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('user', sa.Column('first_name', sa.String(length=50), nullable=False))\n op.add_column('user', sa.Column('last_name', sa.String(length=50), nullable=False))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('user', 'last_name')\n op.drop_column('user', 'first_name')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/dd66f04233a5_.py","file_name":"dd66f04233a5_.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"9188925","text":"\nprovinceLst = [\"ST\", \"BĐ\", \"BTL\", \"CG\", \"ĐĐ\", \"HBT\"]\npopulationLst = [150300, 247100, 333300, 266800, 420900, 318000]\n\nfor i in range(len(populationLst)):\n if (populationLst[i] == max(populationLst)):\n maxInd = i\n elif (populationLst[i] == min(populationLst)):\n minInd = i\n\nprint()\nprint(\" Max PoP Name:\", provinceLst[maxInd])\nprint(\" Min PoP Name:\", provinceLst[minInd])\nprint()\n","sub_path":"Session-5/miniHack/part5/minMax2Name.py","file_name":"minMax2Name.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"159029735","text":"from xu_ly_su_kien_lib import *\n\ndic = {}\ni = 1\nwhile i == 1:\n try:\n dic = tao_danh_sach_su_kien(dic)\n i = 0\n except (AssertionError, TypeError, ValueError, IndexError):\n print('Nhập định dạng ngày giờ không đúng, vui lòng nhập lại')\n i = 1\n else:\n in_su_kien(dic)\n s_find = input('Nhập sự kiện cần tìm \\t')\n tim_su_kien(dic, s_find)\n thong_ke_thang_5(dic)\n","sub_path":"thi_python_coban/xu_ly_su_kien.py","file_name":"xu_ly_su_kien.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"605967457","text":"import pygame\n\nfrom player import Player\nfrom misc_functions import handle_events\n\nMASKFILE = \"mask.png\"\nWIDTH = 640\nHEIGHT = 480\nFPS = 60\nCOLOR_GREEN = pygame.Color('green')\nCOLOR_WHITE = pygame.Color('white')\n\n\npygame.init()\n\nscreen = 
pygame.display.set_mode((WIDTH, HEIGHT))\nclock = pygame.time.Clock()\n\nplayer = Player(screen)\nmask = pygame.image.load(MASKFILE)\n\nwhile True:\n clock.tick_busy_loop(FPS)\n screen.fill(COLOR_WHITE)\n\n handle_events(player)\n\n player.draw()\n\n screen.blit(mask, (0, 0))\n pygame.display.flip()\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"79352969","text":"def fatorial(num, show=False):\n \"\"\"\n => Calcula o fatorial de um número.\n :param num: o número a ser calculado.\n :param show: (opcional) mostra ou não a conta.\n :return:O valor do fatorial do número num.\n \"\"\"\n print('-' * 20)\n valor = 1\n for num in range(num, 0, -1):\n if show is True:\n print(f'{num}', end=' ')\n print('X' if num > 1 else '=', end=' ')\n valor *= num\n return valor\n\n\nprint(fatorial(5, True))\n","sub_path":"funcoes/fatorial.py","file_name":"fatorial.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"71694121","text":"import gradio as gr\nfrom transformers import GPT2LMHeadModel, AutoTokenizer, pipeline\nimport torch\n\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n# load pretrained + finetuned GPT2\nmodel = GPT2LMHeadModel.from_pretrained(\"./model\")\nmodel = model.to(device)\n\n# create tokenizer\ntokenizer = AutoTokenizer.from_pretrained(\n \"gpt2\", \n pad_token='<|endoftext|>'\n)\n\ntrump = pipeline(\"text-generation\", model=model, tokenizer=tokenizer, config={\"max_length\":140})\n\ndef generate(text):\n result = trump(text, num_return_sequences=1)\n return result[0][\"generated_text\"].replace('\"', '') # remove quotation marks\n\nexamples = [\n [\"Why does the lying news media\"],\n [\"The democrats have\"],\n [\"Today I'll be\"],\n]\n\ndemo = gr.Interface(\n fn=generate,\n inputs=gr.inputs.Textbox(lines=5, label=\"Prompt\"),\n outputs=gr.outputs.Textbox(label=\"Generated Trump Tweet\"),\n examples=examples\n)\n\ndemo.launch()","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"627103175","text":"# coding:utf-8\r\nfrom xml.dom.minidom import parse\r\nimport xml.dom.minidom\r\nimport os\r\nimport cv2\r\nimport numpy as np\r\nimport traceback\r\nimport math\r\nfrom utils import BoxDiagram\r\n\r\n'''\r\n作者的caption使用了统一符号的操作,getTrain.py未做相应处理,现将其\r\n原caption文件用作标注文件,并生成.align文件\r\n'''\r\n\r\nFLAG = \"train\"\r\n\r\n# ************************加载符号词表*************************************\r\nfile = open(\"dictionary.txt\")\r\nsymbol_dict = {}\r\nmax_length = -1 # 最长的symbol所包含的字符长度\r\nmax_length_symbol = \"\"\r\nwhile 1:\r\n lines = file.readlines(100000)\r\n if not lines:\r\n break\r\n for line in lines:\r\n temp_list = line.split()\r\n if len(temp_list) == 2:\r\n a = temp_list[0].strip()\r\n b = temp_list[1].strip()\r\n symbol_dict[a] = b\r\n if len(a) > max_length:\r\n max_length = len(a)\r\n max_length_symbol = a\r\n\r\nprint(symbol_dict)\r\nprint(\"max_length: \" + str(max_length))\r\nprint(max_length_symbol)\r\n# *************************************************************\r\n# 该代码使用的caption文件为作者原本caption,其做了格式统一化处理,先加载其文件\r\norigin_dict = {}\r\norigin_train_caption = open(\"caption/origin_\" + FLAG + \"_caption.txt\", \"r\")\r\nwhile 1:\r\n lines = origin_train_caption.readlines(100000)\r\n if not 
lines:\r\n break\r\n for line in lines:\r\n temp_list = line.strip().split()\r\n temp_str = \"\"\r\n for i, x in enumerate(temp_list[1:]):\r\n if x != \"\":\r\n temp_str += x\r\n if i != len(temp_list[1:]) - 1:\r\n temp_str += \" \"\r\n origin_dict[temp_list[0]] = temp_str\r\norigin_train_caption.close()\r\n\r\n# ***********************************************************************\r\n\r\n# 因为CROHME 2014 数据不同类型文件坐标不统一,因此在去除重复点之前先对齐到标准文件\r\n# standard_X standard_Y则是标准,根据统一的641个文件计算而出\r\nbias = 1\r\nstandard_X = 659.0263006313946 * bias\r\n\r\ntotal = 0\r\nannotation_type = {}\r\nerror_count = 0\r\nnum_remove_point = 0\r\n\r\n'''\r\nscale为缩放比例,对于某些以浮点数为坐标的inkml,cv2无法画出它们的坐标,需要线性对标到标准文件\r\n在删除重复点时选用的对标文件为106_Nina_origin.jpg系列\r\n'''\r\n\r\n\r\ndef scaleTrace(now_x, traceid2xy):\r\n max_x = -1\r\n max_y = -1\r\n min_x = 99999999999\r\n min_y = 99999999999\r\n new_traceid2xy = []\r\n scale_x = now_x / standard_X\r\n for i, x in enumerate(traceid2xy):\r\n temp_list = []\r\n for j, y in enumerate(x):\r\n temp_list.append([y[0] / scale_x, y[1] / scale_x, y[2], y[3]])\r\n if y[0] / scale_x > max_x:\r\n max_x = y[0] / scale_x\r\n if y[1] / scale_x > max_y:\r\n max_y = y[1] / scale_x\r\n if y[0] / scale_x < min_x:\r\n min_x = y[0] / scale_x\r\n if y[1] / scale_x < min_y:\r\n min_y = y[1] / scale_x\r\n new_traceid2xy.append(temp_list)\r\n return new_traceid2xy, max_x, max_y, min_x, min_y\r\n\r\n\r\n'''\r\n按照trace把公式图片画出来\r\n'''\r\n\r\n\r\ndef drawPictureByTrace(traceid2xy, filename, pic_output_path, min_x, min_y, max_x, max_y):\r\n img_ = np.full((int(max_y - min_y) + 1, int(max_x - min_x) + 1, 3), (255, 255, 255), np.uint8)\r\n for i, x in enumerate(traceid2xy):\r\n for j, y in enumerate(x):\r\n if j == 0:\r\n continue\r\n try:\r\n cv2.line(img_, (int(x[j - 1][0] - min_x), int(x[j - 1][1] - min_y)),\r\n (int(y[0] - min_x), int(y[1] - min_y)), (0, 0, 0), 2)\r\n except:\r\n traceback.print_exc()\r\n print(\"what\")\r\n cv2.imwrite(pic_output_path + \"/\" + filename + \".jpg\", img_)\r\n\r\n\r\n'''\r\n传入symbol_label,返回以空格隔开的序列\r\n'''\r\n\r\n\r\ndef deal_symbol_label(symbol_label):\r\n result_str = \"\"\r\n start_index = 0\r\n while start_index < len(symbol_label):\r\n if symbol_label[start_index] == \" \" or symbol_label[start_index] == \"$\":\r\n start_index += 1\r\n continue\r\n sign = 0\r\n i = 0\r\n while i < max_length:\r\n if symbol_label[start_index:start_index + max_length - i] in symbol_dict:\r\n result_str += symbol_label[start_index:start_index + max_length - i]\r\n result_str += \" \"\r\n start_index = start_index + max_length - i\r\n sign = 1\r\n break;\r\n i += 1\r\n if sign == 0:\r\n print(symbol_label + \" 中有字典中没有的字符\")\r\n start_index = len(symbol_label)\r\n result_str = \"\"\r\n return result_str\r\n\r\n\r\n'''\r\n计算T_cos与T_dis,去除多余点,以每个笔画为单位\r\n'''\r\n\r\n\r\ndef rve_duplicate(traceid2xy, T_dis, T_cos):\r\n count = 0\r\n for i, x in enumerate(traceid2xy):\r\n j = 0\r\n while j < len(x):\r\n if j == 0:\r\n # temp_list.append([x[j][0], x[j][1], x[j][2], x[j][3]])\r\n j += 1\r\n continue\r\n real_dis = ((x[j][0] - x[j - 1][0]) ** 2 + (x[j][1] - x[j - 1][1]) ** 2) ** 0.5\r\n if not real_dis < T_dis:\r\n # temp_list.append([x[j][0], x[j][1], x[j][2], x[j][3]])\r\n j += 1\r\n else:\r\n if j != len(x) - 1:\r\n x.pop(j)\r\n print(\"因为距离删除一个点\")\r\n else:\r\n print(\"原本要删除的点为抬笔点,保留\")\r\n j += 1\r\n\r\n count += 1\r\n for i, x in enumerate(traceid2xy):\r\n j = 0\r\n while j < len(x):\r\n if j == 0 or j == len(x) - 1:\r\n j += 1\r\n continue\r\n if (((x[j][0] - x[j - 1][0]) ** 2 + (x[j][1] - 
x[j - 1][1]) ** 2) ** 0.5 * (\r\n ((x[j + 1][0] - x[j][0]) ** 2 + (x[j + 1][1] - x[j][1]) ** 2) ** 0.5)) == 0:\r\n j += 1\r\n continue\r\n real_cos = abs(\r\n ((x[j][0] - x[j - 1][0]) * (x[j + 1][0] - x[j][0]) + (x[j][1] - x[j - 1][1]) * (\r\n x[j + 1][1] - x[j][1])) / (\r\n ((x[j][0] - x[j - 1][0]) ** 2 + (x[j][1] - x[j - 1][1]) ** 2) ** 0.5 * (\r\n ((x[j + 1][0] - x[j][0]) ** 2 + (x[j + 1][1] - x[j][1]) ** 2) ** 0.5)))\r\n if not real_cos < T_cos:\r\n j += 1\r\n else:\r\n x.pop(j)\r\n print(\"因为角度删除一个点\")\r\n count += 1\r\n return traceid2xy, count\r\n\r\n\r\n'''\r\n抽取8维特征,前6维分别为x,y,xi+1 - xi,yi+1 - yi,xi+2 - xi,yi+2 - yi\r\n最后两维为 1 0 代表落笔 0 1 代表提笔 \r\n'''\r\n\r\n\r\ndef feature_extraction(traceid2xy):\r\n new_traceid2xy = []\r\n for i, x in enumerate(traceid2xy):\r\n if i != len(traceid2xy) - 1:\r\n if i != len(traceid2xy) - 2:\r\n new_traceid2xy.append([x[0], x[1], traceid2xy[i + 1][0] - x[0], traceid2xy[i + 1][1] - x[1],\r\n traceid2xy[i + 2][0] - x[0], traceid2xy[i + 2][1] - x[1], 0.0, x[2], x[3]])\r\n else:\r\n new_traceid2xy.append([x[0], x[1], traceid2xy[i + 1][0] - x[0], traceid2xy[i + 1][1] - x[1],\r\n 0.0, 0.0, 0.0, x[2], x[3]])\r\n else:\r\n new_traceid2xy.append([x[0], x[1], 0.0, 0.0, 0.0, 0.0, 0.0, x[2], x[3]])\r\n return new_traceid2xy\r\n\r\n\r\n'''\r\n减均值μx 除以标准差δx\r\n\r\n'''\r\n\r\n\r\ndef z_score(traceid2xy):\r\n u_x_numerator = 0\r\n u_x_denominator = 0\r\n u_y_numerator = 0\r\n u_y_denominator = 0\r\n for i, x in enumerate(traceid2xy):\r\n for j, y in enumerate(x):\r\n if j == 0:\r\n continue\r\n L = ((y[0] - x[j - 1][0]) ** 2 + (y[1] - x[j - 1][1]) ** 2) ** 0.5\r\n u_x_numerator += L * (y[0] + x[j - 1][0]) / 2\r\n u_x_denominator += L\r\n u_y_numerator += L * (y[1] + x[j - 1][1]) / 2\r\n u_y_denominator += L\r\n u_x = u_x_numerator / u_x_denominator\r\n u_y = u_y_numerator / u_y_denominator\r\n delta_x_numerator = 0\r\n delta_x_denominator = 0\r\n for i, x in enumerate(traceid2xy):\r\n for j, y in enumerate(x):\r\n if j == 0:\r\n continue\r\n L = ((y[0] - x[j - 1][0]) ** 2 + (y[1] - x[j - 1][1]) ** 2) ** 0.5\r\n delta_x_numerator += L / 3 * (\r\n (y[0] - u_x) ** 2 + (x[j - 1][0] - u_x) ** 2 + (x[j - 1][0] - u_x) * (y[0] - u_x))\r\n delta_x_denominator += L\r\n\r\n delta_x = (delta_x_numerator / delta_x_denominator) ** 0.5\r\n\r\n new_traceid2xy = []\r\n for i, x in enumerate(traceid2xy):\r\n for j, y in enumerate(x):\r\n new_traceid2xy.append([(y[0] - u_x) / delta_x, (y[1] - u_y) / delta_x, y[2], y[3]])\r\n return new_traceid2xy\r\n\r\n\r\n'''\r\n将原caption文件结果以空格隔开形式返回\r\n'''\r\n\r\n\r\ndef deal_single_file(parent, filename, caption_file, ascii_output_path, align_output_path, pic_output_path,\r\n origin_dict):\r\n file_name = os.path.join(parent, filename)\r\n symbol_label = \"\"\r\n document = xml.dom.minidom.parse(file_name + \".inkml\")\r\n collection = document.documentElement\r\n count = 0\r\n # movie.getElementsByTagName('description')[0]\r\n annotations = collection.getElementsByTagName(\"annotation\")\r\n # 获取 symbol_label\r\n for annotation in annotations:\r\n if annotation.hasAttribute(\"type\"):\r\n if annotation.getAttribute(\"type\") == \"truth\":\r\n count += 1\r\n if count == 1:\r\n symbol_label = annotation.childNodes[0].data\r\n if count == 2:\r\n temp = annotation.childNodes[0].data.strip()\r\n if not temp in annotation_type:\r\n annotation_type[temp] = 1\r\n print(\"新类型:\" + file_name + \".inkml\")\r\n else:\r\n annotation_type[temp] += 1\r\n break;\r\n\r\n assert symbol_label != \"\"\r\n\r\n # 
**************************原代码**************************\r\n # result = deal_symbol_label(symbol_label.strip())\r\n # ** ** ** ** ** ** ** ** **新代码** ** ** **\r\n '''\r\n 将原caption文件结果以空格隔开形式返回\r\n '''\r\n if filename in origin_dict:\r\n result = origin_dict[filename]\r\n else:\r\n result = \"\"\r\n\r\n # ***********************************************************\r\n traceid2xy = []\r\n # 将所有点提出\r\n traces = collection.getElementsByTagName(\"trace\")\r\n\r\n total_x = 0\r\n total_y = 0\r\n num_points = 0\r\n for trace in traces:\r\n temp_result = []\r\n trace_str = trace.childNodes[0].data.strip()\r\n temp_list = trace_str.split(\",\")\r\n for i, xy in enumerate(temp_list):\r\n x_y = xy.split()\r\n x = float(x_y[0])\r\n y = float(x_y[1])\r\n total_x += x\r\n total_y += y\r\n if i != len(temp_list) - 1:\r\n temp_result.append([x, y, 1, 0])\r\n else:\r\n temp_result.append([x, y, 0, 1])\r\n num_points += 1\r\n traceid2xy.append(temp_result)\r\n\r\n avg_x = total_x / float(num_points)\r\n avg_y = total_y / float(num_points)\r\n\r\n print(\"avg_x is \" + str(avg_x))\r\n print(\"avg_y is \" + str(avg_y))\r\n\r\n # ***********************计算标准文件用***********************\r\n # return avg_x, avg_y\r\n # ************************************************************\r\n\r\n # 进行scale变换\r\n traceid2xy, max_x, max_y, min_x, min_y = scaleTrace(avg_x, traceid2xy)\r\n\r\n print(\"移除重复点之前\")\r\n drawPictureByTrace(traceid2xy, filename + \"_origin\", pic_output_path, min_x, min_y, max_x, max_y)\r\n # 移除重复点\r\n # traceid2xy, num_remove_point = rve_duplicate(traceid2xy, 0.5, -9999)\r\n # traceid2xy, num_remove_point = rve_duplicate(traceid2xy, .5 * bias, math.pi / 8)\r\n traceid2xy, num_remove_point = rve_duplicate(traceid2xy, 1.0 * bias, math.pi / 4)\r\n print(\"移除重复点之后\")\r\n drawPictureByTrace(traceid2xy, filename + \"_after\", pic_output_path, min_x, min_y, max_x, max_y)\r\n # Z-score正则化,二元列表的行元素不再为stroken,为points\r\n traceid2xy = z_score(traceid2xy)\r\n # 使用箱线图去除异常点\r\n traceid2xy = BoxDiagram(traceid2xy)\r\n # 转换为8维特征\r\n traceid2xy = feature_extraction(traceid2xy)\r\n\r\n # ***************将feature输出到文件*******************\r\n o = open(os.path.join(ascii_output_path, filename) + \".ascii\", \"w\")\r\n for x in traceid2xy:\r\n for j, y in enumerate(x):\r\n o.write(str(y))\r\n if j != len(x) - 1:\r\n o.write(\" \")\r\n o.write(\"\\n\")\r\n o.close()\r\n # ***************将symbol对应的trace_id提出*******************\r\n symbol2traceId = {}\r\n traceGroups = collection.getElementsByTagName(\"traceGroup\")\r\n for i, x in enumerate(traceGroups):\r\n if i == 0:\r\n continue\r\n annotation = x.getElementsByTagName('annotation')[0]\r\n symbol2traceId[annotation.childNodes[0].data.strip()] = []\r\n for y in x.getElementsByTagName('traceView'):\r\n symbol2traceId[annotation.childNodes[0].data.strip()].append(int(y.getAttribute(\"traceDataRef\").strip()))\r\n\r\n print(symbol2traceId)\r\n\r\n o = open(os.path.join(align_output_path, filename) + \".align\", \"w\")\r\n\r\n iteration = result.strip().split()\r\n for j, x in enumerate(iteration):\r\n o.write(x + \" \")\r\n if x in symbol2traceId:\r\n for i, y in enumerate(symbol2traceId[x]):\r\n if i != len(symbol2traceId[x]) - 1:\r\n o.write(str(y) + \" \")\r\n else:\r\n o.write(str(y))\r\n else:\r\n o.write(\"-1\")\r\n if j != len(iteration) - 1:\r\n o.write(\"\\n\")\r\n o.close()\r\n # ***********************************************************\r\n if result != \"\":\r\n caption_file.write(filename)\r\n caption_file.write(\"\\t\")\r\n caption_file.write(result)\r\n 
    # ***********************************************************\r\n    if result != \"\":\r\n        caption_file.write(filename)\r\n        caption_file.write(\"\\t\")\r\n        caption_file.write(result)\r\n        caption_file.write(\"\\n\")\r\n        return True, num_remove_point\r\n    else:\r\n        return False, num_remove_point\r\n\r\n\r\n# ********************* for computing the reference file ***********************\r\ntotal_avg_x = 0\r\ntotal_avg_y = 0\r\n# ************************************************************\r\n\r\ninput_path = FLAG + \"_data/\"\r\n# input_path = \"another_\" + FLAG + \"_data/\"\r\ncaption_output_path = \"./caption/\"\r\nascii_output_path = \"./on-ascii-\" + FLAG + \"/\"\r\nalign_output_path = \"./on-align-\" + FLAG + \"/\"\r\npic_output_path = \"./pic-\" + FLAG + \"/\"\r\n\r\nif not os.path.exists(caption_output_path):\r\n    os.makedirs(caption_output_path)\r\nif not os.path.exists(ascii_output_path):\r\n    os.makedirs(ascii_output_path)\r\nif not os.path.exists(align_output_path):\r\n    os.makedirs(align_output_path)\r\nif not os.path.exists(pic_output_path):\r\n    os.makedirs(pic_output_path)\r\n\r\ncaption_file = caption_output_path + FLAG + \"_caption.txt\"\r\ncaption_file = open(caption_file, \"w\")\r\nfor parent, dirnames, filenames in os.walk(input_path, followlinks=True):\r\n    for filename in filenames:\r\n        file_path = os.path.join(parent, filename)\r\n        print('file name: %s' % filename)\r\n        print('full file path: %s\\n' % file_path)\r\n        if file_path[-6:] == \".inkml\":\r\n            sign, _ = deal_single_file(parent, filename[:-6], caption_file, ascii_output_path, align_output_path,\r\n                                       pic_output_path, origin_dict)\r\n            if not sign:\r\n                error_count += 1\r\n            num_remove_point += _\r\n            # ********************* for computing the reference file ***********************\r\n            # delta_x, delta_y = deal_single_file(parent, filename[:-6], caption_file, ascii_output_path,\r\n            #                                     align_output_path,\r\n            #                                     pic_output_path)\r\n            # total_avg_x += delta_x\r\n            # total_avg_y += delta_y\r\n            # ************************************************************\r\n            total += 1\r\n\r\n# ********************* for computing the reference file ***********************\r\n# print(\"reference X \" + str(total_avg_x / total))\r\n# print(\"reference Y \" + str(total_avg_y / total))\r\n# ************************************************************\r\n\r\ncaption_file.close()\r\nprint(\"annotation type:\")\r\nfor x, y in annotation_type.items():\r\n    print(x + \":\" + str(y))\r\n\r\nprint(\"Processed \" + str(total) + \" files in total, with \" + str(error_count) + \" error files\")\r\nif total - error_count == 0:\r\n    print(\"All files failed\")\r\nelse:\r\n    print(\"Average number of points removed per file: \" + str(float(num_remove_point) / (total - error_count)))\r\n","sub_path":"v2/getTrainWithOriginLabel.py","file_name":"getTrainWithOriginLabel.py","file_ext":"py","file_size_in_byte":16945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
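The directory walk in the sample above filters extensions by slicing `file_path[-6:]`, which works, but the same traversal can be expressed more directly with `pathlib`; a hedged sketch (`iter_inkml` is an illustrative helper, not part of the sample):

```python
from pathlib import Path

def iter_inkml(root):
    # Yield (parent_dir, stem) for every .inkml file under root, matching the
    # (parent, filename[:-6]) pair the os.walk loop passes to deal_single_file.
    for p in sorted(Path(root).rglob("*.inkml")):
        yield str(p.parent), p.stem

# usage: for parent, stem in iter_inkml("train_data/"): ...
```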
{"seq_id":"1365067","text":"# -*- coding: utf-8 -*-\nimport logging\n\nfrom Products.CMFCore.utils import getToolByName\n\nfrom Products.GenericSetup.upgrade import listUpgradeSteps\n\n\n_PROJECT = 'sc.galleria.support'\n_PROFILE_ID = 'sc.galleria.support:default'\n\n\ndef install(context):\n\n    # Ordinarily, GenericSetup handlers check for the existence of XML files.\n    # Here, we are not parsing an XML file, but we use this text file as a\n    # flag to check that we actually meant for this import step to be run.\n    # The file is found in profiles/default.\n\n    if context.readDataFile('sc.galleria.support_default.txt') is None:\n        return\n\n\ndef run_upgrades(context):\n    ''' Run Upgrade steps\n    '''\n    if context.readDataFile('sc.galleria.support_default.txt') is None:\n        return\n    logger = logging.getLogger(_PROJECT)\n    site = context.getSite()\n    setup_tool = getToolByName(site, 'portal_setup')\n    version = setup_tool.getLastVersionForProfile(_PROFILE_ID)\n    upgradeSteps = listUpgradeSteps(setup_tool, _PROFILE_ID, version)\n    upgradeSteps = sorted(upgradeSteps, key=lambda step: step['sortkey'])\n\n    for step in upgradeSteps:\n        oStep = step.get('step')\n        if oStep is not None:\n            oStep.doStep(setup_tool)\n            msg = \"Ran upgrade step %s for profile %s\" % (oStep.title,\n                                                          _PROFILE_ID)\n            setup_tool.setLastVersionForProfile(_PROFILE_ID, oStep.dest)\n            logger.info(msg)\n\ndef uninstall(context):\n    ''' Run uninstall steps\n    '''\n\n    if context.readDataFile('sc.galleria.support_uninstall.txt') is None:\n        return\n\n    portal = context.getSite()\n    portal_conf = getToolByName(portal, 'portal_controlpanel')\n    portal_conf.unregisterConfiglet('@@galleria-settings')\n","sub_path":"src/sc/galleria/support/setuphandlers.py","file_name":"setuphandlers.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"495448233","text":"from concurrent.futures import ProcessPoolExecutor\nimport collections\nimport xlrd\nimport sys\nimport time\n\n'''\nDescription: Script reads in Excel file of all vehicle fires and extracts\n             vehicle make, year, and origin of fire. These are placed within\n             their appropriate dictionaries and ultimately written out to two\n             files for the machine learning process: samples.csv and targets.csv.\nUSAGE: python parser.py\n'''\n\nsys.stderr.write(\"\\nOpening workbook..\")\n\n# Start taking time, bruh..\nstart = time.time()\n\n# Open Excel file\nworkbook = xlrd.open_workbook(\"Vehicles2006-2011.xlsx\")\n\nsys.stderr.write(\"done.\\n\")\n\nsys.stderr.write(\"Opening sheet..\")\n\n# Specify sheet within file to open\nsheet = workbook.sheet_by_name(\"all car fires 2006-2011\")\n\nsys.stderr.write(\"done.\\n\")\n\n# Dictionary of makes that we care about..\nmakes = {\"Chevrolet\": {\"year\": [], \"origin\": []},\n         \"Dodge\": {\"year\": [], \"origin\": []},\n         \"Ford\": {\"year\": [], \"origin\": []}\n         }\n\nnum_rows = sheet.nrows\n\norigin = 21\nmake = 35\nyear = 37\n\nsys.stderr.write(\"Populating dictionaries..\")\n\ndef add_to_dict(n_make):\n    try:\n        year_val = int(sheet.cell(row, year).value)\n        origin_val = str(sheet.cell(row, origin).value)\n\n        # Convert \"undetermined\" to 100\n        if (origin_val == \"UU\"):\n            origin_val = 100\n\n        # Make sure year and origin data exist\n        if (year_val != xlrd.empty_cell.value and origin_val != xlrd.empty_cell.value):\n            \n            # Add model year to \"Ford\" key within dictionary of makes\n            makes[n_make][\"year\"].append(year_val)\n            \n            # Add model origin to \"Ford\" key within dictionary of makes\n            makes[n_make][\"origin\"].append(int(float(origin_val)))\n    finally:\n        return\n\n# Traverse across rows\nfor row in range(1, num_rows):\n    \n    # If make is a Chevrolet\n    if (sheet.cell(row, make).value == \"CH\"):\n        add_to_dict(\"Chevrolet\")\n\n    # If make is a Dodge\n    elif (sheet.cell(row, make).value == \"DO\"):\n        add_to_dict(\"Dodge\")\n\n    # If make is a Ford\n    elif (sheet.cell(row, make).value == \"FO\"):\n        add_to_dict(\"Ford\")\n\nsys.stderr.write(\"done.\\n\")\n\nsys.stderr.write(\"Writing to files..\")\n\n# Write makes dictionary to files:\n#   Contained in the \"samples.csv\" are \"Make, year\"\n#   Contained in the \"targets.csv\" are the fire origins\ndef write_dict(items, label):\n    for key, value in items:\n        for val in value:\n            if (key == \"year\"):\n                open(\"ml/samples.csv\", 'a+').write(\"{0}, {1}\\n\".format(label, val))\n            else:\n                open(\"ml/targets.csv\", 'a+').write(\"{0}\\n\".format(val))\n\n
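`write_dict` re-opens its output file once per row, which gets slow for large dictionaries; a hedged variant that holds each file open for the duration (the `write_dict_batched` name and default paths mirror the logic above but are assumptions, not the original API):

```python
def write_dict_batched(items, label,
                       samples_path="ml/samples.csv",
                       targets_path="ml/targets.csv"):
    # Same output format as write_dict above, but each file is opened once.
    with open(samples_path, "a") as samples, open(targets_path, "a") as targets:
        for key, values in items:
            for val in values:
                if key == "year":
                    samples.write("{0}, {1}\n".format(label, val))
                else:
                    targets.write("{0}\n".format(val))
```

It can be submitted to the executor below unchanged.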
# Parallelize the writing of the dictionary..one worker process per dictionary key\nwith ProcessPoolExecutor(max_workers=3) as e:\n    e.submit(write_dict, makes[\"Chevrolet\"].items(), 1000)\n    e.submit(write_dict, makes[\"Dodge\"].items(), 1001)\n    e.submit(write_dict, makes[\"Ford\"].items(), 1002)\n\nsys.stderr.write(\"done.\\n\")\n\n# End the time\nend = time.time() - start\nsys.stderr.write(\"\\nTotal elapsed time: {0}.\\n\\n\".format(round(end, 2)))\nexit()\n","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":3119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"616896915","text":"from maya import cmds\n\nfrom ... import str_utils\n\nfrom . import baseSkeletonPart\nfrom . import constants\nfrom . import rig_utils\n\nclass Hand(baseSkeletonPart.SkeletonPart):\n    HAS_PARITY = True\n\n    AUTO_NAME = False  # this part will handle its own naming...\n\n    # odd indices are left sided, even are right sided\n    FINGER_IDX_NAMES = ('Thumb', 'Index', 'Mid', 'Ring', 'Pinky',\n        'Sixth', 'Seventh', 'Eighth', 'Ninth', 'Tenth')\n\n    PLACER_NAMES = FINGER_IDX_NAMES\n\n    def getParity(self):\n        \"\"\"\n        the parity of a hand comes from the limb it's parented to, not the idx of\n        the finger part itself...\n        \"\"\"\n\n        parent = self.getParent()\n        try:\n            # if the parent has parity use it\n            parentPart = baseSkeletonPart.SkeletonPart.InitFromItem(parent)\n\n        except baseSkeletonPart.SkeletonError:\n            # otherwise use the instance's index for parity...\n            return super(Hand, self).getParity()\n\n        return str_utils.Parity(parentPart.getParity())\n\n    def iterFingerChains(self):\n        \"\"\"\n        iterates over each finger chain in the hand - a chain is simply a list of\n        joint names ordered hierarchically\n        \"\"\"\n        for base in self.bases:\n            children = cmds.listRelatives(base, ad=True, path=True, type='joint') or []\n            children = [base] + baseSkeletonPart.sortByHierarchy(children)\n            yield children\n\n    @classmethod\n    def _build(cls, parent=None, fingerCount=5, fingerJointCount=3, **kw):\n        idx = str_utils.Parity(kw['idx'])\n        partScale = kw.get('partScale', cls.PART_SCALE)\n\n        parent = baseSkeletonPart.getParent(parent)\n        parentPart = baseSkeletonPart.SkeletonPart.InitFromItem(parent)\n\n        # try to determine a \"parity index\" based on the parent part. Ideally we want to\n        # inherit the parity of the parent part instead of from this part's index\n        limbIdx = parentPart.getParity() if parentPart.hasParity() else idx\n\n        # for the first two hands this is an empty string - but for each additional hand\n        # pair, this is incremented. 
ie the second two hands are called Hand1, the next\n # two hands are called Hand2 etc...\n typePairCountStr = str(idx / 2) if idx > 1 else ''\n\n minPos, maxPos = partScale / 25.0, -partScale / 25.0\n posRange = float(maxPos - minPos)\n allJoints = []\n\n length = partScale / 3.0 / fingerJointCount\n lengthInc = cls.ParityMultiplier(limbIdx) * (length / fingerJointCount)\n fwdVec = constants.BONE_AIM_VECTOR * lengthInc\n\n limbName = str_utils.Parity.NAMES[limbIdx]\n for nameIdx in range(fingerCount):\n fingerName = cls.FINGER_IDX_NAMES[nameIdx]\n prevParent = parent\n for n in range(fingerJointCount):\n j = baseSkeletonPart.createJoint('%s%s_%d%s' % (fingerName, typePairCountStr, n, limbName))\n cmds.parent(j, prevParent, r=True)\n cmds.move(fwdVec[0], fwdVec[1], fwdVec[2], j, r=True, os=True)\n\n if n == 0:\n sideDist = -maxPos + (posRange * nameIdx / (fingerCount - 1))\n sideVec = constants.BONE_OTHER_VECTOR * sideDist\n cmds.move(sideVec[0], sideVec[1], sideVec[2], j, r=True, os=True)\n else:\n cmds.setAttr('%s.t%s' % (j, constants.BONE_OTHER_AXIS.asName()), lock=True)\n\n allJoints.append(j)\n prevParent = j\n\n return allJoints\n\n def visualize(self):\n scale = self.getActualScale() / 5.0\n\n for base in self.bases:\n plane = cmds.polyPlane(\n w=scale, h=scale / 2, sx=1, sy=1,\n ax=constants.BONE_OTHER_AXIS.asVector(), cuv=2, ch=False)[0]\n\n cmds.parent(plane, base, relative=True)\n\n cmds.setAttr('%s.t%s' % (plane, constants.BONE_AIM_AXIS.asName()), self.getParityMultiplier() * scale / 2)\n cmds.makeIdentity(plane, a=True, t=True)\n\n cmds.parent(cmds.listRelatives(plane, shapes=True, pa=True), base, add=True, shape=True)\n cmds.delete(plane)\n\n def _align(self, _initialAlign=False):\n parity = self.getParity()\n\n parityMult = self.getParityMultiplier()\n\n for chain in self.iterFingerChains():\n upVector = rig_utils.getObjectBasisVectors(chain[0])[constants.BONE_ROTATE_AXIS]\n upVector = upVector * parityMult\n for n, item in enumerate(chain[:-1]):\n baseSkeletonPart.alignAimAtItem(item, chain[n + 1], parity, worldUpVector=upVector)\n\n baseSkeletonPart.autoAlignItem(chain[-1], parity, worldUpVector=upVector)\n\n#end\n","sub_path":"CONFIG_v2.5/mayaConfig/modules_local/UTSMOD/2016/mac/quarantined_scripts/zoo/zmaya/skeletonBuilder/skeletonPart_hand.py","file_name":"skeletonPart_hand.py","file_ext":"py","file_size_in_byte":4732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"255768033","text":"## You can find this code on this link: https://www.tensorflow.org/tutorials/keras/basic_text_classification?hl=es\n#%% Importing some libraries\n# keras.datasets.imdb is broken in 1.13 and 1.14, by np 1.16.3\n# pip install -q tf_nightly\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport tensorflow as tf\nfrom tensorflow import keras\n\nimport numpy as np\n\nprint(\"Tensorflow version: \" + tf.__version__)\nprint(\"Numpy version: \" + np.__version__)\nprint(\"Matplotlib added.\")\n\n#%% Download the IMDB dataset\nimdb = keras.datasets.imdb\n(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)\n# The dataset comes preprocessed: each example is an array of integers representing\n# the words of the movie review. 
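The integer encoding is worth inspecting before any padding; a small hedged check, assuming `train_data`/`train_labels` exactly as loaded above:

```python
# Peek at the raw encoding: each review is a list of word indices of varying
# length, and the labels are 0/1. (A sanity check, not part of the tutorial.)
print("first review length:", len(train_data[0]))
print("first ten word ids:", train_data[0][:10])
print("positive fraction:", train_labels.mean())
```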
Each label is an integer value of either 0 or 1, where\n# 0 is a negative review, and 1 is a positive review.\nprint(\"Training entries: {}, labels: {}\".format(len(train_data), len(train_labels)))\nprint(train_data[0])\n\n# Movie reviews may be different lengths. The below code shows the number of words\n# in the first and second reviews. Since inputs to a neural network must be the \n# same length, we will need to resolve this later.\nlen(train_data[0]), len(train_data[1])\n\n#%% Convert the integers back to words\n# A dictionary mapping words to an integer index\nword_index = imdb.get_word_index()\n\n# The first indices are reserved\nword_index = {k:(v+3) for k,v in word_index.items()}\nword_index[\"\"] = 0\nword_index[\"\"] = 1\nword_index[\"\"] = 2 # unknown\nword_index[\"\"] = 3\n\nrever_word_index = dict([(value, key) for (key, value) in word_index.items()])\n\ndef decode_review(text):\n return ' '.join([rever_word_index.get(i, '?') for i in text])\n\n# Now we can use the decode_review function to display the text for the first review\ndecode_review(train_data[0])\n\n#%% Prepare the data\n# The reviews -- the arrays of integers -- must be converted to tensors before fed\n# into the neural network.\ntrain_data = keras.preprocessing.sequence.pad_sequences(train_data,\n value=word_index[\"\"],\n padding='post',\n maxlen=256)\n\ntest_data = keras.preprocessing.sequence.pad_sequences(test_data,\n value=word_index[\"\"],\n padding='post',\n maxlen=256)\n\nlen(train_data[0]), len(train_data[1])\n\nprint(train_data[0])\n\n\n#%% Build the model\n# The neural network is created by stacking layers -- this requires two main architectural\n# decisions:\n# · How many layers to use in the model?\n# · How many hidden units to use for each layer?\n\n# Input shape is the vocabulary count used for the movei reviews (10000 words)\nvocab_size = 10000\nmodel = keras.Sequential()\n# The first layer is an Embedding layer. This layer takes the integer-encoded \n# vocabulary and looks up the embedding vector for each word-index. These\n# vectors are learned as the model trains. The vectors add a dimension to the\n# output array. The resulting dimensions are: (batch, sequence, embedding)\nmodel.add(keras.layers.Embedding(vocab_size, 16))\n# The next layer returns a fixed-length output vector for each example by\n# averaging over the sequen dimension. This allow the model to handle \n# input of variable length, in the simplest way possible. \nmodel.add(keras.layers.GlobalAveragePooling1D())\n# The fixed-length output vector is piped through a fully-connected (Dense) layer\n# tih 16 hidden units.\nmodel.add(keras.layers.Dense(16, activation=tf.nn.relu))\n# The last layer is densely connected with a single output node. Using the sigmoid\n# activation function, this value is a float between 0 and 1, representing a \n# probability, or confidence level\nmodel.add(keras.layers.Dense(1, activation=tf.nn.sigmoid))\n\nmodel.summary()\n\n# If the model has more hidden units (a higher-dimensional representation space),\n# and/or more layers, then the network can learn more complex representations. However,\n# it makes the network more computationally expensive and may lead to learning\n# unwanted patterns -- patterns that improve performance on training data but not on \n# the test data. This is called overfitting.\n\n#%% Loss function and optimizer\n# A model needs a loss function and an optimizer for training. 
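To make the "distance between probability distributions" intuition concrete, binary cross-entropy for a single example is -(y*log(p) + (1-y)*log(1-p)); a minimal sketch using only the NumPy import from the top of this file (the `bce` helper is illustrative, not part of the tutorial):

```python
def bce(y_true, p_pred, eps=1e-7):
    # clip to avoid log(0); a confident wrong prediction is penalized heavily
    p = np.clip(p_pred, eps, 1 - eps)
    return -(y_true * np.log(p) + (1 - y_true) * np.log(1 - p))

print(bce(1, 0.9))  # small loss: confident and correct
print(bce(1, 0.1))  # large loss: confident and wrong
```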
Since this is a binary\n# classification problem and the model outputs a probability we will use the \n# binary_crossentropy loss function. This is not the only choice for a loss function,\n# you could, for instance, choose mean_squared_error. But, generally, binary_crossentropy\n# is better for dealing with probabilities -- it measures the \"distance\" between probability\n# distributions, or in our case, between the ground-truth distribution and the predictions.\nmodel.compile(optimizer='adam',\n loss='binary_crossentropy',\n metrics=['acc'])\n\n\n#%% Create a validation set\nx_val = train_data[:10000]\npartial_x_train = train_data[10000:]\n\ny_val = train_labels[:10000]\npartial_y_train = train_labels[10000:]\n\n#%% Train the model\n# Train the model for 40 epochs in mini-batches of 512 samples. This is 40 iterations\n# over all smaples in the x_train and y_train tensors. While training, monitor the model's loss\n# and accuracy on the 10000 samples from the validation set:\nhistory = model.fit(partial_x_train,\n partial_y_train,\n epochs=40,\n batch_size=512,\n validation_data=(x_val, y_val),\n verbose=1)\n\n\n#%% Evaluate the model\nresults = model.evaluate(test_data, test_labels)\nprint(results)\n\n# This fairly naive approach achieves an accuracy of about 87% With more advanced approaches, \n# the model should get closer to 95%\n\n#%% Create a graph of accuracy and loss over time\nhistory_dict = history.history\nhistory_dict.keys()\n\nimport matplotlib.pyplot as plt\n\nacc = history_dict['acc']\nval_acc = history_dict['val_acc']\nloss = history_dict['loss']\nval_loss = history_dict['val_loss']\n\nepochs = range(1, len(acc) + 1)\n# \"bo\" is for \"blue dot\"\nplt.plot(epochs, loss, 'bo', label='Training loss')\n# b is for 'solid blue line'\nplt.plot(epochs, val_loss, 'b', label='Validation loss')\nplt.title('Training and validation loss')\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.legend()\n\nplt.show()\nplt.clf() # clear figure\n\nplt.plot(epochs, acc, 'bo', label='Training acc')\nplt.plot(epochs, val_acc, 'b', label='Validation acc')\nplt.title('Training and validation accuracy')\nplt.xlabel('Epochs')\nplt.ylabel('Accuracy')\nplt.legend()\n\nplt.show()\n","sub_path":"getStartedTutorial/textClassificationWithMovieReviews/textClassificationWithMovieReviews.py","file_name":"textClassificationWithMovieReviews.py","file_ext":"py","file_size_in_byte":6737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"409186783","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n''' A small script that I wrote months ago when I learned Python. 
It is a 'torrent cleaner'.\nThe script aims to detects all torrent files in a directory\n(by default 'My downloads' dir) and move them to a folder (by default a dir called 'TBDestroyed' on the Desktop) '''\n\nimport os\nfrom pathlib import Path\n\n# select the source and target folders\ndef get_user_directory():\n global file_source\n global file_destination\n default_source = r\"C:\\Users\\yams\\Downloads\"\n default_destination = r\"C:\\Users\\yams\\OneDrive\\Desktop\\TBDestroyed\"\n input_source = input(r\"Enter the directory of the folder to clean or leave as blank and press enter to leave as default\")\n input_destination = input(r\"Enter the directory of the destination folder or leave as blank and press enter to leave as default\")\n if input_source == '':\n file_source = default_source\n else:\n file_source = input_source\n if input_destination =='':\n file_destination = default_destination\n else:\n file_destination = input_destination\n\n# modification of the path\ndef move(x, y):\n Path(x).rename(y)\n\n# execution\nget_user_directory()\nlist_files = os.listdir(file_source)\n\nprint(\"===== STARTING CLEANING ======\")\n\ni = 0\nfor f in list_files:\n if \"torrent\" in f:\n i = i + 1\n cheminsrc = file_source + '\\\\' + f\n chemind = file_destination + '\\\\' + f\n if not os.path.exists(file_destination):\n os.makedirs(file_destination)\n move(cheminsrc, chemind)\n print(f + \" MOVED\")\n\nprint(\" ===== CLEANING COMPLETED ======\")\nprint(str(i) + \" ELEMENTS MOVED\")\nos.system(\"pause\")\n","sub_path":"torrent_cleaner/torrent_mover.py","file_name":"torrent_mover.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"520906576","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport datetime\n#\ndef write_OMI(dir_data,data_name,file_name,lim_i,lim_f):\n date_list,aod_list=np.loadtxt(dir_data+data_name,delimiter=\",\",skiprows=lim_i,max_rows=lim_f-lim_i,dtype=str,unpack=True)\n data_file=open(dir_data+dir_files+file_name,\"w\")\n data_file.write(\"date,day consecutive,AOD\\n\")\n for date,aod in zip(date_list,aod_list):\n year=int(\"20\"+date[0:2])\n month=int(date[2:4])\n day=int(date[4:6])\n data_file.write(date+\",\"+conse_day(year,month,day)+\",\"+aod+\"\\n\")\n data_file.close()\ndef conse_day(year,month,day):\n day_conse=str((datetime.date(year,month,day)-datetime.date(year,1,1)).days)\n return day_conse\ndir_data=\"../Data/\";dir_files=\"Fire_period/\"\nday_ini=\"200603\";day_lim=\"200831\";year_i=2020\n#<--------------Recopilacion de los datos de ozono--------------->\ndata_name=\"data_ozono_cf_ref.csv\"\nlim_i,lim_f=6570,6670\ndate_list,ozono_list,cloud_list,ref_surface_list=np.loadtxt(dir_data+\"data_OMI_OMT03.dat\",skiprows=lim_i,unpack=True,usecols=[0,11,15,16],dtype=str,max_rows=lim_f-lim_i)\ndata_file=open(dir_data+dir_files+data_name,\"w\")\ndata_file.write(\"date,ozono,cloud factor,effective surface reflectivity\\n\")\nfor date,ozono,cloud,ref_surface in zip(date_list,ozono_list,cloud_list,ref_surface_list):\n data_file.write(date[2:8]+\",\"+ozono+\",\"+cloud+\",\"+ref_surface+\"\\n\")\ndata_file.close()\n#<----------------Recopilacion de los datos de AOD 448 nm---------------->\nlim_i,lim_f=168,201\nfile_name=\"data_OMI.csv\"\ndata_name=\"AOD_OMI.csv\"\nwrite_OMI(dir_data,data_name,file_name,lim_i,lim_f)\n#<----------------Recopilacion de los datos de AOD 500 
nm------------->\nlim_i,lim_f=260,303\nfile_name=\"data_OMI_500.csv\"\ndata_name=\"AOD_OMI_500.csv\"\nwrite_OMI(dir_data,data_name,file_name,lim_i,lim_f)","sub_path":"Scripts/read_data_for_fire.py","file_name":"read_data_for_fire.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"155288854","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('shiptrack', '0012_shipmentitem_date_added'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='location',\n name='facility_area',\n field=models.CharField(default=0, max_length=10),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='location',\n name='facility_quaternary_address',\n field=models.CharField(max_length=80, null=True, blank=True),\n ),\n migrations.AlterField(\n model_name='location',\n name='facility_tertiary_address',\n field=models.CharField(max_length=80, null=True, blank=True),\n ),\n ]\n","sub_path":"shiptrack/migrations/0013_auto_20170424_1738.py","file_name":"0013_auto_20170424_1738.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"340793135","text":"##So the idea here is to import into a sqlite Database simulating\n#possibly a Redshift database and this would definitely be a lambda\n#But to scale I would probably use S3 and query it from Redshift\n\n\nimport pandas as pd\nimport json\nfrom db import *\nfrom sqlalchemy import *\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import *\nfrom sqlalchemy.sql import *\nimport datetime\nimport ast\n\ndef import_data(csv_file):\n df = pd.read_csv(csv_file,\n quotechar='\"',\n low_memory=False)\n\n ##clean data and create profit field\n df = df[['title', 'release_date','genres', 'production_companies' ,'popularity', 'revenue' ,'budget']]\n #Because of the messy parts of the data I will coerece the fields that are showing the wrong data\n #The idea is that I would need to ask the source owner why these fields are messy\n df['budget'] = pd.to_numeric(df['budget'], errors='coerce')\n df['popularity'] = pd.to_numeric(df['popularity'], errors='coerce')\n df['release_date'] = pd.to_datetime(df['release_date'], errors='coerce')\n df = df.astype({'genres':'string', 'production_companies': 'string'})\n\n df['profit'] = df['revenue'] - df['budget']\n df['release_year'] = pd.DatetimeIndex(df['release_date']).year\n \n #set nulls to a null date\n df['release_date']=df['release_date'].fillna(datetime.date(1900,1,1))\n df['release_year']=df['release_year'].fillna(1900)\n \n return df\n\n\n##run\ncreate_database()\n\ndf = import_data('./the-movies-dataset/movies_metadata.csv')\nrecord_insert_count = 0\nsession = sessionmaker(bind=engine)()\nfor index, row in df.iterrows():\n try:\n #Check for if movie already exists in database\n if session.query(movie).filter(movie.movie_name==row['title']).count() == 0:\n temp_mov = movie(movie_name=row['title'],\n release_date=row['release_date'],\n release_year=row['release_year'],\n budget=row['budget'],\n revenue=row['revenue'],\n profit=row['profit'],\n popularity=row['popularity'])\n session.add(temp_mov)\n \n #Add Genres to genre table\n for genre_i in ast.literal_eval(row['genres']):\n temp_genre = genre(genre=genre_i['name'])\n #Check to see if genre already exists\n if 
session.query(genre).filter(genre.genre==genre_i['name']).count() == 0:\n session.add(temp_genre)\n else:\n #get existing identifier\n temp_genre = session.query(genre).filter(genre.genre==genre_i['name'])[0]\n #populate junction table\n m_g = movie_genre(movie_id=temp_mov.movie_id, genre_id=temp_genre.genre_id)\n session.add(m_g)\n \n #Add names of production companies\n for prod_company_i in ast.literal_eval(row['production_companies']):\n temp_prod_company = prod_company(production_company=prod_company_i['name'])\n #Check to see if production company already exists\n if session.query(prod_company).filter(prod_company.production_company==prod_company_i['name']).count() == 0:\n session.add(temp_prod_company)\n else:\n #get existing identifier\n temp_prod_company = session.query(prod_company).filter(prod_company.production_company==prod_company_i['name'])[0]\n #populate junction table\n m_p = movie_prod_company(movie_id=temp_mov.movie_id, prod_company_id=temp_prod_company.prod_company_id)\n session.add(m_p)\n \n session.commit()\n record_insert_count=record_insert_count + 1\n except Exception as e:\n #Prefer to send this error to some kind of error table where we can log\n print('Unable to insert %s with error: %s'%(row['title'], e))\n #flush session\n session.flush()\n\nsession.close()\nprint('%s new records added'%(record_insert_count))\n\n\n\n\n\n##### Queries to answer requirements\n##By Production Company\n#budget per year\n#revenue per year\n#profit per year\n#average popularity of produced movies per year\n\"\"\"\nSELECT \n pc.production_company, \n m.release_year,\n SUM(budget) as budget,\n SUM(revenue) as revenue,\n SUM(profit) as profit,\n AVG(popularity) as popularity\nFROM production_company as pc\nLEFT JOIN movie_prod_comp as mpc ON pc.prod_company_id = mpc.prod_company_id\nLEFT JOIN movie as m ON m.movie_id = mpc.movie_id\nGROUP BY\npc.production_company, m.release_year\n\"\"\"\n#releases by genre per year\n\"\"\"\nSELECT \n pc.production_company, \n m.release_year,\n g.genre,\n COUNT(*) as number_of_movies\nFROM production_company as pc\nLEFT JOIN movie_prod_comp as mpc ON pc.prod_company_id = mpc.prod_company_id\nLEFT JOIN movie as m ON m.movie_id = mpc.movie_id\nLEFT JOIN movie_genre as mg ON mpc.movie_id = mg.movie_id\nLEFT JOIN genre as g ON g.genre_id = mg.genre_id\nGROUP BY\npc.production_company, \nm.release_year,\ng.genre\n\"\"\"\n##Movie Genre Details\n#budget by genre by year\n#revenue by genre by year\n#profit by genre by year\n\"\"\"\nSELECT\ng.genre,\nm.release_year,\nSUM(budget) as budget,\nSUM(revenue) as revenue,\nSUM(profit)\nFROM genre as g\nLEFT JOIN movie_genre as mg ON mg.genre_id = g.genre_id\nLEFT JOIN movie as m ON m.movie_id = mg.movie_id\nGROUP BY \ng.genre,\nm.release_year\n\"\"\"\n#most popular genre by year\n\"\"\"\nWITH D1 AS\n(SELECT\ng.genre,\nm.release_year,\nAVG(popularity) as popularity\nFROM genre as g\nLEFT JOIN movie_genre as mg ON mg.genre_id = g.genre_id\nLEFT JOIN movie as m ON m.movie_id = mg.movie_id\nGROUP BY \ng.genre,\nm.release_year) \n\nSELECT\n D1.genre,\n D1.release_year,\n D1.popularity\nFROM D1\nINNER JOIN (\n SELECT release_year, \n MAX(popularity) as max_pop \n FROM D1\n GROUP BY release_year\n ) AS D2 ON D1.release_year = D2.release_year\n AND D1.popularity = D2.max_pop\nORDER BY D1.release_year\n\n\n\"\"\"","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"501993106","text":"#Problem 
2 : https://leetcode.com/problems/house-robber/\n#Test Cases Passed on LeetCode\n#Time Complexity-O(N)\n#Space Complexity-O(N*2)=O(N) *Please confirm if it is correct or not\n\nclass Solution:\n def rob(self, nums):\n if not nums or len(nums)==0:\n return 0\n #declare a 2 array where columns tell us whether the num [i] is selected or not\n dp=[[0 for col in range(2)] for row in range(len(nums))]\n dp[0][0]=0\n dp[0][1]=nums[0]\n for i in range(1,len(nums)):\n #if we do not select nums[i]\n dp[i][0]=max(dp[i-1][0],dp[i-1][1])\n #if we select nums[i]\n dp[i][1]=nums[i]+dp[i-1][0]\n return max(dp[-1][0],dp[-1][1])\nsol=Solution()\nnums = [1, 2, 3, 1]\nprint(sol.rob(nums))\n","sub_path":"Problem2.py","file_name":"Problem2.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"341003114","text":"import time\nfrom selenium import webdriver\nimport ShopInfo\nimport GUI\n\n\nclass WebAppDriver:\n def __init__(self, UrlList, FinalProductList, QuantityList):\n self.urls = UrlList\n self.FinalProdList = FinalProductList\n self.QuantityList = QuantityList\n\n def testURL(self, driver):\n for num in range(len(self.urls)):\n driver.get(self.urls[num])\n driver.find_element_by_xpath('//*[@id=\"add-to-cart\"]').click()\n time.sleep(1)\n for quantity in range(self.QuantityList[num]-1):\n driver.find_element_by_xpath('//*[@id=\"cart\"]/div/div/div/div[2]/div/div/div[1]/div[1]/div[2]/div/div/button[2]').click()\n time.sleep(1)\n return True\n\n def checkout(self):\n try:\n firstname = ShopInfo.Login[\"FirstName\"][-1]\n lastname = ShopInfo.Login[\"LastName\"][-1]\n address = ShopInfo.Login[\"Address\"][-1]\n city = ShopInfo.Login[\"City\"][-1]\n zipcode = ShopInfo.Login[\"Zipcode\"][-1]\n email = ShopInfo.Login[\"Email\"][-1]\n except:\n errorMsg = GUI.MessageBox(True)\n errorMsg.done(1)\n return 1\n\n driver = webdriver.Chrome('./chromedriver')\n\n if not self.testURL(driver):\n return False\n\n time.sleep(1)\n # ADD OTHER OPTIONS TOO\n driver.find_element_by_xpath('//*[@id=\"cart\"]/div/div/div/div[3]/div[2]/div/a').click()\n time.sleep(1)\n\n # INFORMATION PAGE\n driver.find_element_by_xpath('//*[@id=\"checkout_shipping_address_first_name\"]').send_keys(firstname)\n driver.find_element_by_xpath('// *[ @ id = \"checkout_shipping_address_last_name\"]').send_keys(lastname)\n driver.find_element_by_xpath('//*[@id=\"checkout_shipping_address_address1\"]').send_keys(address)\n driver.find_element_by_xpath('//*[@id=\"checkout_shipping_address_city\"]').send_keys(city)\n driver.find_element_by_xpath('//*[@id=\"checkout_shipping_address_zip\"]').send_keys(zipcode)\n driver.find_element_by_xpath('// *[ @ id = \"checkout_email\"]').send_keys(email)\n driver.find_element_by_xpath('// *[ @ id = \"checkout_buyer_accepts_marketing\"]').click()\n time.sleep(5)\n\n # SHIPPING PAGE\n driver.find_element_by_xpath('/html/body/div[4]/div/div[1]/div[2]/div/form/div[2]/button').click()\n time.sleep(1)\n driver.find_element_by_xpath('/html/body/div[4]/div/div[1]/div[2]/div/form/div[2]/button').click()\n\n # PAYMENT PAGE\n time.sleep(1)\n\n button = GUI.MessageBox(False)\n button.done(1)\n\n driver.quit()\n return 0\n","sub_path":"SeleniumWebDriver.py","file_name":"SeleniumWebDriver.py","file_ext":"py","file_size_in_byte":2649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"399480172","text":"import cv2\nimport numpy as np\n \n\ncap = cv2.VideoCapture('D2_mute.mp4')\n \nif 
(cap.isOpened()== False): \n print(\"Error opening video stream or file\")\n \nmean_arr = np.zeros((3))\ncount = 0\n\nwhile(cap.isOpened()):\n\tret, frame = cap.read()\n\n\t# Mean\n\tif ret == True:\n\t\tcount += 1\n\t\ta = np.mean(frame, axis=0)\n\t\ta = np.mean(a, axis = 0)\n\t\tmean_arr += a\n\t\tprint(count)\n\tif ret == False:\n\t\tbreak\n\n\n# Mean\nprint(mean_arr/count)\nprint(count)\n \ncap.release()\n \ncv2.destroyAllWindows()","sub_path":"Lane-Segmentation/datasets/indian/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"566402536","text":"import pandas as pd\nimport numpy as np\nimport matplotlib\n# matplotlib.use(\"Agg\")\nfrom matplotlib import pyplot as plt\n\nnp.random.seed(42)\n\n\nclass Scaler():\n # hint: https://machinelearningmastery.com/standardscaler-and-minmaxscaler-transforms-in-python/\n def __init__(self):\n self.mean = []\n self.std_dev = []\n def __call__(self,features, is_train=False):\n self.mean = np.mean(features,axis=0)\n self.std_dev = np.std(features,axis=0)\n # raise NotImplementedError\n\n\ndef get_features(csv_path,is_train=False,scaler=None,is_test=False):\n '''\n Description:\n read input feature columns from csv file\n manipulate feature columns, create basis functions, do feature scaling etc.\n return a feature matrix (numpy array) of shape m x n \n m is number of examples, n is number of features\n return value: numpy array\n '''\n\n '''\n Arguments:\n csv_path: path to csv file\n is_train: True if using training data (optional)\n scaler: a class object for doing feature scaling (optional)\n '''\n\n '''\n help:\n useful links: \n * https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html\n * https://www.geeksforgeeks.org/python-read-csv-using-pandas-read_csv/\n '''\n\n #from -> https://stackoverflow.com/questions/20517650/how-to-delete-the-last-column-of-data-of-a-pandas-dataframe\n \n if is_test:\n data_frame = pd.read_csv(csv_path)\n else:\n data_frame = pd.read_csv(csv_path, nrows=1) # read just first line for columns\n columns = data_frame.columns.tolist() # get the columns\n cols_to_use = columns[:len(columns)-1] # drop the last one\n data_frame = pd.read_csv(csv_path, usecols=cols_to_use)\n x = data_frame.values #returns a numpy array\n if is_train:\n scaler.__call__(x,is_train)\n\n x_norm = ((x- scaler.mean)/(scaler.std_dev + 0.000000000000000000001)) #normalize\n x_norm = np.insert(x_norm,0,1,axis=1) ### for adding bias \n return x_norm\ndef get_targets(csv_path):\n '''\n Description:\n read target outputs from the csv file\n return a numpy array of shape m x 1\n m is number of examples\n '''\n #from -> https://stackoverflow.com/questions/20517650/how-to-delete-the-last-column-of-data-of-a-pandas-dataframe\n data_frame = pd.read_csv(csv_path, nrows=1) # read just first line for columns\n columns = data_frame.columns.tolist() # get the columns\n cols_to_use = columns[len(columns)-1:]\n df = pd.read_csv(csv_path, usecols=cols_to_use)\n x = df.values #returns a numpy array\n return x\n\n \n\ndef analytical_solution(feature_matrix, targets, C=0.0):\n '''\n Description:\n implement analytical solution to obtain weights\n as described in lecture 4b\n return value: numpy array\n '''\n\n '''\n Arguments:\n feature_matrix: numpy array of shape m x n\n weights: numpy array of shape m x 1\n '''\n x = feature_matrix\n y = targets\n xt = x.T\n I = np.identity(x.shape[1]) #identity matrix \n w = 
np.linalg.inv(xt.dot(x)+C*I).dot(xt.dot(y))\n # w = np.linalg.solve(xt.dot(x) + C ,xt.dot(y))\n return w\n\ndef get_predictions(feature_matrix, weights):\n '''\n description\n return predictions given feature matrix and weights\n return value: numpy array\n '''\n\n '''\n Arguments:\n feature_matrix: numpy array of shape m x n\n weights: numpy array of shape n x 1\n '''\n return feature_matrix.dot(weights)\n\ndef mse_loss(predications, targets):\n '''\n Description:\n Implement mean squared error loss function\n return value: float (scalar)\n '''\n\n '''\n Arguments:\n feature_matrix: numpy array of shape m x n\n weights: numpy array of shape n x 1\n targets: numpy array of shape m x 1\n '''\n mse = (np.square(predications-targets).mean(axis=None))\n return mse\n\ndef l2_regularizer(weights):\n '''\n Description:\n Implement l2 regularizer\n return value: float (scalar)\n '''\n\n '''\n Arguments\n weights: numpy array of shape n x 1\n '''\n return np.linalg.norm(weights)\n # raise NotImplementedError\n\ndef loss_fn(feature_matrix, weights, targets, C=0.0):\n '''\n Description:\n compute the loss function: mse_loss + C * l2_regularizer\n '''\n\n '''\n Arguments:\n feature_matrix: numpy array of shape m x n\n weights: numpy array of shape n x 1\n targets: numpy array of shape m x 1\n C: weight for regularization penalty\n return value: float (scalar)\n '''\n mse = mse_loss(feature_matrix,targets)\n return mse + C*l2_regularizer(weights)\n # raise NotImplementedError\n\ndef compute_gradients(feature_matrix, weights, targets, C=0.0):\n '''\n Description:\n compute gradient of weights w.r.t. the loss_fn function implemented above\n '''\n\n '''\n Arguments:\n feature_matrix: numpy array of shape m x n\n weights: numpy array of shape n x 1\n targets: numpy array of shape m x 1\n C: weight for regularization penalty\n return value: numpy array\n '''\n # print(feature_matrix)\n # print(weights)\n # print(targets)\n loss = (feature_matrix.dot(weights)-targets) \n\n # print(loss)\n\n # cost = np.sum(loss**2)/2*32 + \n # print(cost)\n # loss = loss_fn(feature_matrix,weights,targets,C)\n # print(feature_matrix.shape[0])\n gradient = (2/feature_matrix.shape[0])*(feature_matrix.T.dot(loss)) + 2.0*C*np.sum(weights)\n\n # return np.divide(gradient,feature_matrix.shape[0])\n return gradient\n # raise NotImplementedError\n\ndef sample_random_batch(feature_matrix, targets, batch_size):\n '''\n Description\n Batching -- Randomly sample batch_size number of elements from feature_matrix and targets\n return a tuple: (sampled_feature_matrix, sampled_targets)\n sampled_feature_matrix: numpy array of shape batch_size x n\n sampled_targets: numpy array of shape batch_size x 1\n '''\n\n '''\n Arguments:\n feature_matrix: numpy array of shape m x n\n targets: numpy array of shape m x 1\n batch_size: int\n ''' \n # print(feature_matrix.shape)\n rand_indices = np.random.randint(feature_matrix.shape[0], size=batch_size)\n sampled_features = feature_matrix[rand_indices, :]\n sampled_targets = targets[rand_indices,:]\n # print(sampled_features.shape)\n # print(sampled_targets.shape)\n return [sampled_features, sampled_targets]\n # raise NotImplementedError\n \ndef initialize_weights(n):\n '''\n Description:\n initialize weights to some initial values\n return value: numpy array of shape n x 1\n '''\n\n '''\n Arguments\n n: int\n '''\n return np.ones((n,1))\n\ndef update_weights(weights, gradients, lr):\n '''\n Description:\n update weights using gradient descent\n retuen value: numpy matrix of shape nx1\n '''\n\n '''\n 
Arguments:\n # weights: numpy matrix of shape nx1\n # gradients: numpy matrix of shape nx1\n # lr: learning rate\n ''' \n weights = weights - lr*gradients\n return weights\n # raise NotImplementedError\n\n# From -> https://stackoverflow.com/questions/279561/what-is-the-python-equivalent-of-static-variables-inside-a-function\ndef early_stopping(train_loss):\n last_k_losses.append(train_loss)\n if len(last_k_losses) < k:\n return False\n last_k_losses.pop(0)\n return (max(last_k_losses) - min(last_k_losses)) < min_diff\n\nk = 10 #for early stopping # check last k error values\nlast_k_losses = []\nmin_diff = 0.000009\n\n\ndef do_gradient_descent(train_feature_matrix, \n train_targets, \n dev_feature_matrix,\n dev_targets,\n lr=1.0,\n C=0.0,\n batch_size=32,\n max_steps=10000,\n eval_steps=5):\n '''\n feel free to significantly modify the body of this function as per your needs.\n ** However **, you ought to make use of compute_gradients and update_weights function defined above\n return your best possible estimate of LR weights\n\n a sample code is as follows -- \n '''\n m,n = train_feature_matrix.shape\n weights = initialize_weights(n)\n dev_loss = mse_loss(dev_feature_matrix.dot(weights), dev_targets)\n train_loss = mse_loss(train_feature_matrix.dot(weights), train_targets)\n\n print(\"step {} \\t dev loss: {} \\t train loss: {}\".format(0,dev_loss,train_loss))\n for step in range(1,max_steps+1):\n\n #sample a batch of features and gradients\n features,targets = sample_random_batch(train_feature_matrix,train_targets,batch_size)\n \n #compute gradients\n gradients = compute_gradients(features, weights, targets, C)\n \n #update weights\n weights = update_weights(weights, gradients, lr)\n \n if step%eval_steps == 0:\n dev_loss = mse_loss(dev_feature_matrix.dot(weights), dev_targets)\n train_loss = mse_loss(train_feature_matrix.dot(weights), train_targets)\n if(early_stopping(train_loss)):\n print('Stopping early')\n return weights\n print(\"step {} \\t dev loss: {} \\t train loss: {}\".format(step,dev_loss,train_loss))\n\n '''\n implement early stopping etc. 
to improve performance.\n '''\n\n return weights\n\ndef do_evaluation(feature_matrix, targets, weights):\n # your predictions will be evaluated based on mean squared error \n # predictions = get_predictions(feature_matrix, weights)\n predictions = feature_matrix.dot(weights)\n # pred_idx = np.insert(predictions, 0, range(0,predictions.size), axis=1)\n\n # df = pd.DataFrame(predictions)\n # np.savetxt('pred.csv', pred_idx, delimiter=',', header='instance_id,shares',fmt='%d,%f',comments=\"\")\n # np.savetxt(\"pred.csv\", np.dstack((range(1, predictions.size+1),predictions))[0],\"%d,%f\",header=\"instance_id,shares\")\n # df.to_csv('test_csv.csv', mode='a', index=True)\n # plt.plot(targets)\n # plt.plot(predictions)\n # plt.show()\n loss = mse_loss(predictions, targets)\n return loss\n\nif __name__ == '__main__':\n scaler = Scaler() #use of scaler is optional\n train_features, train_targets = get_features('data/train.csv',True,scaler), get_targets('data/train.csv')\n dev_features, dev_targets = get_features('data/dev.csv',False,scaler), get_targets('data/dev.csv')\n \n a_solution = analytical_solution(train_features, train_targets, C=1e-7)\n \n print('evaluating analytical_solution...')\n dev_loss=do_evaluation(dev_features, dev_targets, a_solution)\n train_loss=do_evaluation(train_features, train_targets, a_solution)\n print('analytical_solution \\t train loss: {}, dev_loss: {} '.format(train_loss, dev_loss))\n\n print('training LR using gradient descent...')\n gradient_descent_soln = do_gradient_descent(train_features, \n train_targets, \n dev_features,\n dev_targets,\n lr=0.0002,\n C=1e-7,\n batch_size=64,\n max_steps=6000000,\n eval_steps=10000)\n\n print('evaluating iterative_solution...')\n test_features = get_features('data/test.csv',False,scaler,True)\n predictions = get_predictions(test_features,gradient_descent_soln)\n pred_idx = np.insert(predictions, 0, range(0,predictions.size), axis=1)\n\n np.savetxt('pred.csv', pred_idx, delimiter=',', header='instance_id,shares',fmt='%d,%f',comments=\"\")\n # print(pred_test)\n np.savetxt(\"soln.csv\",a_solution,\"%f\",delimiter=',')\n dev_loss=do_evaluation(dev_features, dev_targets, gradient_descent_soln)\n train_loss=do_evaluation(train_features, train_targets, gradient_descent_soln)\n print('gradient_descent_soln \\t train loss: {}, dev_loss: {} '.format(train_loss, dev_loss))\n \n\n\n","sub_path":"LR.py","file_name":"LR.py","file_ext":"py","file_size_in_byte":11854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"253924161","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom pynumdiff.utils import utility as utility\n\nparameters = {'omega_m': 420, # rad / sec\n 'T_m': 190, # M,\n 'beta': 0.4, #\n 'Cr': 0.01, \n 'Cd': 0.32,\n 'A': 2.4,\n 'g': 9.8, # m/s^2\n 'm': 3000, # kg\n 'rho': 1.3, # kg/m^3\n 'v_r': 30, # m/s \n 'k_p': 2,\n 'k_i': 2,\n }\n\ndef triangle(iterations, dt):\n t = np.arange(0, iterations*dt, dt)\n continuous_x = np.sin(0.02*t*np.sqrt(t))\n\n #return np.matrix(continuous_x)\n\n # find peaks and valleys\n peaks, valleys = utility.peakdet(continuous_x, 0.1)\n\n # organize peaks and valleys\n if len(peaks) > 0:\n reversal_idxs = peaks[:,0].astype(int).tolist()\n reversal_vals = peaks[:,1].tolist()\n else:\n reversal_idxs = []\n reversal_vals = []\n if len(valleys) > 0:\n reversal_idxs.extend(valleys[:, 0].astype(int).tolist())\n reversal_vals.extend(valleys[:, 1].tolist())\n\n reversal_idxs.extend([0, len(continuous_x)-1])\n reversal_vals.extend([0, 
continuous_x[-1]])\n\n\n idx = np.argsort(reversal_idxs)\n reversal_idxs = np.array(reversal_idxs)[idx]\n reversal_vals = np.array(reversal_vals)[idx]\n reversal_ts = t[reversal_idxs]\n\n x = np.interp(t, reversal_ts, reversal_vals)\n x = np.matrix(x)\n\n return x\n\ndef effective_wheel_radius(v):\n return 20\n\ndef Torque(omega):\n omega_m = parameters['omega_m']\n T_m = parameters['T_m']\n beta = parameters['beta']\n return T_m*(1 - beta*(omega / omega_m - 1)**2)\n\ndef step_forward(state_vals, disturbances, desired_velocity, dt):\n # state_vals = [position, velocity, road_angle]\n \n p = state_vals[0,-1]\n v = state_vals[1,-1]\n theta = disturbances[2,-1]\n \n m = parameters['m']\n g = parameters['g']\n Cr = parameters['Cr']\n \n rho = parameters['rho']\n Cd = parameters['Cd']\n A = parameters['A']\n \n v_r = desired_velocity[0,-1] #parameters['v_r']\n \n alpha_n = effective_wheel_radius(v)\n z = np.sum(desired_velocity[0,:] - state_vals[1,:])*dt\n k_p = parameters['k_p']\n k_i = parameters['k_i']\n u = k_p*(v_r-v) + k_i*z\n \n \n # rolling friction\n Fr = m*g*Cr*np.sign(v)\n # aerodynamic drag\n Fa = 0.5*rho*Cd*A*np.abs(v)*v\n # forces due to gravity\n Fg = m*g*np.sin(theta)\n # driving force\n Fd = alpha_n*u*Torque(alpha_n*v)\n \n vdot = 1/m*(Fd - (Fr + Fa + Fg))\n \n new_state = np.matrix([[p + dt*v], [v + vdot*dt], [theta]])\n \n return new_state, np.matrix(u)\n\n# disturbance\ndef hills(iterations, dt, factor):\n #t = np.linspace(0,n,n)\n #y = 1*np.sin(0*t*200/np.max(t)) + 5*np.sin(t*100/np.max(t)) + 100*np.sin(t*20/np.max(t)) + 10*np.sin(t*7/np.max(t)) \n #return y*np.pi/180.*1e-2\n return triangle(iterations, dt)*0.3/factor\n\n# desired velocity\ndef desired_velocity(n, factor):\n return np.matrix([2/factor]*n)\n\n\ndef run(timeseries_length=4, dt=0.01):\n t = np.arange(0, timeseries_length, dt)\n iterations = len(t)\n\n # hills\n disturbances = np.matrix(np.zeros([3, iterations+1]))\n h = hills(iterations+1, dt, factor=0.5*timeseries_length/2)\n disturbances[2,:] = h[:,0:disturbances.shape[1]]\n\n # controls\n controls = np.matrix([[0]])\n\n # initial condition\n state_vals = np.matrix([[0], [0], [0]])\n\n # desired vel\n v_r = desired_velocity(iterations, factor=0.5*iterations*dt/2)\n\n for i in range(1, iterations+1):\n new_state, u = step_forward(state_vals, disturbances[:,0:i], v_r[:,0:i], dt)\n state_vals = np.hstack((state_vals, new_state))\n controls = np.hstack((controls, u))\n\n return state_vals[0:2,1:], disturbances[2,1:], controls","sub_path":"pynumdiff/utils/__pi_cruise_control__.py","file_name":"__pi_cruise_control__.py","file_ext":"py","file_size_in_byte":3780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"14203618","text":"from kafka import KafkaConsumer,KafkaProducer\nfrom kafka import TopicPartition\nfrom .utils import getConfig\nfrom itertools import chain\nimport tornado\nimport json\nimport time\n\nclass Kafka:\n def __init__(self):\n self.topicDict={\n 'user':'UGC_USER',\n 'opus':'UGC_OPUS',\n 'subscribe':'UGC_SUBSCRIBE',\n 'action':'UGC_USER_EVENT',\n 'push':'UGC_POUS_EVENT',\n 'blackList':'UGC_USER_BLACK_WHITE_LISTED'\n }\n \n def send(self,topic,data):\n if(data is None or len(data)==0):\n return\n if(not isinstance(data,list)):\n data=[data]\n\n producer=KafkaProducer(\n bootstrap_servers=getConfig('kafka_server'),\n client_id='recommend_producer',\n value_serializer=lambda m:json.dumps(m).encode('utf-8')\n )\n kafka_topic=self.topicDict[topic]\n for info in data:\n 
producer.send(kafka_topic,info).add_errback(\n lambda ex:tornado.log.app_log.error('send push error \\r\\n'+repr(ex))\n )\n producer.flush()\n\n #不调用kafka自带的deserialize方法,报错无法继续处理\n def get(self,topic):\n # \"kafka_server\":[\"Master2:9092\",\"Slave001:9092\",\"Slave002:9092\"],\n topic=self.topicDict[topic]\n consumer=KafkaConsumer(\n group_id=getConfig('kafka_Group'),\n bootstrap_servers=getConfig('kafka_server'),\n auto_offset_reset='earliest',\n enable_auto_commit=False\n )\n topic_Partitions=[TopicPartition(topic=topic,partition=0)]\n consumer.assign(topic_Partitions)\n\n offset_max=consumer.end_offsets(topic_Partitions)\n all_size=0\n for tp,offset in offset_max.items():\n offset_last=consumer.committed(tp)\n tornado.log.gen_log.info('consuming kafka data: \\r\\n \\\n topic: %s \\r\\n \\\n partition: %s \\r\\n\\\n lastIndex: %s \\r\\n \\\n currentIndex: %s' % (tp.topic,tp.partition,offset_last,offset))\n if(offset_last==None):\n offset_last=0\n all_size+=offset-offset_last\n\n result=list()\n start=time.time()\n while(True):\n msg=consumer.poll()\n data=[x.value for x in chain(*msg.values())]\n if(len(data)>0):\n start=time.time()\n result.extend(data)\n if(len(result)>=all_size or time.time()-start>=5):\n break\n consumer.commit()\n consumer.close()\n tornado.log.gen_log.info('kafka %s data consume success allLen: %s' % (topic,len(result)))\n return result","sub_path":"Helper/kafkaHelper.py","file_name":"kafkaHelper.py","file_ext":"py","file_size_in_byte":2685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"253506867","text":"import argparse\nimport ipaddress\nimport getpass\nimport requests\nfrom datetime import datetime\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-H\", \"--host\",\n help=\"IP address of the Tweetcool server\",\n default='127.0.0.1') # Equals 'localhost'\nparser.add_argument(\"-P\", \"--port\",\n help=\"Post used by the Tweetcool server\",\n type=int,\n default=9876)\nargs = parser.parse_args()\n\ntry:\n server = {\n 'host': ipaddress.ip_address(args.host),\n 'port': args.port\n }\nexcept ValueError as e:\n print('The given host is not a valid IP address')\n exit(0)\n\nif not(1024 < server[\"port\"] < 65535):\n print('The given port number is not in the range between 1024 and 65535!')\n exit(0)\n\nserver[\"address\"] = 'http://' + server[\"host\"].compressed + ':' + str(server[\"port\"])\n\n# Logic starts here... somewhere..\n\nname = getpass.getuser()\n\n\ndef menu(): # Try to connect to server first\n try:\n r = requests.get(server[\"address\"])\n except:\n print(\"Connection failed\")\n exit()\n\n print(\"Welcome to TweetCool\\n\") # Menu starts here\n print(\"1. Previous tweets\\n2. New tweet\\n3. Quit (type exit or Ctrl+D)\\n\")\n choice = input()\n if choice == '1':\n get_tweets()\n elif choice == '2':\n new_tweet()\n elif choice == 'exit':\n print('Closing...')\n quit()\n else:\n print('This is not a valid option!')\n\ndef new_tweet(): # I put this function in a loop to post as many tweet as the user want, one after another\n poster = name\n while True:\n try:\n content = input(\"Enter your text here or press Ctrl+D to exit:\")\n body = {\n \"content\": content,\n \"poster\": poster\n }\n r = requests.post(server[\"address\"] + \"/tweet\", json=body)\n print(\"Success! 
You just tweeted!\")\n continue\n except(KeyboardInterrupt, EOFError): # End the tweeting loop with Ctrl+D\n quit()\n\n\ndef get_tweets():\n r = requests.get(server[\"address\"] + \"/tweet\")\n if len(r.json()) == 0:\n print(\"Sorry, there are no tweets to show\")\n else:\n for i in r.json():\n post_date = datetime.fromtimestamp(i[\"timestamp\"])\n print('{} <{}>: {}'.format(i[\"poster\"], post_date, i[\"content\"]))\n\nmenu()\n\n\n","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"394514848","text":"import os\nimport json\n\n\nclass FileStorage():\n\n def __init__(self, filename):\n self.filename = filename\n\n def read(self):\n if os.path.exists(self.filename):\n with open(self.filename) as file:\n data = json.load(file)\n return data\n else:\n return {}\n\n def save(self, data): # write data like json file\n try:\n old_data = self.read()\n if len(old_data.keys()) == 0:\n old_data[\"tweets\"] = []\n old_data[\"tweets\"].append(data)\n jsondata = json.dumps(old_data, indent=4, skipkeys=True, sort_keys=True)\n fd = open(self.filename, 'w')\n fd.write(jsondata)\n fd.close()\n print(self.filename + \" ha sido escrito exitosamente\")\n except Exception as e:\n print(e)\n print('ERROR writing', self.filename)\n\n\nfile_worker = FileStorage(\"results.json\")\ndata = file_worker.read()\nprint(data)","sub_path":"Data Extraction/Twitter/read_streaming.py","file_name":"read_streaming.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"630874804","text":"\"\"\"\nA collection of cost functions\n\"\"\"\n\nimport numpy as np\n\nimport math\n\n\ndef mse(output, target, derivative=False):\n \"\"\"\n The 'mean squared error'\n :param output: The predicted output of the model.\n :param target: The target output\n :param derivative:\n :return:\n \"\"\"\n if derivative:\n return output - target\n\n return np.asscalar(np.array([0.5], dtype=\"float64\")) \\\n * np.linalg.norm(output - target) ** np.asscalar(np.array([2.], dtype=\"float64\"))\n\n\ndef xent(output, target, derivative=False):\n \"\"\"\n 'Categorical Cross-Entropy'\n :param output: The predicted output of the model.\n :param target: The target output\n :param derivative:\n :return:\n \"\"\"\n if derivative:\n return output - target\n\n s1 = np.asscalar(np.array([1.]))\n return -(target * np.log(output) + (s1 - target) * np.log(s1 - output)).sum()\n\n\nmean_squared_error = MSE = mse\n\n_cost = {\n \"mse\": mse,\n \"xent\": xent\n}\n","sub_path":"bull/cost/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"545955554","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 9 13:57:31 2021\n\n@author: Asadbek\n\"\"\"\n\nismlar = ['Ali', 'Vali', 'Gani', 'Botir', 'Begzod', 'Javoxir']\n\nfor name in ismlar:\n print(f' Salom {name}, yaxshimisan?')\nprint(f' Kod {len(ismlar)} marta takrorlandi')\n\nsonlar = list(range(11, 100, 2))\nfor num in sonlar:\n print(num**3)\n\n\nprint(sonlar)\n#print(11*11*11)\n\nkinolar = []\n\nfor n in range(0,5):\n # kinolar.append(input(f'{n+1}-sevimli kino nomini kiriting:'))\n print(kinolar)\n \ncount = int(input('nechta odam bn suhbat qildingiz?>>>'))\npeople = []\n#print(input(f'{3+5}'))\nfor nth in range(count):\n 
people.append(input(f\"{nth+1}-suhbatlashgan odamingiz: \"))\nprint(people) \n \n\n ","sub_path":"for_sikl.py","file_name":"for_sikl.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"159848890","text":"from django.conf.urls import url\nfrom .views import *\n\n\n\nurlpatterns = [\nurl(r'^addtobase', add_to_base),\nurl(r'^goods/(?P[A-Za-z0-9_-]+)$', offer),\nurl(r'^1g$', pars_cat),\nurl(r'^2g$', pars_goods),\nurl(r'^catalog/(?P[A-Za-z0-9_-]+)$', catalog, name='catalog'),\nurl(r'^(?P[A-Za-z0-9_-]+)$', singlepage),\n\nurl(r'^$', home, name='home'),\n]\n","sub_path":"pages/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"638814780","text":"from PIL import Image\n\nfile1 = Image.open('semutA.png')\nfile2 = Image.open('semutB.png')\n\npix1 = file1.load()\npix2 = file2.load()\n\npanjang = 370\n\nfor i in range(panjang):\n for j in range(panjang):\n if pix1[i,j] == pix2[i,j]:\n pix1[i,j] = 255\n else:\n pix1[i,j] =0\n\nfile1.save(\"flag.png\")","sub_path":"if.itb.ac.id/semut_forensic/semutsemut.py","file_name":"semutsemut.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"580751560","text":"class Node:\n def __init__(self, value):\n self.value = value\n self.right = None\n self.left = None\n\nclass BalanceBinTree:\n\n @staticmethod\n def isBalance(node):\n if node == None:\n return True, 0\n balLeft, hLeft = BalanceBinTree.isBalance(node.left)\n if not balLeft:\n return False, 0\n balRight, hRight = BalanceBinTree.isBalance(node.right)\n if not balRight:\n return False, 0\n if abs(hLeft - hRight) > 1:\n return False ,0\n return True, max(hLeft, hRight) + 1\n\nif __name__ == '__main__':\n node4 = Node(4)\n node5 = Node(5)\n node2 = Node(2)\n node2.left = node4\n node2.right = node5\n node6 = Node(6)\n node7 = Node(7)\n node3 = Node(3)\n node3.left = node6\n node3.right = node7\n node1 = Node(1)\n node1.left = node2\n node1.right = node3\n\n print(BalanceBinTree.isBalance(node1))\n","sub_path":"nowcode/BalanceBinTree.py","file_name":"BalanceBinTree.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"596089173","text":"# Given a Weighted Directed Acyclic Graph and a source vertex in the graph, find the shortest paths from given source to all other vertices.\nfrom queue import PriorityQueue\nfrom collections import defaultdict\nfrom collections import deque\nimport heapq\ndef shortest_path(start):\n\tg = defaultdict(list)\n\tfor l,r,c in edges:\n\t\tg[l].append((r, c))\n\tprint('graph', g)\n\ttop_array = top_sort(g)\n\tprint('top', top_array)\n\tarray = top_sort_dfs(g)\n\tprint('array', array)\n\t#top_array = top_array[::-1]\n\tdist = {}\n\tfor item in top_array:\n\t\tif item == start:\n\t\t\tdist[item] = 0\n\t\telse:\n\t\t\tdist[item] = float('inf')\n\n\tprint(dist)\n\n\twhile top_array:\n\t\ti = top_array.pop(0)\n\n\t\tfor node, weight in g[i]:\n\t\t\tprint(node, weight)\n\t\t\tif dist[node] > dist[i] + weight:\n\t\t\t\tdist[node] = dist[i] + weight\n\treturn dist.values()\n\ndef top_sort(graph):\n\tin_degree = {}\n\tfor vertex in graph:\n\t\tif vertex not in in_degree:\n\t\t\tin_degree[vertex] = 0\n\t\tfor n in graph[vertex]:\n\t\t\tif n[0] not in in_degree:\n\t\t\t\tin_degree[n[0]] = 
0\n\t\t\tin_degree[n[0]] += 1\n\ttop_sort_array = []\n\tq = deque()\n\tfor vertex, degree in in_degree.items():\n\t\tif degree == 0:\n\t\t\tq.append(vertex)\n\n\twhile q:\n\t\tnode = q.popleft()\n\t\ttop_sort_array.append(node)\n\t\tfor n in graph[node]:\n\t\t\tin_degree[n[0]] -= 1\n\t\t\tif in_degree[n[0]] == 0:\n\t\t\t\tq.append(n[0])\n\n\treturn top_sort_array\n\n\ndef top_sort_dfs(graph):\n\tvisited = [False] * 6\n\tstack = []\n\tfor i in range(6):\n\t\tif visited[i] == False:\n\t\t\thelper(i, visited, stack, graph)\n\n\t# getting back [5, 4, 3, 2, 1, 0] dont have to reverse it but if you do pop from front\n\treturn stack[::-1]\n\ndef helper(v, visited, stack, graph):\n\tvisited[v] = True\n\n\tfor n,w in graph[v]:\n\t\tif visited[n] == False:\n\t\t\thelper(n, visited, stack, graph)\n\n\tstack.append(v)\n\n\n\nedges = [\n (0, 1, 5),\n (0, 2, 3),\n (1, 3, 6),\n (1, 2, 2),\n (2, 4, 4),\n (2, 5, 2),\n (2, 3, 7),\n (3, 4, -1),\n (4, 5, -2),\n]\n\nprint(shortest_path(1))","sub_path":"IK/Graphs/ShortestPathDAG.py","file_name":"ShortestPathDAG.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"243140072","text":"#coding=gbk\r\nimport m_classifyQueue\r\n\r\ninstanceList = {}\r\n\r\n'''获取一个对象 如果不存在 则返回None'''\r\ndef GetInstance(unique):\r\n if unique in instanceList:\r\n return instanceList[unique]\r\n else:\r\n return None\r\n\r\n'''创建一个对象,该对象会存储在内存中,下次通过GetInstance方法来获取 \r\n该方法不会返回对象'''\r\ndef CreatInstance(unique, maxNum):\r\n obj = m_classifyQueue.ClassifyQueue(maxNum = maxNum)\r\n instanceList[unique] = obj","sub_path":"CrawlProject1/p_common/p_classifyQueue/m_classifyQueue_factory.py","file_name":"m_classifyQueue_factory.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"46794703","text":"def getMatchups():\n from bs4 import BeautifulSoup\n from urllib.error import HTTPError\n import urllib.request\n import re\n\n # using odds shark\n url = 'https://www.oddsshark.com/nba/odds'\n\n try:\n page = urllib.request.urlopen(url)\n except HTTPError as err:\n if err.code == 404:\n print('404 Error occurred')\n else:\n print(err)\n\n soup = BeautifulSoup(page, 'html.parser')\n\n # regex search to find the matchups\n regex = re.compile('op-matchup-wrapper basketball')\n regex2 = re.compile('op-matchup-links has-matchup-links')\n matchups_list = soup.find_all('div', attrs={'class': [regex, regex2]})\n matchTeamList = []\n for match in matchups_list:\n cnt = 0\n for a in match:\n if len(match) == 3:\n if cnt == 0:\n matchTime = a.text\n elif cnt == 2:\n matchTop = a.find('a', {'class': 'odds-link op-matchup-team-text'}).text\n matchBottom = a.find('div', {'class': 'op-matchup-team op-matchup-text op-team-bottom'}).text\n cnt += 1\n # matchDate = match.find('a', {'class': 'odds-link full-matchup'})['href']\n # print(matchDate)\n # parse and format the date\n # matchDate = getMatchDate(matchDate)\n\n # matchVs = str(matchDate) + ' ' + str(matchTime) + ': ' + str(matchTop) + ' vs ' + str(matchBottom)\n matchVs = str(matchTime) + ': ' + str(matchTop) + ' vs ' + str(matchBottom)\n # print(matchVs)\n\n if matchVs not in matchTeamList:\n matchTeamList.append(matchVs)\n # print(matchTeamList)\n return matchTeamList\n\n\ndef getMatchDate(string):\n import dateutil.parser as dparser\n # parse the link to just get the date\n strLeft = string.find('odds')\n strRight = string.rfind('-')\n\n string = string[strLeft + 
5:strRight]\n\n # format date\n string = dparser.parse(str(string), fuzzy=True).strftime('%B %d %Y')\n\n return string\n","sub_path":"app/scripts/matchups.py","file_name":"matchups.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"321147021","text":"import gzip\nimport numpy as np\nfrom sklearn.preprocessing import OneHotEncoder\nimport chainer\n\ndef get_dataset():\n FILE_TRAIN_LABELS_PATH = \"../Dataset/train-labels-idx1-ubyte.gz\"\n FILE_TRAIN_IMAGES_PATH = \"../Dataset/train-images-idx3-ubyte.gz\"\n FILE_TEST_LABELS_PATH = \"../Dataset/t10k-labels-idx1-ubyte.gz\"\n FILE_TEST_IMAGES_PATH = \"../Dataset/t10k-images-idx3-ubyte.gz\"\n \n f = gzip.open(FILE_TRAIN_IMAGES_PATH,'r')\n f.read(16)\n num_train_images = 60000\n count_train_image_rows = 28\n count_train_image_cols = 28\n buf = f.read(count_train_image_rows * count_train_image_cols * num_train_images)\n train_data = np.frombuffer(buf, dtype=np.uint8)\n train_data = train_data.reshape(num_train_images, count_train_image_rows * count_train_image_cols) / 255\n\n train_labels = []\n\n f = gzip.open(FILE_TRAIN_LABELS_PATH,'r')\n f.read(8)\n num_train_labels = 60000\n for i in range(0, num_train_labels):\n buf = f.read(1)\n train_labels.append((np.frombuffer(buf, dtype=np.uint8).astype(np.int64)))\n\n train_labels = np.array(train_labels)\n\n onehot_encoder = OneHotEncoder(sparse=False)\n train_labels = train_labels.reshape(len(train_labels), 1)\n train_labels = onehot_encoder.fit_transform(train_labels)\n\n f = gzip.open(FILE_TEST_IMAGES_PATH,'r')\n f.read(16)\n num_test_images = 10000\n count_test_image_rows = 28\n count_test_image_cols = 28\n buf = f.read(count_test_image_rows * count_test_image_rows * num_test_images)\n test_data = np.frombuffer(buf, dtype=np.uint8)\n test_data = test_data.reshape(num_test_images, count_test_image_rows * count_test_image_rows) / 255\n\n test_labels = []\n\n f = gzip.open(FILE_TEST_LABELS_PATH,'r')\n f.read(8)\n num_test_labels = 10000\n for i in range(0, num_test_labels):\n buf = f.read(1)\n test_labels.append((np.frombuffer(buf, dtype=np.uint8).astype(np.int64)))\n\n test_labels = np.array(test_labels)\n\n onehot_encoder = OneHotEncoder(sparse=False)\n test_labels = test_labels.reshape(len(test_labels), 1)\n test_labels = onehot_encoder.fit_transform(test_labels)\n\n return train_data, train_labels, test_data, test_labels\n","sub_path":"HomeTask4/HomeTask4/HomeTask4/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"936562","text":"import core.text_detection as text_detection \nimport os \nimport re\n\n#ImagesPath = 'E:/IA/credit_card_ocr/assets/images/Cropped_images'\nImagesPath = 'E:/IA/credit_card_ocr/assets/images/docs'\nImagePath = 'E:/IA/credit_card_ocr/assets/images/Cropped_images/img4.jpeg'\n\ndef get_transformed_images():\n for filename in os.listdir(ImagesPath):\n if re.match('[A-ñ0-9]*.jpe?g',filename):\n text_detection.extract_text(os.path.join(ImagesPath,filename))\n\n\n\n\nif __name__ == \"__main__\":\n get_transformed_images()\n #text_detection.extract_text(ImagePath)","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"394225133","text":"__author__ = 'mcowger'\n\nimport 
logging\n\n\nlogging.basicConfig(level=logging.WARNING, format=\"%(asctime)s: %(levelname)s:%(funcName)s:%(module)s: %(message)s\")\nlogger = logging.getLogger()\nlogging.getLogger(\"requests.packages.urllib3.connectionpool\").setLevel(logging.WARN)\n\n\n\nimport requests\nimport json\nimport pygal\nimport datetime\nimport time\nfrom pprint import pprint\n\nimport boto3\nfrom options import *\n\nsession = boto3.session.Session(aws_access_key_id=S3_AKIA, aws_secret_access_key=S3_SECRET, region_name='us-east-1')\ndynamodb = session.resource('dynamodb')\nmileage_table = dynamodb.Table('mileage')\ns3 = session.resource('s3')\n\ndef km_to_miles(km):\n return int(float(km) * 0.621371)\n\ndef get_current_data_from_ford():\n\n ford_url = 'https://phev.myfordmobile.com/services/webLoginPS'\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:30.0) Gecko/20100101 Firefox/30.0',\n 'Accept': 'application/json, text/javascript, */*; q=0.01',\n 'Content-Type': 'application/json; charset=UTF-8',\n 'Cache-Control': 'no-cache',\n 'X-Requested-With': 'XMLHttpRequest',\n 'Referer': 'https://phev.myfordmobile.com/content/mfm/en_us/site/login.html'\n }\n login_data = {\n 'PARAMS': {\n 'emailaddress': FORD_USER,\n 'password': FORD_PASS,\n 'persistent': '0'\n }\n }\n\n try:\n response = requests.post(ford_url,data=json.dumps(login_data),headers=headers)\n except:\n raise\n\n data = response.json()['response']\n #logger.info(json.dumps(data))\n\n\n chopped = {\n 'time': int(float(time.time())),\n 'dte':km_to_miles(data['ELECTRICDTE']),\n 'odometer':km_to_miles(data['ODOMETER']),\n 'soc':int(float(data['stateOfCharge'])),\n 'latlong': str(\",\".join([data['LATITUDE'],data['LONGITUDE']]))\n }\n\n logger.info(chopped)\n print(chopped)\n return chopped\n\ndef get_all_data():\n\n data = mileage_table.scan()\n line_chart = pygal.DateY(\n x_label_rotation=20,\n fill=True,\n human_readable=True,\n pretty_print=True,\n width=800,\n print_values=False,\n disable_xml_declaration=False\n )\n\n\n line_chart.title = \"odometer over time\"\n dates = []\n\n\n for datapoint in data['Items']:\n\n #pprint(datapoint)\n dates.append(\n (\n datetime.datetime.fromtimestamp(datapoint['time']),\n float(datapoint['odometer'])\n )\n )\n\n\n dates.sort(key=lambda tup: tup[0])\n #pprint(dates)\n\n line_chart.add(\"Odometer\",dates)\n\n return line_chart.render()\n\n\n\ndef save_to_s3(filename,data):\n\n try:\n\n s3_object = s3.Object(S3_BUCKET, filename).put(Body=data, ContentType='image/svg+xml', ACL='public-read')\n\n\n except Exception as e:\n logger.critical(\"BAD RESPONSE FROM Object: {}\".format(e))\n\n return s3_object\n\ndef push_to_db(data):\n\n try:\n\n mileage_table.put_item(Item=data)\n\n except:\n raise\n\n\ndef lambda_handler(event=None, context=None):\n try:\n push_to_db(get_current_data_from_ford())\n save_to_s3(\"odometer.svg\",get_all_data())\n except Exception as e:\n raise(e)\n\nif __name__ == \"__main__\":\n lambda_handler()\n","sub_path":"mileage.py","file_name":"mileage.py","file_ext":"py","file_size_in_byte":3281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"616128574","text":"import torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\n\n\nclass NHNN(nn.Module): \n def __init__(self,patience=5):\n super(NHNN, self).__init__()\n\n self.loss=0\n self.epoch=0\n self.patience=patience\n\n 
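        # Editor's sketch of the intended tensor flow, inferred from the layer
        # parameters below (the 40 input channels suggest 40 mel filterbanks):
        #   input  (batch, 40, T)
        #   Conv1d(40->128, k=15, pad=7)         -> (batch, 128, T)  # length preserved
        #   Conv1d(128->128, k=5, dil=2, pad=4)  -> (batch, 128, T)  # effective k = 9
        #   AdaptiveMaxPool1d(1) + flatten       -> (batch, 128)
        # so the linear head below maps 128 features to the 3 output classes.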
self.cnn_layers = nn.Sequential(\n # Defining a 1D convolution layer\n nn.Conv1d(in_channels=40, out_channels=128, kernel_size=15, bias=False, padding=7),\n nn.ReLU(inplace=True),\n nn.Conv1d(in_channels=128, out_channels=128, kernel_size=5, bias=False, dilation=2, padding=2+2),\n nn.ReLU(inplace=True),\n nn.AdaptiveMaxPool1d(1),\n nn.Dropout(p=0.2),\n )\n\n self.linear_layers = nn.Sequential(\n nn.Linear(128,128),\n nn.Linear(128, 3)\n )\n\n # Defining the forward pass \n def forward(self, x):\n h = self.cnn_layers(x)\n h = h.view(h.size(0), -1)\n# print(x.shape)\n h = self.linear_layers(h)\n return h\n\n def fit(self, optimizer, criterion, train_loader, val_loader):\n loss_train = 0\n loss_valid = 0\n\n for step in range(1,len(train_loader)+1):\n mfbs, label = next(iter(train_loader))\n mfbs=mfbs.cuda(2)\n label=label.cuda(2)\n optimizer.zero_grad()\n prediction=self.forward(mfbs)\n loss=criterion(prediction, label)\n loss.backward()\n optimizer.step()\n loss_train+=loss.item()\n \n new_train_loss=loss_train/len(train_loader)\n\n with torch.no_grad():\n for step in range(1,len(val_loader)+1):\n mfbs, label = next(iter(val_loader))\n mfbs=mfbs.cuda(2)\n label=label.cuda(2)\n prediction=self.forward(mfbs)\n loss=criterion(prediction, label)\n loss_valid+=loss.item()\n new_val_loss=loss_valid/len(val_loader)\n\n print(\"epoch \", self.epoch, \"train_loss=\",new_train_loss,\"val_loss=\",new_val_loss)\n\n self.loss=new_val_loss\n\n","sub_path":"NHNN.py","file_name":"NHNN.py","file_ext":"py","file_size_in_byte":2316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"562698131","text":"#!/usr/bin/python\nimport sys\n\n\"\"\"\nDefinition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\"\"\"\n\n\nclass Solution:\n \"\"\"\n @param: root: the root of binary tree\n @return: the length of the longest consecutive sequence path\n \"\"\"\n\n def longestConsecutive(self, root):\n # write your code here\n if not root:\n return 0\n self.result = 1\n self.dfs(root, 1)\n return self.result\n\n def dfs(self, node, l):\n if not node:\n return\n if node.right:\n if node.val + 1 == node.right.val:\n tmp = l + 1\n self.result = max(self.result, tmp)\n else:\n tmp = 1\n self.dfs(node.right, tmp)\n if node.left:\n if node.val + 1 == node.left.val:\n tmp = l + 1\n self.result = max(self.result, tmp)\n else:\n tmp = 1\n self.dfs(node.left, tmp)\n\n\ndef main():\n aa = Solution()\n return 0\n\nif __name__ == \"__main__\":\n sys.exit(main())","sub_path":"LintCode/binaryTreeLongestConsecutiveSequence.py","file_name":"binaryTreeLongestConsecutiveSequence.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"482057847","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport mpld3\nimport pandas as pd\n\ndata = pd.read_csv('../csv/qtyTheoryData.csv')\ndataL = pd.read_csv('../csv/qtyTheoryDataL.csv')\ndataM = pd.read_csv('../csv/qtyTheoryDataM.csv')\ndataH = pd.read_csv('../csv/qtyTheoryDataH.csv')\n\n\nscale = 0.8\nw = 8*scale\nh = 7*scale\nfig, ax = plt.subplots(subplot_kw=dict(axisbg='#EEEEEE'),figsize=(w,h))\n\nx = np.arange(-10,10,0.001)\nybar = np.mean(data['gdp growth'])\ny = x -ybar\n\nax.plot(x,y)\nscatterL = ax.scatter(dataL['money growth'],dataL['inflation'],\n s=125,\n color='red',\n alpha=0.3,\n cmap=plt.cm.jet)\nscatterM = ax.scatter(dataM['money 
growth'],dataM['inflation'],\n s=125,\n color='green',\n alpha=0.3,\n cmap=plt.cm.jet)\nscatterH = ax.scatter(dataH['money growth'],dataH['inflation'],\n s=125,\n color='blue',\n alpha=0.3,\n cmap=plt.cm.jet)\nax.grid(color='white', linestyle='solid')\n\n# ax.set_title(\"This is a figure that containesa. dfa is the distance between asdfn \\n ads f adnnsd anfn da \", size=20)\nax.set_xlabel('money growth', fontsize=14)\nax.set_ylabel('inflation', fontsize=14)\nax.set_xlim([-0.2,1.4])\nax.set_ylim([-0.2,1.4])\n# ax.tick_params(axis='x', labelsize=1)\nax.xaxis.labelpad = 5\nax.yaxis.labelpad = 5\n\n\nlabelsL = ['
{title}'.format(title=c) for c in dataL['country']]\nlabelsM = ['{title}'.format(title=c) for c in dataM['country']]\nlabelsH = ['{title}
'.format(title=c) for c in dataH['country']]\n\ntooltipL = mpld3.plugins.PointHTMLTooltip(scatterL, labels=labelsL)\ntooltipM = mpld3.plugins.PointHTMLTooltip(scatterM, labels=labelsM)\ntooltipH = mpld3.plugins.PointHTMLTooltip(scatterH, labels=labelsH)\nmpld3.plugins.connect(fig, tooltipL)\nmpld3.plugins.connect(fig, tooltipM)\nmpld3.plugins.connect(fig, tooltipH)\n\n# mpld3.show()\n\nplt.tight_layout()\n# mpld3.save_html(fig,'test.html')\nhtmlString = mpld3.fig_to_html(fig,figid='figMoneyGrowthInflation')\n\n\n\nfor n in range(len(htmlString)):\n\tif htmlString[n:n+6] == \"\":\n\t\tS = htmlString[n+16:-9]\n\t\tbreak\n\t\t\nf = open('../figScript.js', 'w')\nf.write(S)\nf.close()","sub_path":"quantitytheory/python/quantityTheoryInteractiveFigure.py","file_name":"quantityTheoryInteractiveFigure.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"408785464","text":"# coding=utf-8\nimport pymysql\nfrom bs4 import BeautifulSoup\nimport requests\nimport time\nimport xlwt\nimport pymysql\nimport pandas as pd\nimport datetime\nimport urllib.request\n\n\ndef get_child(page):\n root_url='https://movie.douban.com/tag/2015?start=%s'%(page*20)\n session = requests.session()\n req = session.get(root_url, headers=headers)\n html = req.content.decode('utf-8')\n soup = BeautifulSoup(html, 'lxml')\n main_p=soup.contents[1]\n movie=main_p.find_all(class_='pl2')\n urls=[x.a['href'] for x in movie][:20]\n return urls\n\ndef get_info(url):\n session = requests.session()\n req = session.get(url, headers=headers)\n html = req.content.decode('utf-8')\n soup = BeautifulSoup(html, 'lxml')\n main_p=soup.contents[1]\n info_p=main_p.contents[3].contents[9].contents[1]\n try:\n name=info_p.find(property='v:itemreviewed').get_text()\n score=info_p.find(property='v:average').get_text()\n summary=info_p.find(property='v:summary').get_text().replace('\\n','').replace(' ','')\n info=[name,score,summary]\n print(name)\n except Exception as e:\n info=[0,0,0]\n return info\n\nif __name__ == '__main__':\n # 构造 Request headers\n agent = 'Mozilla/5.0 (Windows NT 5.1; rv:33.0) Gecko/20100101 Firefox/33.0'\n headers = {\n 'User-Agent': agent,\n\n }\n # 代理ip\n proxies = {'http://10.20.1.128': 'http://10.10.1.10:5323'}\n\n config={\n 'host':'127.0.0.1',\n 'user':'root',\n 'password':'950807',\n 'database':'crawler'\n }\n # db=pymysql.connect(**config)\n # cur=db.cursor()\n items=['name','score','summary']\n for page in range(10,20):\n urls=get_child(page)\n wk=xlwt.Workbook()\n sh=wk.add_sheet('data')\n for i,item in enumerate(items):\n sh.write(0,i,item)\n for num,url in enumerate(urls):\n info=(get_info(url))\n for i,item in enumerate(info):\n sh.write(num+1,i,item)\n wk.save('douban%s.xlsx'%page)\n time.sleep(0.5)\n","sub_path":"douban/movie/doubanCrawler.py","file_name":"doubanCrawler.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"620904727","text":"import click\nfrom faker import Faker\nfrom random import randint\nfrom flask.cli import with_appcontext\nfrom flask import current_app\n\nfrom burddy.extensions import db\nfrom burddy.utils import get_random\nfrom burddy.articles.models import Article, Comment\nfrom burddy.user.models import User\n\n@click.group()\ndef cli():\n \"\"\" commands to do things with articles \"\"\"\n\n\n@cli.command()\n@click.option('--count', default=100, type=int)\n@with_appcontext\ndef seed(count):\n 
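    # Usage sketch (assumption: this click group is registered on the Flask
    # app under a name such as "articles"): `flask articles seed --count 50`
    # would then insert 50 fake Article rows authored by random existing users.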
\"\"\" add some fake articles to the database \"\"\"\n f = Faker()\n\n if User.query.count() == 0:\n raise Exception('there are no users in the database')\n\n for i in range(count):\n a = Article(\n title=f.sentence(),\n subtitle=f.sentence(),\n body='\\n'.join(f.paragraphs(randint(5, 20))),\n views=randint(0, 100000),\n author=get_random(User)\n )\n db.session.add(a)\n db.session.commit()\n click.echo('added {} articles to the database'.format(Article.query.count()))\n\n\n@cli.command()\n@click.argument('tag_name')\n@with_appcontext\ndef add_tag(tag_name):\n from burddy.articles.models import Tag\n t = Tag.create({'name': tag_name})\n db.session.add(t)\n db.session.commit()\n click.echo('added tag: {}'.format(tag_name))\n","sub_path":"burddy/articles/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"118586322","text":"# scScope is a deep-learning based approach designed to identify cell-type composition from large-scale scRNA-seq profiles.\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport random\r\nimport time\r\nfrom .ops import average_gradients, _variable_with_weight_decay, _variable_on_cpu\r\nimport phenograph\r\nfrom sklearn.cluster import KMeans\r\n\r\n\r\ndef train(train_data_set,\r\n latent_code_dim,\r\n use_mask=True,\r\n batch_size=64,\r\n max_epoch=100,\r\n epoch_per_check=100,\r\n T=2,\r\n exp_batch_idx_input=[],\r\n encoder_layers=[],\r\n decoder_layers=[],\r\n learning_rate=0.0001,\r\n beta1=0.05\r\n ):\r\n '''\r\n scScope training:\r\n This function is used to train the scScope model on gene expression data\r\n\r\n Parameters:\r\n\r\n train_data_set: gene expression matrix of dim n * m; n = number of cells, m = number of genes.\r\n latent_code_dim: feature dimension outputted by scScope.\r\n batch_size: number of cells used in each training iteration.\r\n max_epoch: maximal epoch used in training.\r\n epoch_per_check: step to display current loss.\r\n T: depth of recurrence used in deep learning framework.\r\n use_mask: flag indicating whether to use only non-zero entries in calculating losses.\r\n learning_rate: step length in gradient descending algorithm.\r\n beta1: beta1 parameter in AdamOptimizer.\r\n num_gpus: number of gpus used for training in parallel.\r\n exp_batch_idx_input: (optional) n * batch_num matrix in one-hot format, if provided, experimental batch ids are used for batch correction.\r\n encoder_layers: network structure for encoder layers of the autoencoder; e.g. [64,128] means adding two layers with 64 and 128 nodes between the input and hidden features\r\n decoder_layers: network structure for decoder layers of the autoencoder; e.g. [64,128] means adding two layers with 64 and 128 nodes between the hidden feature and the output layer\r\n\r\n\r\n\r\n Output:\r\n\r\n model: a dataframe of scScope outputs with keys:\r\n 'latent_code_session': tensorflow session used in training.\r\n 'test_input': tensorflow dataholder for test data.\r\n 'test_exp_batch_idx': tensorflow dataholder for experimental batch label.\r\n 'imputated_output': imputed gene expressions.\r\n 'latent_code': latent features by scScope.\r\n 'removed_batch_effect': correcting layer learning by scScope.\r\n\r\n Altschuler & Wu Lab 2018. 
\r\n Software provided as is under Apache License 2.0.\r\n '''\r\n num_gpus = 1\r\n batch_size = int(batch_size*num_gpus)\r\n learning_rate = learning_rate*num_gpus\r\n\r\n if len(exp_batch_idx_input) == 0:\r\n exp_batch_idx_input = np.zeros((np.shape(train_data_set)[0], 1))\r\n consider_exp_batch = False\r\n else:\r\n consider_exp_batch = True\r\n\r\n with tf.Graph().as_default(), tf.device('/cpu:0'):\r\n\r\n train_data = tf.placeholder(\r\n tf.float32, [batch_size, np.shape(train_data_set)[1]])\r\n exp_batch_idx = tf.placeholder(tf.float32,\r\n [batch_size, np.shape(exp_batch_idx_input)[1]])\r\n\r\n # Create an optimizer that performs gradient descent.\r\n\r\n opt = tf.train.AdamOptimizer(learning_rate, beta1=beta1)\r\n\r\n tower_grads = []\r\n tower_grads2 = []\r\n\r\n with tf.variable_scope(tf.get_variable_scope()):\r\n\r\n for i in range(num_gpus):\r\n\r\n with tf.device('/gpu:%d' % (i+1)):\r\n\r\n with tf.name_scope('%s_%d' % ('tower', i)) as scope:\r\n\r\n itv = int(batch_size/num_gpus)\r\n\r\n if i == 0:\r\n\r\n re_use_flag = False\r\n\r\n else:\r\n\r\n re_use_flag = True\r\n\r\n loss = tower_loss(scope,\r\n train_data[(i) *\r\n itv:(i + 1) * itv, :],\r\n use_mask,\r\n latent_code_dim,\r\n T,\r\n encoder_layers,\r\n decoder_layers,\r\n exp_batch_idx[(\r\n i) * itv:(i + 1) * itv, :],\r\n re_use_flag)\r\n\r\n tf.get_variable_scope().reuse_variables()\r\n\r\n t_vars = tf.trainable_variables()\r\n\r\n inference_para = [\r\n var for var in t_vars if 'inference' in var.name]\r\n grads = opt.compute_gradients(loss, inference_para)\r\n tower_grads.append(grads)\r\n\r\n if consider_exp_batch:\r\n exp_batch_effect_para = [\r\n var for var in t_vars if 'batch_effect_removal' in var.name]\r\n grads2 = opt.compute_gradients(\r\n loss, exp_batch_effect_para)\r\n tower_grads2.append(grads2)\r\n\r\n # Save gradients from different GPUs.\r\n\r\n # Summarize gradients from multiple GPUs.\r\n grads = average_gradients(tower_grads)\r\n apply_gradient_op = opt.apply_gradients(grads)\r\n train_op = apply_gradient_op\r\n\r\n if consider_exp_batch:\r\n grads2 = average_gradients(tower_grads2)\r\n apply_gradient_op2 = opt.apply_gradients(grads2)\r\n train_op2 = apply_gradient_op2\r\n\r\n init = tf.global_variables_initializer()\r\n\r\n # Configuration of GPUs.\r\n config_ = tf.ConfigProto()\r\n\r\n config_.gpu_options.allow_growth = True\r\n\r\n config_.allow_soft_placement = True\r\n\r\n sess = tf.Session(config=config_)\r\n\r\n sess.run(init)\r\n\r\n total_data_size = np.shape(train_data_set)[0]\r\n\r\n total_sample_list = list(range(total_data_size))\r\n\r\n reconstruction_error = []\r\n\r\n start = time.time()\r\n for step in range(1, max_epoch+1):\r\n\r\n total_cnt = total_data_size/(batch_size)\r\n\r\n for itr_cnt in range(int(total_cnt)):\r\n\r\n sel_pos = random.sample(total_sample_list, batch_size)\r\n\r\n cur_data = train_data_set[sel_pos, :]\r\n cur_exp_batch_idx = exp_batch_idx_input[sel_pos, :]\r\n\r\n sess.run(train_op,\r\n feed_dict={train_data: cur_data,\r\n exp_batch_idx: cur_exp_batch_idx})\r\n if consider_exp_batch:\r\n sess.run(train_op2,\r\n feed_dict={train_data: cur_data,\r\n exp_batch_idx: cur_exp_batch_idx})\r\n\r\n if step % epoch_per_check == 0 and step > 0:\r\n\r\n all_input = tf.placeholder(\r\n tf.float32, [np.shape(train_data_set)[0], np.shape(train_data_set)[1]])\r\n exp_batch_idx_all = tf.placeholder(\r\n tf.float32, [np.shape(exp_batch_idx_input)[0], np.shape(exp_batch_idx_input)[1]])\r\n\r\n layer_output, train_latent_code, _ = Inference(\r\n all_input, latent_code_dim, 
T, encoder_layers, decoder_layers, exp_batch_idx_all, re_use=True)\r\n\r\n train_code_val, layer_output_val = sess.run(\r\n [train_latent_code[-1], layer_output[-1]], feed_dict={all_input: train_data_set, exp_batch_idx_all: exp_batch_idx_input})\r\n\r\n mask = np.sign(train_data_set)\r\n recon_error = np.linalg.norm(np.multiply(mask, layer_output_val)-np.multiply(\r\n mask, train_data_set))/np.linalg.norm(np.multiply(mask, train_data_set))\r\n reconstruction_error.append(recon_error)\r\n print(\"Finisheded epoch:\" + str(step))\r\n print('Current reconstruction error is: '+str(recon_error))\r\n\r\n if len(reconstruction_error) >= 2:\r\n if (abs(reconstruction_error[-1] - reconstruction_error[-2])/reconstruction_error[-2] < 1e-3):\r\n break\r\n\r\n Model = {}\r\n\r\n test_data_holder = tf.placeholder(\r\n tf.float32, [None, np.shape(train_data_set)[1]])\r\n test_exp_batch_idx = tf.placeholder(\r\n tf.float32, [None, np.shape(exp_batch_idx_input)[1]])\r\n\r\n test_layer_out, test_latent_code, removed_batch_effect = Inference(\r\n test_data_holder, latent_code_dim, T, encoder_layers, decoder_layers, test_exp_batch_idx, re_use=True)\r\n\r\n Model['latent_code_session'] = sess\r\n Model['test_input'] = test_data_holder\r\n Model['test_exp_batch_idx'] = test_exp_batch_idx\r\n Model['Imputated_output'] = test_layer_out\r\n Model['latent_code'] = test_latent_code\r\n Model['removed_batch_effect'] = removed_batch_effect\r\n\r\n duration = time.time()-start\r\n print('Finish training ' + str(len(train_data_set)) + ' samples after '+str(step)+' epochs. The total training time is ' +\r\n str(duration)+' seconds.')\r\n\r\n return Model\r\n\r\n\r\ndef predict(test_data, model, batch_effect=[]):\r\n '''\r\n Make predications using the learned scScope model.\r\n\r\n Parameter:\r\n test_data: input gene expression matrix.\r\n model: pre-trained scScope model.\r\n\r\n Output:\r\n latent_fea: scScope features output.\r\n output_val: gene expressions with imputations.\r\n predicted_batch_effect: batch effects inferenced by scScope, if experimental batches exist.\r\n\r\n Altschuler & Wu Lab 2018.\r\n Software provided as is under Apache License 2.0.\r\n '''\r\n\r\n sess = model['latent_code_session']\r\n test_data_holder = model['test_input']\r\n test_exp_batch_idx_holder = model['test_exp_batch_idx']\r\n output = model['Imputated_output']\r\n latent_code = model['latent_code']\r\n removed_batch_effect = model['removed_batch_effect']\r\n if len(batch_effect) == 0:\r\n batch_effect_idx = np.zeros((np.shape(test_data)[0], 1))\r\n else:\r\n batch_effect_idx = batch_effect\r\n\r\n for i in range(len(latent_code)):\r\n\r\n latent_code_val, output_val, predicted_batch_effect = sess.run([latent_code[i], output[i], removed_batch_effect], feed_dict={\r\n test_data_holder: test_data, test_exp_batch_idx_holder: batch_effect_idx})\r\n if i == 0:\r\n latent_fea = latent_code_val\r\n else:\r\n latent_fea = np.concatenate([latent_fea, latent_code_val], 1)\r\n\r\n return latent_fea, output_val, predicted_batch_effect\r\n\r\n\r\ndef Inference(input_d, latent_code_dim, T, encoder_layers, decoder_layer, exp_batch_idx=[], re_use=False):\r\n '''\r\n The deep neural network structure of scScope.\r\n\r\n Parameters:\r\n input_d: gene expression matrix of dim n * m; n = number of cells, m = number of genes.\r\n latent_code_dim: the dimension of features outputted by scScope.\r\n T: number of recurrent structures used in deep learning framework.\r\n encoder_layers: the network structure for encoder layers of the autoencoder.\r\n 
decoder_layers: the network structure for decoder layers of the autoencoder.\r\n exp_batch_idx: if provided, experimental batch labels are stored in an n * batch_num matrix in one-hot format.\r\n re_use: if re-use variables in training.\r\n\r\n Output:\r\n output_list: outputs of decoder (y_c in the paper) in T recurrent structures.\r\n latent_code_list: latent representations (h_c in the paper) in T recurrent structures.\r\n batch_effect_removal_layer: experimental batch effects inferred by scScope.\r\n\r\n\r\n Altschuler & Wu Lab 2018.\r\n Software provided as is under Apache License 2.0.\r\n '''\r\n\r\n input_shape = input_d.get_shape().as_list()\r\n\r\n input_dim = input_shape[1]\r\n\r\n with tf.variable_scope('scScope') as scope_all:\r\n\r\n if re_use == True:\r\n\r\n scope_all.reuse_variables()\r\n\r\n latent_code_list = []\r\n output_list = []\r\n exp_batch_id_shape = exp_batch_idx.get_shape().as_list()\r\n exp_batch_dim = exp_batch_id_shape[1]\r\n with tf.variable_scope('batch_effect_removal'):\r\n batch_effect_para_weight = _variable_with_weight_decay('batch_effect_weight', [exp_batch_dim, input_dim],\r\n stddev=0, wd=0)\r\n\r\n batch_effect_removal_layer = tf.matmul(\r\n exp_batch_idx, batch_effect_para_weight)\r\n\r\n with tf.variable_scope('inference'):\r\n for i in range(T):\r\n if i == 0:\r\n encoder_layer_list_W = []\r\n encoder_layer_list_b = []\r\n if len(encoder_layers) > 0:\r\n for l in range(len(encoder_layers)):\r\n if l == 0:\r\n encoder_layer_list_W.append(_variable_with_weight_decay('encoder_layer' + str(l),\r\n [input_dim,\r\n encoder_layers[l]],\r\n stddev=0.1, wd=0))\r\n encoder_layer_list_b.append(_variable_on_cpu('encoder_layer_bias' + str(l), [encoder_layers[l]],\r\n tf.constant_initializer(0)))\r\n else:\r\n encoder_layer_list_W.append(_variable_with_weight_decay('encoder_layer' + str(l),\r\n [encoder_layers[l-1],\r\n encoder_layers[l]],\r\n stddev=0.1, wd=0))\r\n encoder_layer_list_b.append(_variable_on_cpu('encoder_layer_bias' + str(l), [encoder_layers[l]],\r\n tf.constant_initializer(0)))\r\n latent_code_layer_input_dim = encoder_layers[-1]\r\n\r\n else:\r\n latent_code_layer_input_dim = input_dim\r\n\r\n W_fea = _variable_with_weight_decay('latent_layer_weights',\r\n [latent_code_layer_input_dim,\r\n latent_code_dim],\r\n stddev=0.1, wd=0)\r\n b_fea = _variable_on_cpu('latent_layer_bias', [latent_code_dim],\r\n tf.constant_initializer(0))\r\n\r\n decoder_layer_list_W = []\r\n decoder_layer_list_b = []\r\n if len(decoder_layer) > 0:\r\n for l in range(len(decoder_layer)):\r\n if l == 0:\r\n decoder_layer_list_W.append(_variable_with_weight_decay('dencoder_layer' + str(l),\r\n [latent_code_dim,\r\n decoder_layer[l]],\r\n stddev=0.1, wd=0))\r\n decoder_layer_list_b.append(\r\n _variable_on_cpu('decoder_layer_bias' + str(l), [decoder_layer[l]],\r\n tf.constant_initializer(0)))\r\n else:\r\n decoder_layer_list_W.append(_variable_with_weight_decay('dencoder_layer' + str(l),\r\n [decoder_layer[l - 1],\r\n decoder_layer[l]],\r\n stddev=0.1, wd=0))\r\n decoder_layer_list_b.append(\r\n _variable_on_cpu('decoder_layer_bias' + str(l), [decoder_layer[l]],\r\n tf.constant_initializer(0)))\r\n decoder_last_layer_dim = decoder_layer[-1]\r\n\r\n else:\r\n decoder_last_layer_dim = latent_code_dim\r\n\r\n W_recon = _variable_with_weight_decay('reconstruction_layer_weights',\r\n [decoder_last_layer_dim,\r\n input_dim],\r\n stddev=0.1, wd=0)\r\n b_recon = _variable_on_cpu('reconstruction_layer_bias', [input_dim],\r\n tf.constant_initializer(0))\r\n input_vec = 
tf.nn.relu(input_d-batch_effect_removal_layer)\r\n else:\r\n\r\n if i == 1:\r\n W_feedback_1 = _variable_with_weight_decay('impute_layer_weights',\r\n [input_dim, 64],\r\n stddev=0.1, wd=0)\r\n b_feedback_1 = _variable_on_cpu(\r\n 'impute_layer_bias', [64], tf.constant_initializer(0))\r\n\r\n W_feedback_2 = _variable_with_weight_decay('impute_layer_weights2',\r\n [64, input_dim],\r\n stddev=0.1, wd=0)\r\n b_feedback_2 = _variable_on_cpu(\r\n 'impute_layer_bias2', [input_dim], tf.constant_initializer(0))\r\n # else:\r\n # W_feedback_2 = ops._variable_with_weight_decay('impute_layer_weights2',\r\n # [input_dim, input_dim],\r\n # stddev=0.1, wd=0)\r\n # b_feedback_2 = ops._variable_on_cpu(\r\n # 'impute_layer_bias2', [input_dim], tf.constant_initializer(0))\r\n\r\n # if input_dim>500:\r\n intermediate_layer = tf.nn.relu(\r\n tf.matmul(output, W_feedback_1) + b_feedback_1)\r\n imputation_layer = tf.multiply(\r\n 1-tf.sign(input_d), (tf.matmul(intermediate_layer, W_feedback_2)+b_feedback_2))\r\n # else:\r\n # imputation_layer = tf.multiply(1 - tf.sign(input_d),\r\n # (tf.matmul(output, W_feedback_2) + b_feedback_2))\r\n\r\n input_vec = tf.nn.relu(\r\n imputation_layer+input_d-batch_effect_removal_layer)\r\n\r\n intermedate_encoder_layer_list = []\r\n if len(encoder_layer_list_W) > 0:\r\n for i in range(len(encoder_layer_list_W)):\r\n if i == 0:\r\n intermedate_encoder_layer_list.append(tf.nn.relu(\r\n tf.matmul(input_vec, encoder_layer_list_W[i])+encoder_layer_list_b[i]))\r\n else:\r\n intermedate_encoder_layer_list.append(tf.nn.relu(tf.matmul(\r\n intermedate_encoder_layer_list[-1], encoder_layer_list_W[i])+encoder_layer_list_b[i]))\r\n\r\n intermedate_encoder_layer = intermedate_encoder_layer_list[-1]\r\n else:\r\n intermedate_encoder_layer = input_vec\r\n\r\n latent_code = tf.nn.relu(\r\n tf.matmul(intermedate_encoder_layer, W_fea)+b_fea)\r\n\r\n inter_decoder_layer_list = []\r\n\r\n if len(decoder_layer_list_W) > 0:\r\n for i in range(len(decoder_layer_list_W)):\r\n if i == 0:\r\n inter_decoder_layer_list.append(tf.nn.relu(\r\n tf.matmul(latent_code, decoder_layer_list_W[i])+decoder_layer_list_b[i]))\r\n else:\r\n inter_decoder_layer_list.append(tf.nn.relu(tf.matmul(\r\n inter_decoder_layer_list[-1], decoder_layer_list_W[i])+decoder_layer_list_b[i]))\r\n inter_decoder_layer = inter_decoder_layer_list[-1]\r\n else:\r\n inter_decoder_layer = latent_code\r\n\r\n output = tf.nn.relu(\r\n tf.matmul(inter_decoder_layer, W_recon)+b_recon)\r\n latent_code_list.append(latent_code)\r\n output_list.append(output)\r\n\r\n return output_list, latent_code_list, batch_effect_removal_layer\r\n\r\n\r\ndef tower_loss(scope, batch_data, use_mask, latent_code_dim, T, encoder_layers, decoder_layers, exp_batch_id,\r\n re_use_flag):\r\n '''\r\n Overall losses of scScope on multiple GPUs.\r\n\r\n Parameter:\r\n scope: tensorflow name scope\r\n batch_data: cell batch for calculating the loss\r\n use_mask: flag indicating only use non-zero genes to calculate losses.\r\n latent_code_dim: the dimension of features outputted by scScope.\r\n T: number of recurrent structures used in deep learning framework.\r\n encoder_layers: the network structure for encoder layers of the autoencoder.\r\n decoder_layers: the network structure for decoder layers of the autoencoder.\r\n exp_batch_idx: if provided, experimental batch labels are stored in an n * batch_num matrix in one-hot format.\r\n re_use_flag: if re-use variables in training.\r\n\r\n Output:\r\n total_loss: total loss of multiple GPUs.\r\n\r\n Altschuler & Wu Lab 
2018.\r\n Software provided as is under Apache License 2.0.\r\n '''\r\n\r\n layer_out, latent_code, batch_effect_removal_layer = Inference(\r\n batch_data, latent_code_dim, T, encoder_layers, decoder_layers, exp_batch_id, re_use=re_use_flag)\r\n\r\n _ = Cal_Loss(layer_out, batch_data, use_mask, batch_effect_removal_layer)\r\n\r\n losses = tf.get_collection('losses', scope)\r\n\r\n total_loss = tf.add_n(losses, name='total_loss')\r\n\r\n return total_loss\r\n\r\n\r\ndef Cal_Loss(outpout_layer_list, input_data, use_mask, removed_exp_batch_effect):\r\n '''\r\n Loss function of scScope.\r\n\r\n Parameter:\r\n outpout_layer_list: encoder output of T recurrent structures in scScope.\r\n input_data: original gene expression matrix inputted into scScope.\r\n use_mask: flag indicating only use non-zero genes to calculate losses.\r\n removed_exp_batch_effect: removed exeperimental batch effects\r\n\r\n Output:\r\n\r\n acc_loss: loss function value.\r\n\r\n Altschuler & Wu Lab 2018.\r\n Software provided as is under Apache License 2.0.\r\n '''\r\n\r\n input_data_corrected = input_data - removed_exp_batch_effect\r\n\r\n if use_mask:\r\n val_mask = tf.sign(input_data_corrected)\r\n else:\r\n val_mask = tf.sign(input_data_corrected + 1)\r\n\r\n for i in range(len(outpout_layer_list)):\r\n layer_out = outpout_layer_list[i]\r\n if i == 0:\r\n reconstruct_loss = tf.reduce_mean(\r\n tf.norm(tf.multiply(val_mask, (layer_out - input_data_corrected))))\r\n else:\r\n reconstruct_loss = reconstruct_loss + \\\r\n tf.reduce_mean(\r\n tf.norm(tf.multiply(val_mask, (layer_out - input_data_corrected))))\r\n acc_loss = reconstruct_loss\r\n tf.add_to_collection('losses', acc_loss)\r\n return acc_loss\r\n\r\n\r\ndef scalable_cluster(latent_code,\r\n kmeans_num=500,\r\n cluster_num=400,\r\n display_step=50,\r\n phenograh_neighbor=30\r\n ):\r\n '''\r\n Scalable cluster:\r\n To perform graph clustering on large-scale data, we designed a scalable clustering strategy by combining k-means and PhenoGraph.\r\n Briefly, we divide cells into M (kmeans_num) groups of equal size and perform K-means (cluster_num) clustering on each group independently. \r\n The whole dataset is split to M×K clusters and we only input the cluster centroids into PhenoGraph for graph clustering. \r\n Finally, each cell is assigned to graph clusters according to the cluster labels of its nearest centroids.\r\n\r\n Parameters:\r\n\r\n latent_code: n*m matrix; n = number of cells, m = dimension of feature representation.\r\n kmeans_num: number of independent K-means clusterings used. This is also the subset number.\r\n cluster_num: cluster number for each K-means clustering. This is also the \"n_clusters\" in KMeans function in sklearn package.\r\n display_step: displaying the process of K-means clustering.\r\n phenograh_neighbor: \"k\" parameter in PhenoGraph package.\r\n\r\n Output:\r\n\r\n Cluster labels for input cells.\r\n\r\n\r\n Altschuler & Wu Lab 2018. \r\n Software provided as is under Apache License 2.0.\r\n '''\r\n\r\n print('Scalable clustering:')\r\n print('Use %d subsets of cells for initially clustering...' 
% kmeans_num)\r\n\r\n stamp = np.floor(np.linspace(0, latent_code.shape[0], kmeans_num + 1))\r\n stamp = stamp.astype(int)\r\n\r\n cluster_ceter = np.zeros([kmeans_num * cluster_num, latent_code.shape[1]])\r\n mapping_sample_kmeans = np.zeros(latent_code.shape[0])\r\n\r\n for i in range(kmeans_num):\r\n\r\n low_bound = stamp[i]\r\n upp_bound = stamp[i + 1]\r\n sample_range = np.arange(low_bound, upp_bound)\r\n select_sample = latent_code[sample_range, :]\r\n\r\n kmeans = KMeans(n_clusters=cluster_num,\r\n random_state=0).fit(select_sample)\r\n label = kmeans.labels_\r\n\r\n for j in range(cluster_num):\r\n cluster_sample_idx = np.nonzero(label == j)[0]\r\n cluster_sample = select_sample[cluster_sample_idx, :]\r\n cluster_ceter[i * cluster_num + j,\r\n :] = np.mean(cluster_sample, axis=0)\r\n mapping_sample_kmeans[sample_range[cluster_sample_idx]\r\n ] = i * cluster_num + j\r\n\r\n if i % display_step == 0:\r\n print('\\tK-means clustering for %d subset.' % i)\r\n\r\n print('Finish intially clustering by K-means.')\r\n print('Start PhenoGraph clustering...\\n')\r\n\r\n label_pheno, graph, Q = phenograph.cluster(\r\n cluster_ceter, k=phenograh_neighbor, n_jobs=1)\r\n\r\n label = np.zeros(latent_code.shape[0])\r\n for i in range(label_pheno.max() + 1):\r\n center_index = np.nonzero(label_pheno == i)[0]\r\n for j in center_index:\r\n sample_index = np.nonzero(mapping_sample_kmeans == j)[\r\n 0] # samples belong to this center\r\n label[sample_index] = i\r\n print('Finish density down-sampling clustering.')\r\n\r\n return label\r\n","sub_path":"scscope_cpu/scscope/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":28246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"396232724","text":"import time\nimport os, os.path\nimport random\nimport cv2\nimport glob\nimport keras\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.cluster import KMeans\nfrom sklearn.mixture import GaussianMixture\nfrom sklearn.decomposition import PCA\n\n\nimport pandas as pd\nimport numpy as np\n\n\n#DIR = \"./Data_example_ph2\"\n#codes = os.listdir(DIR)\n#codes.pop(0)\n#codes.sort()\n\ndef load_imgs():\n category_dir = os.listdir(DIR)\n stats=[]\n result_imgs = []\n result_labels = []\n for thing in category_dir:\n if thing!='.DS_Store':\n label= thing\n path = os.path.join(DIR,thing)\n file_names = os.listdir(path)\n for file in file_names:\n result_labels.append(label)\n image = cv2.imread(os.path.join(path,file))\n image = cv2.resize(image, (224,224))\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = np.asarray(image)\n image =image/255\n result_imgs.append(image)\n result_imgs = np.asarray(result_imgs)\n result_labels = np.asarray(result_labels)\n return result_imgs,result_labels\n\n#X_train,X_lables = load_imgs()\n\n#vgg16_model = keras.applications.vgg16.VGG16(include_top=False, weights=\"imagenet\", input_shape=(224,224,3))\n\n\ndef covnet_transform(covnet_model, raw_images):\n\n # Pass our training data through the network\n pred = covnet_model.predict(raw_images)\n\n # Flatten the array\n flat = pred.reshape(raw_images.shape[0], -1)\n \n return flat\n\ndef create_train_kmeans(data, number_of_clusters):\n # n_jobs is set to -1 to use all available CPU cores. This makes a big difference on an 8-core CPU\n # especially when the data size gets much bigger. 
#perfMatters\n \n k = KMeans(n_clusters=number_of_clusters, n_jobs=-1, random_state=728)\n # Let's do some timings to see how long it takes to train.\n start = time.time()\n\n # Train it up\n k.fit(data)\n\n # Stop the timing \n end = time.time()\n\n # And see how long that took\n print(\"Training took {} seconds\".format(end-start))\n \n return k\n\n#vgg16_output = covnet_transform(vgg16_model, X_train)\n\n#K_vgg16 = create_train_kmeans(vgg16_output)\n#k_vgg16_pred = K_vgg16.predict(vgg16_output)\n\ndef cluster_label_count(clusters, labels):\n \n count = {}\n \n # Get unique clusters and labels\n unique_clusters = list(set(clusters))\n unique_labels = list(set(labels))\n \n # Create counter for each cluster/label combination and set it to 0\n for cluster in unique_clusters:\n count[cluster] = {}\n \n for label in unique_labels:\n count[cluster][label] = 0\n \n # Let's count\n for i in range(len(clusters)):\n count[clusters[i]][labels[i]] +=1\n \n cluster_df = pd.DataFrame(count)\n \n return cluster_df\n\n\n#vgg16_pred_codes = [codes[x] for x in k_vgg16_pred]\n\nfrom sklearn.metrics import accuracy_score, f1_score\n\ndef print_scores(true, pred):\n acc = accuracy_score(true, pred)\n f1 = f1_score(true, pred, average=\"macro\")\n return \"\\n\\tF1 Score: {0:0.8f} | Accuracy: {0:0.8f}\".format(f1,acc)\n\n#print(\"KMeans VGG16:\", print_scores(X_lables, vgg16_pred_codes))\n\n\n\n","sub_path":"NSML_2/Image_retrival/vgg16_cluster.py","file_name":"vgg16_cluster.py","file_ext":"py","file_size_in_byte":3264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"604818212","text":"from django.db import models\nfrom django.contrib.auth.models import User\n\n\nCATEGORY_OPTIONS = (\n\t('All', 'All'),\n\t('Business and Commerce', 'Business and Commerce'),\n\t('Architecture, Construction, Urban Design', 'Architecture, Construction, Urban Design'),\n\t('Fashion & Style', 'Fashion & Style'),\n\t('Computer Science and Information Technology', 'Computer Science and Information Technology'),\n\t('Healthcare','Healthcare'),\n\t('Retail & Sales','Retail & Sales'),\n\t('Food & Restaurant', 'Food & Restaurant'),\n\t('Engineering', 'Engineering'),\n\t('Charity and fundraising organising', 'Charity and fundraising organising'),\n\t('Research and Thesis', 'Research and Thesis'),\n\t)\n\nLOCATION_OPTIONS = (\n\t('All', 'All'),\n\t('Melbourne', 'Melbourne'),\n\t('Brunswick', 'Brunswick'),\n\t('WorldWide', 'WorldWide'),\n\n\t)\n#sligh modt\nclass ProjectTitle(models.Model):\n\tname = models.CharField(max_length = 100)\n\tuser = models.ForeignKey(User)\n\tprojectID = models.AutoField(primary_key=True)\n\t\n\t\n\tlocation = models.CharField(max_length=50, choices = LOCATION_OPTIONS)\n\tcontact_email = models.EmailField()\n\tdescription = models.TextField()\n\tteam_members = models.CharField(max_length = 100)\n\ttimestamp = models.DateTimeField(auto_now_add=True, auto_now=False)\n\tupdated = models.DateTimeField(auto_now_add=False, auto_now=True)\n\tactive = models.BooleanField(default = True)\n\texpertise = models.CharField(max_length=50, choices = CATEGORY_OPTIONS)\n\n\n\tdef __unicode__(self):\n\t\treturn self.name\n\n\n# Create your models here.\n\n#class searchProject(models.Model):\n","sub_path":"ProjectUnite1-Design1-master 5/ProjectUnite/projects/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"467055192","text":"# 
题目给出一个数组,要求找出其中出现次数多于半数的那个数字。题目保证肯定存在这样的一个数字。\n# 方法1:投票法。\n# 我们从左到右的遍历数字。并且初始化的认为第一个数字是出现最多的那个数字,且数量为1。\n# 当遍历数字的时候,如果和现在认为的答案相同,则计数+1。否则计数-1,当计数为0的是否,修改答案为此时遍历的数字。\n# 出现的次数多于半数的那个数字最终会有多于半数的计数,从而得到答案。\n# 时间复杂度:O(n)\n# 空间复杂度:O(1)\nclass Solution:\n def majorityElement(self, nums: List[int]) -> int:\n count = 1\n result = nums[0]\n for i in range(1, len(nums)):\n if nums[i] == result:\n count += 1\n else:\n count -= 1\n if count == 0:\n result = nums[i]\n count = 1\n\n return result\n\n\n# 方法2:字典计数,取最大值。\n# 先将数组中的数字和其出现的次数统计在字典当中。然后执行max,函数。\n# 时间复杂度:O(n)\n# 空间复杂度:O(n)\nfrom collections import Counter\nclass Solution:\n def majorityElement(self, nums: List[int]) -> int:\n counterDict = Counter(nums)\n # Python的max函数, key指定一个接受一个参数的方法,其方法的返回值用于比较max.该方法所接受的参数为iterable对象中的元素。\n return max(counterDict, key=lambda k: counterDict.get(k))\n\n","sub_path":"面试-LeetCode题/LeetCode每日一题/LeetCode169(MajorityElement)/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"223943077","text":"#!/usr/bin/env python3\n\nimport re\n\n#file = input()\ntry :\n #fhand = open(file)\n #fhand = open('regex_sum_42.txt')\n fhand = open('regex_sum_99264.txt')\nexcept :\n print(\"Error opening file\")\n quit()\n\ntotal = 0\nfor line in fhand :\n line = line.strip()\n numbers = re.findall('[0-9]+', line)\n numbers = map(int, numbers)\n total += sum(numbers)\nprint(total)\n","sub_path":"Python/reg_ex_sum.py","file_name":"reg_ex_sum.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"504300825","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# initiatives-1.2.py\n# \n# Copyright 2012 Nathaniel Ray \n# \n# For tracking your D&D initiatives! 
Now with tweaked layout,\n# tweaked tweaks, tooltips, and ttk!\n#\n# Sorting after editing still doesn't work quite right...\n#\n# Version 1.2\n# \n\n#import tkinter\nfrom tkinter.constants import *\nfrom tkinter import *\nfrom tkinter.tix import *\nfrom tkinter.ttk import *\nimport random\n\nclass Inits:\n #define functions and classes\n def delete_item(self):\n try:\n index = listbox.curselection()[0]\n listbox.delete(index)\n except IndexError:\n pass\n \n def add_item3(self):\n nums = random.randint(1,20) + int(bonus_e.get())\n whole = str(nums).rjust(2, '0'), name_e.get()\n listbox.insert(END, whole)\n name_e.focus()\n name_e.delete(0, END)\n bonus_e.delete(0, END)\n bonus_e.insert(END, '0')\n\n def move_up(self):\n try:\n selected = listbox.curselection()[0]\n selected_i = listbox.index(selected)\n selected_v = listbox.get(selected_i)\n \n above_i = selected_i - 1\n \n listbox.delete(selected_i)\n listbox.insert(above_i, selected_v)\n listbox.select_set(above_i)\n listbox.activate(above_i)\n listbox.yview(above_i)\n except IndexError:\n pass\n\n def move_down(self):\n try:\n selected = listbox.curselection()[0]\n selected_i = listbox.index(selected)\n selected_v = listbox.get(selected_i)\n \n below_i = selected_i + 1\n \n listbox.delete(selected_i)\n listbox.insert(below_i, selected_v)\n listbox.select_set(below_i)\n listbox.activate(below_i)\n listbox.yview(below_i)\n except IndexError:\n pass\n\n def sort_list(self):\n temp_list = list(listbox.get(0, END))\n temp_list.sort(key=lambda thing: thing[0])\n listbox.delete(0, END)\n for item in temp_list:\n listbox.insert(END, item)\n\n class EditBox:\n def __init__(self, parent):\n top = self.top = Toplevel(parent)\n top.title('Edit Entry')\n top.geometry('+900+100')\n\n self.lf = LabelFrame(top, text='Edit Entry')\n self.lf.grid(column=0, row=0, columnspan=2, padx=5, pady=5)\n \n self.l = Label(self.lf, text='Format: \"## name\"')\n self.l.grid(column=0, row=1, columnspan=2, padx=5, pady=5)\n \n self.e = Entry(self.lf, width=15)\n self.e.insert(0, '')\n self.e.grid(column=0, row=2, padx=5, pady=5)\n\n b = Button(self.lf, text=\"ok\", command=self.ok)\n b.grid(column=1, row=2, padx=5, pady=5)\n\n def ok(self):\n self.result = self.e.get()\n self.top.destroy()\n\n def edit_item(self):\n try:\n index = listbox.curselection()[0]\n item_to_change = listbox.get(index)\n \n main_window.update()\n edit = self.EditBox(main_window)\n main_window.wait_window(edit.top)\n \n item_to_change = edit.result\n listbox.delete(index)\n except IndexError:\n pass\n listbox.insert(index, item_to_change)\n\n #set up window as init\n def __init__(self):\n global listbox, main_window, name_e, bonus_e\n \n main_window = Tk()\n main_window.title(\"Initiative Tracker\")\n main_window.geometry('+900+100')\n frame = Frame(main_window, relief=RIDGE, borderwidth=2)\n frame.pack(fill=BOTH, expand=5)\n\n toplabel = Label(frame, text=\"Initiative Tracker\", font=('Arial', 15, 'bold'), foreground='blue')\n toplabel.grid(column=0, row=0, columnspan=7, pady=5)\n\n #the listbox area\n listbox = Listbox(frame, selectmode=EXTENDED, height=11)\n listbox.grid(column=0, row=1, columnspan=3, rowspan=7)\n\n yscroll = Scrollbar(frame, command=listbox.yview, orient=VERTICAL)\n yscroll.grid(column=3, row=1, rowspan=7, sticky=(N,S))\n listbox.configure(yscrollcommand=yscroll.set)\n\n up_b = Button(frame, text=\"˄\", command=self.move_up, width=3)\n up_b.grid(column=0, row=8)\n\n down_b = Button(frame, text=\"˅\", command=self.move_down, width=3)\n down_b.grid(column=1, row=8)\n\n 
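        # Editor's note: sort_list below orders entries ascending by the padded
        # roll prefix built in add_item3; D&D initiative is conventionally
        # resolved highest-first, so sorting with reverse=True may be the
        # intended behaviour (an assumption, not stated in the original).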
sort_b = Button(frame, text='sort', command=self.sort_list, width=8)\n sort_b.grid(column=2, row=8)\n\n #the add labelframe\n add_f = LabelFrame(frame, text=\"Add a new entry\")\n add_f.grid(column=4, row=1, columnspan=2, rowspan=3, padx=5)\n\n name_e = Entry(add_f, width=15)\n name_e.insert(0, '')\n name_e.grid(column=5, row=2, padx=2, pady=2)\n\n bonus_e = Entry(add_f, width=2)\n bonus_e.insert(0, '0')\n bonus_e.grid(column=6, row=2, padx=2, pady=2)\n bonus_e.config(justify=CENTER)\n\n add_b = Button(add_f, text=\"roll and add\", command=self.add_item3, width=16)\n add_b.grid(column=5, row=3, columnspan=2, padx=5, pady=5)\n\n #the other stuff\n edit_b = Button(frame, text=\"edit selected\", command=self.edit_item, width=16)\n edit_b.grid(column=4, row=4, columnspan=3, pady=4)\n\n del_b = Button(frame, text=\"delete selected\", command=self.delete_item, width=16)\n del_b.grid(column=4, row=5, columnspan=3)\n\n quit_b = Button(frame, text=\"exit\", command=main_window.destroy, width=16)\n quit_b.grid(column=4, row=8, columnspan=3, pady=4, sticky=S)\n\n status = Label(frame, width=16)\n status.grid(column=4, row=7, columnspan=3, padx=5)\n\n b = Balloon(frame, statusbar=status, initwait=0)\n b.bind_widget(name_e, statusmsg='Enter name')\n b.bind_widget(bonus_e, statusmsg='Enter init. bonus')\n\n main_window.mainloop()\n","sub_path":"sessiontools/initsTixC.py","file_name":"initsTixC.py","file_ext":"py","file_size_in_byte":6004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"653042399","text":"import os, sys\n\n#def print(s):\n# sys.stdout.write(s + \"\\r\\n\")\ndef is_num(c):\n if ord(c)>=ord('0') and ord(c)<=ord('9'):\n return True\n else:\n return False\n\ndef gen_indices(inp_file):\n for line in inp_file:\n if ('Page' in line) or ('R' in line):\n print(line)\n elif is_num(line[0]):\n nums = line.split(',')\n for n in nums:\n yield int(n.strip())\n \n \n \nif __name__ == '__main__':\n if (len(sys.argv) != 3):\n print('usage: prog refs list')\n sys.exit(-1)\n frefs = open(sys.argv[1], 'r')\n orig_refs = frefs.readlines()\n frefs.close()\n\n orig2new = dict()\n new_index = 1\n new_refs = len(orig_refs) * [None]\n flist = open(sys.argv[2], 'r')\n for index in gen_indices(flist):\n if not(orig2new.has_key(index)):\n orig2new[index] = new_index\n new_refs[new_index - 1] = orig_refs[index - 1]\n new_index += 1\n print(\" %i --> %i \"%(index, orig2new[index]))\n flist.close()\n\n fout1 = open('output1.txt', 'w')\n fout2 = open('output2.txt', 'w')\n new_line = \"\\r\\n\"\n index = 1\n for line in new_refs:\n fout1.write(str(index) + \" :: \" + line + new_line)\n fout2.write( line[3:])\n index += 1\n fout1.close()\n fout2.close()\n","sub_path":"prog.py","file_name":"prog.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"516205850","text":"class Solution:\n def search(self, nums: List[int], target: int) -> int:\n left = 0\n right = len(nums) - 1\n \n while(left nums[right]):\n left = mid+1\n else: \n right = mid\n \n \n offset = left\n left = 0\n right = len(nums)-1\n \n while(left <= right):\n mid = (left+right)//2\n originalMid = (mid+offset) % len(nums)\n \n if(nums[originalMid] == target):\n return originalMid\n if(nums[originalMid] > target):\n right = mid-1\n else:\n left = mid+1\n \n return -1\n 
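        # A minimal usage sketch (illustrative values, not from the original):
        #   search([4, 5, 6, 7, 0, 1, 2], 0)  ->  4   # pivot offset is 4, target at index 4
        #   search([4, 5, 6, 7, 0, 1, 2], 3)  -> -1   # absent targets return -1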
\n","sub_path":"Leetcode/Q33.py","file_name":"Q33.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"42974580","text":"import random\nobstacles_list = []\n\n\ndef create_random_obstacles():\n \"\"\"\n Creates a list of at least 10 obstacles with each coodinate in the \n range of (-100, 100) for the x-coordinate and (-200, 200) for the \n y-coordinate\n return: a list of obstacles\n \"\"\"\n print(\"in obstacles\")\n global obstacles_list\n num_of_obstacles = random.randint(1,10)\n for i in range(num_of_obstacles):\n x = random.randint(-100,101)\n y = random.randint(-200,201)\n obstacles_list.append((x,y))\n return obstacles_list\n \n\n\ndef is_position_blocked(x,y):\n \"\"\"\n It checks if the new position is not in the blocked\n position.\n :param x: the new/proposed x position.\n :param y: the new/proposed y position.\n :return: True if the it falls in the blocked position.\n \"\"\"\n\n obstacles_list = get_obstacles()\n for i in obstacles_list:\n if x in range(i[0],i[0] + 8) and y in range(i[1],i[1] + 8):\n return True\n return False\n \n\ndef is_path_blocked(x1,y1, x2, y2):\n \"\"\"\n It checks if the path which the turtle has to pass is not blocked.\n :para x1: first x-coordinate of the obstacles.\n :para x2: second x-coordinate of the obstacles.\n :para y1: first y-coordinate of the obstacles.\n :para y1: second y-coordinate of the obstacles.\n :return: True if the it falls in the blocked position.\n \"\"\" \n obstacles_list = get_obstacles()\n \n \n for i in obstacles_list:\n if x1 == x2 and x1 in range(i[0], i[0] + 8) and (i[1] in range(y1, y2) or i[1] in range(y1, y2,-1)):\n return True\n elif y1 == y2 and y1 in range(i[1], i[1]+8) and (i[0] in range(x1, x2) or i[0] in range(x1, x2, -1)):\n return True\n return False\n\n\ndef get_obstacles():\n \"\"\"\n returns the list of randomly creared obsticles.\n \"\"\"\n obstacles = obstacles_list\n return obstacles","sub_path":"my_python_projects/submission_003-robot-5/maze/obstacles.py","file_name":"obstacles.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"614557656","text":"import FWCore.ParameterSet.Config as dune\n\n#load the prototype description of the MemoryUseProd EDProducer\nfrom DuneRawSim.Modules.memoryUseProd_cfi import memoryUseProd as _memoryUseProd\n\nprocess = dune.Process(\"RAW\")\n\nprocess.source = dune.Source(\"EmptySource\")\n\n\ntasks =[]\ncheckClusters = []\ninteractionFinders =[]\n\ndeleteEarlyList = []\n\ndef generateProductName(moduleName):\n return \"chars_\"+moduleName+\"__RAW\"\ndef generateMightGet(moduleNames):\n return [ generateProductName(mod) for mod in moduleNames]\n\n#create per APA modules\nfor x in range(1,151):\n #this module simulates reading one APA's data from storage on demand\n apaName = \"apa\"+str(x)\n setattr(process, \"apa\"+str(x), _memoryUseProd.clone(dataSizes = [40*1000*1000], consume = []))\n #want to setup to delete the memory\n deleteEarlyList.extend(generateMightGet([apaName]))\n\n #this module simulates finding clusters in one given APA\n clusterName = \"cluster\"+str(x)\n setattr(process, clusterName, \n _memoryUseProd.clone( dataSizes= [400*1000], \n consume = [apaName],\n uSleeps = [100],\n mightGet = generateMightGet([apaName])\n ))\n\n tasks.append( dune.Task(getattr(process,apaName), getattr(process,clusterName) ) )\n\n #for each nearest neighbor APAs we want to create a 
InteractionFinder\n # which simulates handling an interaction which crosses an APA boundaries\n checkClusters.append(clusterName)\n if len(checkClusters) > 3:\n checkClusters.pop(0)\n if len(checkClusters) == 3:\n interactionFinderName=\"interaction\"+str(x-1)\n setattr(process, interactionFinderName, \n _memoryUseProd.clone( consume = checkClusters,\n mightGet = generateMightGet(checkClusters))\n )\n interactionFinders.append(interactionFinderName)\n\n#This module looks at all APA triplets and simulates finding the best interactions\nprocess.interactions = _memoryUseProd.clone( consume = interactionFinders,\n mightGet = generateMightGet(interactionFinders) )\n\n#This simulates the time it takes to do the rest of the processing on the interactions\nprocess.processInteraction = _memoryUseProd.clone( uSleeps = [20000],\n consume = [\"interactions\"],\n mightGet = generateMightGet([\"interactions\"]) )\n\ninteractionsTask = dune.Task( *(getattr(process, i) for i in interactionFinders) )\ntasks.append(interactionsTask)\n\nprocess.p = dune.Path(process.interactions+process.processInteraction, dune.Task(*tasks) )\n\n#######################\n#parameters to change\n\nnThreads = 1\n\nprocess.maxEvents.input = 10*nThreads\nprocess.options.numberOfThreads = nThreads\nprocess.options.numberOfStreams = nThreads\n\n#delete each APA data once it is no longer needed\nprocess.options.canDeleteEarly = deleteEarlyList\n\n###################\n#helpful for debugging\n\n#process.out = dune.EDAnalyzer(\"EventContentAnalyzer\")\n#process.o = dune.EndPath(process.out)\n\n#print(process.dumpPython() )\n \n#process.add_(dune.Service(\"Tracer\"))\n","sub_path":"memoryUse_workflow_cfg.py","file_name":"memoryUse_workflow_cfg.py","file_ext":"py","file_size_in_byte":3251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"210940944","text":"import networkx as nx\nimport numpy as np\nimport os\nimport re\nimport random\nimport torch\nimport torch_geometric as tg\nfrom torch_geometric.data import Data\nfrom graph_sampler import GraphSampler\nimport pickle\nfrom util import precompute_dist_data\n\ndef read_graphfile(datadir, dataname, max_nodes=None):\n ''' Read data from https://ls11-www.cs.tu-dortmund.de/staff/morris/graphkerneldatasets\n graph index starts with 1 in file\n\n Returns:\n List of networkx objects with graph and node labels\n '''\n prefix = os.path.join(datadir, dataname, dataname)\n filename_graph_indic = prefix + '_graph_indicator.txt'\n # index of graphs that a given node belongs to\n graph_indic={}\n with open(filename_graph_indic) as f:\n i=1\n for line in f:\n line=line.strip(\"\\n\")\n graph_indic[i]=int(line)\n i+=1\n\n filename_nodes=prefix + '_node_labels.txt'\n node_labels=[]\n try:\n with open(filename_nodes) as f:\n for line in f:\n line=line.strip(\"\\n\")\n node_labels+=[int(line) - 1]\n num_unique_node_labels = max(node_labels) + 1\n except IOError:\n print('No node labels')\n \n filename_node_attrs=prefix + '_node_attributes.txt'\n node_attrs=[]\n try:\n with open(filename_node_attrs) as f:\n for line in f:\n line = line.strip(\"\\s\\n\")\n attrs = [float(attr) for attr in re.split(\"[,\\s]+\", line) if not attr == '']\n node_attrs.append(np.array(attrs))\n except IOError:\n print('No node attributes')\n \n label_has_zero = False\n filename_graphs=prefix + '_graph_labels.txt'\n graph_labels=[]\n\n # assume that all graph labels appear in the dataset \n #(set of labels don't have to be consecutive)\n label_vals = []\n with 
open(filename_graphs) as f:\n for line in f:\n line=line.strip(\"\\n\")\n val = int(line)\n #if val == 0:\n # label_has_zero = True\n if val not in label_vals:\n label_vals.append(val)\n graph_labels.append(val)\n #graph_labels = np.array(graph_labels)\n label_map_to_int = {val: i for i, val in enumerate(label_vals)}\n graph_labels = np.array([label_map_to_int[l] for l in graph_labels])\n\n \n filename_adj=prefix + '_A.txt'\n adj_list={i:[] for i in range(1,len(graph_labels)+1)} \n index_graph={i:[] for i in range(1,len(graph_labels)+1)}\n num_edges = 0\n with open(filename_adj) as f:\n for line in f:\n line=line.strip(\"\\n\").split(\",\")\n e0,e1=(int(line[0].strip(\" \")),int(line[1].strip(\" \")))\n adj_list[graph_indic[e0]].append((e0,e1))\n index_graph[graph_indic[e0]]+=[e0,e1]\n num_edges += 1\n for k in index_graph.keys():\n index_graph[k]=[u-1 for u in set(index_graph[k])]\n\n graphs=[]\n for i in range(1,1+len(adj_list)):\n # indexed from 1 here\n G=nx.from_edgelist(adj_list[i])\n if max_nodes is not None and G.number_of_nodes() > max_nodes:\n continue\n \n # add features and labels\n G.graph['label'] = graph_labels[i-1]\n for u in G.nodes():\n if len(node_labels) > 0:\n node_label_one_hot = [0] * num_unique_node_labels\n node_label = node_labels[u-1]\n node_label_one_hot[node_label] = 1\n G.node[u]['label'] = node_label_one_hot\n if len(node_attrs) > 0:\n G.node[u]['feat'] = node_attrs[u-1]\n if len(node_attrs) > 0:\n G.graph['feat_dim'] = node_attrs[0].shape[0]\n\n # relabeling\n mapping={}\n it=0\n if float(nx.__version__)<2.0:\n for n in G.nodes():\n mapping[n]=it\n it+=1\n else:\n for n in G.nodes:\n mapping[n]=it\n it+=1\n \n # indexed from 0\n graphs.append(nx.relabel_nodes(G, mapping))\n return graphs\n\n\ndef prepare_data(graphs, args, test_graphs=None, max_nodes=0):\n\n random.shuffle(graphs)\n if test_graphs is None:\n train_idx = int(len(graphs) * args.train_ratio)\n test_idx = int(len(graphs) * (1-args.test_ratio))\n train_graphs = graphs[:train_idx]\n val_graphs = graphs[train_idx: test_idx]\n test_graphs = graphs[test_idx:]\n else:\n train_idx = int(len(graphs) * args.train_ratio)\n train_graphs = graphs[:train_idx]\n val_graphs = graphs[train_idx:]\n print('Num training graphs: ', len(train_graphs),\n '; Num validation graphs: ', len(val_graphs),\n '; Num testing graphs: ', len(test_graphs))\n\n print('Number of graphs: ', len(graphs))\n print('Number of edges: ', sum([G.number_of_edges() for G in graphs]))\n print('Max, avg, std of graph size: ',\n max([G.number_of_nodes() for G in graphs]), ', '\n \"{0:.2f}\".format(np.mean([G.number_of_nodes() for G in graphs])), ', '\n \"{0:.2f}\".format(np.std([G.number_of_nodes() for G in graphs])))\n\n # minibatch\n dataset_sampler = GraphSampler(train_graphs, normalize=False, max_num_nodes=max_nodes,\n features=args.feature_type)\n train_dataset_loader = torch.utils.data.DataLoader(\n dataset_sampler,\n batch_size=args.batch_size,\n shuffle=True,\n num_workers=args.num_workers)\n\n dataset_sampler = GraphSampler(val_graphs, normalize=False, max_num_nodes=max_nodes,\n features=args.feature_type)\n val_dataset_loader = torch.utils.data.DataLoader(\n dataset_sampler,\n batch_size=args.batch_size,\n shuffle=False,\n num_workers=args.num_workers)\n\n dataset_sampler = GraphSampler(test_graphs, normalize=False, max_num_nodes=max_nodes,\n features=args.feature_type)\n test_dataset_loader = torch.utils.data.DataLoader(\n dataset_sampler,\n batch_size=args.batch_size,\n shuffle=False,\n num_workers=args.num_workers)\n\n return 
train_dataset_loader, val_dataset_loader, test_dataset_loader, \\\n            dataset_sampler.max_num_nodes, dataset_sampler.feat_dim, dataset_sampler.assign_feat_dim\n\ndef prepare_val_data(graphs, args, val_idx, max_nodes=0):\n\n    random.shuffle(graphs)\n    val_size = len(graphs) // 10\n    train_graphs = graphs[:val_idx * val_size]\n    if val_idx < 9:\n        train_graphs = train_graphs + graphs[(val_idx+1) * val_size :]\n    val_graphs = graphs[val_idx*val_size: (val_idx+1)*val_size]\n    print('Num training graphs: ', len(train_graphs),\n          '; Num validation graphs: ', len(val_graphs))\n\n    print('Number of graphs: ', len(graphs))\n    print('Number of edges: ', sum([G.number_of_edges() for G in graphs]))\n    print('Max, avg, std of graph size: ',\n            max([G.number_of_nodes() for G in graphs]), ', '\n            \"{0:.2f}\".format(np.mean([G.number_of_nodes() for G in graphs])), ', '\n            \"{0:.2f}\".format(np.std([G.number_of_nodes() for G in graphs])))\n\n    # minibatch\n    dataset_sampler = GraphSampler(train_graphs, normalize=False, max_num_nodes=max_nodes,\n                                   features=args.feature_type)\n    train_dataset_loader = torch.utils.data.DataLoader(\n            dataset_sampler,\n            batch_size=args.batch_size,\n            shuffle=True,\n            num_workers=args.num_workers)\n\n    dataset_sampler = GraphSampler(val_graphs, normalize=False, max_num_nodes=max_nodes,\n                                   features=args.feature_type)\n    val_dataset_loader = torch.utils.data.DataLoader(\n            dataset_sampler,\n            batch_size=args.batch_size,\n            shuffle=False,\n            num_workers=args.num_workers)\n\n    return train_dataset_loader, val_dataset_loader, \\\n            dataset_sampler.max_num_nodes, dataset_sampler.feat_dim, dataset_sampler.assign_feat_dim\n\ndef prepare_val_data_tg(tg_list, args, val_idx, max_nodes=0):\n\n    random.shuffle(tg_list)\n    val_size = len(tg_list) // 10\n    train_tg_list = tg_list[:val_idx * val_size]\n    if val_idx < 9:\n        train_tg_list = train_tg_list + tg_list[(val_idx+1) * val_size :]\n    val_tg_list = tg_list[val_idx*val_size: (val_idx+1)*val_size]\n    print('Num training graphs: ', len(train_tg_list),\n          '; Num validation graphs: ', len(val_tg_list))\n\n    print('Number of graphs: ', len(tg_list))\n    print('Number of edges: ', sum([tg.edge_index.shape[1] for tg in tg_list]))\n    print('Max, avg, std of graph size: ',\n            max([len(tg.nodes_set[0]) for tg in tg_list]), ', '\n            \"{0:.2f}\".format(np.mean([len(tg.nodes_set[0]) for tg in tg_list])), ', '\n            \"{0:.2f}\".format(np.std([len(tg.nodes_set[0]) for tg in tg_list])))\n\n    # minibatch (use the tg splits computed above, not the nx-graph names)\n    dataset_sampler = GraphSampler(train_tg_list, normalize=False, max_num_nodes=max_nodes,\n                                   features=args.feature_type)\n    train_dataset_loader = torch.utils.data.DataLoader(\n            dataset_sampler,\n            batch_size=args.batch_size,\n            shuffle=True,\n            num_workers=args.num_workers)\n\n    dataset_sampler = GraphSampler(val_tg_list, normalize=False, max_num_nodes=max_nodes,\n                                   features=args.feature_type)\n    val_dataset_loader = torch.utils.data.DataLoader(\n            dataset_sampler,\n            batch_size=args.batch_size,\n            shuffle=False,\n            num_workers=args.num_workers)\n\n    return train_dataset_loader, val_dataset_loader, \\\n            dataset_sampler.max_num_nodes, dataset_sampler.feat_dim, dataset_sampler.assign_feat_dim\n\ndef nx_to_tg_data(graphs, features, edge_labels=None):\n    data_list = []\n    for i in range(len(graphs)):\n        feature = features[i]\n        graph = graphs[i].copy()\n        graph.remove_edges_from(graph.selfloop_edges())\n\n        # relabel graphs\n        keys = list(graph.nodes)\n        vals = range(graph.number_of_nodes())\n        mapping = dict(zip(keys, vals))\n        nx.relabel_nodes(graph, mapping, copy=False)\n\n        x = np.zeros(feature.shape)\n        graph_nodes = 
list(graph.nodes)\n        for m in range(feature.shape[0]):\n            x[graph_nodes[m]] = feature[m]\n        x = torch.from_numpy(x).float()\n\n        # get edges\n        edge_index = np.array(list(graph.edges))\n        edge_index = np.concatenate((edge_index, edge_index[:,::-1]), axis=0)\n        edge_index = torch.from_numpy(edge_index).long().permute(1, 0)\n\n        data = Data(x=x, edge_index=edge_index)\n\n        # get edge_labels\n        if edge_labels is not None:\n            edge_label = edge_labels[i]\n            mask_link_positive = np.stack(np.nonzero(edge_label))\n            data.mask_link_positive = mask_link_positive\n        data_list.append(data)\n\n    return data_list\n\n\ndef Graph_load_batch(datadir, dataname, max_nodes=None, node_attributes = True, graph_labels=True):\n    '''\n    load many graphs, e.g. enzymes\n    :return: a list of graphs\n    '''\n    print('Loading graph dataset: '+str(dataname))\n    G = nx.Graph()\n    # load data\n    prefix = os.path.join(datadir, dataname, dataname)\n\n    data_adj = np.loadtxt(prefix+'_A.txt', delimiter=',').astype(int)\n    data_node_label = np.loadtxt(prefix + '_node_labels.txt', delimiter=',').astype(int)\n    if node_attributes:\n        data_node_att = np.loadtxt(prefix+'_node_attributes.txt', delimiter=',')\n    else:\n        data_node_att = data_node_label.reshape(-1, 1)\n    data_graph_indicator = np.loadtxt(prefix+'_graph_indicator.txt', delimiter=',').astype(int)\n    if graph_labels:\n        data_graph_labels = np.loadtxt(prefix+'_graph_labels.txt', delimiter=',').astype(int)\n\n    data_tuple = list(map(tuple, data_adj))\n\n    # add edges\n    G.add_edges_from(data_tuple)\n    # add node attributes\n    for i in range(data_node_label.shape[0]):\n        G.add_node(i+1, feature = data_node_att[i])\n        G.add_node(i+1, label = data_node_label[i])\n    G.remove_nodes_from(list(nx.isolates(G)))\n\n    # split into graphs\n    graph_num = data_graph_indicator.max()\n    node_list = np.arange(data_graph_indicator.shape[0])+1\n    graphs = []\n    features = []\n    edge_labels = []\n    for i in range(graph_num):\n        # find the nodes for each graph\n        nodes = node_list[data_graph_indicator==i+1]\n        G_sub = G.subgraph(nodes)\n        if graph_labels:\n            G_sub.graph['label'] = data_graph_labels[i]\n\n        if max_nodes is not None and G_sub.number_of_nodes() > max_nodes:\n            continue\n        else:\n            graphs.append(G_sub)\n\n        # assign edge labels\n        n = len(nodes)\n        label = np.zeros((n, n), dtype=int)\n        for i, u in enumerate(G_sub.nodes()):\n            for j, v in enumerate(G_sub.nodes()):\n                if data_node_label[u - 1] == data_node_label[v - 1] and u > v:\n                    label[i, j] = 1\n\n        edge_labels.append(label)\n\n        # assign node features\n        idx = [node - 1 for node in nodes]\n        feature = data_node_att[idx, :]\n        features.append(feature)\n\n    print('Loaded')\n    return graphs, features, edge_labels\n\n\ndef load_tg_dataset(datadir, dataname, max_nodes=None, node_attributes = True, graph_labels=True):\n    graphs, features, edge_labels = Graph_load_batch(\n        datadir, dataname, max_nodes, node_attributes, graph_labels)\n\n    return nx_to_tg_data(graphs, features, edge_labels)\n\n\ndef get_tg_dataset(args, node_attributes=True):\n    # \"Cora\", \"CiteSeer\" and \"PubMed\"\n    if args.bmname in ['Cora', 'CiteSeer', 'PubMed']:\n        dataset = tg.datasets.Planetoid(root='datasets/' + args.bmname, name=args.bmname)\n    else:\n        dataset = load_tg_dataset(args.datadir, args.bmname, args.max_nodes, node_attributes=node_attributes)\n\n    # precompute shortest path\n    if not os.path.isdir('datasets/cache'):\n        # makedirs also creates 'datasets' if it does not exist yet\n        os.makedirs('datasets/cache')\n    f1_name = 'datasets/cache/' + args.bmname + str(args.approximate) + '_dists.dat'\n\n    if args.cache and (os.path.isfile(f1_name) and args.task!='link'):\n        
with open(f1_name, 'rb') as f1:\n dists_list = pickle.load(f1)\n\n print('Cache loaded!')\n data_list = []\n for i, data in enumerate(dataset):\n data.dists = torch.from_numpy(dists_list[i]).float()\n if args.rm_feature:\n data.x = torch.ones((data.x.shape[0],1))\n data_list.append(data)\n else:\n data_list = []\n dists_list = []\n for i, data in enumerate(dataset):\n dists = precompute_dist_data(data.edge_index.numpy(), data.num_nodes, approximate=args.approximate)\n dists_list.append(dists)\n data.dists = torch.from_numpy(dists).float()\n if args.rm_feature:\n data.x = torch.ones((data.x.shape[0],1))\n data_list.append(data)\n\n with open(f1_name, 'wb') as f1:\n pickle.dump(dists_list, f1)\n print('Cache saved!')\n\n return data_list\n","sub_path":"load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":14897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"82709147","text":"import pytest\nfrom flask import url_for\nfrom sqlalchemy import and_\nfrom sqlalchemy.sql.expression import exists\n\nfrom train_schedule.db.models import schedule_table\n\n\ndef check_for_existence_in_db(app, from_, to, start, arrive):\n with app.db.connect() as conn:\n with conn.begin():\n s = schedule_table.select().where(\n and_(\n schedule_table.c[\"from\"] == from_,\n schedule_table.c.to == to,\n schedule_table.c.start == start,\n schedule_table.c.arrive == arrive,\n )\n )\n exist = conn.execute(exists(s).select()).scalar()\n return exist\n\n\n@pytest.mark.parametrize(\n (\"from_\", \"to\", \"start\", \"arrive\", \"status\"),\n [\n (\"Moscow\", \"Prague\", \"2077-04-01\", \"2077-04-05\", 201),\n (\"Example\", \"Example\", \"2077-04-11\", \"2077-04-15\", 201),\n (\"Moscow\", \"Prague\", \"2077-04-01\", None, 400),\n (\"Moscow\", \"Prague\", None, \"2077-04-05\", 400),\n (\"Moscow\", None, \"2077-04-01\", \"2077-04-05\", 400),\n (None, \"Prague\", \"2077-04-01\", \"2077-04-05\", 400),\n (None, None, \"2077-04-01\", \"2077-04-05\", 400),\n (\"Moscow\", \"Prague\", None, None, 400),\n (\"Moscow\", \"Prague\", \"2000-04-01\", \"2000-04-05\", 400),\n (\"Moscow\", \"Prague\", \"2077-04-01\", \"2077-03-05\", 400),\n (\"\", \"Prague\", \"2077-04-01\", \"2077-04-05\", 400),\n (\"Moscow\", \"\", \"2077-04-01\", \"2077-04-05\", 400),\n (\"Moscow\", \"Prague\", \"\", \"2077-04-05\", 400),\n (\"Moscow\", \"Prague\", \"2077-04-01\", \"\", 400),\n (\"Moscow\", \"Prague\", \"2077\", \"2077-04-05\", 400),\n (\"Moscow\", \"Prague\", \"example\", \"2077-04-05\", 400),\n (\"Moscow\", \"Prague\", \"2077-04-01\", \"04-05\", 400),\n (\"Moscow\", \"Prague\", \"2077-04-01\", \"one\", 400),\n ],\n)\ndef test_create_schedule(client, app, from_, to, start, arrive, status):\n data = {\n \"from\": from_,\n \"to\": to,\n \"start\": start,\n \"arrive\": arrive,\n }\n response = client.post(url_for(\"schedule.create_train_schedule\"), json=data)\n assert response.status_code == status\n\n if response.status_code == 201:\n assert check_for_existence_in_db(app, from_, to, start, arrive) is True\n\n\n@pytest.mark.parametrize(\n (\"data\", \"status\"),\n [\n (\n {\n \"from\": \"Moscow\",\n \"to\": \"Adler\",\n \"start\": \"2077-10-10\",\n \"arrive\": \"2077-12-10\",\n \"example\": \"example\",\n },\n 400,\n ),\n (\n {\n \"from\": \"Moscow\",\n \"to\": \"Adler\",\n \"start\": \"2077-10-10\",\n \"error\": \"2077-12-10\",\n },\n 400,\n ),\n ({}, 400),\n ({\"from\": \"2020-10-15\", }, 400,),\n ({\"to\": \"2020-10-15\", }, 400,),\n ({\"to\": \"2020-10-15\", }, 400,),\n 
({\"start\": \"Moscow\", }, 400,),\n ({\"arrive\": \"Moscow\", }, 400,),\n ],\n)\ndef test_create_schedule_extra_field(client, app, data, status):\n first_response = client.post(url_for(\"schedule.create_train_schedule\"), json=data)\n assert first_response.status_code == status\n","sub_path":"tests/api/test_schedule_post.py","file_name":"test_schedule_post.py","file_ext":"py","file_size_in_byte":3179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"490422222","text":"from django.conf.urls import url\nfrom django.views.decorators.csrf import csrf_exempt\nfrom . import views\n\napp_name = \"jamf\"\nurlpatterns = [\n # setup > jamf instances\n url(r'instances/$', views.JamfInstancesView.as_view(), name=\"jamf_instances\"),\n url(r'instances/create/$', views.CreateJamfInstanceView.as_view(), name=\"create_jamf_instance\"),\n url(r'instances/(?P\\d+)/$', views.JamfInstanceView.as_view(), name=\"jamf_instance\"),\n url(r'instances/(?P\\d+)/setup/$', views.SetupJamfInstanceView.as_view(), name=\"setup_jamf_instance\"),\n url(r'instances/(?P\\d+)/update/$', views.UpdateJamfInstanceView.as_view(), name=\"update_jamf_instance\"),\n url(r'instances/(?P\\d+)/delete/$', views.DeleteJamfInstanceView.as_view(), name=\"delete_jamf_instance\"),\n url(r'instances/(?P\\d+)/tag_configs/create/$',\n views.CreateTagConfigView.as_view(),\n name=\"create_tag_config\"),\n url(r'instances/(?P\\d+)/tag_configs/(?P\\d+)/update/$',\n views.UpdateTagConfigView.as_view(),\n name=\"update_tag_config\"),\n url(r'instances/(?P\\d+)/tag_configs/(?P\\d+)/delete/$',\n views.DeleteTagConfigView.as_view(),\n name=\"delete_tag_config\"),\n # API\n url(r'^post_event/(?P\\S+)/$', csrf_exempt(views.PostEventView.as_view()), name='post_event'),\n]\n\n\nsetup_menu_cfg = {\n 'items': (\n ('jamf_instances', 'jamf instances', False, ('jamf.view_jamfinstance',)),\n )\n}\n","sub_path":"zentral/contrib/jamf/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"549386338","text":"# -*- coding: utf-8 -*-\n# Copyright (c) 2014, 2015 Metaswitch Networks\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"\nfelix.config\n~~~~~~~~~~~~\n\nConfiguration management for Felix.\n\nOn instantiation, this module automatically parses the configuration file and\nbuilds a singleton configuration object. 
That object may (once) be changed by\netcd configuration being reported back to it.\n\"\"\"\nimport os\n\nimport ConfigParser\nimport logging\nimport socket\n\nfrom calico import common\n\n# Logger\nlog = logging.getLogger(__name__)\n\n# Convert log level names into python log levels.\nLOGLEVELS = {\"none\": None,\n \"debug\": logging.DEBUG,\n \"info\": logging.INFO,\n \"warn\": logging.WARNING,\n \"warning\": logging.WARNING,\n \"err\": logging.ERROR,\n \"error\": logging.ERROR,\n \"crit\": logging.CRITICAL,\n \"critical\": logging.CRITICAL}\n\n# Sources of a configuration parameter. The order is highest-priority first.\nDEFAULT = \"Default\"\nENV = \"Environment variable\"\nFILE = \"Configuration file\"\nGLOBAL_ETCD = \"Global etcd configuration\"\nLOCAL_ETCD = \"Host specific etcd configuration\"\nDEFAULT_SOURCES = [ ENV, FILE, GLOBAL_ETCD, LOCAL_ETCD ]\n\n\nclass ConfigException(Exception):\n def __init__(self, message, parameter):\n super(ConfigException, self).__init__(message)\n self.message = message\n self.parameter = parameter\n\n def __str__(self):\n return \"%s (value %r for %s (%s), read from %r)\" \\\n % (self.message,\n self.parameter.value,\n self.parameter.name,\n self.parameter.description,\n self.parameter.active_source)\n\n\nclass ConfigParameter(object):\n \"\"\"\n A configuration parameter. This contains the following information.\n - The name of the field.\n - Where the location can validly be read from\n - The current value\n - Where the value was read from\n \"\"\"\n def __init__(self, name, description, default,\n sources=DEFAULT_SOURCES, value_is_int=False,\n value_is_bool=False):\n \"\"\"\n Create a configuration parameter.\n :param str description: Description for logging\n :param list sources: List of valid sources to try\n :param str default: Default value\n :param bool value_is_int: Integer value?\n \"\"\"\n self.description = description\n self.name = name\n self.sources = sources\n self.value = default\n self.active_source = None\n self.value_is_int = value_is_int\n self.value_is_bool = value_is_bool\n\n def set(self, value, source):\n \"\"\"\n Set a value of a parameter - unless already set.\n :param value: value\n :param source: source; for example \"Configuration file /etc/felix.cfg\"\n \"\"\"\n if self.active_source is None:\n log.debug(\"Read value %r for %s (%s) from %r\",\n value,\n self.name,\n self.description,\n source)\n\n self.active_source = source\n\n if self.value_is_int:\n # Set value before the call to int, so the ConfigException has\n # the right value if / when it goes wrong.\n self.value = value\n try:\n self.value = int(value)\n except ValueError:\n raise ConfigException(\"Field was not integer\",\n self)\n elif self.value_is_bool:\n lower_val = str(value).lower()\n log.debug(\"Parsing %r as a Boolean.\", lower_val)\n if lower_val in (\"true\", \"1\", \"yes\", \"y\", \"t\"):\n self.value = True\n elif lower_val in (\"false\", \"0\", \"no\", \"n\", \"f\"):\n self.value = False\n else:\n raise ConfigException(\"Field was not a valid Boolean\",\n self)\n else:\n # Calling str in principle can throw an exception, but it's\n # hard to see how in practice, so don't catch and wrap.\n self.value = str(value)\n else:\n log.warning(\"Ignore %r value for %s (%s) - already set from %r\",\n source,\n self.name,\n self.description,\n self.active_source)\n\n\nclass Config(object):\n def __init__(self, config_path):\n \"\"\"\n Create a config. 
This reads data from the following sources.\n - Environment variables\n - Configuration file (/etc/calico/felix.cfg)\n - per-host etcd (/calico/vX/config)\n - global etcd (/calico/vX/host//config)\n\n After object creation, the environment variables and config file have\n been read, and the variables ETCD_ADDR and HOSTNAME have been set and\n validated. The caller is then responsible for reading the remaining\n config from etcd and calling report_etcd_config with the returned\n values before the rest of the config structure can be used.\n\n :raises EtcdException\n \"\"\"\n self.parameters = {}\n\n self.add_parameter(\"EtcdAddr\", \"Address and port for etcd\",\n \"localhost:4001\", sources=[ENV, FILE])\n self.add_parameter(\"FelixHostname\", \"Felix compute host hostname\",\n socket.gethostname(), sources=[ENV, FILE])\n self.add_parameter(\"EtcdScheme\", \"Protocol type for http or https\",\n \"http\", sources=[ENV, FILE])\n self.add_parameter(\"EtcdKeyFile\", \"Path to etcd key file\",\n \"none\", sources=[ENV, FILE])\n self.add_parameter(\"EtcdCertFile\", \"Path to etcd certificate file\",\n \"none\", sources=[ENV, FILE])\n self.add_parameter(\"EtcdCaFile\", \"Path to etcd CA certificate file\",\n \"none\", sources=[ENV, FILE])\n\n self.add_parameter(\"StartupCleanupDelay\",\n \"Delay before cleanup starts\",\n 30, value_is_int=True)\n self.add_parameter(\"PeriodicResyncInterval\",\n \"How often to do cleanups, seconds\",\n 60 * 60, value_is_int=True)\n self.add_parameter(\"IptablesRefreshInterval\",\n \"How often to refresh iptables state, in seconds\",\n 60, value_is_int=True)\n self.add_parameter(\"MetadataAddr\", \"Metadata IP address or hostname\",\n \"127.0.0.1\")\n self.add_parameter(\"MetadataPort\", \"Metadata Port\",\n 8775, value_is_int=True)\n self.add_parameter(\"InterfacePrefix\", \"Interface name prefix\", None)\n self.add_parameter(\"DefaultEndpointToHostAction\",\n \"Action to take for packets that arrive from\"\n \"an endpoint to the host.\", \"DROP\")\n self.add_parameter(\"LogFilePath\",\n \"Path to log file\", \"/var/log/calico/felix.log\")\n self.add_parameter(\"EtcdDriverLogFilePath\",\n \"Path to log file for etcd driver\",\n \"/var/log/calico/felix-etcd.log\")\n self.add_parameter(\"LogSeverityFile\",\n \"Log severity for logging to file\", \"INFO\")\n self.add_parameter(\"LogSeveritySys\",\n \"Log severity for logging to syslog\", \"ERROR\")\n self.add_parameter(\"LogSeverityScreen\",\n \"Log severity for logging to screen\", \"ERROR\")\n self.add_parameter(\"IpInIpEnabled\",\n \"IP-in-IP device support enabled\", False,\n value_is_bool=True)\n self.add_parameter(\"IpInIpMtu\",\n \"MTU to set on the IP-in-IP device\", 1440,\n value_is_int=True)\n self.add_parameter(\"ReportingIntervalSecs\",\n \"Status reporting interval in seconds\",\n 30, value_is_int=True)\n self.add_parameter(\"ReportingTTLSecs\",\n \"Status report time to live in seconds\",\n 90, value_is_int=True)\n self.add_parameter(\"EndpointReportingEnabled\",\n \"Whether Felix should report per-endpoint status \"\n \"into etcd\",\n False, value_is_bool=True)\n self.add_parameter(\"EndpointReportingDelaySecs\",\n \"Minimum delay between per-endpoint status reports\",\n 1, value_is_int=True)\n self.add_parameter(\"MaxIpsetSize\",\n \"Maximum size of the ipsets that Felix uses to \"\n \"represent profile tag memberships. 
Should be set \"\n \"to a value larger than the expected number of \"\n \"IP addresses using a single tag.\",\n 2**20, value_is_int=True)\n\n # Read the environment variables, then the configuration file.\n self._read_env_vars()\n self._read_cfg_file(config_path)\n self._finish_update(final=False)\n\n def add_parameter(self, name, description, default, **kwargs):\n \"\"\"\n Put a parameter in the parameter dictionary.\n \"\"\"\n self.parameters[name] = ConfigParameter(\n name, description, default, **kwargs)\n\n def _finish_update(self, final=False):\n \"\"\"\n Config has been completely read. Called twice - once after reading from\n environment and config file (so we should be able to access etcd), and\n once after reading from etcd (so we have all the config ready to go).\n\n Responsible for :\n - storing the parameters in the relevant fields in the structure\n - validating the configuration is valid (for this stage in the process)\n - updating logging parameters\n\n Note that we complete the logging even before etcd configuration\n changes are read. Hence, for example, if logging to file is turned on\n after reading environment variables and config file, then the log file\n is created and logging to it starts - even if later on etcd\n configuration turns the file off. That's because we must log if etcd\n configuration load fails, and not having the log file early enough is\n worse.\n\n :param final: Have we completed (rather than just read env and config file)\n \"\"\"\n self.ETCD_ADDR = self.parameters[\"EtcdAddr\"].value\n self.HOSTNAME = self.parameters[\"FelixHostname\"].value\n self.ETCD_SCHEME = self.parameters[\"EtcdScheme\"].value\n self.ETCD_KEY_FILE = self.parameters[\"EtcdKeyFile\"].value\n self.ETCD_CERT_FILE = self.parameters[\"EtcdCertFile\"].value\n self.ETCD_CA_FILE = self.parameters[\"EtcdCaFile\"].value\n self.STARTUP_CLEANUP_DELAY = self.parameters[\"StartupCleanupDelay\"].value\n self.RESYNC_INTERVAL = self.parameters[\"PeriodicResyncInterval\"].value\n self.REFRESH_INTERVAL = self.parameters[\"IptablesRefreshInterval\"].value\n self.METADATA_IP = self.parameters[\"MetadataAddr\"].value\n self.METADATA_PORT = self.parameters[\"MetadataPort\"].value\n self.IFACE_PREFIX = self.parameters[\"InterfacePrefix\"].value\n self.DEFAULT_INPUT_CHAIN_ACTION = \\\n self.parameters[\"DefaultEndpointToHostAction\"].value\n self.LOGFILE = self.parameters[\"LogFilePath\"].value\n self.DRIVERLOGFILE = self.parameters[\"EtcdDriverLogFilePath\"].value\n self.LOGLEVFILE = self.parameters[\"LogSeverityFile\"].value\n self.LOGLEVSYS = self.parameters[\"LogSeveritySys\"].value\n self.LOGLEVSCR = self.parameters[\"LogSeverityScreen\"].value\n self.IP_IN_IP_ENABLED = self.parameters[\"IpInIpEnabled\"].value\n self.IP_IN_IP_MTU = self.parameters[\"IpInIpMtu\"].value\n self.REPORTING_INTERVAL_SECS = self.parameters[\"ReportingIntervalSecs\"].value\n self.REPORTING_TTL_SECS = self.parameters[\"ReportingTTLSecs\"].value\n self.REPORT_ENDPOINT_STATUS = \\\n self.parameters[\"EndpointReportingEnabled\"].value\n self.ENDPOINT_REPORT_DELAY = \\\n self.parameters[\"EndpointReportingDelaySecs\"].value\n self.MAX_IPSET_SIZE = self.parameters[\"MaxIpsetSize\"].value\n\n self._validate_cfg(final=final)\n\n # Update logging.\n common.complete_logging(self.LOGFILE,\n self.LOGLEVFILE,\n self.LOGLEVSYS,\n self.LOGLEVSCR,\n gevent_in_use=True)\n\n if final:\n # Log configuration - the whole lot of it.\n for name, parameter in self.parameters.iteritems():\n log.info(\"Parameter %s (%s) has value %r read from 
%s\",\n name,\n parameter.description,\n parameter.value,\n parameter.active_source)\n\n def _read_env_vars(self):\n \"\"\"\n Read all of the variables from the environment.\n \"\"\"\n for name, parameter in self.parameters.iteritems():\n # All currently defined config parameters have ENV as a valid\n # source.\n assert(ENV in parameter.sources)\n # ENV is the first source, so we can assert that using defaults.\n assert(parameter.active_source is None)\n\n env_var = (\"FELIX_%s\" % name).upper()\n if env_var in os.environ:\n parameter.set(os.environ[env_var],\n \"Environment variable %s\" % env_var)\n\n def _read_cfg_file(self, config_file):\n parser = ConfigParser.ConfigParser()\n parser.read(config_file)\n cfg_dict = {}\n\n # Build up the cfg dictionary from the file.\n for section in parser.sections():\n cfg_dict.update(dict(parser.items(section)))\n\n source = \"Configuration file %s\" % config_file\n\n for name, parameter in self.parameters.iteritems():\n # Config parameters are lower-cased by ConfigParser\n name = name.lower()\n if FILE in parameter.sources and name in cfg_dict:\n # This can validly be read from file.\n parameter.set(cfg_dict.pop(name), source)\n self._warn_unused_cfg(cfg_dict, source)\n\n def report_etcd_config(self, host_dict, global_dict):\n \"\"\"\n Report configuration parameters read from etcd to the config\n component. This must be called only once, after configuration is\n initially read and before the config structure is used (except for\n ETCD_ADDR and HOSTNAME).\n\n :param host_dict: Dictionary of etcd parameters\n :param global_dict: Dictionary of global parameters\n :raises ConfigException\n \"\"\"\n log.debug(\"Configuration reported from etcd\")\n for source, cfg_dict in ((LOCAL_ETCD, host_dict),\n (GLOBAL_ETCD, global_dict)):\n for name, parameter in self.parameters.iteritems():\n if source in parameter.sources and name in cfg_dict:\n parameter.set(cfg_dict.pop(name), source)\n\n self._warn_unused_cfg(cfg_dict, source)\n\n self._finish_update(final=True)\n\n def _validate_cfg(self, final=True):\n \"\"\"\n Firewall that the config is not invalid. Called twice, once when\n environment variables and config file have been read, and once\n after those plus the etcd configuration have been read.\n :param final: Is this after final etcd config has been read?\n :raises ConfigException\n \"\"\"\n fields = self.ETCD_ADDR.split(\":\")\n if len(fields) != 2:\n raise ConfigException(\"Invalid format for field - must be \"\n \"hostname:port\", self.parameters[\"EtcdAddr\"])\n self._validate_addr(\"EtcdAddr\", fields[0])\n\n try:\n int(fields[1])\n except ValueError:\n raise ConfigException(\"Invalid port in field\",\n self.parameters[\"EtcdAddr\"])\n\n # Set default or python None value for each etcd \"none\" config value\n if self.ETCD_SCHEME.lower() == \"none\":\n self.ETCD_SCHEME = \"http\"\n if self.ETCD_KEY_FILE.lower() == \"none\":\n self.ETCD_KEY_FILE = None\n if self.ETCD_CERT_FILE.lower() == \"none\":\n self.ETCD_CERT_FILE = None\n if self.ETCD_CA_FILE == \"none\":\n self.ETCD_CA_FILE = None\n\n if self.ETCD_SCHEME == \"https\":\n # key and certificate must be both specified or both not specified\n if bool(self.ETCD_KEY_FILE) != bool(self.ETCD_CERT_FILE):\n if not self.ETCD_KEY_FILE:\n raise ConfigException(\"Missing etcd key file. Key and \"\n \"certificate must both be specified \"\n \"or both be blank.\",\n self.parameters[\"EtcdKeyFile\"])\n else:\n raise ConfigException(\"Missing etcd certificate. 
Key and \"\n \"certificate must both be specified \"\n \"or both be blank.\",\n self.parameters[\"EtcdCertFile\"])\n\n # Make sure etcd key and certificate are readable\n if self.ETCD_KEY_FILE and self.ETCD_CERT_FILE:\n if not (os.path.isfile(self.ETCD_KEY_FILE) and\n os.access(self.ETCD_KEY_FILE, os.R_OK)):\n raise ConfigException(\"Cannot read key file. Key file \"\n \"must be a readable path.\",\n self.parameters[\"EtcdKeyFile\"])\n if not (os.path.isfile(self.ETCD_CERT_FILE) and\n os.access(self.ETCD_CERT_FILE, os.R_OK)):\n raise ConfigException(\"Cannot read cert file. Cert file \"\n \"must be a readable path.\",\n self.parameters[\"EtcdCertFile\"])\n\n # If Certificate Authority cert provided, check it's readable\n if (self.ETCD_CA_FILE and\n not (os.path.isfile(self.ETCD_CA_FILE) and\n os.access(self.ETCD_CA_FILE, os.R_OK))):\n raise ConfigException(\"Missing CA certificate or file is \"\n \"unreadable. Value must be readable \"\n \"file path.\",\n self.parameters[\"EtcdCaFile\"])\n elif self.ETCD_SCHEME != \"http\":\n raise ConfigException(\"Invalid protocol scheme. Value must be one \"\n \"of: \\\"\\\", \\\"http\\\", \\\"https\\\".\",\n self.parameters[\"EtcdScheme\"])\n\n try:\n self.LOGLEVFILE = LOGLEVELS[self.LOGLEVFILE.lower()]\n except KeyError:\n raise ConfigException(\"Invalid log level\",\n self.parameters[\"LogSeverityFile\"])\n\n try:\n self.LOGLEVSYS = LOGLEVELS[self.LOGLEVSYS.lower()]\n except KeyError:\n raise ConfigException(\"Invalid log level\",\n self.parameters[\"LogSeveritySys\"])\n\n try:\n self.LOGLEVSCR = LOGLEVELS[self.LOGLEVSCR.lower()]\n except KeyError:\n raise ConfigException(\"Invalid log level\",\n self.parameters[\"LogSeverityScreen\"])\n\n # Log files may be \"None\" (the literal string, case insensitive). In\n # this case no log file should be written.\n if self.LOGFILE.lower() == \"none\":\n self.LOGFILE = None\n if self.DRIVERLOGFILE.lower() == \"none\":\n self.DRIVERLOGFILE = None\n\n if self.METADATA_IP.lower() == \"none\":\n # Metadata is not required.\n self.METADATA_IP = None\n self.METADATA_PORT = None\n else:\n # Metadata must be supplied as IP or address, but we store as IP\n self.METADATA_IP = self._validate_addr(\"MetadataAddr\",\n self.METADATA_IP)\n\n if not common.validate_port(self.METADATA_PORT):\n raise ConfigException(\"Invalid field value\",\n self.parameters[\"MetadataPort\"])\n\n if self.DEFAULT_INPUT_CHAIN_ACTION not in (\"DROP\", \"RETURN\", \"ACCEPT\"):\n raise ConfigException(\n \"Invalid field value\",\n self.parameters[\"DefaultEndpointToHostAction\"]\n )\n\n # For non-positive time values of reporting interval we set both\n # interval and ttl to 0 - i.e. 
status reporting is disabled.\n if self.REPORTING_INTERVAL_SECS <= 0:\n log.warning(\"Reporting disabled.\")\n self.REPORTING_TTL_SECS = 0\n self.REPORTING_INTERVAL_SECS = 0\n\n # Ensure the TTL is longer than the reporting interval, defaulting\n # it if not.\n if (self.REPORTING_TTL_SECS <= self.REPORTING_INTERVAL_SECS or\n self.REPORTING_TTL_SECS == 0):\n log.warning(\"Reporting TTL set to %s.\", self.REPORTING_TTL_SECS)\n self.REPORTING_TTL_SECS = self.REPORTING_INTERVAL_SECS * 5/2\n\n if self.ENDPOINT_REPORT_DELAY < 0:\n log.warning(\"Endpoint status delay is negative, defaulting to 1.\")\n self.ENDPOINT_REPORT_DELAY = 1\n\n if self.MAX_IPSET_SIZE <= 0:\n log.warning(\"Max ipset size is non-positive, defaulting to 2^20.\")\n self.MAX_IPSET_SIZE = 2**20\n\n if not final:\n # Do not check that unset parameters are defaulted; we have more\n # config to read.\n return\n\n for parameter in self.parameters.itervalues():\n if parameter.value is None:\n # No value, not even a default\n raise ConfigException(\"Missing undefaulted value\",\n parameter)\n\n def _warn_unused_cfg(self, cfg_dict, source):\n # Warn about any unexpected items - i.e. ones we have not used.\n for lKey in cfg_dict:\n log.warning(\"Got unexpected config item %s=%s\",\n lKey, cfg_dict[lKey])\n\n def _validate_addr(self, name, addr):\n \"\"\"\n Validate an address, returning the IP address it resolves to. If the\n address cannot be resolved then an exception is returned.\n\n Parameters :\n - name of the field, for use in logging\n - address to resolve\n \"\"\"\n try:\n stripped_addr = addr.strip()\n if not stripped_addr:\n raise ConfigException(\"Blank value\",\n self.parameters[name])\n\n return socket.gethostbyname(addr)\n except socket.gaierror:\n raise ConfigException(\"Invalid or unresolvable value\",\n self.parameters[name])\n","sub_path":"calico/felix/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":23990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"390911456","text":"#!/usr/bin/env python\n\nimport os\nimport os.path as osp\nimport shutil\n\n# import cv2\nimport numpy as np\nimport skimage.io\nimport yaml\n\nimport cv_bridge\nfrom geometry_msgs.msg import TransformStamped\nimport genpy.message\nimport rospy\nfrom sensor_msgs.msg import CameraInfo\nfrom sensor_msgs.msg import Image\nfrom std_msgs.msg import Header\nimport tf\n\n\nclass DatasetCollectedOnShelfMultiView(object):\n\n def __init__(self):\n self.ids = []\n self.root = '/data/projects/arc2017/datasets/JSKV3'\n for id_ in sorted(os.listdir(self.root)):\n self.ids.append(id_)\n\n def __len__(self):\n return len(self.ids)\n\n def __getitem__(self, i):\n id_ = self.ids[i]\n data_dir = osp.join(self.root, id_)\n\n frame_idx = int(\n open(osp.join(data_dir, 'view_frame.txt')).read().strip())\n img = skimage.io.imread(osp.join(data_dir, 'image.jpg'))\n depth = np.load(osp.join(data_dir, 'depth.npz'))['arr_0']\n camera_info = yaml.load(\n open(osp.join(data_dir,\n 'camera_info_right_hand_camera_left.yaml')))\n tf_camera_from_base = yaml.load(\n open(osp.join(data_dir, 'tf_camera_rgb_from_base.yaml')))\n\n return frame_idx, img, depth, camera_info, tf_camera_from_base\n\n\ndef main():\n dataset = DatasetCollectedOnShelfMultiView()\n\n out_dir = '/data/projects/arc2017/datasets/JSKV3_scenes'\n if not osp.exists(out_dir):\n os.makedirs(out_dir)\n\n scene_idx = 0\n for i in xrange(len(dataset)):\n frame_idx, img, depth = dataset[i][:3]\n if frame_idx == 1:\n scene_idx += 1\n\n scene_dir 
= osp.join(out_dir, 'scene-%04d' % scene_idx)\n if not osp.exists(scene_dir):\n os.makedirs(scene_dir)\n\n frame_dir = osp.join(dataset.root, dataset.ids[i])\n shutil.copytree(frame_dir, osp.join(scene_dir, dataset.ids[i]))\n print('%s -> %s' % (frame_dir, osp.join(scene_dir, dataset.ids[i])))\n\n # cv2.imshow('create_scenes_dataset_v3', img[:, :, ::-1])\n # k = cv2.waitKey(0)\n # if k == ord('q'):\n # break\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"jsk_arc2017_common/scripts/create_scenes_dataset_v3.py","file_name":"create_scenes_dataset_v3.py","file_ext":"py","file_size_in_byte":2163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"462453436","text":"from datetime import datetime\nimport time, re, requests\nfrom bs4 import BeautifulSoup\nimport mechanize\n\nurl = 'http://www.pepperfry.com/casacraft-ben-floor-lamp-maroon-jute-shade-1375807.html'\ndriver = mechanize.Browser()\ndriver = mechanize.Browser()\ndriver.open(url)\nhtml = driver.response().read()\nsoup = BeautifulSoup(html)\n\ntry:\n attributes = soup.findAll(attrs = {'class':'vip-dtl-desc'})\nexcept:\n attributes = ''\n \ndef getData(element):\n for attribute in attributes:\n if attribute.find('b').getText().strip() == element:\n return attribute.find('span').getText()\n \ntry:\n PID = getData('Sku:')\nexcept:\n PID = ''\nURL_raw = url\ntry:\n Title = soup.find(attrs = {'class':'vip-product-title'}).getText().strip()\nexcept:\n Title = ''\ntry:\n Brand = getData('Brand:')\nexcept:\n Brand = ''\ntry:\n Seller = soup.find(attrs = {'class':'more-from-brand'}).getText().replace('More From ','').strip()\nexcept:\n Seller = ''\ntry:\n IMG_medium = soup.find(attrs = {'id':'vipImage'}).find('img')['src']\nexcept:\n IMG_medium = ''\ntry:\n IMG_large = soup.find(attrs = {'id':'bigImageContainer'})['src']\nexcept:\n IMG_large = ''\ntry:\n Price_mrp = soup.find(attrs = {'class':'vip-prices'}).findAll('li')[0].getText().strip().replace('\"','').replace('Retail Price: Rs.','').replace(',','')\n Price_mrp = re.compile(r'(\\d+)').search(Price_mrp).group(0)\nexcept:\n Price_mrp = ''\ntry:\n Price_selling = re.findall(r'\\d+', soup.find(attrs = {'class':'vip-prices'}).findAll('li')[1].getText().strip().replace(',','').replace('Offer Price:','').replace('Rs.','').replace(',',''))[0]\nexcept:\n Price_selling = Price_mrp\n\nif Price_mrp == '':\n Price_mrp = Price_selling\n\ntry:\n pincode='110001'\n prc_code=soup.find(attrs = {'id':'cod_prc_code'})['value']#'4122' \n sku= PID #'LL1375807-P-WH11390'\n supplier=soup.find(attrs = {'id':'cod_supplier_id'})['value'] #'1' #\n cod_exist=soup.find(attrs = {'id':'cod_open'})['value']#'0'#\n int_ship=soup.find(attrs = {'id':'int_ship'})['value']#'0'#\n brand_id=soup.find(attrs = {'id':'brand_id'})['value']#'3155'#\n assembly_check=soup.find(attrs = {'id':'assembly_check'})['value']#'0'#\n product_id=soup.find(attrs = {'id':'product_id'})['value']#'1375807'#\n is_customized='0'\n customized_id='0'\n ccid=re.findall('var\\sccid\\s=\\s\\\"(.*?)\\\";', html)[0]#'2757' #\n uu = 'https://www.pepperfry.com/pincode/is_product_serviceable'\n data = {'pincode':pincode,'prc_code':prc_code,'sku':sku, 'supplier':supplier ,'cod_exist':cod_exist,\\\n 'int_ship':int_ship, 'brand_id':brand_id, 'assembly_check':assembly_check, 'product_id':product_id,\\\n 'is_customized':is_customized,'customized_id':customized_id ,'ccid':ccid}\n resp = requests.post(uu, data = data)\n data = dict(eval(resp.content.replace('true', 'True').replace('false','False')))\n\n try:\n 
COD = data['cod']\n except:\n COD = ''\n\n try:\n Delivery = data['tentative_delivery_date']\n except:\n Delivery = ''\n \nexcept:\n pass\ntry:\n Price_shipping = soup.find(attrs = {'class':'tdcolor1'}).getText()\nexcept:\n Price_shipping = ''\ntry:\n soup.find(attrs = {'id':'emi_strip'})\n EMI = 'Available'\nexcept:\n EMI = ''\ntry:\n breadcrums = soup.find(attrs = {'class':'breadcrumb container'}).find(attrs = {'class':'cat_tree'}).findAll('span')\n b = ''\n for bread in breadcrums:\n try:\n b = b + '|' + bread.find('span').getText().strip()\n except:\n pass\n Category_path = b[1:].strip()\nexcept:\n Category_path = ''\ntry:\n Description = soup.find(attrs = {'class':'vip-dtl-para'}).getText().strip()\nexcept:\n Description = ''\n\ntry:\n Offers = soup.find(attrs = {'class':'vip-offer-text'}).getText()\nexcept:\n try:\n Offers = soup.find(attrs = {'class':'vip-cpn-box'}).getText()\n except:\n Offers = ''\ntry:\n Average_rating = soup.find(attrs = {'class':'rating-text'}).getText()\nexcept:\n Average_rating = ''\nReviews = ''\ntry:\n if 'THIS ITEM IS SOLD OUT!' in str(soup):\n Status = 'OUT OF STOCK'\n else:\n Status = 'IN STOCK'\nexcept:\n Status = 'IN STOCK'\n\nCondition = 'NEW'\nTimeStamp = str(datetime.now())\n\nprint [PID, URL_raw, Title, Brand,Seller, IMG_medium, IMG_large, Price_mrp, Price_selling, Price_shipping, Delivery,\\\n COD,EMI, Category_path,Description,Offers,Average_rating,Reviews,Status,Condition,TimeStamp]\n\n'''\npincode:110001\nprc_code:4122 #soup.find(attrs = {'id':'cod_prc_code'})['value']\nsku:LL1375807-P-WH11390\nsupplier:1 #soup.find(attrs = {'id':'cod_supplier_id'})['value']\ncod_exist:0#soup.find(attrs = {'id':'cod_open'})['value']\nint_ship:0#soup.find(attrs = {'id':'int_ship'})['value']\nbrand_id:3155#soup.find(attrs = {'id':'brand_id'})['value']\nassembly_check:0#soup.find(attrs = {'id':'assembly_check'})['value']\nproduct_id:1375807#soup.find(attrs = {'id':'product_id'})['value']\nis_customized:0\ncustomized_id:0\nccid:2757 #re.findall('var\\sccid\\s=\\s\\\"(.*?)\\\";', html)[0]\n'''\n","sub_path":"e-commerce-scraper3/25 April/pepperfry_check.py","file_name":"pepperfry_check.py","file_ext":"py","file_size_in_byte":5054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"244616080","text":"from game.drawer import Drawer\n\nclass Dealer:\n '''The person who directs the game. Keeps track of score\n and controls the sequence of play.\n \n Attributes:\n keep_playing (bool): Whether the player continues to play\n score (int): The total number of points earned\n drawer (Drawer): Deals directly with cards\n\n I will add documentation here later\n '''\n\n def __init__(self):\n '''The class constructor.\n \n Args:\n self (Dealer): instance of Dealer\n '''\n self.keep_playing = True\n self.score = 300\n self.drawer = Drawer()\n\n def start_game(self):\n '''Starts game loop and controls sequence of play.\n \n Args:\n self (Dealer): instance of Dealer\n '''\n while self.keep_playing:\n self.do_outputs()\n\n def get_inputs(self):\n '''Get the input from the user on whether the card will be higher or lower.\n \n Args:\n self (Dealer): instance of Dealer\n '''\n repeat = True\n while repeat:\n user_choice = input('Higher or lower? 
[h/l] ')\n if user_choice.lower() == 'h':\n return True\n elif user_choice.lower() == 'l':\n return False\n\n def do_updates(self, is_higher):\n '''Update the user's score\n \n Args:\n self (Dealer): instance of Dealer\n is_higher (bool): bool representation of user's choice of higher or lower\n '''\n points = self.drawer.score_card(is_higher)\n self.score += points\n\n def do_outputs(self):\n '''Prints game information, including score and card values, during and after each round.\n \n Args:\n self (Dealer): instance of Dealer\n '''\n new_card, old_card = self.drawer.draw_card()\n print(f'\\nThe card is: {old_card}')\n card_choice = self.get_inputs()\n self.do_updates(card_choice)\n print(f'Next card was: {new_card}')\n print(f'Your score is: {self.score}')\n\n if self.drawer.can_draw(self.score):\n choice_play = input('Keep playing? [y/n] ')\n self.keep_playing = (choice_play == 'y')\n else:\n self.keep_playing = False","sub_path":"hilo/game/dealer.py","file_name":"dealer.py","file_ext":"py","file_size_in_byte":2315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"39806522","text":"\nimport os, sys, codecs\nimport copy\n\nclass N21Item:\n def __init__(self, target, text):\n self.target = target\n self.text = text\n\n self.target_id = None\n self.token_ids = None # it should be array \n\n def set_id(self, target_id, token_ids):\n self.target_id = target_id\n self.token_ids = token_ids\n\n def get_tokens(self):\n # currently, only support 'character' \n return list(self.text) \n\n\nclass N2NItem:\n def __init__(self, targets, tokens):\n self.text = \"\".join(tokens)\n self.target_txts = targets\n self.token_txts = tokens\n\n self.target_ids = None\n self.token_ids = None # it should be array \n\n def set_id(self, target_ids, token_ids):\n self.target_ids = target_ids\n self.token_ids = token_ids\n\n def get_tokens(self):\n # currently, only support 'character' \n return list(self.text) \n\n\nclass N2MItem:\n def __init__(self, src_tokens, tar_tokens):\n self.src_text = \"\".join(src_tokens)\n self.tar_text = \"\".join(tar_tokens)\n\n self.src_token_txts = src_tokens\n self.tar_token_txts = tar_tokens\n\n self.src_token_ids = None # it should be array \n self.tar_token_ids = None # it should be array \n\n def set_id(self, src_token_ids, tar_token_ids):\n self.src_token_ids = src_token_ids\n self.tar_token_ids = tar_token_ids\n\n def get_src_tokens(self):\n # currently, only support 'character' \n return list(self.src_text) \n\n def get_tar_tokens(self):\n # currently, only support 'character' \n return list(self.tar_text)\n \n\nclass N21TextData:\n def __init__(self, src=None, mode='file'): # mode = 'file' | 'sentence'\n self.data = []\n \n if mode == 'file': self.load_text_file_data(src)\n if mode == 'sentence': self.load_text_data(src)\n\n def add_to_data(self, target, text):\n # normalize\n target = target.upper()\n text = text.upper()\n self.data.append( N21Item(target, text) )\n\n def load_text_data(self, line):\n # mode = 'sentence'\n # format of line : \"TAG \\t SENTENCE\"\n line = line.rstrip('\\n\\r')\n target, text = line.split('\\t')\n self.add_to_data(target, text)\n\n def load_text_file_data(self, fn):\n # mode = 'file' \n with codecs.open(fn, 'r', encoding='utf-8') as f:\n for line in f:\n line = line.rstrip('\\n\\r')\n target, text = line.split('\\t')\n self.add_to_data(target, text)\n\n\n\nclass N2NTextData:\n def __init__(self, src=None, mode='file'): # mode = 'file' | 'sentence'\n self.data = []\n \n if mode == 
'file': self.load_text_file_data(src)\n if mode == 'sentence': self.load_text_data(src)\n\n def add_to_data(self, targets, tokens):\n # normalize\n targets = [ t.upper() for t in targets ]\n tokens = [ t.upper() for t in tokens ] \n self.data.append( N2NItem(targets, tokens) )\n\n def load_text_data(self, line):\n # mode = 'sentence'\n line = line.rstrip('\\n\\r')\n tokens = list(line)\n targets = [ 'O' for x in tokens ]\n self.add_to_data(targets, tokens)\n\n\n def load_text_file_data(self, fn):\n # mode = 'file' \n with codecs.open(fn, 'r', encoding='utf-8') as f:\n a_sent_data = []\n tokens = []\n targets = []\n for line in f:\n line = line.rstrip('\\n\\r')\n\n if line.startswith('-------'): \n self.add_to_data(targets, tokens)\n tokens = []\n targets = []\n continue \n\n fields = line.split('\\t')\n assert (len(fields) >= 2), \"Not implemented spec\"\n assert (len(fields) == 2), \"Wrong data format \"\n\n token, tag = fields[0], fields[1]\n tokens.append( token ) \n targets.append( tag ) \n\n\nclass N2MTextData:\n def __init__(self, src=None, mode='file'): # mode = 'file' | 'sentence'\n self.data = []\n \n if mode == 'file': self.load_text_file_data(src)\n if mode == 'sentence': self.load_text_data(src)\n\n def add_to_data(self, src_tokens, tar_tokens):\n # normalize\n self.data.append( N2MItem(src_tokens, tar_tokens) )\n\n def load_text_data(self, line):\n # mode = 'sentence'\n line = line.rstrip('\\n\\r')\n \n # line = source_text \\t target_text\n fields = line.split('\\t')\n \n if len(fields) == 2:\n src_text, tar_text = fields[0], fields[1]\n\n src_tokens = list(src_text)\n tar_tokens = list(tar_text) + ['_EOS']\n self.add_to_data(src_tokens, tar_tokens)\n\n if len(fields) == 1:\n src_text = fields[0]\n\n src_tokens = list(src_text)\n tar_tokens = [ '_EOS' for x in range(128) ] \n self.add_to_data(src_tokens, tar_tokens)\n\n def load_text_file_data(self, fn):\n # mode = 'file' \n with codecs.open(fn, 'r', encoding='utf-8') as f:\n src_tokens = []\n tar_tokens = []\n\n for line in f:\n line = line.rstrip('\\n\\r')\n\n fields = line.split('\\t')\n\n src_text, tar_text = fields[0], fields[1]\n\n src_tokens = list(src_text)\n tar_tokens = list(tar_text) + ['_EOS']\n self.add_to_data(src_tokens, tar_tokens)\n\n","sub_path":"nlp_applications/common/nlp/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":5583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"395918928","text":"\n\nimport glfw\nfrom OpenGL.GL import * \nfrom OpenGL.GLU import * \nimport numpy as np\n\nT = np.array([[1., 0., 0.,0.], \n [0., 1., 0.,0.],\n [0., 0., 1.,0.],\n [0., 0., 0.,1.]])\ncamAng= 0\n\ndef render(M, canAng): \n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) \n glEnable(GL_DEPTH_TEST)\n glLoadIdentity()\n # draw cooridnate\n glOrtho(-1,1,-1,1,-1,1)\n gluLookAt(.1*np.sin(camAng),.1,.1*np.cos(camAng), 0,0,0, 0,1,0)\n\n glBegin(GL_LINES) \n glColor3ub(255, 0, 0) \n glVertex3fv(np.array([0.,0.,0.])) \n glVertex3fv(np.array([1.,0.,0.])) \n glColor3ub(0, 255, 0) \n glVertex3fv(np.array([0.,0.,0.])) \n glVertex3fv(np.array([0.,1.,0.])) \n glColor3ub(0, 0, 255) \n glVertex3fv(np.array([0.,0.,0.])) \n glVertex3fv(np.array([0.,0.,1.]))\n glEnd()\n # draw triangle\n\n glBegin(GL_TRIANGLES)\n glColor3ub(255,255, 255)\n glVertex3fv((M @ np.array([.0,.5,0., 1.]))[:-1])\n glVertex3fv((M @ np.array([.0,.0,0., 1.]))[:-1])\n glVertex3fv((M @ np.array([.5,.0,0., 1.]))[:-1])\n glEnd()\n\ndef key_callback(window, key, scancode, action, 
mods):\n    \n    global T, camAng\n    if action == glfw.PRESS:\n\n        if key == glfw.KEY_Q:\n            newArr = np.array([[1., 0.,0., -0.1],\n                            [0., 1., 0.,0.],\n                            [0., 0., 1.,0.],\n                            [0., 0., 0.,1.]])\n            T = newArr@ T\n        \n        if key ==glfw.KEY_E:\n            newArr = np.array([[1., 0.,0., 0.1],\n                            [0., 1., 0.,0.],\n                            [0., 0., 1.,0.],\n                            [0., 0., 0.,1.]])\n            T = newArr@ T\n        \n        if key == glfw.KEY_A:\n            newArr = np.array([[1., 0.,0., 0.],\n                            [0., 1., 0.,0.],\n                            [0., 0., 1.,0.],\n                            [0., 0., 0.,1.]])\n            th=np.radians(-10)\n            newArr[0][0]=np.cos(th)\n            newArr[0][2]=np.sin(th)\n            newArr[2][0]=-np.sin(th)\n            newArr[2][2]=np.cos(th)\n            T = T @ newArr\n        \n        if key ==glfw.KEY_D:\n            newArr = np.array([[1., 0.,0., 0.],\n                            [0., 1., 0.,0.],\n                            [0., 0., 1.,0.],\n                            [0., 0., 0.,1.]])\n            th=np.radians(10)\n            newArr[0][0]=np.cos(th)\n            newArr[0][2]=np.sin(th)\n            newArr[2][0]=-np.sin(th)\n            newArr[2][2]=np.cos(th)\n            T = T @ newArr\n        \n        if key ==glfw.KEY_W:\n            newArr = np.array([[1., 0.,0., 0.],\n                            [0., 1., 0.,0.],\n                            [0., 0., 1.,0.],\n                            [0., 0., 0.,1.]])\n            th=np.radians(-10)\n            newArr[1][1]=np.cos(th)\n            newArr[1][2]=-np.sin(th)\n            newArr[2][1]=np.sin(th)\n            newArr[2][2]=np.cos(th)\n            T = T @ newArr\n\n        if key ==glfw.KEY_S:\n            newArr = np.array([[1., 0.,0., 0.],\n                            [0., 1., 0.,0.],\n                            [0., 0., 1.,0.],\n                            [0., 0., 0.,1.]])\n            th=np.radians(10)\n            newArr[1][1]=np.cos(th)\n            newArr[1][2]=-np.sin(th)\n            newArr[2][1]=np.sin(th)\n            newArr[2][2]=np.cos(th)\n            T = T @ newArr\n\n        if key == glfw.KEY_1:\n            camAng-=np.radians(10)\n            return \n        if key == glfw.KEY_3:\n            camAng+=np.radians(10)\n            return \n\n\n\ndef main():\n    if not glfw.init():\n        return\n    window = glfw.create_window(480,480,\"2016026026\", None,None)\n    if not window: \n        glfw.terminate() \n        return\n    \n    global T, camAng\n    glfw.set_key_callback(window, key_callback)\n\n    glfw.make_context_current(window)\n\n    while not glfw.window_should_close(window):\n        glfw.poll_events() \n        render(T, camAng)\n        glfw.swap_buffers(window)\n    \n    glfw.terminate()\n\n\nif __name__ == \"__main__\": \n    main()\n\n","sub_path":"assign7.py","file_name":"assign7.py","file_ext":"py","file_size_in_byte":3786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"396183649","text":"# This file will contain different approaches to solving systems of equations\n# of the form Ax = b\n\nimport numpy as np\nimport timer\n\n\n# +----------------------------------------------------------------------------+\n#                                  Gauss Elimination                            |\n# +----------------------------------------------------------------------------+\n\ndef gauss_elim(A, b):\n    \"\"\"This function will solve a system of equations using the gaussian\n    elimination method\n    Inputs:\n        A - coefficient matrix of Ax = b\n        b - constant matrix of Ax = b\"\"\"\n\n    # Forward Elimination Phase\n    for i in range(len(b) - 1):\n        for j in range(i, len(b) - 1):\n            # compute the elimination factor before row j+1 of A is zeroed,\n            # otherwise the update of b would use the already-cleared entry\n            factor = A[j+1, i] / A[i, i]\n            A[j+1, i:] = A[j+1, i:] - factor * A[i, i:]\n            b[j+1] = b[j+1] - factor * b[i]\n\n    # Back Substitution Phase\n    x = np.zeros(len(b))\n    x[-1] = b[-1] / A[-1, -1]\n    for i in range(len(b) - 2, -1, -1):\n        x[i] = (b[i] - np.dot(np.array(A[i, i+1:])[0], x[i+1:])) / A[i, i]\n\n    return x\n\n\nif __name__ == \"__main__\":\n    # float matrices so the in-place elimination updates are not truncated\n    A = np.matrix([[5, 2, 3, 5, 6],\n                   [3, 4, 1, 9, 3],\n                   [4, 2, 6, 8, 1],\n                   [5, 2, 8, 1, 9],\n                   [4, 1, 3, 4, 6]], dtype=float)\n    b = np.matrix([3, 5, 2, 6, 3], dtype=float).transpose()\n\n    print(\"\\n\")\n    print(\"Gaussian Elimination\\n\\t\", end=\"\")\n    print(gauss_elim(A, b), end=\"\\n\\t\")\n    timer.timer(gauss_elim, [A, b], 500)\n    print(\"\\n\")\n\n    print(\"Numpy's linalg.solve() Method\", end=\"\\n\\t\")\n    
print(np.linalg.solve(A, b).transpose(), end=\"\\n\\t\")\n    timer.timer(np.linalg.solve, [A, b], 50000)\n    print(\"\\n\")\n","sub_path":"notes/numerical_methods_engineering_py3/code/systems_of_equations.py","file_name":"systems_of_equations.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"87247102","text":"import requests\n\n# -----------------------------------------------------------------------\n# Get User Events Information\n# -----------------------------------------------------------------------\ndef tte_user_events_api_get(ttesession,tteconvention_id,user_id):\n    user_events_start = 1\n    user_events_total = 1000\n    all_user_events = list()\n    tteuser_events_url = 'https://tabletop.events/api/'\n    while user_events_total >= user_events_start:\n        user_events_params = {'session_id': ttesession, 'convention_id': tteconvention_id, '_page_number': user_events_start}\n        user_events_response = requests.get(tteuser_events_url, params= user_events_params)\n        user_events_json = user_events_response.json()\n        user_events_data = user_events_json['result']['items']\n        user_events_total = int(user_events_json['result']['paging']['total_pages'])\n        user_events_start = int(user_events_json['result']['paging']['page_number'])\n        for user_events in user_events_data:\n            all_user_events.append(user_events)\n        if user_events_start < user_events_total:\n            user_events_start = int(user_events_json['result']['paging']['next_page_number'])\n        elif user_events_start == user_events_total:\n            break\n        else:\n            break\n    return(all_user_events)\n\n# -----------------------------------------------------------------------\n# Get Event Dayparts Information\n# -----------------------------------------------------------------------\ndef tte_eventdayparts_api_get(ttesession,tteconvention_id,event_id):\n    eventdayparts_start = 1\n    eventdayparts_total = 1000\n    all_eventdayparts = list()\n    tteeventdayparts_url = 'https://tabletop.events/api/event/' + event_id + '/dayparts'\n    while eventdayparts_total >= eventdayparts_start:\n        eventdayparts_params = {'session_id': ttesession, 'convention_id': tteconvention_id, '_page_number': eventdayparts_start, '_include_relationships': 1}\n        eventdayparts_response = requests.get(tteeventdayparts_url, params= eventdayparts_params)\n        eventdayparts_json = eventdayparts_response.json()\n        eventdayparts_data = eventdayparts_json['result']['items']\n        eventdayparts_total = int(eventdayparts_json['result']['paging']['total_pages'])\n        eventdayparts_start = int(eventdayparts_json['result']['paging']['page_number'])\n        for daypart in eventdayparts_data:\n            all_eventdayparts.append(daypart)\n        if eventdayparts_start < eventdayparts_total:\n            eventdayparts_start = int(eventdayparts_json['result']['paging']['next_page_number'])\n        elif eventdayparts_start == eventdayparts_total:\n            break\n        else:\n            break\n    return(all_eventdayparts)\n\n# -----------------------------------------------------------------------\n# Get Event Information\n# -----------------------------------------------------------------------\ndef tte_event_api_get(ttesession,tteconvention_id,event_id):\n    event_start = 1\n    event_total = 1000\n    all_event = list()\n    tteeevnt_url = 'https://tabletop.events/api/event/' + event_id\n    while event_total >= event_start:\n        event_params = {'session_id': ttesession, 'convention_id': tteconvention_id, '_page_number': event_start, '_include_relationships': 1}\n        event_response = requests.get(tteeevnt_url, params= event_params)\n        event_data = event_response.json()\n        
convention_event = event_data['result']['items']\n event_total = int(event_data['result']['paging']['total_pages'])\n event_start = int(event_data['result']['paging']['page_number'])\n for event in convention_event:\n all_event.append(event)\n if event_start < event_total:\n event_start = int(event_data['result']['paging']['next_page_number'])\n elif event_start == event_total:\n break\n else:\n break\n return(all_event)\n","sub_path":"FunctionsNoLongerNeeded.py","file_name":"FunctionsNoLongerNeeded.py","file_ext":"py","file_size_in_byte":3887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"440216637","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 9 15:30:06 2019\n\n@author: huzhen\n\"\"\"\n\nimport numpy as np\nfrom tqdm import tqdm\nimport json\n\nbatch_size = 256\n\ndef data_generator():\n while True:\n X,Y = [],[]\n with open(r'../data/train.txt','r',encoding='utf8') as f:\n for line in f:\n line = line.strip()\n if not line:\n continue\n line = line.split(',')\n y = int(float(line[-1]))\n x = [float(_) for _ in line[:-1]]\n X.append(x)\n Y.append([y])\n if len(X) == batch_size:\n yield np.array(X),np.array(Y)\n X,Y = [],[]\n if X:\n yield np.array(X),np.array(Y)\n X,Y = [],[]\n \ndef data_generator_test():\n with open(r'../data/test.txt','r',encoding='utf8') as f:\n for line in f:\n line = line.strip()\n if not line:\n continue\n line = line.split(',')\n y = int(float(line[-1]))\n x = [float(_) for _ in line[:-1]]\n yield np.array([x]),y\n \n \nfrom keras.layers import Input,Dense,Dropout,Reshape,GlobalAveragePooling1D,Layer\nfrom keras.models import Model\nfrom keras.utils import plot_model\nfrom keras.callbacks import Callback\nfrom keras import backend as K\n\nclass GCNN(Layer): # 定义GCNN层,结合残差\n def __init__(self, output_dim=None, residual=False, **kwargs):\n super(GCNN, self).__init__(**kwargs)\n self.output_dim = output_dim\n self.residual = residual\n def build(self, input_shape):\n if self.output_dim == None:\n self.output_dim = input_shape[-1]\n self.kernel = self.add_weight(name='gcnn_kernel',\n shape=(3, input_shape[-1],\n self.output_dim * 2),\n initializer='glorot_uniform',\n trainable=True)\n def call(self, x):\n _ = K.conv1d(x, self.kernel, padding='same')\n _ = _[:,:,:self.output_dim] * K.sigmoid(_[:,:,self.output_dim:])\n if self.residual:\n return _ + x\n else:\n return _\n\n\nsen = Input(shape=(1000,),dtype='float32',name='input')\ndense = Dense(1000*10,activation='relu')(sen)\ndense = Reshape((50,200))(dense)\n#cnn = Conv1D(100,3,padding='same',activation='relu')(dense)\n#cnn = Conv1D(100,3,padding='same',activation='relu')(cnn)\n#cnn = Conv1D(100,3,padding='same',activation='relu')(cnn)\ncnn = GCNN(residual=True)(dense)\ncnn = GCNN(residual=True)(cnn)\ncnn = GCNN(residual=True)(cnn)\npool = GlobalAveragePooling1D()(cnn)\ndropout = Dropout(0.5)(pool)\noutput = Dense(1,activation='sigmoid',name='output')(dropout)\nmodel = Model(sen,output)\nplot_model(model,to_file=r'../model_png/model.png',show_shapes=True,show_layer_names=True)\nmodel.compile(\n optimizer = 'adam',\n loss = 'binary_crossentropy',\n metrics = ['accuracy']\n )\n#if 'model.h5' in os.listdir(r'../model/'):\n# model.load_weights(r'../model/model.h5')\n# print('success load model')\n\nclass Evaluate(Callback):\n def __init__(self):\n self.highest_acc = 0.\n \n def on_epoch_end(self, epoch, logs=None):\n total = 0\n true = 0\n for x,y in tqdm(data_generator_test(),desc='验证中...'):\n y_pred = model.predict(x)[0][0]\n if y_pred >= 0.5:\n 
y_pred = 1\n else:\n y_pred = 0\n if y_pred == y:\n true += 1\n total += 1\n acc = true / total\n print('acc: ',acc)\n if acc > self.highest_acc:\n self.highest_acc = acc\n print('highest_acc: ',self.highest_acc)\n with open(r'../model/acc.json','w',encoding='utf8') as f:\n json.dump(str(self.highest_acc),f,ensure_ascii=False,indent = 4)\n model.save_weights(r'../model/model.h5')\n\nevaluator = Evaluate()\nmodel.fit_generator(\n data_generator(),\n steps_per_epoch = 500000 // batch_size + 1,\n epochs = 200,\n callbacks = [evaluator]\n )","sub_path":"hu/go.py","file_name":"go.py","file_ext":"py","file_size_in_byte":4299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"290857551","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 26 12:20:33 2020\n\n@author: Pauline\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nx,y,z = np.genfromtxt('rand_points.txt', unpack=True)\n\nax = plt.subplot(121,projection='3d')\nax.plot(x,y,z,'.')\nax.set_title('Random points C')\n\n\nx,y,z = np.random.rand(3,10000)\n\nax = plt.subplot(122,projection='3d')\nax.plot(x,y,z,'.')\nax.set_title('Random points Python')","sub_path":"Assignment6/plot_random.py","file_name":"plot_random.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"619053758","text":"from telegram_util import commitRepo\n\nclass DB(object):\n lists = ['whitelist']\n\n def readFile(self, filename):\n with open('db/' + filename) as f:\n content = [x.strip() for x in f.readlines()]\n setattr(self, filename, set([x for x in content if x]))\n\n def saveFile(self, filename):\n with open('db/' + filename, 'w') as f:\n f.write('\\n'.join(sorted(getattr(self, filename))))\n\n def __init__(self):\n for l in self.lists:\n self.readFile(l)\n\n def record(self, mlist, target):\n tid = str(target.id)\n for l in self.lists:\n if l == mlist:\n getattr(self, l).add(tid)\n else:\n getattr(self, l).discard(tid)\n self.saveFile(l)\n commitRepo()","sub_path":"db/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"601352969","text":"import os\nimport re\n\n#\n# INTERNAL DEPENDENCIES\n#\n\nimport logger\nfrom scanomatic.generics.singleton import SingeltonOneInit\n\n#\n# EXCEPTIONS\n#\n\n\nclass InvalidRoot(Exception):\n pass\n\n#\n# CLASSES\n#\n\n\nclass Paths(SingeltonOneInit):\n\n def __one_init__(self, *args):\n\n self._logger = logger.Logger(\"Paths Class\")\n if len(args) > 0:\n self._logger.warning(\n \"Some class instantiated a Paths object wit parameters.\" +\n \" They are ignorded as this is no longer valid\")\n\n self.root = os.path.join(os.path.expanduser(\"~\"), \".scan-o-matic\")\n\n if os.path.isdir(self.root) is False:\n raise InvalidRoot(self.root)\n\n self.config = os.path.join(self.root, \"config\")\n self.fixtures = os.path.join(self.config, \"fixtures\")\n self.images = os.path.join(self.root, \"images\")\n\n self.source_location_file = os.path.join(self.root, \"source_location.txt\")\n\n self.desktop_file = \"scan-o-matic.desktop\"\n self.desktop_file_path = os.path.join(\n self.config, self.desktop_file)\n self.install_filezilla = os.path.join(\n self.config, \"install_filezilla.sh\")\n\n self.scanomatic = \"scan_o_matic\"\n self.analysis = \"scan-o-matic_analysis\"\n self.experiment = \"scan-o-matic_experiment\"\n self.make_project = 
\"scan-o-matic_compile_project\"\n self.install_autostart = \"scan-o-matic_autostart\"\n\n self.config_main_app = os.path.join(self.config, 'main.config')\n self.config_mac = os.path.join(self.config, 'mac_address.config')\n self.config_rpc = os.path.join(self.config, 'rpc.config')\n self.config_rpc_admin = os.path.join(self.config, 'rpc.admin')\n self.config_scanners = os.path.join(self.config, 'scanners.config')\n\n self.rpc_queue = os.path.join(self.root, 'job_queue.cfg')\n self.rpc_jobs = os.path.join(self.root, 'jobs.cfg')\n self.rpc_scanner_status = os.path.join(self.root, 'scanner_status.cfg')\n\n self.ui_root = os.path.join(self.root, \"ui_server\")\n self.ui_css = os.path.join(self.ui_root, \"style\")\n self.ui_js = os.path.join(self.ui_root, \"js\")\n self.ui_font = os.path.join(self.ui_root, \"fonts\")\n self.ui_templates = os.path.join(self.ui_root, \"templates\")\n self.ui_help_file = \"help.html\"\n self.ui_qc_norm_file = \"qc_norm.html\"\n self.ui_maintain_file = \"maintain.html\"\n self.ui_fixture_file = \"fixture.html\"\n self.ui_root_file = 'root.html'\n self.ui_compile_file = 'compile.html'\n self.ui_experiment_file = 'experiment.html'\n self.ui_status_file = 'status.html'\n self.ui_analysis_file = 'analysis.html'\n self.ui_settings_template = 'settings.html'\n\n self.marker = os.path.join(self.images, \"orientation_marker_150dpi.png\")\n self.martin = os.path.join(self.images, \"martin3.png\")\n self.logo = os.path.join(self.images, \"scan-o-matic.png\")\n\n self.fixture_conf_file_suffix = \".config\"\n self.fixture_conf_file_rel_pattern = \"{0}\" + \\\n self.fixture_conf_file_suffix\n self.fixture_image_file_rel_pattern = \"{0}.npy\"\n self.fixture_conf_file_pattern = os.path.join(\n self.fixtures, self.fixture_conf_file_rel_pattern)\n self.fixture_image_file_pattern = os.path.join(\n self.fixtures, self.fixture_image_file_rel_pattern)\n self.fixture_tmp_scan_image = \\\n self.fixture_image_file_pattern.format(\".tmp\")\n self.fixture_grid_history_pattern = \"{0}.grid.history\"\n\n self.log = os.path.join(self.root, \"logs\")\n self.log_ui_server = os.path.join(self.log, \"ui_server.log\")\n self.log_server = os.path.join(self.log, \"server.log\")\n self.log_scanner_out = os.path.join(self.log, \"scanner_{0}.stdout\")\n self.log_scanner_err = os.path.join(self.log, \"scanner_{0}.stderr\")\n\n self.log_relaunch = os.path.join(self.log, \"relaunch.log\")\n self.log_project_progress = os.path.join(self.log, \"progress.projects\")\n\n self.experiment_scan_image_pattern = \"{0}_{1}_{2:.4f}.tiff\"\n self.experiment_analysis_relative_path = \"analysis\"\n self.experiment_analysis_file_name = \"analysis.log\"\n self.experiment_rebuild_instructions = \"rebuild.instructions\"\n\n self.analysis_polynomial = os.path.join(\n self.config, \"calibration.polynomials\")\n self.analysis_calibration_data = os.path.join(\n self.config, \"{0}calibration.data\")\n self.analysis_graycsales = os.path.join(\n self.config, \"grayscales.cfg\")\n\n self.analysis_run_log = 'analysis.log'\n self.analysis_model_file = 'analysis.model'\n\n self.experiment_first_pass_analysis_relative = \"{0}.1_pass.analysis\"\n self.experiment_first_pass_log_relative = \".1_pass.log\"\n self.experiment_local_fixturename = \\\n self.fixture_conf_file_rel_pattern.format(\"fixture\")\n self.experiment_grid_image_pattern = \"grid___origin_plate_{0}.svg\"\n self.grid_pattern = \"grid_plate___{0}.npy\"\n self.grid_size_pattern = \"grid_size___{0}.npy\"\n self.experiment_grid_error_image = \"_no_grid_{0}.npy\"\n\n 
self.ui_server_phenotype_state_lock = \"phenotypes_state.lock\"\n self.phenotypes_csv_pattern = \"phenotypes.{0}.plate_{1}.csv\"\n self.phenotypes_raw_npy = \"phenotypes_raw.npy\"\n self.vector_phenotypes_raw = \"phenotypes_vectors_raw.npy\"\n self.vector_meta_phenotypes_raw = \"phenotypes_meta_vector_raw.npy\"\n self.normalized_phenotypes = \"normalized_phenotypes.npy\"\n self.phenotypes_filter = \"phenotypes_filter.npy\"\n self.phenotypes_reference_offsets = \"phenotypes_reference_offsets.npy\"\n self.phenotypes_filter_undo = \"phenotypes_filter.undo.pickle\"\n self.phenotypes_meta_data = \"meta_data.pickle\"\n self.phenotypes_meta_data_original_file_patern = \"meta_data_{0}.{1}\"\n self.phenotypes_input_data = \"curves_raw.npy\"\n self.phenotypes_input_smooth = \"curves_smooth.npy\"\n self.phenotypes_extraction_params = \"phenotype_params.npy\"\n self.phenotype_times = \"phenotype_times.npy\"\n\n self.phenotypes_extraction_log = \"phenotypes.extraction.log\"\n\n self.image_analysis_img_data = \"image_{0}_data.npy\"\n self.image_analysis_time_series = \"time_data.npy\"\n\n self.project_settings_file_pattern = \"{0}.project.settings\"\n self.project_compilation_pattern = \"{0}.project.compilation\"\n self.project_compilation_instructions_pattern = \"{0}.project.compilation.instructions\"\n self.project_compilation_log_pattern = \"{0}.project.compilation.log\"\n\n self.scan_project_file_pattern = \"{0}.scan.instructions\"\n self.scan_log_file_pattern = \"{0}.scan.log\"\n\n def join(self, attr, *other):\n \n if hasattr(self, attr):\n return os.path.join(getattr(self, attr), *other)\n else:\n raise AttributeError(\"Unknown path attribute '{0}'\".format(attr))\n\n def _is_fixture_file_name(self, fixture_name):\n\n suffix_l = len(self.fixture_conf_file_suffix)\n if (len(fixture_name) > suffix_l and\n fixture_name[-suffix_l:] ==\n self.fixture_conf_file_suffix):\n\n return True\n\n else:\n\n return False\n\n def get_fixture_name(self, fixture_path):\n\n fixture = os.path.basename(fixture_path)\n if len(fixture) > len(self.fixture_conf_file_suffix):\n if fixture[-len(self.fixture_conf_file_suffix):] == \\\n self.fixture_conf_file_suffix:\n\n fixture = fixture[:-len(self.fixture_conf_file_suffix)]\n\n return fixture.capitalize().replace(\"_\", \" \")\n\n def get_project_settings_path_from_scan_model(self, scan_model):\n\n return self.project_settings_file_pattern.format(\n os.path.join(scan_model.directory_containing_project, scan_model.project_name, scan_model.project_name))\n\n def get_project_compile_path_from_compile_model(self, compile_model):\n \"\"\"\n\n :type compile_model: scanomatic.models.compile_project_model.CompileInstructionsModel\n :rtype : str\n \"\"\"\n\n if os.path.isdir(compile_model.path):\n\n return self.project_compilation_pattern.format(\n self.get_project_directory_name_with_file_prefix_from_path(compile_model.path))\n\n return compile_model.path\n\n @staticmethod\n def get_project_directory_name_with_file_prefix_from_path(path):\n\n if os.path.isdir(path):\n dir_name = path\n else:\n dir_name = os.path.dirname(path)\n return os.path.join(dir_name, dir_name.rstrip(os.sep).split(os.sep)[-1])\n\n def get_project_compile_instructions_path_from_compile_model(self, compile_model):\n\n return self.get_project_compile_instructions_path_from_compilation_path(compile_model.path)\n\n def get_project_compile_instructions_path_from_compilation_path(self, path):\n\n return self.project_compilation_instructions_pattern.format(\n 
self.get_project_directory_name_with_file_prefix_from_path(path))\n\n def get_project_compile_log_path_from_compile_model(self, compile_model):\n\n return self.project_compilation_log_pattern.format(\n self.get_project_directory_name_with_file_prefix_from_path(compile_model.path))\n\n def get_scan_instructions_path_from_compile_instructions_path(self, path):\n\n return self.scan_project_file_pattern.format(self.get_project_directory_name_with_file_prefix_from_path(path))\n\n @staticmethod\n def get_scanner_path_name(scanner):\n\n return scanner.lower().replace(\" \", \"_\")\n\n @staticmethod\n def get_scanner_index(scanner_path):\n\n candidates = map(int, re.findall(r\"\\d+\", scanner_path))\n if len(candidates) > 0:\n return candidates[-1]\n else:\n return None\n\n def get_fixture_path(self, fixture_name, conf_file=True, own_path=None,\n only_name=False):\n\n fixture_name = fixture_name.lower().replace(\" \", \"_\")\n\n if self._is_fixture_file_name(fixture_name):\n fixture_name = fixture_name[:-len(self.fixture_conf_file_suffix)]\n\n if only_name:\n return fixture_name\n\n if own_path is not None:\n if conf_file:\n f_pattern = self.fixture_conf_file_rel_pattern\n else:\n f_pattern = self.fixture_image_file_rel_pattern\n\n if own_path == \"\":\n\n f = f_pattern.format(fixture_name)\n if os.path.isfile(f):\n return f\n else:\n f = os.path.join(own_path, f_pattern.format(fixture_name))\n if os.path.isfile(f):\n return f\n\n if conf_file:\n return self.fixture_conf_file_pattern.format(fixture_name)\n else:\n return self.fixture_image_file_pattern.format(fixture_name)\n","sub_path":"scanomatic/io/paths.py","file_name":"paths.py","file_ext":"py","file_size_in_byte":10996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"449350707","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 7 10:37:29 2020\nproject name:get crossRoadName's longitude and laititude from 高德地图\n@author: 18120900\n\"\"\"\nimport pandas as pd\nimport requests\nimport json\nimport time\n\n\ndef getintersectionlatlng(temp_crossroadname, result, error):\n try:\n url_1 = 'https://restapi.amap.com/v3/geocode/geo?address='\n url_2 = '&batch=true&output=json&key=eff48ee434d763609e59839fa946b9e1'\n url = url_1 + '|'.join(temp_crossroadname) + url_2 # 对把交叉口名包含在url中\n\n r_text = requests.get(url)\n r_text.raise_for_status() # 当出现错误时及时抛出错误\n content = json.loads(r_text.content)\n r_text.close() # 很重要的一步!!!,否则会导致错误\n\n status = content[\"status\"]\n for k in range(int(content[\"count\"])):\n if status == \"1\":\n adcode = content[\"geocodes\"][k][\"adcode\"]\n formatted_address = content[\"geocodes\"][k][\"formatted_address\"]\n location = content[\"geocodes\"][k][\"location\"]\n level = content[\"geocodes\"][k][\"level\"]\n result.append((temp_crossroadname[k], formatted_address, adcode, location, level))\n else:\n error.append(temp_crossroadname[k])\n print('error!')\n except TimeoutError:\n print('timeout error')\n\n\nif __name__ == \"__main__\":\n result = [] # 设置一个列表用来存放提取结果\n error = [] # 设置一个列表用来存放请求失败的交叉口数据\n result.append(('Name', 'formatted_address', 'adcode', 'location', 'level'))\n error.append('Name')\n\n with open(r'F:\\18120900\\桌面\\地理逆编码.txt', 'r', encoding='utf-8') as f:\n crossRoad = f.readlines()\n print(len(crossRoad))\n\n temp_crossRoadName = [] # 设置一个列表用来存放交叉口名称\n i = -1\n for crossRoadName in crossRoad:\n i += 1\n temp_crossRoadName.append(crossRoadName.replace('\\n', ''))\n if i % 10 == 9 or i == len(crossRoad) - 1:\n print(i + 1) # 显示处理到那一个交叉口了\n 
getintersectionlatlng(temp_crossRoadName, result, error)\n temp_crossRoadName.clear()\n time.sleep(2)\n df = pd.DataFrame(result)\n df.to_excel(r'F:\\18120900\\桌面\\地理逆编码处理结果.xlsx')\n print('Finished')\n","sub_path":"网络爬虫/获取交叉口经纬度数据.py","file_name":"获取交叉口经纬度数据.py","file_ext":"py","file_size_in_byte":2398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"39291663","text":"from django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect, HttpResponse, HttpResponseForbidden\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom django.template import RequestContext\nfrom django.views.generic import ListView\n\nfrom Generator.generators.forms import GeneratorForm\nfrom Generator.generators.generator import evaluate_generator_text\nfrom Generator.generators.models import Generator, get_generator_by_link\n\n\nclass GeneratorListView(ListView):\n model = Generator\n\n def get_queryset(self):\n if 'user' in self.kwargs:\n user = get_object_or_404(User, username=self.kwargs['user'])\n return Generator.objects.filter(user=user)\n else:\n return Generator.objects.all()\n\n@login_required\ndef edit_generator(request, link=None, template_name='generators/generator_form.html'):\n if link:\n generator = get_generator_by_link(link)\n if generator.user != request.user:\n return HttpResponseForbidden()\n else:\n generator = Generator(user=request.user)\n\n if request.POST:\n form = GeneratorForm(request.POST, instance=generator)\n if form.is_valid():\n form.save(commit=True)\n\n # If the save was successful, redirect to another page\n redirect_url = reverse('generators:detail', kwargs={'user': form.instance.user.username,\n 'slug': form.instance.slug})\n return HttpResponseRedirect(redirect_url)\n else:\n form = GeneratorForm(instance=generator)\n\n return render_to_response(template_name, {\n 'form': form,\n }, context_instance=RequestContext(request))\n\n\ndef eval_generator(request):\n context = RequestContext(request)\n gen_text = None\n response = ''\n if request.method == 'GET':\n gen_text = request.GET['generator_text']\n if gen_text:\n response = evaluate_generator_text(gen_text)\n return HttpResponse(response)\n","sub_path":"Generator/generators/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"587084674","text":"#!/user/bin/env python\n#!encoding=utf-8\nimport tkinter as tk\nfrom suds.client import Client\nfrom Common.PathTools import image_path\nfrom Common.CommonTools import CommonTools\nfrom tkinter import scrolledtext\n'''手机号码归属信息查询界面'''\n\nclass TelFunGui:\n def CreateGui(self):\n '''构建查询界面'''\n self.root=tk.Toplevel()\n self.root.title('手机号码信息查询')\n self.root.geometry('500x400')\n self.root.resizable(width=False,height=False)\n\n frame1=tk.Frame(self.root)\n frame2=tk.Frame(self.root)\n frame3=tk.Frame(self.root)\n frame4=tk.Frame(self.root)\n frame5=tk.Frame(self.root)\n\n '''====================[frame1:title框架]===================='''\n #title图片\n photopath=image_path+'telfungui_bg.png'\n self.photo=tk.PhotoImage(file=photopath)\n\n #title\n title_label=tk.Label(frame1,text='手机号码信息查询',font=('微软雅黑',25),fg='white',justify=tk.CENTER,\n image=self.photo,compound=tk.CENTER)\n title_label.grid(column=0,row=0)\n\n 
'''====================[frame2:查询提示语框架]===================='''\n #提示语\n tip_label=tk.Label(frame2,text='输入手机号码查询手机归属地、运营商等信息',fg='blue',font=('微软雅黑',10),justify=tk.LEFT,height=2)\n tip_label.grid(column=0,row=0,sticky=tk.W)\n\n #占位\n zw_label=tk.Label(frame2,width=15).grid(column=1,row=0,columnspan=2)\n\n '''====================[frame3:查询输入框框架]===================='''\n\n #输入框label\n select_label=tk.Label(frame3,text='输入查询手机号码 : ',font=('微软雅黑',10),justify=tk.LEFT)\n select_label.grid(column=0,row=1,sticky=tk.W,padx=10)\n\n #输入框entry\n self.tel=tk.StringVar()\n tel_entry=tk.Entry(frame3,textvariable=self.tel,justify=tk.LEFT,width=24)\n tel_entry.grid(column=1,row=1,sticky=tk.W)\n\n #查询按钮button\n select_button=tk.Button(frame3,text='点击查询',font=('微软雅黑',8),justify=tk.LEFT,\n activebackground='grey', activeforeground='white',command=self.SelectTelInfo)\n select_button.grid(column=2,row=1,sticky=tk.W,padx=15)\n\n '''====================[frame4:信息显示框架]===================='''\n #文本框label\n desc_label=tk.Label(frame4,text='查询结果 :',font=('微软雅黑',10),justify=tk.LEFT,height=2)\n desc_label.grid(column=0,row=0,sticky=tk.W)\n\n #占位\n zw_label2=tk.Label(frame4,width=44).grid(column=1,row=0,sticky=tk.W,columnspan=2)\n\n #文本框Text\n self.select_text=tk.Text(frame4,height=8,width=52)\n self.select_text.grid(column=0,row=1,columnspan=3)\n\n #文本框内嵌滚条文本\n self.scr=tk.scrolledtext.ScrolledText(self.select_text,width=50,height=8,wrap=tk.WORD)\n self.scr.grid(column=0,columnspan=3)\n\n '''====================[frame5:信息显示框架]===================='''\n # 占位\n zw_label4 = tk.Label(frame5).grid(column=0, row=0)\n # 清空按钮\n clear_button = tk.Button(frame5, text='一键清空', font=('微软雅黑', 8), justify=tk.LEFT, command=self.ClearText,\n activebackground='grey', activeforeground='white', width=11)\n clear_button.grid(column=0, row=1)\n\n frame1.pack(side=tk.TOP)\n frame2.pack(side=tk.TOP)\n frame3.pack(side=tk.TOP)\n frame4.pack(side=tk.TOP)\n frame5.pack(side=tk.TOP)\n\n self.root.mainloop()\n\n '''====================[对应函数封装]===================='''\n\n def CheckTelnum(self,telnum):\n '''检查电话号码规范'''\n commontools=CommonTools()\n\n #获取电话号码\n tel=telnum\n telnumlist=[0,1,2,3,4,5,6,7,8,9]\n\n # 判断手机号码输入为空\n if tel=='':\n commontools.MakeMsgBox('WARNING','号码检查','手机号码输入不能为空')\n return False\n #判断手机号码输入位数不合规\n elif tel !='' and len(str(tel))!=11:\n commontools.MakeMsgBox('WARNING','号码检查','手机号码位数不符合规范')\n return False\n #判断手机号码输入的数据是数字0-9\n else:\n for i in tel:\n if int(i) in telnumlist:\n continue\n else:\n commontools.MakeMsgBox('ERROR', '号码检查', '手机号码只能输入数字')\n return False\n\n def SendRequest(self,telnum):\n '''发送查询请求'''\n commontools=CommonTools()\n url=r'http://ws.webxml.com.cn/WebServices/MobileCodeWS.asmx?wsdl'\n client = Client(url)\n res = client.service.getMobileCodeInfo(telnum)\n result='[%s]'%commontools.NowTime()+res\n return result\n\n def SelectTelInfo(self):\n '''输入手机号码,查询相关信息'''\n #获取手机号码\n telnum=self.tel.get()\n try:\n #检查手机号码规范性,正确才能发送请求\n if self.CheckTelnum(telnum)!=False:\n result=self.SendRequest(telnum)\n try:\n if result!='' and telnum!='':\n self.scr.insert(tk.END,'%s\\n'%result)\n except:\n pass\n except:\n self.scr.insert(tk.END, '[ERROR] 查询结果为空,检查输入项是否正确或超过查询次数限制\\n')\n\n def ClearText(self):\n '''清空文本框内容'''\n self.scr.delete(0.0,tk.END)\n\nif __name__==\"__main__\":\n 
telfungui=TelFunGui().CreateGui()","sub_path":"SpeedySelect/Tools/Gui/FunGui/TelFunGui.py","file_name":"TelFunGui.py","file_ext":"py","file_size_in_byte":5766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"5398367","text":"# -- coding:UTF-8 --\nimport os\n\n# 6- 训练数据和验证数据可视化分布\n\n# 目前统计训练和验证数据,然后统计图片的方式在一张图中展示\n\n# 获取数据内容\nbase_path = '../data/'\ntrain_path = os.path.join(base_path,'40_garbage_classify-for-pythorch/train.txt')\nval_path = os.path.join(base_path,'40_garbage_classify-for-pythorch/val.txt')\n\n# print('train_path = ',train_path)\n# print('val_path = ',val_path)\n# train_path = ../data/40_garbage_classify-for-pythorch/train.txt\n# val_path = ../data/40_garbage_classify-for-pythorch/val.txt\n\n# 统计\n\nfrom glob import glob\nimport codecs\nlabel_idx_list = []\ndef get_label_idx_list(data_path):\n label_idx_list = []\n\n for line in codecs.open(data_path,'r'):\n line = line.strip()\n label_idx = line.split('\\t')[1]\n label_idx_list .append(label_idx)\n return label_idx_list\n\nfrom collections import Counter\n\n# Counter统计label出现次数\n# dict类型转换操作\ntrain_dict = dict(Counter(get_label_idx_list(train_path)))\n\nval_dict = dict(Counter(get_label_idx_list(val_path)))\n\nprint('train_dict00 = ',train_dict)\nprint('val_dict00 = ',val_dict)\n\n# 对dict中的key进行sort asc\n\ntrain_dict = dict(sorted(train_dict.items()))\nval_dict = dict(sorted(val_dict.items()))\n\nprint('train_dict = ',train_dict)\nprint('val_dict = ',val_dict)\n\n\n# 可视化操作\nimport matplotlib.pyplot as plt\n\ndef autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n plt.text(rect.get_x()+rect.get_width()/2.- 0.2, 1.03*height, '%s' % int(height))\n\nplt.figure(figsize=(15, 8))\n\n# 构建数据\n\n# check train == val key\nassert train_dict.keys() == val_dict.keys()\nx = list(train_dict.keys())\n\n# train\ntrain_y = list(train_dict.values())\n\n# val\nval_y = list(val_dict.values())\n\n# 创建Bar示例\nwidth = 0.3\nautolabel(plt.bar(x,train_y,width,color='r',label='Train'))\nx2 = []\nfor i in x:\n x2.append(int(i)+width)\n\nautolabel(plt.bar(x2,val_y,width,color='g',label='Val'))\nplt.xticks(x, x, rotation=30) # 这里是调节横坐标的倾斜度,rotation是度数\n# 设置全局参数\n\nplt.title('garbage classify Train/Val',color='blue')\n\n# 展示图标\nplt.legend()\nplt.show()\n\n\n\nprint(\"步骤六\"+\"* \"* 60)\n\n\n\n","sub_path":"process/06训练数据和验证数据可视化分布.py","file_name":"06训练数据和验证数据可视化分布.py","file_ext":"py","file_size_in_byte":2251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"439792850","text":"#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n#date:\"2017-12-30,15:36\"\n\nfrom django.conf.urls import url\nfrom django.contrib import admin\n\n\nfrom blog import views\n\nurlpatterns = [\n url(r\"^article_comment/$\",views.article_comment),\n url(r\"^poll/$\",views.poll),\n url(r\"^backend/$\",views.backendIndex),\n url (r\"^backend/addArticle/$\",views.addArticle),\n url (r\"^(?P.*)/articles/(?P\\d+)/$\",views.articleDetail,name = \"article_detail\"),\n url (r\"^(?P.*)/(?Pcategory|tag|date)/(?P.*)/$\",views.homeSite),\n url (r\"^(?P.*)/$\",views.homeSite,name = \"aaa\"),\n\n\n]","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"45110106","text":"import sys,string, math, itertools\n\nnt,kt = input().split()\nnt,kt = int(nt),int(kt)\nLeet = [ int(x) for x in input().split()]\n#print(nt,kt, Leet)\nfor i in range(0,nt) 
:\n    if (86400-Leet[i]) >= kt :\n        print(i+1)\n        sys.exit()\n    kt -= (86400-Leet[i])\n","sub_path":"pro56.py","file_name":"pro56.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"315123308","text":"import requests\n\nmyrequest = requests.get(\"https://www.themealdb.com/api/json/v1/1/filter.php?i=beef\")\ndatajson = myrequest.json()\n\noutfile = open(\"food.html\", \"w\")\noutfile.write(\"<html> <head> </head>\")\noutfile.write(\"<body>\")\noutfile.write(\"<h1>\" \"There are \" + str(len(datajson['meals'])) + \" \" + \"foods available to make:\" \"</h1>\")\noutfile.write(\"<ul>\")\nfor i in range(len(datajson['meals'])):\n\toutfile.write(\"<li>\" + datajson[\"meals\"][i][\"strMeal\"] + \"</li>\")\noutfile.write(\"</ul> </body> </html>
\")\n\n#outfile.write(\"\")\n#for x in range(len(datajson['meals'])):\n\t#outfile.write(\"\")\noutfile.close()\n","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"431688923","text":"from funcs import all_factors, triangle_numbers\r\n\r\ndef main():\r\n for n in triangle_numbers():\r\n if len(all_factors(n)) > 500:\r\n break\r\n\r\n return n\r\n\r\nif __name__ == '__main__':\r\n print(main())\r\n\r\nelse:\r\n assert main() == 76576500\r\n","sub_path":"Python/p012.py","file_name":"p012.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"611020411","text":"# -*- coding: utf-8 -*- {{{\n#\n# Your license here\n# }}}\n\nfrom __future__ import absolute_import\n\nimport os\nimport json\nimport csv\nfrom datetime import datetime, timedelta\nfrom dateutil import parser\n\nimport utils\nfrom weather_services.weather_service import WeatherService\nfrom weather_services.points import PointEnum\n\n\nclass CsvWeatherService(WeatherService):\n \"\"\"\n This class provides an example of pulling weather data from a CSV file\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(CsvWeatherService, self).__init__(*args, **kwargs)\n\n def get_data(self, zip_code, points, start_time, end_time, resolution):\n \"\"\"\n This function queries data from a CSV data file.\n :param zip_code:\n :param points:\n :param start_time: datetime object\n :param end_time: datetime object\n :param resolution:\n :return: an array of dictionary. For example:\n [{'ts': datetime.datetime(2018, 1, 1, 8, 0), 'temperature': '72', 'relative_humidity': '50'},\n {'ts': datetime.datetime(2018, 1, 1, 9, 0), 'temperature': '73', 'relative_humidity': '49']\n \"\"\"\n result = []\n weather_file_name = 'test_csv_weather.csv'\n weather_file_path = './csv_weather/' + weather_file_name\n\n reader = csv.DictReader(open(weather_file_path))\n\n # Let's reformat output to a standard format so others can use this csv service the same way as\n # other services (e.g., tmy3_weather_service)\n for row in reader:\n # Convert csv weather format to the standard format\n item = {\n PointEnum.ts: parser.parse(row['Timestamp']),\n PointEnum.temperature: row['Temp[F]'],\n PointEnum.relative_humidity: row['RH[%]']\n }\n\n # Push the converted item to the output list\n result.append(item)\n\n # NOTE: The 2 features below can be added later (similar to what has been done in tmy3_weather_service)\n # Slice data for interested time frame\n # Filter data for interested points\n\n return result\n\n @classmethod\n def pretty_print_result(cls, result, points):\n for rec in result:\n if len(points) == 0:\n print(rec)\n else:\n print(utils.format_timestamp(rec[PointEnum.ts]))\n for point in points:\n print(\"{point}: {value}\".format(point=point, value=rec[point]))\n print(os.linesep)\n\n\nif __name__ == '__main__':\n weather_service = CsvWeatherService()\n\n print(os.linesep)\n print('Test: get_data')\n start_time = parser.parse(\"2018-01-01 08:00:00\")\n end_time = parser.parse(\"2018-01-01 09:00:00\")\n resolution = timedelta(minutes=15)\n points = [] # All points\n weather_data = weather_service.get_data('99352', points, start_time, end_time, resolution)\n weather_service.pretty_print_result(weather_data, 
points)\n","sub_path":"src/weather_services/csv_weather_service.py","file_name":"csv_weather_service.py","file_ext":"py","file_size_in_byte":2957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"96632482","text":"#/bin/python\nfrom pyb import *\nfrom time import sleep\n\nx = 0\nflag = 0\nled1 = LED(1)\nled2 = LED(2)\n\ndef onBtnRIGHTPressed(evt):\n global x,flag\n x += 10\n\nExtInt(Pin('RIGHT'), ExtInt.IRQ_FALLING, Pin.PULL_UP, onBtnRIGHTPressed) \n\ndef onBtnLEFTPressed(evt):\n global x,flag\n x += -20\n\nExtInt(Pin('LEFT'), ExtInt.IRQ_FALLING, Pin.PULL_UP, onBtnLEFTPressed) \n\ndef onBtnBTNAPressed(evt):\n global x,flag\n # LED aus\n flag = 0\n\nExtInt(Pin('BTNA'), ExtInt.IRQ_FALLING, Pin.PULL_UP, onBtnBTNAPressed) \n\ndef onBtnBTNBPressed(evt):\n global x,flag\n # LED an\n flag = 1\n\nExtInt(Pin('BTNB'), ExtInt.IRQ_FALLING, Pin.PULL_UP, onBtnBTNBPressed) \n\nflag = 1\nx = 100\nwhile True:\n if flag == 0:\n led1.off()\n else:\n if flag == 1:\n led1.intensity(x)\n sleep(0.1)\n","sub_path":"code/05-keys-intensity.py","file_name":"05-keys-intensity.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"218968776","text":"# Copyright (c) 2010, Panos Louridas, GRNET S.A.\n#\n# All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the\n# distribution.\n#\n# * Neither the name of GRNET S.A, nor the names of its contributors\n# may be used to endorse or promote products derived from this\n# software without specific prior written permission.\n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE\n# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,\n# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,\n# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED\n# OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport sys, time\nfrom pageRank import pageRank\n\nlinks = [[]]\n\n\ndef read_file(filename):\n f = open(filename, 'r')\n for line in f:\n (frm, to) = map(int, line.split(\" \"))\n extend = max(frm - len(links), to - len(links)) + 1\n for i in range(extend):\n links.append([])\n links[frm].append(to)\n f.close()\n\n\nfn = \"1000.txt\"\nread_file(fn)\n\nf = open(\"time-%s\" % fn, 'w')\nfor i in range(5):\n start = int(round(time.time() * 1000))\n pr = pageRank(links, alpha=0.85, convergence=0.00001, checkSteps=10)\n used = int(round(time.time() * 1000)) - start\n f.writelines([\"no.%d time used: %s ms\\n\" %(i,used)])\nf.close()\n\n# sum = 0\n# for i in range(len(pr)):\n# print i, \"=\", pr[i]\n# sum = sum + pr[i]\n# print \"s = \" + str(sum)\n","sub_path":"python/pagerank_test.py","file_name":"pagerank_test.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"118460719","text":"# -*- coding: utf-8 -*-\nimport os\nimport sys\n\n'''\n批量修改文件名\n'''\n\n# 哦原来自己定义的函数要放在调用之前啊,而且不用加self\ndef rename_file(dir, old, new):\n\t''' 将给定路径 dir 下的旧文件名old改为新文件名new '''\n\tos.rename(os.path.join(dir, old), os.path.join(dir, new))\n\n# 工作目录为除掉py脚本名之外的第一个参数\nwork_dir = sys.argv[1]\nold_extension = sys.argv[2]\nnew_extension = sys.argv[3]\n\n# 判断输入的目录是否存在\n# 注意:目录中如果有空格,可能会提示”目录不存在!“\nif not os.path.exists(work_dir):\n\tprint('目录不存在!')\n\t# 或者创建该目录\n\t# os.makedirs(work_dir)\n\n# 注意这个os.listdir()返回的是str,即字符串类型\nfiles = os.listdir(work_dir)\n\nfor filename in files:\n\t# os.path.splitext()将文件名分为两部分,一个是后缀名,一个是后缀名前面的那一块\n\tfile_extension = os.path.splitext(filename)[1]\n\tif old_extension == file_extension:\n\t\t# str的方法replace(),将某字符串中第一个参数替换为第二个参数\n\t\tnewfile = filename.replace(old_extension, new_extension)\n\t\trename_file(work_dir, filename, newfile)\n\n# 跳出循环之后,提示用户已经完成重命名\nprint(\"重命名成功!\")\n\t","sub_path":"batch-file-rename.py","file_name":"batch-file-rename.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"83178151","text":"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the BSD 3-Clause License (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom collections import abc\n\nimport numpy as np\nimport torch\n\n\ndef cast_tensor_type(inputs, src_type, dst_type):\n if isinstance(inputs, torch.Tensor):\n return inputs.to(dst_type)\n elif isinstance(inputs, str):\n return inputs\n elif isinstance(inputs, 
np.ndarray):\n return inputs\n elif isinstance(inputs, abc.Mapping):\n return type(inputs)({\n k: cast_tensor_type(v, src_type, dst_type)\n for k, v in inputs.items()\n })\n elif isinstance(inputs, abc.Iterable):\n return type(inputs)(\n cast_tensor_type(item, src_type, dst_type) for item in inputs)\n else:\n return inputs\n","sub_path":"PyTorch/contrib/cv/detection/SOLOv1/mmdet/core/fp16/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"566148074","text":"\"\"\"\nНаш калькулятор свихнулся на цензуре и отказывается использовать некоторые слова. Вам необходимо обмануть его и написать программу для для суммирования чисел.\n\nДан массив чисел, необходимо найти сумму этих чисел. Ваше решение не должно содержать запрещенные слова, даже как часть слов.\n\nСписок запретных слов:\n\nsum\nimport\nfor\nwhile\nreduce\n\"\"\"\n\n\ndef checkio(data):\n if len(data) == 0:\n return 0\n if len(data) == 1:\n return data[0]\n next_sm = data[-1] + data[0]\n data.pop(0)\n data[-1] = next_sm\n return checkio(data)\n","sub_path":"Electronic station/Restricted Sum.py","file_name":"Restricted Sum.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"62240267","text":"import time\nimport requests\nfrom bs4 import BeautifulSoup\nimport sys\nimport csv\n\ndef get_soup(url):\n\theaders = {\n\t\t'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'\n\t}\n\tparams = {\"show_ram\":1}\n\tresponse = requests.get(url,params=params, headers=headers)\n\tresponse.encoding = 'utf-8' \n\tsoup = BeautifulSoup(response.text, 'html.parser')\n\treturn soup\n\ndef get_page_list():\n\tsoup = get_soup('https://www.cngold.org/yehq/list_109_all.html')\n\tdivs = soup.find_all('div',class_='history_news_content')\n\tdate2url = {}\n\tfor div in divs:\n\t\threfs = div.find_all('a')#.find定位到所需数据位置 .find_all查找所有的tr(表格)\n\t\tfor href in hrefs:\n\t\t\tdate2url[href.get_text().strip()] = href.attrs[\"href\"]\n\treturn date2url\n\ndef get_page_data(url):\n\tsoup = get_soup(url)\n\tuls = soup.find_all('ul',class_='news_list pb20')\n\tif uls is None or len(uls)==0:\n\t\tuls = soup.find_all('div',class_='left_info')\n\tfor ul in uls:\n\t\threfs = ul.find_all('a')#.find定位到所需数据位置 .find_all查找所有的tr(表格)\n\t\tfor href in hrefs:\n\t\t\treturn href.attrs[\"href\"]\n\ndef get_price_1(url,date):\n\tl = []\n\tsoup = get_soup(url)\n\tarticle = soup.find('div',class_='article_con')\n\tif article is None:\n\t\tarticle = soup.find('div',class_='content w680')\n\ttrs = article.find_all('tr')\n\tfor tr in trs[2:]:\n\t\ttds = tr.find_all('td')\n\t\tif len(tds) >= 3:\n\t\t\tl.append([tds[0].text.strip(),tds[2].text.strip(),tds[6].text.strip()])\n\treturn l\n\ndef get_price_2(url,date):\n\tl = []\n\tsoup = get_soup(url)\n\tarticle = soup.find('div',class_='content w680')\n\ttrs = article.find_all('tr')\n\tfor tr in trs[2:]:\n\t\ttds = tr.find_all('td')\n\t\tif len(tds) >= 3:\n\t\t\tl.append([tds[0].text.strip(),tds[2].text.strip(),tds[6].text.strip()])\n\treturn l\n\ndef get_price_3(url,date):\n\tl = []\n\tsoup = get_soup(url)\n\tarticle = soup.find('div',class_='content w680')\n\ttrs = article.find_all('tr')\n\tfor tr in trs[1:]:\n\t\ttds = tr.find_all('td')\n\t\tif len(tds) >= 3:\n\t\t\tl.append([tds[0].text.strip(),tds[2].text.strip()[:4],tds[5].text.strip()])\n\treturn 
l\n\ndef get_price_4(url,date):\n\tl = []\n\tsoup = get_soup(url)\n\tarticle = soup.find('div',class_='content w680')\n\ttrs = article.find_all('tr')\n\tfor tr in trs[1:]:\n\t\ttds = tr.find_all('td')\n\t\tif len(tds) >= 3:\n\t\t\tl.append([tds[0].text.strip(),tds[1].text.strip(),date])\n\treturn l\n\ndef get_price_5(url,date):\n\tl = []\n\tsoup = get_soup(url)\n\tarticle = soup.find('div',class_='content w680')\n\ttrs = article.find_all('tr')\n\tfor tr in trs[1:]:\n\t\ttds = tr.find_all('td')\n\t\tif len(tds) >= 3:\n\t\t\tl.append([tds[0].text.strip(),tds[2].text.strip()[:4],tds[5].text.strip()])\n\treturn l\n\ndef get_price_6(url,date):\n\tl = []\n\tsoup = get_soup(url)\n\tarticle = soup.find('div',class_='content w680')\n\ttrs = article.find_all('tr')\n\tfor tr in trs[1:]:\n\t\ttds = tr.find_all('td')\n\t\tif len(tds) >= 3:\n\t\t\tl.append([tds[1].text.strip(),tds[4].text.strip(),tds[5].text.strip()])\n\treturn l\n\ndef get_price_6_1(url,date):\n\tl = []\n\tsoup = get_soup(url)\n\tarticle = soup.find('div',class_='content w680')\n\ttrs = article.find_all('tr')\n\tfor tr in trs[1:]:\n\t\ttds = tr.find_all('td')\n\t\tif len(tds) >= 3:\n\t\t\tl.append([tds[2].text.strip(),tds[5].text.strip(),tds[-1].text.strip()])\n\treturn l\n\ndef get_price_6_6(url,date):\n\tl = []\n\tsoup = get_soup(url)\n\tarticle = soup.find('div',class_='content w680')\n\ttrs = article.find_all('tr')\n\tfor tr in trs[1:]:\n\t\ttds = tr.find_all('td')\n\t\tif len(tds) >= 3:\n\t\t\tl.append([tds[1].text.strip(),tds[4].text.strip(),tds[-1].text.strip()])\n\treturn l\n\ndef get_price_7(url,date):\n\tl = []\n\tsoup = get_soup(url)\n\tarticle = soup.find('div',class_='content w680')\n\ttrs = article.find_all('tr')\n\tfor tr in trs[1:]:\n\t\ttds = tr.find_all('td')\n\t\tif len(tds) >= 8:\n\t\t\tl.append([tds[2].text.strip(),tds[5].text.strip(),tds[-1].text.strip()])\n\treturn l\n\ndef get_price_8(url,date):\n\tl = []\n\tsoup = get_soup(url)\n\tarticle = soup.find('div',class_='content w680')\n\ttrs = article.find_all('tr')\n\tfor tr in trs[1:]:\n\t\ttds = tr.find_all('td')\n\t\tif len(tds) >= 8:\n\t\t\tl.append([tds[0].text.strip(),tds[2].text.strip(),date])\n\treturn l\n\t\ndef get_price_9(url,date):\n\tl = []\n\tsoup = get_soup(url)\n\tarticle = soup.find('div',class_='content w680')\n\ttrs = article.find_all('tr')\n\tfor tr in trs[1:]:\n\t\ttds = tr.find_all('td')\n\t\tif len(tds) >= 3:\n\t\t\tl.append([tds[0].text.strip(),tds[1].text.strip(),date])\n\treturn l\n\ndef get_price_10(url,date):\n\tl = []\n\tsoup = get_soup(url)\n\tarticle = soup.find('div',class_='content w680')\n\ttrs = article.find_all('tr')\n\tfor tr in trs[1:]:\n\t\ttds = tr.find_all('td')\n\t\tif len(tds) >= 3:\n\t\t\tl.append([tds[0].text.strip(),tds[-1].text.strip(),date])\n\treturn l\n\ndef get_price_11(url,date):\n\tl = []\n\tsoup = get_soup(url)\n\tarticle = soup.find('div',class_='content w680')\n\ttrs = article.find_all('tr')\n\tfor tr in trs[1:]:\n\t\ttds = tr.find_all('td')\n\t\tif len(tds) >= 6:\n\t\t\tl.append([tds[0].text.strip(),tds[5].text.strip(),date])\n\treturn l\n\ndef get_price_12(url,date):\n\tl = []\n\tsoup = get_soup(url)\n\tarticle = soup.find('div',class_='content w680')\n\ttrs = article.find_all('tr')\n\tfor tr in trs[1:]:\n\t\ttds = tr.find_all('td')\n\t\tif len(tds) >= 3:\n\t\t\tl.append([tds[0].text.strip(),tds[-1].text.strip(),date])\n\treturn l\n\npage_config = 
[\n\t{\"start\":\"2018-01-30\",\"end\":\"2025-01-01\",\"f\":get_price_1},\n\t{\"start\":\"2017-11-21\",\"end\":\"2018-01-29\",\"f\":get_price_2},\n\t{\"start\":\"2013-08-07\",\"end\":\"2017-11-20\",\"f\":get_price_3},\n\t{\"start\":\"2013-03-27\",\"end\":\"2013-08-06\",\"f\":get_price_4},\n\t{\"start\":\"2013-03-11\",\"end\":\"2013-03-26\",\"f\":get_price_5},\n\t{\"start\":\"2013-01-21\",\"end\":\"2013-03-10\",\"f\":get_price_6},\n\t{\"start\":\"2013-01-17\",\"end\":\"2013-01-18\",\"f\":get_price_6_1},\n\t{\"start\":\"2013-01-16\",\"end\":\"2013-01-16\",\"f\":get_price_6},\n\t{\"start\":\"2013-01-10\",\"end\":\"2013-01-15\",\"f\":get_price_7},\n\t{\"start\":\"2012-12-28\",\"end\":\"2013-01-09\",\"f\":get_price_6},\n\t{\"start\":\"2012-10-23\",\"end\":\"2012-12-27\",\"f\":get_price_6_6},\n\t{\"start\":\"2012-01-04\",\"end\":\"2012-11-23\",\"f\":get_price_7},\n\t{\"start\":\"2011-10-17\",\"end\":\"2011-12-31\",\"f\":get_price_8},\n\t{\"start\":\"2011-10-14\",\"end\":\"2011-10-14\",\"f\":get_price_9},\n\t{\"start\":\"2011-08-05\",\"end\":\"2011-10-13\",\"f\":get_price_10},\n\t{\"start\":\"2011-06-14\",\"end\":\"2011-08-04\",\"f\":get_price_11},\n\t{\"start\":\"2011-02-16\",\"end\":\"2011-06-10\",\"f\":get_price_12},\n]\n\ndef main(year):\n\tm = get_page_list()\n\trecords = []\n\tfor (k,v) in m.items():\n\t\tif str(k[:4]) != year:\n\t\t\tcontinue\n\t\turl = get_page_data(v)\n\t\tprint(url)\n\t\tif url is None:\n\t\t\tcontinue\n\t\tfor cfg in page_config:\n\t\t\tif cfg[\"start\"] <= k and k <= cfg[\"end\"]:\n\t\t\t\tf = cfg[\"f\"]\n\t\t\t\tl = f(url,k)\n\t\t\t\tfor i in l:\n\t\t\t\t\tif len(i[1]) > 4:\n\t\t\t\t\t\ti[1] = i[1][:4]\n\t\t\t\trecords = records + l\n\n\tout = open(\"./\"+year+\".csv\",'w+', newline='',encoding='utf-8')\n\tcsv_writer = csv.writer(out, dialect = \"excel\")\n\tfor row in records:\n\t\tcsv_writer.writerow(row)\n\nmain(sys.argv[1])","sub_path":"lpg/data/price/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"516751066","text":"import pygame, sys\npygame.init()\n\n#Definir colores\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nGREEN = (0, 255, 0)\nRED = (255, 0, 0)\nBLUE = (0, 0, 255)\n\nsize = (800, 500)\n\n#Crear ventana\nscreen = pygame.display.set_mode(size)\n\nwhile True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n\n #Color de fondo\n screen.fill(WHITE)\n\n ###### --- Zona de DIBUJO --- #####\n\n for x in range(100, 700, 100):\n pygame.draw.rect(screen, BLACK, (x, 230, 50, 50))\n pygame.draw.line(screen, GREEN, [x, 0], [x, 100], 5)\n\n\n ###### --- Zona de DIBUJO --- #####\n\n\n #Actualizar pantalla\n pygame.display.flip()","sub_path":"dibujando_con_loops.py","file_name":"dibujando_con_loops.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"492670980","text":"from django.contrib.auth.models import User\nfrom rest_framework import routers, serializers, viewsets\n\nfrom proman.models import BillingStatus, BillingType, Client, Company, Department, HumanResource, HumanResourceCategory, \\\n HumanResourceType, HumanResourceTask, InvoicingType, JobTitle, Location, ObjectType, Priority, ProjectType, \\\n ProjectStatus, Project, ProjectCategory, Status, Task, TaskType, TaskCategory, TaskStatus,\\\n Team, TeamResource, TodoType, TodoStatus, Todo, Tenant, Ticket, TicketPriority, TicketStatus,\\\n TicketType, 
TicketResolution, TodoCategory, TrafficLight\n\nrouter = routers.DefaultRouter(schema_title='Procon API')\n\nclass UserSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = User\n fields = ('username', 'first_name', 'last_name', 'is_staff', 'is_active', 'is_superuser', 'email', 'date_joined' )\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n\n\nclass BillingStatusSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = BillingStatus\n fields = ('id', 'name', 'name_en', 'name_de', 'name_zh', 'name_ja',\n 'description', 'description_en', 'description_de',\n 'description_zh', 'description_ja', 'created', 'modified')\n\n\nclass BillingStatusViewSet(viewsets.ModelViewSet):\n queryset = BillingStatus.objects.all()\n serializer_class = BillingStatusSerializer\n\n\nclass BillingTypeSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = BillingType\n fields = ('id', 'name', 'name_en', 'name_de', 'name_zh', 'name_ja',\n 'description', 'description_en', 'description_de',\n 'description_zh', 'description_ja', 'created', 'modified')\n\n\nclass BillingTypeViewSet(viewsets.ModelViewSet):\n queryset = BillingType.objects.all()\n serializer_class = BillingTypeSerializer\n\n\nclass ClientSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = Client\n fields = ('id', 'name', 'name_en', 'name_de', 'name_zh', 'name_ja',\n 'description', 'description_en', 'description_de',\n 'description_zh', 'description_ja', 'created', 'modified')\n\n\nclass ClientViewSet(viewsets.ModelViewSet):\n queryset = Client.objects.all()\n serializer_class = ClientSerializer\n\n\nclass CompanySerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = Company\n fields = ('id', 'name', 'name_en', 'name_de', 'name_zh', 'name_ja', 'tenant', 'created', 'modified')\n\n\nclass CompanyViewSet(viewsets.ModelViewSet):\n queryset = Company.objects.all()\n serializer_class = CompanySerializer\n\n\nclass DepartmentSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = Department\n fields = ('id', 'name', 'name_en', 'name_de', 'name_zh', 'name_ja',\n 'description', 'description_en', 'description_de',\n 'description_zh', 'description_ja', 'created', 'modified')\n\n\nclass DepartmentViewSet(viewsets.ModelViewSet):\n queryset = Department.objects.all()\n serializer_class = DepartmentSerializer\n\n\nclass HumanResourceSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = HumanResource\n fields = ('id', 'name', 'name_en', 'name_de', 'name_zh', 'name_ja',\n 'description', 'description_en', 'description_de',\n 'description_zh', 'description_ja', 'created', 'modified')\n\n\nclass HumanResourceViewSet(viewsets.ModelViewSet):\n queryset = HumanResource.objects.all()\n serializer_class = HumanResourceSerializer\n\n\nclass HumanResourceCategorySerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = HumanResourceCategory\n fields = ('id', 'name', 'name_en', 'name_de', 'name_zh', 'name_ja',\n 'description', 'description_en', 'description_de',\n 'description_zh', 'description_ja', 'created', 'modified')\n\n\nclass HumanResourceCategoryViewSet(viewsets.ModelViewSet):\n queryset = HumanResourceCategory.objects.all()\n serializer_class = HumanResourceCategorySerializer\n\n\nclass HumanResourceTypeSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = HumanResourceType\n fields = ('id', 'name', 'name_en', 'name_de', 
'name_zh', 'name_ja',\n 'description', 'description_en', 'description_de',\n 'description_zh', 'description_ja', 'created', 'modified')\n\n\nclass HumanResourceTypeViewSet(viewsets.ModelViewSet):\n queryset = HumanResourceType.objects.all()\n serializer_class = HumanResourceTypeSerializer\n\n\nclass HumanResourceTaskSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = HumanResourceTask\n fields = ('id', 'task', 'resource', 'status', 'created', 'modified')\n\n\nclass HumanResourceTaskViewSet(viewsets.ModelViewSet):\n queryset = HumanResourceTask.objects.all()\n serializer_class = HumanResourceTaskSerializer\n\n\nclass InvoicingTypeSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = InvoicingType\n fields = ('id', 'name', 'name_en', 'name_de', 'name_zh', 'name_ja',\n 'description', 'description_en', 'description_de',\n 'description_zh', 'description_ja', 'created', 'modified')\n\n\nclass InvoicingTypeViewSet(viewsets.ModelViewSet):\n queryset = InvoicingType.objects.all()\n serializer_class = InvoicingTypeSerializer\n\n\nclass JobTitleSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = JobTitle\n fields = ('id', 'name', 'name_en', 'name_de', 'name_zh', 'name_ja',\n 'description', 'description_en', 'description_de',\n 'description_zh', 'description_ja', 'created', 'modified')\n\n\nclass JobTitleViewSet(viewsets.ModelViewSet):\n queryset = JobTitle.objects.all()\n serializer_class = JobTitleSerializer\n\n\nclass LocationSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = Location\n fields = ('id','address_suffix_name',\n 'address_suffix_name_de',\n 'address_suffix_name_en',\n 'address_suffix_name_ja',\n 'address_suffix_name_zh',\n 'city_name',\n 'city_name_de',\n 'city_name_en',\n 'city_name_ja',\n 'city_name_zh',\n 'company',\n 'country',\n 'created',\n 'geolocation',\n 'modified',\n 'name',\n 'name_de',\n 'name_en',\n 'name_ja',\n 'name_zh',\n 'street_name',\n 'street_name_en',\n 'street_name_ja',\n 'street_name_zh',\n 'zipCode',\n 'street_name_de')\n\nclass LocationViewSet(viewsets.ModelViewSet):\n queryset = Location.objects.all()\n serializer_class = LocationSerializer\n\nclass ObjectTypeSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = ObjectType\n fields = ('id', 'name', 'name_en', 'name_de', 'name_zh', 'name_ja', 'created', 'modified')\n\nclass ObjectTypeViewSet(viewsets.ModelViewSet):\n queryset = ObjectType.objects.all()\n serializer_class = ObjectTypeSerializer\n\n\nclass PrioritySerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = Priority\n fields = ('id', 'name', 'name_en', 'name_de', 'name_zh', 'name_ja',\n 'description', 'description_en', 'description_de',\n 'description_zh', 'description_ja', 'created', 'modified')\n\nclass PriorityViewSet(viewsets.ModelViewSet):\n queryset = Priority.objects.all()\n serializer_class = PrioritySerializer\n\n\nclass ProjectTypeSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = ProjectType\n fields = ('id', 'name', 'name_en', 'name_de', 'name_zh', 'name_ja',\n 'description', 'description_en', 'description_de',\n 'description_zh', 'description_ja', 'created', 'modified')\n\nclass ProjectTypeViewSet(viewsets.ModelViewSet):\n queryset = ProjectType.objects.all()\n serializer_class = ProjectTypeSerializer\n\n\nclass ProjectStatusSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = ProjectStatus\n fields = ('id', 'name', 'name_en', 'name_de', 'name_zh', 
'name_ja',\n 'description', 'description_en', 'description_de',\n 'description_zh', 'description_ja', 'created', 'modified')\n\nclass ProjectStatusViewSet(viewsets.ModelViewSet):\n queryset = ProjectStatus.objects.all()\n serializer_class = ProjectStatusSerializer\n\n\nclass ProjectSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = Project\n fields = ('id','billingStatus',\n 'description_zh',\n 'description_en',\n 'description_ja',\n 'description_de',\n 'description',\n 'created',\n 'client',\n 'billingType',\n 'end',\n 'invoicingType',\n 'jobNo',\n 'matchcode',\n 'modified',\n 'name',\n 'name_de',\n 'name_ja',\n 'name_en',\n 'name_zh',\n 'price',\n 'priority',\n 'trafficLight',\n 'status',\n 'start',\n 'projectType',\n 'projectStatus',\n 'projectManager',\n 'projectCategory')\n\nclass ProjectViewSet(viewsets.ModelViewSet):\n queryset = Project.objects.all()\n serializer_class = ProjectSerializer\n\nclass ProjectCategorySerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = ProjectCategory\n fields = ('id', 'name', 'name_en', 'name_de', 'name_zh', 'name_ja',\n 'description', 'description_en', 'description_de',\n 'description_zh', 'description_ja', 'created', 'modified')\n\nclass ProjectCategoryViewSet(viewsets.ModelViewSet):\n queryset = ProjectCategory.objects.all()\n serializer_class = ProjectCategorySerializer\n\n\nclass StatusSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = Status\n fields = ('id', 'name', 'name_en', 'name_de', 'name_zh', 'name_ja',\n 'description', 'description_en', 'description_de',\n 'description_zh', 'description_ja', 'created', 'modified')\n\nclass StatusViewSet(viewsets.ModelViewSet):\n queryset = Status.objects.all()\n serializer_class = StatusSerializer\n\nclass TaskSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = Task\n fields = ('id','name','description')\n\nclass TaskViewSet(viewsets.ModelViewSet):\n queryset = Task.objects.all()\n serializer_class = TaskSerializer\n\nclass TaskTypeSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = TaskType\n fields = ('id', 'name', 'name_en', 'name_de', 'name_zh', 'name_ja',\n 'description', 'description_en', 'description_de',\n 'description_zh', 'description_ja', 'created', 'modified')\n\nclass TaskTypeViewSet(viewsets.ModelViewSet):\n queryset = TaskType.objects.all()\n serializer_class = TaskTypeSerializer\n\n\nclass TaskCategorySerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = TaskCategory\n fields = ('id', 'name', 'name_en', 'name_de', 'name_zh', 'name_ja',\n 'description', 'description_en', 'description_de',\n 'description_zh', 'description_ja', 'created', 'modified')\n\nclass TaskCategoryViewSet(viewsets.ModelViewSet):\n queryset = TaskCategory.objects.all()\n serializer_class = TaskCategorySerializer\n\nclass TaskStatusSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = TaskStatus\n fields = ('id', 'name', 'name_en', 'name_de', 'name_zh', 'name_ja',\n 'description', 'description_en', 'description_de',\n 'description_zh', 'description_ja', 'created', 'modified')\n\nclass TaskStatusViewSet(viewsets.ModelViewSet):\n queryset = TaskStatus.objects.all()\n serializer_class = TaskStatusSerializer\n\nclass TeamSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = Team\n fields = ('id', 'name', 'resources', 'name_en', 'name_de', 'name_zh', 'name_ja',\n 'description', 'description_en', 'description_de',\n 'description_zh', 
'description_ja', 'created', 'modified')\n\nclass TeamViewSet(viewsets.ModelViewSet):\n queryset = Team.objects.all()\n serializer_class = TeamSerializer\n\n\nclass TeamResourceSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = TeamResource\n fields = ('id','team','resource', 'status', 'created', 'modified')\n\nclass TeamResourceViewSet(viewsets.ModelViewSet):\n queryset = TeamResource.objects.all()\n serializer_class = TeamResourceSerializer\n\n\nclass TodoTypeSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = TodoType\n fields = ('id', 'name', 'name_en', 'name_de', 'name_zh', 'name_ja',\n 'description', 'description_en', 'description_de',\n 'description_zh', 'description_ja', 'created', 'modified')\n\nclass TodoTypeViewSet(viewsets.ModelViewSet):\n queryset = TodoType.objects.all()\n serializer_class = TodoTypeSerializer\n\n\nclass TodoStatusSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = TodoStatus\n fields = ('id', 'name', 'name_en', 'name_de', 'name_zh', 'name_ja',\n 'description', 'description_en', 'description_de',\n 'description_zh', 'description_ja', 'created', 'modified')\n\nclass TodoStatusViewSet(viewsets.ModelViewSet):\n queryset = TodoStatus.objects.all()\n serializer_class = TodoStatusSerializer\n\n\nclass TodoSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = Todo\n # Commas added between the field names: without them Python's implicit\n # string concatenation collapses all entries after 'id' into one bogus field name.\n fields = ('id',\n 'description_ja',\n 'description_de',\n 'priority',\n 'description',\n 'task',\n 'matchcode',\n 'private',\n 'created',\n 'assignee',\n 'modified',\n 'name_zh',\n 'status',\n 'name_en',\n 'description_en',\n 'due',\n 'assigner',\n 'name',\n 'trafficLight',\n 'name_de',\n 'name_ja',\n 'description_zh',\n 'todoType',\n 'todoStatus')\n\nclass TodoViewSet(viewsets.ModelViewSet):\n queryset = Todo.objects.all()\n serializer_class = TodoSerializer\n\n\nclass TenantSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = Tenant\n fields = ('id', 'name', 'name_en', 'name_de', 'name_zh', 'name_ja',\n 'description', 'description_en', 'description_de',\n 'description_zh', 'description_ja', 'created', 'modified')\n\nclass TenantViewSet(viewsets.ModelViewSet):\n queryset = Tenant.objects.all()\n serializer_class = TenantSerializer\n\nclass TicketSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = Ticket\n fields = ('id','name',\n 'assignee',\n 'description_ja',\n 'description_en',\n 'matchcode',\n 'assigner',\n 'modified',\n 'description_de',\n 'description',\n 'name_de',\n 'created',\n 'description_zh',\n 'name_ja',\n 'name_en',\n 'name_zh',\n 'priority',\n 'project',\n 'resolution',\n 'status',\n 'task',\n 'ticketStatus',\n 'ticketType',\n 'trafficLight')\n\nclass TicketViewSet(viewsets.ModelViewSet):\n queryset = Ticket.objects.all()\n serializer_class = TicketSerializer\n\n\nclass TicketPrioritySerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = TicketPriority\n fields = ('id', 'name', 'name_en', 'name_de', 'name_zh', 'name_ja',\n 'description', 'description_en', 'description_de',\n 'description_zh', 'description_ja', 'created', 'modified')\n\nclass TicketPriorityViewSet(viewsets.ModelViewSet):\n queryset = TicketPriority.objects.all()\n serializer_class = TicketPrioritySerializer\n\n\nclass TicketStatusSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = TicketStatus\n fields = ('id', 'name', 'name_en', 'name_de', 'name_zh', 'name_ja',\n 'description', 'description_en', 'description_de',\n 'description_zh', 
'created', 'modified')\n\nclass TicketStatusViewSet(viewsets.ModelViewSet):\n queryset = TicketStatus.objects.all()\n serializer_class = TicketStatusSerializer\n\nclass TicketTypeSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = TicketType\n fields = ('id', 'name', 'name_en', 'name_de', 'name_zh', 'name_ja',\n 'description', 'description_en', 'description_de',\n 'description_zh', 'description_ja', 'created', 'modified')\n\nclass TicketTypeViewSet(viewsets.ModelViewSet):\n queryset = TicketType.objects.all()\n serializer_class = TicketTypeSerializer\n\nclass TicketResolutionSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = TicketResolution\n fields = ('id', 'name', 'name_en', 'name_de', 'name_zh', 'name_ja',\n 'description', 'description_en', 'description_de',\n 'description_zh', 'description_ja', 'created', 'modified')\n\nclass TicketResolutionViewSet(viewsets.ModelViewSet):\n queryset = TicketResolution.objects.all()\n serializer_class = TicketResolutionSerializer\n\n\nclass TodoCategorySerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = TodoCategory\n fields = ('id', 'name', 'name_en', 'name_de', 'name_zh', 'name_ja',\n 'description', 'description_en', 'description_de',\n 'description_zh', 'description_ja', 'created', 'modified')\n\nclass TodoCategoryViewSet(viewsets.ModelViewSet):\n queryset = TodoCategory.objects.all()\n serializer_class = TodoCategorySerializer\n\n\nclass TrafficLightSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = TrafficLight\n fields = ('id', 'name', 'color','name_en', 'name_de', 'name_zh', 'name_ja',\n 'description', 'description_en', 'description_de',\n 'description_zh', 'description_ja', 'created', 'modified')\n\nclass TrafficLightViewSet(viewsets.ModelViewSet):\n queryset = TrafficLight.objects.all()\n serializer_class = TrafficLightSerializer\n\n\n\nrouter.register(r'traffic_light', TrafficLightViewSet)\nrouter.register(r'todo_category', TodoCategoryViewSet)\nrouter.register(r'ticket_resolution', TicketResolutionViewSet)\nrouter.register(r'ticket_type', TicketTypeViewSet)\nrouter.register(r'ticket_status', TicketStatusViewSet)\nrouter.register(r'ticket_priority', TicketPriorityViewSet)\nrouter.register(r'ticket', TicketViewSet)\nrouter.register(r'tenant', TenantViewSet)\nrouter.register(r'todo', TodoViewSet)\nrouter.register(r'todo_status', TodoStatusViewSet)\nrouter.register(r'todo_type', TodoTypeViewSet)\nrouter.register(r'team_resource', TeamResourceViewSet)\nrouter.register(r'team', TeamViewSet)\nrouter.register(r'task_status', TaskStatusViewSet)\nrouter.register(r'task_category', TaskCategoryViewSet)\nrouter.register(r'task_type', TaskTypeViewSet)\nrouter.register(r'task', TaskViewSet)\nrouter.register(r'status', StatusViewSet)\nrouter.register(r'project_category', ProjectCategoryViewSet)\nrouter.register(r'project', ProjectViewSet)\nrouter.register(r'project_status', ProjectStatusViewSet)\nrouter.register(r'project_type', ProjectTypeViewSet)\nrouter.register(r'priority', PriorityViewSet)\nrouter.register(r'object_type', ObjectTypeViewSet)\nrouter.register(r'location', LocationViewSet)\nrouter.register(r'job_title', JobTitleViewSet)\nrouter.register(r'invoicing_type', InvoicingTypeViewSet)\nrouter.register(r'human_resource_task', HumanResourceTaskViewSet)\nrouter.register(r'human_resource_types', HumanResourceTypeViewSet)\nrouter.register(r'human_resource_categories', HumanResourceCategoryViewSet)\nrouter.register(r'human_resources', 
HumanResourceViewSet)\nrouter.register(r'departments', DepartmentViewSet)\nrouter.register(r'companies', CompanyViewSet)\nrouter.register(r'clients', ClientViewSet)\nrouter.register(r'billing_types', BillingTypeViewSet)\nrouter.register(r'billing_statuses', BillingStatusViewSet)\nrouter.register(r'users', UserViewSet)\n","sub_path":"proman/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":21228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"97808984","text":"from selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.remote.webelement import WebElement\nfrom selenium.common.exceptions import StaleElementReferenceException\nimport requests\nfrom bs4 import BeautifulSoup\nimport time \nimport re \nfrom operator import itemgetter \n\nurl = \"http://www.petronet.co.kr/v3/jsp/pet/prc/foreign/KDFQ0100_l.jsp\"\ndef get_page_selenium(url):\n chrome_options = Options()\n chrome_options.add_argument(\"--headless\")\n driver = webdriver.Chrome(\n executable_path='./chromedriver', \n options=chrome_options)\n driver.get(url)\n finded = driver.find_element_by_class_name(\"outcome_table line_green\").text\n driver.close()\n return finded \n\ndef getPage(url):\n req = requests.get(url)\n return BeautifulSoup(req.text, 'html.parser') \n\ndef find_idx(a, b):\n return a.index(b) \n\nret = getPage(url) \n\ntr = ret.find_all(\"tr\", class_=\"\")\ncnt = 0\nnamelist = ['Dubai', 'Brent', 'WTI', 'Oman']\nn = len(tr) \nresult = \"[\"\nfor i in range(len(tr[n - 1].contents)): \n content = tr[n - 1].contents[i] \n if str(content).strip() == \"\" or i == 1: \n continue\n ret = re.sub(r'<(.+?)>', \"\", str(content).strip()) \n result += ('{\"name\":\"' + namelist[cnt] + '\"')\n result += (',\"value\":' + ret + '}') \n if cnt != 3:\n result += ','\n cnt += 1\n\nresult += \"]\"\n\nf = open(\"./inbum.json\", mode = 'w' , encoding='utf8')\nf.write(result)\nf.close() ","sub_path":"test/test_inbum.py","file_name":"test_inbum.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"268435899","text":"import torch\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nimport sys\nimport os\nfrom pyramid import PyramidTransformer\nfrom stack_dataset import StackDataset\nfrom aug import aug_stacks, aug_input\nfrom normalizer import Normalizer\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--count', type=int, default=5)\nparser.add_argument('--dataset', type=int, default=0)\nparser.add_argument('--archive', type=str, default='pt/fprod_correct_enc6.pt')\nargs = parser.parse_args()\n\nif not os.path.isdir('inspect'):\n os.makedirs('inspect')\nif not os.path.isdir('inspect/{}'.format(args.archive[3:-3])):\n os.makedirs('inspect/{}'.format(args.archive[3:-3]))\n\nhm_dataset = StackDataset(os.path.expanduser('~/../eam6/mip5_mixed.h5'), mip=5)\ndatasets = [hm_dataset]\ntrain_dataset = datasets[args.dataset]\ntrain_loader = DataLoader(train_dataset, batch_size=1, shuffle=True, num_workers=5, pin_memory=True)\n\nmodel = PyramidTransformer.load(args.archive, height=8, dim=1152, skips=0, k=7)\n\nnormalizer = Normalizer(5)\n\nfor t, tensor_dict in enumerate(train_loader):\n if t == args.count:\n break\n\n X = tensor_dict['X']\n # Get inputs\n X = Variable(X, requires_grad=False).cuda()\n stacks, top, left = aug_stacks([X], padding=0)\n X = stacks[0]\n src, target = 
X[0,0].clone(), X[0,1].clone()\n src = aug_input(src)[0]\n target = aug_input(target)[0]\n\n src = Variable(torch.FloatTensor(normalizer.apply(src.data.cpu().numpy()))).cuda()\n target = Variable(torch.FloatTensor(normalizer.apply(target.data.cpu().numpy()))).cuda()\n\n model.apply(src,target,vis='inspect/{}/sample{}'.format(args.archive[3:-3], t))\n","sub_path":"training/data_handling/inspect_enc.py","file_name":"inspect_enc.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"429018853","text":"#-*- coding:utf-8 -*-\n\n\"\"\"\nAuthor: Zhengwang Ruan \nStart: Sat Oct 15 18:16:17 CST 2016\n\nTrade data statistics module\n\"\"\"\n\nimport core.emulation as emul\n\n# Tick statistics attributes\nTK_FLOAT_MOV \t\t= 'Float_Move'\t\t\t# floating profit\nTK_FLOAT_CUM \t\t= 'Float_Pro_Cum'\t\t# cumulative floating profit\nTK_FLOAT_POS \t\t= 'Float_Pro_Pos'\t\t# position floating profit\nTK_ORD_PROFIT \t\t= 'Order_Profit'\t\t# closed-position profit\nTK_ADD_POS \t\t= 'Add_Pos'\t\t\t# position adds\nTK_CUT_LOSES \t\t= 'Cut_Loss'\t\t\t# stop-losses\nTK_STOP_WINS \t\t= 'Stop_Win'\t\t\t# take-profits\nTK_ORD_WINS \t\t= 'Order_Win'\t\t\t# winning orders\nTK_ORD_LOSS \t\t= 'Order_Loss'\t\t\t# losing orders\nTK_ORD_FLAT \t\t= 'Order_Flat'\t\t\t# break-even orders\nTK_RES_POS \t\t= 'Res_Pos'\t\t\t# position (tick restore)\nTK_RES_CAP \t\t= 'Res_Cap'\t\t\t# capital (tick restore)\nTK_RES_ACT \t\t= 'Res_Act'\t\t\t# action type (tick restore)\nTK_SP_MODE \t\t= 'SP_Mode'\t\t\t# SP mode\n\n# Tick statistics table header\nTICK_STATS = [TK_FLOAT_MOV, TK_FLOAT_CUM, TK_FLOAT_POS, TK_ORD_PROFIT, TK_ADD_POS, \\\n\t TK_CUT_LOSES, TK_STOP_WINS, TK_ORD_WINS, TK_ORD_LOSS, TK_ORD_FLAT, \\\n\t TK_RES_POS, TK_RES_CAP, TK_RES_ACT, TK_SP_MODE]\n\nclass TickStat:\n\tdef __init__ (self):\n\t\t\"\"\"\n\t\tTick statistics record\n\t\t\"\"\"\n\t\tself.floatProfit = 0.0\n\t\tself.floatProPos = 0.0\n\t\tself.floatProCum = 0.0\n\t\tself.orderProfit = 0.0\t# closed-position profit\n\t\tself.addPos = 0\n\t\tself.cutLoss = 0\n\t\tself.stopWin = 0\n\t\tself.ordWins = 0\t# winning orders\n\t\tself.ordLoses = 0\t# losing orders\n\t\tself.ordFlat = 0\t# break-even orders\n\t\tself.resPos = 0\n\t\tself.resCap = 0.0\n\t\tself.resAct = emul.MEUL_FUT_ACT_SKIP\n\t\tself.tagTradeEnd = False\t# trade-end flag\n\t\tself.reqtype = 0\t# sched req type\n\t\tself.spMode = 0\t\t# SP mode\n\n\tdef values (self):\n\t\t\"\"\"\n\t\tBuild the tick-data insert record\n\t\t:return: list of statistics values\n\t\t\"\"\"\n\t\treturn [self.floatProfit, self.floatProCum, self.floatProPos, \\\n\t\t\tself.orderProfit, self.addPos, self.cutLoss, self.stopWin, \\\n\t\t\tself.ordWins, self.ordLoses, self.ordFlat, \\\n\t\t\tself.resPos, self.resCap, self.resAct, self.spMode]\n\n# Trade statistics attributes\nTRD_TICK_START\t\t= 'Tick_Start'\t\t\t# starting tick\nTRD_TICK_END\t\t= 'Tick_End'\t\t\t# ending tick\nTRD_DAYS_LAST\t\t= 'Days_Last'\t\t\t# days lasted\nTRD_ADD_POS \t\t= TK_ADD_POS\t\t\t# total position adds\nTRD_CUT_LOSES \t\t= TK_CUT_LOSES\t\t\t# total stop-losses\nTRD_STOP_WINS \t\t= TK_STOP_WINS\t\t\t# total take-profits\nTRD_ORD_WINS \t\t= TK_ORD_WINS\t\t\t# total winning orders\nTRD_ORD_LOSS\t\t= TK_ORD_LOSS\t\t\t# total losing orders\nTRD_ORD_FLAT\t\t= TK_ORD_FLAT\t\t\t# total break-even orders\nTRD_TICK_FLOAT_MAX\t= 'Tick_Float_Max'\t\t# tick with the highest floating profit\nTRD_TICK_FLOAT_MIN\t= 'Tick_Float_Min'\t\t# tick with the lowest floating profit\nTRD_FLOAT_MEAN\t\t= 'Float_Mean'\t\t\t# floating-profit mean\nTRD_FLOAT_STD\t\t= 'Float_Std'\t\t\t# floating-profit standard deviation\nTRD_FLOAT_MIN\t\t= 'Float_Min'\t\t\t# floating-profit minimum\nTRD_FLOAT_25\t\t= 'Float_0.25'\t\t\t# floating-profit 0.25 quantile\nTRD_FLOAT_50\t\t= 'Float_0.50'\t\t\t# floating-profit 0.50 quantile\nTRD_FLOAT_75\t\t= 'Float_0.75'\t\t\t# floating-profit 0.75 quantile\nTRD_FLOAT_MAX\t\t= 'Float_Max'\t\t\t# floating-profit maximum\nTRD_PROFIT\t\t= 'Profit'\t\t\t# profit\n\n# Trade statistics table header\nTRADE_STATS = [TRD_TICK_START, TRD_TICK_END, TRD_DAYS_LAST, TRD_ADD_POS, \\\n\t TRD_CUT_LOSES, TRD_STOP_WINS, TRD_ORD_WINS, TRD_ORD_LOSS, TRD_ORD_FLAT, \\\n\t TRD_TICK_FLOAT_MAX, TRD_TICK_FLOAT_MIN, TRD_FLOAT_MEAN, TRD_FLOAT_STD, \\\n\t TRD_FLOAT_MIN, TRD_FLOAT_25, TRD_FLOAT_50, TRD_FLOAT_75, TRD_FLOAT_MAX, TRD_PROFIT]\n\nclass TradeStat:\n\tdef __init__ (self):\n\t\t\"\"\"\n\t\tTrade statistics record (a trade starts when the position count goes from 0 to 1, and ends when it drops from >=1 back to 0)\n\t\t\"\"\"\n\t\tself.tickStart = None\n\t\tself.tickEnd = None\n\t\tself.profit = 0\n\t\tself.daysLast = 0\n\t\tself.tickFloatMax = None\n\t\tself.tickFloatMin = None\n\t\tself.cumFloat = 0.0\n\n\tdef values (self, sumBuf, descBuf):\n\t\t\"\"\"\n\t\tBuild the trade-data record, ordered for insertion\n\t\t:param sumBuf: aggregated sum() data of the trade table\n\t\t:param descBuf: describe() data of the trade table\n\t\t:return: list of statistics values\n\t\t\"\"\"\n\t\treturn [self.tickStart, self.tickEnd, self.daysLast, \\\n\t\t\tsumBuf[TRD_ADD_POS], sumBuf[TRD_CUT_LOSES], sumBuf[TRD_STOP_WINS], \\\n\t\t\tsumBuf[TRD_ORD_WINS], sumBuf[TRD_ORD_LOSS], sumBuf[TRD_ORD_FLAT], \\\n\t\t\tself.tickFloatMax, self.tickFloatMin, descBuf['mean'], descBuf['std'], \\\n\t\t\tdescBuf['min'], descBuf['25%'], descBuf['50%'], descBuf['75%'], descBuf['max'], \\\n\t\t\tself.profit]\n\nclass CommonStat:\n\tdef __init__ (self):\n\t\t\"\"\"\n\t\tCommon statistics attributes: definitions that are shared rather than specific to one statistic\n\t\t\"\"\"\n\t\tself.cumProfit = 0.0\n","sub_path":"core/tradestat.py","file_name":"tradestat.py","file_ext":"py","file_size_in_byte":4218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"2826864","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\n\n# Pipeline: data cleaning and de-duplication.\n# Persistence: write txt/csv files; write to a database.\n\n# The scrapy framework separates the crawling (spider) module from the processing layer (pipeline), which makes the program easier to extend.\n# Items yielded by the spider are handed to the pipeline for processing. If the crawl speed and the processing speed differ, the scrapy framework schedules them automatically.\n#\n\nclass MoviePipeline(object):\n def process_item(self, item, spider):\n with open('my_meiju.txt', 'a', encoding='utf_8') as f:\n f.write(str(item['name']) + '\\n')\n return item\n","sub_path":"movie/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"98191747","text":"import pytest\n\nfrom mock import patch\n\nfrom crons.tasks.heartbeats import heartbeat_builds, heartbeat_experiments, heartbeat_jobs\nfrom factories.factory_build_jobs import BuildJobFactory, BuildJobStatusFactory\nfrom factories.factory_experiments import ExperimentFactory, ExperimentStatusFactory\nfrom factories.factory_jobs import JobFactory, JobStatusFactory\nfrom lifecycles.experiments import ExperimentLifeCycle\nfrom lifecycles.jobs import JobLifeCycle\nfrom tests.base.case import BaseTest\n\n\n@pytest.mark.crons_mark\nclass TestHeartBeatCrons(BaseTest):\n def test_heartbeat_experiments(self):\n experiment1 = ExperimentFactory()\n ExperimentStatusFactory(experiment=experiment1, status=ExperimentLifeCycle.SCHEDULED)\n experiment2 = ExperimentFactory()\n ExperimentStatusFactory(experiment=experiment2, status=ExperimentLifeCycle.CREATED)\n experiment3 = ExperimentFactory()\n ExperimentStatusFactory(experiment=experiment3, status=ExperimentLifeCycle.FAILED)\n experiment4 = ExperimentFactory()\n ExperimentStatusFactory(experiment=experiment4, status=ExperimentLifeCycle.STARTING)\n experiment5 = ExperimentFactory()\n ExperimentStatusFactory(experiment=experiment5, status=ExperimentLifeCycle.RUNNING)\n\n with patch('scheduler.tasks.experiments'\n '.experiments_check_heartbeat.apply_async') as mock_fct:\n heartbeat_experiments()\n\n assert mock_fct.call_count == 1\n\n def test_heartbeat_jobs(self):\n job1 = JobFactory()\n 
JobStatusFactory(job=job1, status=JobLifeCycle.SCHEDULED)\n job2 = JobFactory()\n JobStatusFactory(job=job2, status=JobLifeCycle.CREATED)\n job3 = JobFactory()\n JobStatusFactory(job=job3, status=JobLifeCycle.FAILED)\n job4 = JobFactory()\n JobStatusFactory(job=job4, status=JobLifeCycle.RUNNING)\n\n with patch('scheduler.tasks.jobs.jobs_check_heartbeat.apply_async') as mock_fct:\n heartbeat_jobs()\n\n assert mock_fct.call_count == 1\n\n def test_heartbeat_builds(self):\n build1 = BuildJobFactory()\n BuildJobStatusFactory(job=build1, status=JobLifeCycle.SCHEDULED)\n build2 = BuildJobFactory()\n BuildJobStatusFactory(job=build2, status=JobLifeCycle.CREATED)\n build3 = BuildJobFactory()\n BuildJobStatusFactory(job=build3, status=JobLifeCycle.FAILED)\n build4 = BuildJobFactory()\n BuildJobStatusFactory(job=build4, status=JobLifeCycle.RUNNING)\n\n with patch('scheduler.tasks.build_jobs.build_jobs_check_heartbeat.apply_async') as mock_fct:\n heartbeat_builds()\n\n assert mock_fct.call_count == 1\n","sub_path":"platform/core/tests/test_crons/test_heartbeat_checks.py","file_name":"test_heartbeat_checks.py","file_ext":"py","file_size_in_byte":2673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"109328740","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n '''\n Approach 1: no need to reverse the list; recursion + iteration. Split the list into two halves,\n traverse one half from front to back and the other from back to front, and compare for equality at every step.\n Approach 2: reverse one half of the list.\n '''\n def isPalindrome(self, head: ListNode) -> bool:\n fast = slow = head\n while fast and fast.next:\n fast, slow = fast.next.next, slow.next \n def rec(slow):\n if not slow:\n return (head, True)\n node, flag = rec(slow.next)\n flag = flag and (node.val==slow.val)\n return (node.next, flag)\n _, flag = rec(slow)\n return flag\n \n def isPalindrome2(self, head: ListNode) -> bool:\n fast = slow = head\n while fast and fast.next:\n fast, slow = fast.next.next, slow.next \n pre, tmp_slow = None, slow\n while slow:\n slow.next, pre, slow = pre, slow, slow.next\n head2 = pre\n while head and head is not tmp_slow:\n if head.val!=head2.val:\n return False\n head, head2 = head.next, head2.next\n return True\n \n \n \n","sub_path":"19.01.2020-leetcode234/PalindromeLinkedList.py","file_name":"PalindromeLinkedList.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"430018145","text":"from fastquant import get_stock_data\nimport asyncio\nimport time\nimport itertools\nfrom concurrent.futures import ThreadPoolExecutor\nfrom psycopg2.pool import ThreadedConnectionPool\nimport psql_pool\nimport concurrent.futures\n\nimport psycopg2\nimport keys\n\n\n_executor = ThreadPoolExecutor()\n\nDSN = f\"postgresql://{keys.user}:{keys.password}@{keys.host}:5432/postgres\"\ntcp = ThreadedConnectionPool(1, 800, DSN)\n\nfrom asgiref.sync import sync_to_async\n\n\ndef push_threaded_data(df):\n conn = tcp.getconn()\n cur = conn.cursor()\n psql_pool.Pcursor().execute(f\"\"\"insert into mock (name, number) values ('{df[1]}', {df[0].iloc[0]['open']});\"\"\")\n\ndef insert_data(df, name):\n conn = psycopg2.connect(database=\"postgres\", user=keys.user, password=keys.password, host=keys.host, port=\"5432\")\n cur = conn.cursor()\n que = f\"\"\"insert into mock (name, number) values ('{name}', {df.iloc[0]['open']});\"\"\"\n cur.execute(que)\n print(que)\n print(cur.fetchone())\n conn.commit()\n conn.close()\n\n\nasync def main():\n arr = 
['aapl','msft','tsla','fb']\n dfs = []\n for item in arr:\n #df = await loop.run_in_executor(_executor, get_stock_data, item, \"2019-1-1\", \"2021-7-1\")\n df = await sync_to_async(get_stock_data)(item,\"2019-1-1\", \"2021-7-1\")\n push_threaded_data([df,item])\n\n #with concurrent.futures.ThreadPoolExecutor() as executor:\n # executor.map(push_threaded_data, dfs)\n\nif __name__ == \"__main__\":\n start_time = time.perf_counter()\n\n loop = asyncio.get_event_loop()\n loop.run_until_complete(main())\n\n end_time = time.perf_counter()\n elapsed_time = end_time - start_time\n print(f\"Elapsed run time: {elapsed_time} seconds\")","sub_path":"api/db/optimized_db/thread_testing.py/asyncio_test.py","file_name":"asyncio_test.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"598184911","text":"\"\"\"empty message\n\nRevision ID: f0220ff558f7\nRevises: e22a152ccbf2\nCreate Date: 2018-10-25 16:36:37.173751\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'f0220ff558f7'\ndown_revision = 'e22a152ccbf2'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('translations', sa.Column('source_text', sa.String(length=512), nullable=True))\n op.add_column('translations', sa.Column('translated_text', sa.String(length=512), nullable=True))\n op.drop_column('translations', 'original_text')\n op.drop_column('translations', 'translation_text')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('translations', sa.Column('translation_text', sa.VARCHAR(length=512), autoincrement=False, nullable=True))\n op.add_column('translations', sa.Column('original_text', sa.VARCHAR(length=512), autoincrement=False, nullable=True))\n op.drop_column('translations', 'translated_text')\n op.drop_column('translations', 'source_text')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/f0220ff558f7_.py","file_name":"f0220ff558f7_.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"100790139","text":"file = input()\nclas = input()\n\nf = open(file, 'r')\n\nno = True\n\nfor line in f:\n l = list(line.split())\n if (l[0] == clas):\n print(line)\n no = False\n break\n\nif no:\n print('Not found')\n\nf.close()\n","sub_path":"Tasks/Lesson_12.1/Everything/Oleg_1.12_4.py","file_name":"Oleg_1.12_4.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"175695274","text":"import pickle\nimport numpy as np\n\n# Load model (pickle files must be opened in binary mode)\nsavedmodel = 'resources/model.sav'\nfile = open(savedmodel,'rb')\nmodel = pickle.load(file)\n\n# Load preprocessed data\nsavedscore = 'resources/preprocessed_score.sav'\nfile = open(savedscore,'rb')\nscore_data = pickle.load(file)\n\nXte = score_data['X_score']\n\n# SCORING\n#prediction on test data\npred_te = model.predict(Xte)\n\nscore_path = 'resources/score.csv'\nnp.savetxt(score_path, pred_te, delimiter=\",\")\n","sub_path":"scoring.py","file_name":"scoring.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"268235299","text":"\n# NAME\n# touch - change file access and modification times\n\n# 
SYNOPSIS\n# touch\n\n# DESCRIPTION\n# change file access and modification times.\n\n# IMPORTS #\n\nimport os # imports os module\nimport sys # import sys module -functions relevant to system of computer.\nimport datetime # import datetime module\nfrom os import utime\nimport time \nimport stat\n\n# TOUCH METHOD #\n\ndef touch(flags, params, directs): # passing in filename\n\n if os.path.exists(params[0]):\n os.utime(params[0]) # update access and modification times\n stinfo = os.stat(params[0])\n print(stinfo)\n else:\n f = open(params[0],\"w+\") # create the file when it does not exist\n f.close()\n stinfo = os.stat(params[0])\n print(stinfo)\n\n \n# DEBUGGING CODE #\n \nif __name__ == \"__main__\":\n if os.path.exists(\"OperatingSystem.txt\"):\n os.utime(\"OperatingSystem.txt\")\n stinfo = os.stat(\"OperatingSystem.txt\")\n print (stinfo)\n else:\n with open (\"OperatingSystem.txt\", 'a') as f:\n pass\n ","sub_path":"Assignments/P01-Shell/cmd_pkg/touch.py","file_name":"touch.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"182220240","text":"arr = []\r\nfor line in open('Data.txt', 'r'):\r\n spl = line.split()\r\n name = spl[0] + ' ' + spl[1]\r\n day = spl[2]\r\n avsum = (int(spl[3]) + int(spl[4]) + int(spl[5])) / 3.0\r\n ball = spl[3] + ' ' + spl[4] + ' ' + spl[5]\r\n arr.append([name, day, ball, avsum])\r\n\r\narr = sorted(arr, key=lambda x: x[3], reverse=True)\r\n\r\nfor line in arr:\r\n out = line[0] + \" | \" + line[1] + \" | \" + line[2] + \" -> \" + str(line[3])\r\n print(out)","sub_path":"informatic/lab3/listing/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"587400669","text":"import unittest\n\n\nclass Solution(object):\n def longestCommonPrefix(self, strs):\n \"\"\"\n :type strs: List[str]\n :rtype: str\n \"\"\"\n listSize = len(strs)\n if not listSize or not strs[0]:\n return \"\"\n substring = \"\"\n lens = -1\n for i in range(0, len(strs[0])):\n toadd = (strs[0][i])\n substring += toadd\n lens += 1\n # print(substring)\n for str in strs:\n if lens >= len(str) or str[lens] != toadd:\n return substring[:-1]\n\n return substring\n\n\nclass Test(unittest.TestCase):\n def test_one(self):\n strs = ['aa', 'a']\n sol = Solution()\n self.assertEqual(sol.longestCommonPrefix(strs), 'a')\n\n def test_two(self):\n strs = ['aa', 'aa', 'aa']\n sol = Solution()\n self.assertEqual(sol.longestCommonPrefix(strs), 'aa')\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"14.py","file_name":"14.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"56025260","text":"# -*- coding: utf-8 -*-\n\nimport sys\nimport unittest\n\nfrom tests import PhotoLikeTestCase, VideoLikeTestCase, DiscussionLikeTestCase\n\nif __name__ == '__main__':\n\n suite = unittest.TestSuite((\n unittest.makeSuite(PhotoLikeTestCase),\n unittest.makeSuite(VideoLikeTestCase),\n unittest.makeSuite(DiscussionLikeTestCase)\n ))\n result = unittest.TextTestRunner().run(suite)\n\n sys.exit(not result.wasSuccessful())\n","sub_path":"run_tests.py","file_name":"run_tests.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"647246666","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# Author: Dave Lasley \n# Copyright: 2015 LasLabs, Inc [https://laslabs.com]\n#\n# This program is free software: you can 
redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n#\n##############################################################################\n\nfrom carepoint import Carepoint\nfrom sqlalchemy import (Column,\n Integer,\n String,\n DateTime,\n Boolean,\n ForeignKey,\n Text,\n SmallInteger)\n\n\nclass Patient(Carepoint.BASE):\n __dbname__ = 'cph'\n __tablename__ = 'cppat'\n\n pat_id = Column(Integer, primary_key=True)\n cmt_id = Column(Integer)\n pat_status_cn = Column(Integer)\n pat_type_cn = Column(Integer)\n nh_pat_id = Column(String)\n chart_id = Column(String)\n lname = Column(String)\n lname_sdx = Column(String)\n mname = Column(String)\n title_lu = Column(String)\n suffix_lu = Column(String)\n alias = Column(String)\n mmname = Column(String)\n alt1_id = Column(String)\n pref_meth_cont_cn = Column(String)\n best_cont_time = Column(String)\n ssn = Column(String)\n dln = Column(String)\n dln_state_cd = Column(String)\n email = Column(String)\n gender_cd = Column(String)\n birth_date = Column(DateTime)\n death_date = Column(DateTime)\n no_safety_caps_yn = Column(Boolean)\n generics_yn = Column(Boolean)\n label_style_cn = Column(Integer)\n primary_md_id = Column(\n Integer,\n ForeignKey('cpmd.md_id')\n )\n secondary_md_id = Column(\n Integer,\n ForeignKey('cpmd.md_id')\n )\n edu_level_cn = Column(Integer)\n ethnicity_cd = Column(String)\n maritial_status_cd = Column(String)\n religion_cn = Column(Integer)\n name_spouse = Column(String)\n primary_lang_cd = Column(String)\n rec_release_status_cn = Column(Integer)\n rec_release_date = Column(DateTime)\n hh_relation_cn = Column(Integer)\n hh_pat_id = Column(\n Integer,\n ForeignKey('cppat.pat_id'),\n )\n fam_relation_cn = Column(String)\n fam_pat_id = Column(\n Integer,\n ForeignKey('cppat.pat_id'),\n )\n primary_store_id = Column(\n Integer,\n ForeignKey('csstore.store_id'),\n )\n bad_check_yn = Column(Boolean)\n price_formula_id = Column(Integer)\n cmt = Column(Text)\n status_cn = Column(Integer)\n facility_pat_yn = Column(Boolean)\n conv_code = Column(String)\n app_flags = Column(Integer)\n timestmp = Column(DateTime)\n comp_cn = Column(Integer)\n hp_blnAdmExt = Column(SmallInteger)\n hp_ExtAtt = Column(SmallInteger)\n user_def_1 = Column(String)\n user_def_2 = Column(String)\n user_def_3 = Column(String)\n user_def_4 = Column(String)\n sc_pat_id = Column(\n Integer,\n ForeignKey('cppat.pat_id'),\n )\n resp_party_id = Column(Integer)\n auto_refill_cn = Column(Integer)\n fill_yn = Column(Boolean)\n fill_stop_date = Column(DateTime)\n fill_resume_date = Column(DateTime)\n fill_stop_reason_cn = Column(Integer)\n fill_stop_user_id = Column(\n Integer,\n ForeignKey('csuser.user_id'),\n )\n registration_date = Column(DateTime)\n anonymous_yn = Column(Boolean)\n market_yn = Column(Boolean)\n representative = Column(String)\n discharge_date = Column(DateTime)\n do_not_resuscitate_yn = Column(Boolean)\n discharge_exp_date = Column(DateTime)\n pat_loc_cn = Column(Integer)\n rx_priority_default_cn = Column(Integer)\n ship_cn 
= Column(Integer)\n residence_cn = Column(Integer)\n add_user_id = Column(\n Integer,\n ForeignKey('csuser.user_id'),\n )\n add_date = Column(DateTime)\n chg_user_id = Column(\n Integer,\n ForeignKey('csuser.user_id'),\n )\n chg_date = Column(DateTime)\n","sub_path":"carepoint/models/cph/patient.py","file_name":"patient.py","file_ext":"py","file_size_in_byte":4539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"50001360","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Mar 8 17:40:55 2017\r\n\r\n@author: ga25tur\r\n\"\"\"\r\nimport matplotlib.pyplot as plt\r\n#from mpl_toolkits.mplot3d import Axes3D\r\nimport numpy as np\r\n\r\ndef plotKinetics(datadict, xlabel='Time (min)', ylabel='Fluorescence Intensity (a.u.)',\r\n marker='.',filename='kinetics.png'):\r\n time=datadict['time']\r\n fluorescence=datadict['fluorescence']\r\n legend = list(fluorescence.keys())\r\n #f, axes = plt.\r\n for key in fluorescence:\r\n plt.plot(time, fluorescence[key],'.')\r\n plt.xlabel(xlabel,{\"fontsize\":16})\r\n plt.ylabel(ylabel,{\"fontsize\":16})\r\n #plt.yscale('log')\r\n plt.legend((legend),loc=0)\r\n plt.savefig(filename)\r\n plt.show()\r\n\r\ndef plotNullclines(X1,Y1,X2,Y2,trajectories={},legend=['xdot = 0','ydot = 0']):\r\n plt.plot(X1, Y1,'-')\r\n plt.plot(X2, Y2,'-')\r\n plt.xlabel('x',{\"fontsize\":16})\r\n plt.ylabel('y',{\"fontsize\":16})\r\n plt.legend((legend),loc=0)\r\n if trajectories != {}:\r\n fluorescence=trajectories['fluorescence']\r\n plt.plot(fluorescence['X'],fluorescence['Y'])\r\n plt.show()\r\n \r\ndef plotMultiKinetics(datadict, c, xlabel='Time', ylabel='Concentration',\r\n marker='.'):\r\n time=datadict['time']\r\n fluorescence=datadict['fluorescence']\r\n f, axes = plt.subplots(1,7)\r\n cc=c\r\n for i in range(7):\r\n c=cc[i]\r\n axes[i].plot(time*c/max(cc), fluorescence['X'+str(c)],'.')\r\n axes[i].plot(time*c/max(cc), fluorescence['Y'+str(c)],'.')\r\n axes[i].set_xlabel(xlabel,{\"fontsize\":16})\r\n axes[i].set_ylabel(ylabel,{\"fontsize\":16})\r\n axes[i].legend(('x','y'),loc=0)\r\n axes[i].set_title('c = ' +str(c))\r\n plt.show()\r\n\r\n#def plot3Dtrajectory(x, y, z, xlabel = 'x', ylabel = 'y', zlabel = 'z'):\r\n# fig = plt.figure()\r\n# ax = fig.gca(projection='3d')\r\n# ax.plot(x, y, z) \r\n# ax.set_zlabel(zlabel)\r\n# ax.set_ylabel(ylabel)\r\n# ax.set_xlabel(xlabel)\r\n# plt.show()\r\n","sub_path":"Modelling/Plots.py","file_name":"Plots.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"538315302","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy import ndimage\nimport scipy.fftpack\nfrom numpy import sqrt,exp\nimport os\n\nwl = 1.4235e-2 #nm\n\n\n\n\n\n\nloaddir = '/Users/ivanov/Yandex.Disk.localized/DESY_2018/Ti45Nb/Peaks/'\nsavedir = '/Users/ivanov/Yandex.Disk.localized/DESY_2018/Ti45Nb/FFT/Raw/'\nchkl = '/Users/ivanov/Yandex.Disk.localized/DESY_2018/Ti45Nb/Results/Data/Chkl_parameter.txt'\ndirectorylist = []\n\nfor file in os.listdir('/Users/ivanov/Yandex.Disk.localized/DESY_2018/Ti45Nb/Integrated/'):\n if file.endswith('.txt'):\n directorylist.append(file[:-4])\ndirectorylist.sort()\n\n\nhkl_list = [110, 200, 211]\nfor j, directory in enumerate (directorylist):\n reversedir = loaddir+directory\n filelist = []\n\n for file in os.listdir(reversedir):\n if file.endswith('.txt'):\n filelist.append(file)\n filelist.sort()\n\n print (directory)\n for i, file in enumerate(filelist):\n\n 
filedir = savedir + directory\n if not os.path.exists(filedir):\n os.makedirs(filedir)\n\n Chkl_data = np.loadtxt(chkl)\n data = np.loadtxt(os.path.join(reversedir, file))\n theta = np.deg2rad(data[:, 0] / 2)\n intensity = data[:, 1]\n\n maxtheta = theta[np.argmax(intensity)]\n\n K = 2 * np.sin(theta) / wl\n K0 = 2 * np.sin(maxtheta) / wl\n a3 = 1 / (np.max(K) - np.min(K))\n\n Chkl = Chkl_data[j, i+1]\n K2Chkl = K0**2*Chkl\n\n FY = scipy.fftpack.fft(intensity)\n # print (FY)\n n_array = np.linspace(0, len(FY) - 1, len(FY))\n FX = n_array * a3\n\n plt.plot(FX, np.log(np.abs(FY)), linestyle='None', marker='.', color='navy')\n plt.title('%s %s' % (directory, file[-7:-4]), loc='right')\n plt.xlabel(r'$L, nm$')\n plt.ylabel(r'$lnA(L)$')\n #plt.show()\n #plt.savefig(saveAL, dpi=150)\n plt.clf()\n\n ffttxt = filedir + '/%i_%s_%.6f.txt' % (i, hkl_list[i], K2Chkl)\n #print (ffttxt)\n fft = np.array((np.row_stack((FX, np.abs(FY)))).T)\n np.savetxt(ffttxt, fft, fmt='%.6f %.6f')\n","sub_path":"DESY_2018/Ti45Nb/9_fft.py","file_name":"9_fft.py","file_ext":"py","file_size_in_byte":2075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"137643473","text":"\"\"\"\nThis file demonstrates writing tests using the unittest module. These will pass\nwhen you run \"manage.py test\".\n\nReplace this with more appropriate tests for your application.\n\"\"\"\n\nfrom django.core.urlresolvers import reverse\nfrom django.template import RequestContext\nfrom django.test import TestCase\nfrom django.test.client import RequestFactory\nfrom .models import HTTPRequest\nfrom .middleware import HTTPRequestSave\n\n\nclass MainTest(TestCase):\n fixtures = ['initial_data.json']\n\n def test_basic_addition(self):\n response = self.client.get('/')\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, 'Ruslan')\n self.assertContains(response, 'ruslans@42cc.co')\n self.assertContains(response, 'vxml12')\n\n def test_http_requests(self):\n count_before = HTTPRequest.objects.count()\n self.factory = RequestFactory()\n request = self.factory.get(reverse('test3'))\n HTTPRequestSave.process_request(HTTPRequestSave(), request)\n count_after = HTTPRequest.objects.count()\n count_after -= 1\n self.assertEqual(count_before, count_after)\n response = self.client.get(reverse('test3'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, 'http://')\n\n def test_context_settings(self):\n request = RequestFactory().get('/')\n c = RequestContext(request)\n self.assertEqual(c['SETTINGS'].TIME_ZONE, 'America/Chicago')\n\n def test_edit(self):\n response = self.client.get(reverse('edit'))\n self.assertEqual(response.status_code, 302)\n self.client.login(username='admin', password='admin')\n response = self.client.get(reverse('edit'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, 'Name')\n form = response.context['form']\n data = form.initial\n data['name'] = 'John'\n self.client.post(reverse('edit'))\n response_new = self.client.get(reverse('edit'))\n form_new = response_new.context['form']\n data_new = form_new.initial\n self.assertEqual(data['name'], 'John')\n","sub_path":"dhw/hello/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"91574462","text":"#!/usr/bin/env python\n## -*- coding:utf-8 -*-\n## Author: HongJun\n\n'''\n Description: <独步天下> alive script --- checks the internal NIC status\n Create Date: 
2013-06-26\n'''\n\nimport subprocess\n\nfrom lib.alarmSwitch import opRedis\nfrom lib.aliveUtil import getTimeNow, sendMail, chkPort\n\nimport saveLog\nfrom dbtx.Merge.etc import telIP, lanIP,ps1,dbOneIp,dbTwoIp\n\nsDb = opRedis() ## instantiate the Redis operations class\n\ndef doPing(tIP, dbPort):\n if dbPort == 3307:\n mString = \"Standby DB - %s - port 3307 unreachable\" % tIP\n else:\n mString = \"Primary DB - %s - port 3306 unreachable\" % tIP\n p = subprocess.Popen(\"ping -c7 %s\" % tIP,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n pResult = p.stdout.read()\n lossRate = pResult.split(\"---\")[-1].split(\",\")[2].split()[0] ## extract the packet-loss rate\n if float(lossRate[:-1]) > 20:\n sub = \"DBTX LanIP Alarm: %s\" % ps1\n msg = \"DateTime: %s\\n%s, ICMP-Ping packet loss rate: %s%% !!!\" % (getTimeNow(), mString, lossRate)\n else:\n sub = \"DBTX LanIP-Port Alarm: %s\" % ps1\n msg = \"DateTime: %s\\n%s, ICMP-Ping is normal; MySQL may be down or the corresponding firewall port is not open !!!\" % (getTimeNow(), mString)\n saveLog.error(msg)\n\n if sDb.read(telIP, \"lanAlarm\") == \"ON\": ## when the status is bad and the switch is \"ON\", report the fault\n sendMail(sub, msg)\n sDb.update(telIP, \"lanAlarm\", \"OFF\") ## after alarming, set the switch to \"OFF\"\n\n\ndef pingLan():\n if lanIP == dbOneIp and lanIP != dbTwoIp:\n portOK = chkPort(dbTwoIp, int(3307))\n if not portOK:\n doPing(dbTwoIp, int(3307))\n else:\n if sDb.read(telIP, \"lanAlarm\") == \"OFF\": ## when the status is OK and the switch is \"OFF\", report recovery\n sub = \"DBTX LanIP is OK: %s\" % ps1\n msg = \"DateTime: %s\\n Standby DB: %s, network and port are back to normal.\" % (getTimeNow(), dbTwoIp)\n saveLog.info(msg)\n sendMail(sub, msg)\n sDb.update(telIP, \"lanAlarm\", \"ON\")\n elif lanIP != dbOneIp and lanIP == dbTwoIp:\n portOK = chkPort(dbOneIp, int(3306))\n if not portOK:\n doPing(dbOneIp, int(3306))\n else:\n if sDb.read(telIP, \"lanAlarm\") == \"OFF\":\n sub = \"DBTX LanIP is OK: %s\" % ps1\n msg = \"DateTime: %s\\n Primary DB: %s, network and port are back to normal.\" % (getTimeNow(), dbOneIp)\n saveLog.info(msg)\n sendMail(sub, msg)\n sDb.update(telIP, \"lanAlarm\", \"ON\")\n else:\n pass","sub_path":"python/dbtx/Alive/lib/pingLanIP.py","file_name":"pingLanIP.py","file_ext":"py","file_size_in_byte":2470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"328697903","text":"# coding=utf-8\nimport hashlib\n\n'''\nSignature verification\nauthor: Wang Jun\n'''\n\n\ndef get_md5(s):\n x = hashlib.md5()\n x.update(s.encode('utf8'))\n return x.hexdigest()\n\n\ndef parse_param(s):\n d = [x.split('=') for x in s.split('&')]\n d = [x for x in d if len(x) == 1 or len(x) == 2]\n d = [((x[0], x[1]) if len(x) == 2 else (x[0], '')) for x in d if x[0] != 'sign']\n return dict(d)\n\n\nparam = {\n 'option': 'GetResourceMarkInfo',\n 'uid': 'b11dfe19ef4d4f60860dda673dfa7863',\n 'page_classname': 'PersonalCenterViewController',\n 'pageType': '1',\n 'ver': '1.0',\n 'UUID': '9E2C3354-EA4E-49FA-8CE0-C4D81FF7E59A',\n 'userId': 'b11dfe19ef4d4f60860dda673dfa7863',\n 'app_token': '',\n 'version': '1.1',\n 'api_type': 'post'\n}\nparam = parse_param(\n 'userUid=b47af3bfd0ab4c79ab9a1520c7603590&sign=C2B551C76A60ACD5AAE523BFCFC1D055&safeCode=&page_classname=com.sensu.automall.activity_search.FastEntryActivity%40b290277&userId=876e49e3fe0b4c4084f6611809f78f99&uuid=ffffffff-d163-fd87-e8eb-fc9a10c8a501&mac=8c%3Aeb%3Ac6%3Aee%3A17%3A8f&ip=172.16.16.213&domain=http%3A%2F%2Ff.qipeilong.net%2F&modelType=0&ver=1.0&userUID=876e49e3fe0b4c4084f6611809f78f99')\n\ndata = [x for x in param.items() if len(x[0]) <= 32 and len(x[1]) <= 32 and x[0] != 'sign']\nprint('result after length filtering', data)\ndata = sorted(data, key=lambda x: x[0])\nprint('result after sorting', data)\nsign_str = '&'.join(['{0}={1}'.format(x[0], x[1]) for x in data]) + 
'www_qipeilong_cn'\nsign_str = sign_str.lower()\nprint('string to sign', sign_str)\nprint('sign result', get_md5(sign_str).upper())\n","sub_path":"test_/test/syntax_test/sign_test.py","file_name":"sign_test.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"412566741","text":"from datetime import datetime\nfrom json import dumps\nfrom time import sleep\n\nfrom paho.mqtt.client import Client, MQTTv311\nimport RPi.GPIO as GPIO\nimport wiringpi as wp\n\nimport dht11\n\nHOST = 'fiware'\nPORT = 1883\nUSERNAME = 'iota'\nPASSWORD = 'password'\n\nTOPIC = \"/apikey1/sensor01/attrs\"\n\nSPI_CH = 0\nPIN_BASE = 64\n\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BCM)\nGPIO.cleanup()\n\ninstance = dht11.DHT11(pin=14)\n\n\ndef sensor_init():\n wp.mcp3002Setup(PIN_BASE, SPI_CH)\n\n\ndef read_odor():\n return wp.analogRead(PIN_BASE)\n\n\ndef read_dht11():\n values = 0\n while True:\n values = instance.read()\n if values.temperature != 0 and values.humidity != 0:\n break\n sleep(0.01)\n return values.temperature, values.humidity\n\n\ndef read_sensor_data():\n temp, hum = read_dht11()\n data = {}\n data.update({\"odor\": read_odor()})\n data.update({\"temperature\": temp})\n data.update({\"humidity\": hum})\n return data\n\n\ndef main():\n client = Client(protocol=MQTTv311)\n client.username_pw_set(USERNAME, password=PASSWORD)\n client.connect(HOST, port=PORT, keepalive=60)\n\n while True:\n data = read_sensor_data()\n print(data)\n client.publish(TOPIC, payload=dumps(data))\n sleep(5)\n\n\nif __name__ == '__main__':\n sensor_init()\n main()\n","sub_path":"pocs/odor/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"121237798","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author: wxnacy(wxnacy@gmail.com)\n# Description: for while generator list_comprehension map speed comparison\n\ndef _abs(i):\n if i >= 0:\n return i\n return -i\n\ndef loop_for(n):\n res = []\n for i in range(n):\n res.append(_abs(i))\n return res\n\ndef loop_while(n):\n i = 0\n res = []\n while i < n:\n res.append(_abs(i))\n i += 1\n return res\n\ndef loop_generator(n):\n res = (_abs(i) for i in range(n))\n res = list(res)\n return res\n\ndef loop_list_compre(n):\n res = [_abs(i) for i in range(n)]\n return res\n\ndef loop_map(n):\n res = map(_abs, range(n))\n res = list(res)\n return res\n\nimport utils\nimport unittest\n\nclass TestMain(unittest.TestCase):\n\n def setUp(self):\n '''before each test function'''\n pass\n\n def tearDown(self):\n '''after each test function'''\n pass\n\n def test_func(self):\n n = 10\n # flag = (loop_for(n) == loop_while(n) == loop_generator(n) ==\n # loop_list_compre(n) == loop_map(n))\n # self.assertTrue(flag)\n\nif __name__ == \"__main__\":\n count = 1000\n n = 1000\n utils.print_func_run_time(count, loop_for, n = n)\n utils.print_func_run_time(count, loop_while, n = n)\n utils.print_func_run_time(count, loop_generator, n = n)\n utils.print_func_run_time(count, loop_list_compre, n = n)\n utils.print_func_run_time(count, loop_map, n = n)\n unittest.main()\n\n# .\n# ----------------------------------------------------------------------\n# Ran 1 test in 0.000s\n#\n# OK\n# loop_for run 1000 times used 0.22006184199926793s\n# loop_while run 1000 times used 0.2734469540009741s\n# loop_generator run 1000 times used 0.1969178159997682s\n# loop_list_compre run 1000 times used 0.15887818199917092s\n# 
loop_map run 1000 times used 0.13055954499941436s\n","sub_path":"python/leetcode/loop_speed_compariso2.py","file_name":"loop_speed_compariso2.py","file_ext":"py","file_size_in_byte":1895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"317002507","text":"# -*- coding: utf-8 -*-\nimport requests\nfrom django.shortcuts import render\nfrom .version import (get_current_version)\nfrom .log_handler import get_logger\nlogger = get_logger(__name__)\n\n\ndef index(request):\n context = {}\n return render(request, 'site_index.html', context)\n\n\ndef check_version(request):\n r = requests.get(\"https://pypi.org/pypi/cartoview/json\")\n context = dict(\n latest_version=r.json()[\"info\"][\"version\"],\n current_version=get_current_version())\n return render(\n request,\n \"cartoview/check_version.js\",\n context=context,\n content_type=\"text/javascript\")\n","sub_path":"cartoview/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"455488620","text":"#!/usr/bin/env python\n# Kyle Fitzsimmons, 2014\nfrom flask import Flask, render_template, jsonify, json\nfrom werkzeug.contrib.cache import SimpleCache\nfrom urllib2 import HTTPError\nimport gtfs_database as gdb\n\napp = Flask(__name__)\ncache = SimpleCache()\n\n@app.route(\"/\")\ndef hello():\n routes = gdb.get_trips_now()\n route_nums = sorted(routes.keys())\n cache.set('routes', routes, timeout=3*60)\n return render_template('index.html', active_routes=route_nums)\n\n@app.route(\"/route/<route>\")\ndef get_geojson(route):\n available_routes = cache.get('routes')\n if not available_routes:\n available_routes = gdb.get_trips_now()\n geojson = gdb.route_geojson(routes=available_routes, selected_route=route)\n if not geojson:\n return ('', 204) # HTTP no content\n return jsonify(geojson)\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', debug=True)\n\n","sub_path":"mapper_app.py","file_name":"mapper_app.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"378418965","text":"from .exceptions import *\nimport random\n\nclass GuessAttempt(object):\n\n def __init__(self, letter, hit=False, miss=False):\n\n # Truth values for 'hit' and 'miss' must be different\n if hit == miss:\n raise InvalidGuessAttempt\n\n self.hit = hit\n self.miss = miss\n\n def is_hit(self):\n return self.hit\n\n def is_miss(self):\n return self.miss\n\n\n\nclass GuessWord(object):\n\n def __init__(self, answer):\n\n if not answer:\n raise InvalidWordException\n\n self.answer = answer\n self.masked = '*'*len(answer)\n\n def perform_attempt(self, guessed_letter):\n\n if len(guessed_letter) > 1:\n raise InvalidGuessedLetterException\n\n if guessed_letter.lower() in self.answer.lower():\n\n new_masked = ''\n\n for idx, letter in enumerate(self.answer):\n\n # If there is a new match, or the current letter has already been uncovered\n if guessed_letter.lower() == letter.lower() or self.masked[idx] != '*':\n new_masked += letter.lower()\n else:\n new_masked += '*'\n\n self.masked = new_masked\n return GuessAttempt(guessed_letter, hit=True)\n\n else:\n\n return GuessAttempt(guessed_letter, miss=True)\n\n\nclass HangmanGame(object):\n\n WORD_LIST = ['rmotr', 'python', 'awesome']\n\n @classmethod\n def select_random_word(cls, word_list):\n\n if word_list:\n return random.choice(word_list)\n else:\n raise 
InvalidListOfWordsException\n\n\n def __init__(self, word_list=None, number_of_guesses=5):\n\n if not word_list:\n word_list = HangmanGame.WORD_LIST\n\n self.word = GuessWord(self.select_random_word(word_list))\n self.remaining_misses = number_of_guesses\n self.previous_guesses = []\n self.finished = False\n self.won = False\n self.lost = False\n\n def guess(self, guessed_letter):\n\n if self.finished:\n raise GameFinishedException\n\n self.previous_guesses.append(guessed_letter.lower())\n\n attempt = self.word.perform_attempt(guessed_letter)\n\n if attempt.is_miss():\n self.remaining_misses -= 1\n if self.remaining_misses == 0:\n self.lost = True\n self.finished = True\n raise GameLostException\n\n elif self.word.answer == self.word.masked:\n self.won = True\n self.finished = True\n raise GameWonException\n\n return attempt\n\n\n def is_won(self):\n return self.won\n\n\n def is_lost(self):\n return self.lost\n\n\n def is_finished(self):\n return self.finished\n\n\n\n\n","sub_path":"hangman/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"395123879","text":"from tkinter import *\r\nimport time\r\nimport DictTrie\r\n\r\nroot = Tk()\r\nroot.title(\"Class and Keybinds\")\r\nroot.geometry(\"800x600\")\r\n\r\nclass keyTable():\r\n def __init__(self):\r\n self.keyBinds = [['q', 'q', 0], ['w', 'w', 0], ['e', 'e', 0], ['r', 'r', 0], ['t', 't', 0], ['y', 'y', 0], ['u', 'u', 0], ['i', 'i', 0],\r\n ['o', 'o', 0], ['p', 'p', 0], ['a', 'a', 0], ['s', 's', 0], ['d', 'd', 0], ['f', 'f', 0], ['g', 'g', 0], ['h', 'h', 0],\r\n ['j', 'j', 0], ['k', 'k', 0], ['l', 'l', 0], ['z', 'z', 0], ['x', 'x', 0], ['c', 'c', 0], ['v', 'v', 0], ['b', 'b', 0],\r\n ['n', 'n', 0], ['m', 'm', 0]]\r\n self.defaultKeys = [['q', 'q', 0], ['w', 'w', 0], ['e', 'e', 0], ['r', 'r', 0], ['t', 't', 0], ['y', 'y', 0],\r\n ['u', 'u', 0], ['i', 'i', 0], ['o', 'o', 0], ['p', 'p', 0], ['a', 'a', 0], ['s', 's', 0],\r\n ['d', 'd', 0], ['f', 'f', 0], ['g', 'g', 0], ['h', 'h', 0], ['j', 'j', 0], ['k', 'k', 0],\r\n ['l', 'l', 0], ['z', 'z', 0], ['x', 'x', 0], ['c', 'c', 0], ['v', 'v', 0], ['b', 'b', 0],\r\n ['n', 'n', 0], ['m', 'm', 0]]\r\n self.undoList = []\r\n self.currentString = \"Type Here ----> \"\r\n self.textPred1 = \"---\"\r\n self.textPred2 = \"---\"\r\n self.textPred3 = \"---\"\r\n self.textPreds = \"---\\n---\\n---\"\r\n self.editMode = False\r\n self.swapChar = \"\"\r\n self.swapKey = -1\r\n self.word = \"\"\r\n self.render()\r\n\r\n def enableEdit(self):\r\n for e in range(len(self.keyBinds)):\r\n if self.keyBinds[e][2] != 0:\r\n self.keyBinds[e][2] = 0\r\n self.swapKey = -1\r\n self.editMode = not self.editMode\r\n # print(self.editMode)\r\n\r\n def insertChar(self, nextchar):\r\n if ord(nextchar) == 8:\r\n self.word = self.word[:-1]\r\n self.getPred((self.word))\r\n elif ord(nextchar) == 32:\r\n self.currentString += str(nextchar)\r\n self.word = \"\"\r\n self.textPreds = \"---\\n---\\n---\"\r\n else:\r\n self.currentString += str(nextchar)\r\n self.word += str(nextchar.lower())\r\n self.getPred((self.word))\r\n # print(self.word)\r\n\r\n def getPred(self, pre):\r\n startTime = time.time()\r\n results = DictTrie.trie.preWord(str(pre))\r\n self.textPred1 = str(results[0])\r\n self.textPred2 = str(results[1])\r\n self.textPred3 = str(results[2])\r\n self.textPreds = str(self.textPred1 + \"\\n\" + self.textPred2 + \"\\n\" + self.textPred3)\r\n print(str(pre) + \" time taken \" + str((time.time() - 
startTime)*100) + \" ms\")\r\n # print(self.word)\r\n # print(self.textPreds + \"\\n\")\r\n\r\n def keyPress(self, event):\r\n if len(event.char) > 0:\r\n if ((ord(event.char) >= 65 and ord(event.char) <= 90) or (ord(event.char) >= 97 and ord(event.char) <= 122)) and not self.editMode:\r\n for k in range(len(self.keyBinds)):\r\n if str(event.char) == str(self.keyBinds[k][0]):\r\n self.insertChar(str(self.keyBinds[k][1]))\r\n break\r\n elif str(event.char) == str(self.keyBinds[k][0]).upper():\r\n self.insertChar(str(self.keyBinds[k][1]).upper())\r\n break\r\n elif ((ord(event.char) >= 65 and ord(event.char) <= 90) or (ord(event.char) >= 97 and ord(event.char) <= 122)) and self.editMode:\r\n if self.swapKey == -1:\r\n for k in range(len(self.keyBinds)):\r\n if (str(event.char) == str(self.keyBinds[k][0])) and self.keyBinds[k][2] == 0:\r\n self.keyBinds[k][2] = 1\r\n self.swapChar = self.keyBinds[k][1]\r\n self.swapKey = k\r\n break\r\n else:\r\n for k in range(len(self.keyBinds)):\r\n if (str(event.char) == str(self.keyBinds[k][0])) and not self.keyBinds[k][2] == 1:\r\n self.undoList.append([self.keyBinds[self.swapKey][0], self.keyBinds[k][0]])\r\n self.keyBinds[self.swapKey][2] = 0\r\n self.keyBinds[self.swapKey][1] = self.keyBinds[k][1]\r\n self.keyBinds[k][1] = self.swapChar\r\n self.swapKey = -1\r\n break\r\n elif ord(event.char) == 8:\r\n self.currentString = self.currentString[:-1]\r\n self.insertChar(chr(8))\r\n elif ord(event.char) == 32:\r\n self.insertChar(chr(32))\r\n elif ord(event.char) == 96:\r\n self.enableEdit()\r\n elif ord(event.char) == 49 and self.textPred1 != \"---\":\r\n self.currentString = self.currentString + str(self.textPred1[len(self.word):len(self.textPred1)] + \" \")\r\n self.word = \"\"\r\n self.textPred1 = \"---\"\r\n self.textPreds = \"---\\n---\\n---\"\r\n elif ord(event.char) == 50 and self.textPred2 != \"---\":\r\n self.currentString = self.currentString + str(self.textPred2[len(self.word):len(self.textPred2)] + \" \")\r\n self.word = \"\"\r\n self.textPred2 = \"---\"\r\n self.textPreds = \"---\\n---\\n---\"\r\n elif ord(event.char) == 51 and self.textPred3 != \"---\":\r\n self.currentString = self.currentString + str(self.textPred3[len(self.word):len(self.textPred3)] + \" \")\r\n self.word = \"\"\r\n self.textPred3 = \"---\"\r\n self.textPreds = \"---\\n---\\n---\"\r\n elif ord(event.char) == 92:\r\n if len(self.undoList) > 0:\r\n swapNum1 = -1\r\n swapNum2 = -1\r\n swapTemp = ''\r\n undoTemp = self.undoList[-1]\r\n for i in range(len(self.keyBinds)):\r\n if undoTemp[0] == self.keyBinds[i][0]:\r\n swapNum1 = i\r\n break\r\n for j in range(len(self.keyBinds)):\r\n if undoTemp[1] == self.keyBinds[j][0]:\r\n swapNum2 = j\r\n break\r\n swapTemp = self.keyBinds[swapNum1][1]\r\n self.keyBinds[swapNum1][1] = self.keyBinds[swapNum2][1]\r\n self.keyBinds[swapNum2][1] = swapTemp\r\n self.undoList = self.undoList[:-1]\r\n print(self.undoList)\r\n elif ord(event.char) == 47:\r\n self.keyBinds = self.defaultKeys\r\n self.undoList = []\r\n self.render()\r\n\r\n def mouseClick(self, event):\r\n # print(\"x = %s, y = %s\" % (event.x, event.y))\r\n if event.x > 0 and event.x < 35 and event.y > 0 and event.y < 35:\r\n self.enableEdit()\r\n self.render()\r\n\r\n def render(self):\r\n posX = 0\r\n t = Text(root, width=98, height=25, bg=\"White\")\r\n t.place(x=5, y=5)\r\n t.insert(END, str(self.currentString))\r\n # t.insert(END, \"Dummy\")\r\n\r\n Label(relief=RAISED, width=2, height=2, bg=\"Grey\").place(x=5, y=413)\r\n for k in range(10):\r\n 
Label(text=str(self.keyBinds[k][1]).upper(), relief=RIDGE, width=5, height=2, font=(\"Comic Sans MS\", 10),\r\n bg=\"White\" if self.keyBinds[k][2] == 0 else \"Yellow\", fg=\"Black\").place(x=30 + posX, y=410)\r\n posX += 50\r\n Label(relief=RAISED, width=2, height=2, bg=\"Grey\").place(x=30+posX, y=413)\r\n\r\n posX = 0\r\n Label(relief=RAISED, width=5, height=2, bg=\"Grey\").place(x=5, y=463)\r\n for k in range(9):\r\n Label(text=str(self.keyBinds[k+10][1]).upper(), relief=RIDGE, width=5, height=2, font=(\"Comic Sans MS\", 10),\r\n bg=\"White\" if self.keyBinds[k+10][2] == 0 else \"Yellow\", fg=\"Black\").place(x=55 + posX, y=460)\r\n posX += 50\r\n Label(relief=RAISED, width=5, height=2, bg=\"Grey\").place(x=60+posX, y=463)\r\n\r\n posX = 0\r\n Label(relief=RAISED, width=8, height=2, bg=\"Grey\").place(x=5, y=513)\r\n for k in range(7):\r\n Label(text=str(self.keyBinds[k+19][1]).upper(), relief=RIDGE, width=5, height=2, font=(\"Comic Sans MS\", 10),\r\n bg=\"White\" if self.keyBinds[k+19][2] == 0 else \"Yellow\", fg=\"Black\").place(x=75 + posX, y=510)\r\n posX += 50\r\n Label(relief=RAISED, width=16, height=2, bg=\"Grey\").place(x=80+posX, y=513)\r\n\r\n Label(text=\"Edit\", relief=RAISED, width=4, height=2, bg=\"Green\" if self.editMode else \"Red\", fg=\"Black\").place(x=5, y=555)\r\n\r\n p1 = Text(root, width=14, height=3, font=(\"Comic Sans MS\", 18), bg=\"White\")\r\n p1.place(x=580, y=412)\r\n p1.insert(END, str(self.textPreds))\r\n # p1.insert(END, \"---\\n---\\n---\")\r\n\r\n\r\n\r\n\r\n\r\nkb = keyTable()\r\nroot.bind(\"<Key>\", kb.keyPress)\r\nroot.bind(\"<Button-1>\", kb.mouseClick)\r\nroot.mainloop()","sub_path":"NormalKey.py","file_name":"NormalKey.py","file_ext":"py","file_size_in_byte":9124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"257439987","text":"# Import Statements:\nfrom tkinter import *\n\n########################################################################################################################\n# Instantiating a Tk interface\n# Root is a standard widget, a window with a title bar stylized by Windows, only create one for each program and before\n# Anything else.\nroot = Tk()\n\n# Create a label in the root window. This label is also a widget that is a 'Child' to the 'Parent' root window\n# Label can display text or icons or images, here we use the text option to display text\n# The pack method makes the label visible and fits the text to the widget\nw = Label(root, text='Hello, world!')\nw.pack()\n\n# This loop creates the window that we have made and makes it visible. 
The loop will end when we close the pop-up window.\n# It handles everything such as user events, system events, and Tkinter events.\n# Some of these things include display updates and geometry management (such as the pack method)\nroot.mainloop()\n","sub_path":"Tkinter Test/Hello World.py","file_name":"Hello World.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"365446699","text":"from django.conf.urls import url\nfrom basic_app import views\n\napp_name = 'basic_app'\n\nurlpatterns = [\n url('register/', views.register_views, name = 'register'),\n url('user_login', views.user_login_views, name = 'user_login'),\n url('logout/', views.logout_views, name = 'logout')\n]\n","sub_path":"learning_users/basic_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"484355590","text":"#import pdb; pdb.set_trace()\nfrom Estado import Estado\nclass AFD:\n\tdef __init__(self,estados,transiciones,finales,alfabeto):\n\t\tself.estados=estados\n\t\tself.transiciones=transiciones\n\t\tself.finales=finales\n\t\tself.tokens = []\n\t\tself.alfabeto = alfabeto\n\nclass AFN:\n\t#Constructor de la clase.\n\tdef __init__(self, simbolo = None):\n\t\tif simbolo == None:\n\t\t\tself.estado_inicial = 0\n\t\t\tself.estados_aceptacion = []\n\t\t\tself.alfabeto = []\n\t\t\tself.estados = {}\n\t\telse:\n\t\t\tself.estado_inicial = 0\n\t\t\tself.estados_aceptacion = [1]\n\t\t\tself.alfabeto = [simbolo]\n\t\t\tself.estados = {}\n\t\t\tself.agregar_estado(0)\n\t\t\tself.agregar_estado(1)\n\t\t\tself.anadir_transicion(0,simbolo,1)\n\t#Agregar un simbolo al lenguaje.\n\tdef agregar_simbolo(self,simbolo):\n\t\tif simbolo in self.alfabeto:\n\t\t\tprint(\"El simbolo |\"+simbolo+\"| ya se encuentra en el alfabeto.\")\n\t\telse:\n\t\t\t#print(\"Insertando simbolo en el alfabeto.\")\n\t\t\tself.alfabeto.append(simbolo)\n\t\t\t#print(self.alfabeto)\n\t#Agregar un estado al lenguaje.\n\tdef agregar_estado(self,id):\n\t\tif self.estados.get(id) == None:\n\t\t\t#print(\"Ingresando un nuevo estado: \"+str(id))\n\t\t\tself.estados.setdefault(id,Estado(id))\n\t\telse:\n\t\t\tprint(\"El estado ya se encuentra en el conjunto\")\n\t#Agregar una transicion a un estado existente:\n\tdef anadir_transicion(self,id,simbolo,id_final):\n\t\tif self.estados.get(id) == None:\n\t\t\tprint(\"El estado no existe\")\n\t\telse:\n\t\t\tself.estados.get(id).anadir_transicion(simbolo,id_final)\n\t#Agregar un estado dd aceptacion a la lista:\n\tdef anadir_estado_aceptacion(self,simbolo):\n\t\tself.estados_aceptacion.append(simbolo)\n\t#Hacer una union entre dos AFNs.\n\tdef union(self,AFN2):\n\t\t#print(\"union\")\n\t\tfor elemento in AFN2.alfabeto:\n\t\t\tif elemento not in self.alfabeto:\n\t\t\t\tself.alfabeto.append(elemento)\n\t\tself.estado_inicial = -1\n\t\tself.agregar_estado(-1)\n\t\tself.anadir_transicion(-1,'ε',0)\n\t\tnumero_nodos_AFN1=len(list(self.estados))\n\t\t#Agregando la otra ruta de la union.\n\t\tself.anadir_transicion(self.estado_inicial,'ε',self.estado_inicial+numero_nodos_AFN1)\n\t\t#print(\"Generando una transicion del \"+str(self.estado_inicial)+\" e \"+str(self.estado_inicial+numero_nodos_AFN1))\n\t\t#Actualizar los numeros de los nodos.\n\t\tself.estados=self.recorrer_estados(self.estados,1)\n\t\tfor key in list(self.estados.keys()):\n\t\t\t#print(\"AFN1 
\"+str(key))\n\t\t\tself.estados.get(key).actualizar_transiciones(1)\n\t\t\t#print(self.estados.get(key).transiciones)\n\t\t#Actualizar los numeros de los nodos del segundo AFN.\n\t\tAFN2.estados=self.recorrer_estados(AFN2.estados,numero_nodos_AFN1)\n\t\tfor key in list(AFN2.estados.keys()):\n\t\t\t#print(\"AFN2 \"+str(key))\n\t\t\tAFN2.estados.get(key).actualizar_transiciones(numero_nodos_AFN1)\n\t\t\t#print(AFN2.estados.get(key).transiciones)\n\t\tnumero_nodos_total=len(list(self.estados))+len(list(AFN2.estados))\n\t\tself.anadir_transicion(numero_nodos_AFN1-1,'ε',numero_nodos_total)\n\t\tself.estados_aceptacion=[numero_nodos_total]\n\t\tself.agregar_estado(numero_nodos_total)\n\t\tAFN2.anadir_transicion(numero_nodos_total-1,'ε',numero_nodos_total)\n\t\tself.estado_inicial=0\n\t\tself.estados.update(AFN2.estados)\n\t#Hacer una concatenacion de AFNs.\n\tdef concatenacion(self,AFN2):\n\t\tfor elemento in AFN2.alfabeto:\n\t\t\tif elemento not in self.alfabeto:\n\t\t\t\tself.alfabeto.append(elemento)\n\t\tnumero_nodos_AFN1=len(list(self.estados))-1\n\t\t#Actualizar los numeros de los nodos del segundo AFN.\n\t\tAFN2.estados=self.recorrer_estados(AFN2.estados,numero_nodos_AFN1)\n\t\tfor key in list(AFN2.estados.keys()):\n\t\t\t#print(\"AFN2 \"+str(key))\n\t\t\tAFN2.estados.get(key).actualizar_transiciones(numero_nodos_AFN1)\n\t\t\t#print(AFN2.estados.get(key).transiciones)\n\t\tself.estados.update(AFN2.estados)\n\t\t#self.imprimir_transiciones()\n\t\tnumero_nodos_total=len(list(self.estados))-1\n\t\tself.estados_aceptacion=[numero_nodos_total]\n\t#Actualizar los id de los estados al agregar nodos antes.\n\tdef cerradura_positiva(self):\n\t\tself.estado_inicial = -1\n\t\tself.agregar_estado(-1)\n\t\tself.anadir_transicion(-1,'ε',0)\n\t\t#Recorrer los estados.\n\t\tself.estados=self.recorrer_estados(self.estados,1)\n\t\tfor key in list(self.estados.keys()):\n\t\t\tself.estados.get(key).actualizar_transiciones(1)\n\t\t#Agregar el ultimo nodo y las transiciones para la cerradura positiva.\n\t\tnumero_nodos_total=len(list(self.estados))\n\t\tself.anadir_transicion(numero_nodos_total-1,'ε',numero_nodos_total)\n\t\tself.estados_aceptacion=[numero_nodos_total]\n\t\tself.agregar_estado(numero_nodos_total)\n\t\tself.anadir_transicion(numero_nodos_total-1,'ε',1)\n\t#La cerradura de kleene se forma de una positiva mas una transicion epsilon.\n\tdef cerradura_kleene(self):\n\t\tself.cerradura_positiva()\n\t\tself.anadir_transicion(0,'ε',len(list(self.estados))-1)\n\t#No se como se llama esta operacion.\n\tdef interrogacion(self):\n\t\tself.estado_inicial = -1\n\t\tself.agregar_estado(-1)\n\t\tself.anadir_transicion(-1,'ε',0)\n\t\t#Recorrer los estados.\n\t\tself.estados=self.recorrer_estados(self.estados,1)\n\t\tfor key in list(self.estados.keys()):\n\t\t\tself.estados.get(key).actualizar_transiciones(1)\n\t\t#Agregar el ultimo nodo y las transiciones para la cerradura positiva.\n\t\tnumero_nodos_total=len(list(self.estados))\n\t\tself.anadir_transicion(numero_nodos_total-1,'ε',numero_nodos_total)\n\t\tself.estados_aceptacion=[numero_nodos_total]\n\t\tself.agregar_estado(numero_nodos_total)\n\t\tself.anadir_transicion(0,'ε',numero_nodos_total)\n\t#La funcion que reemplaza los numeros de estados viejos por los nuevos.\n\tdef recorrer_estados(self,estados,no_posiciones):\n\t\tnuevos_estados={}\n\t\tfor key in list(estados.keys()):\n\t\t\tnuevos_estados.setdefault(key+no_posiciones,estados.get(key))\n\t\treturn nuevos_estados\n\t#Funcion para imprimimir los conjuntos de transiciones.\n\tdef 
imprimir_transiciones(self):\n\t\tfor key in range(len(list(self.estados.keys()))):\n\t\t\tprint(\"FINAL \"+str(key))\n\t\t\tprint(self.estados.get(key).transiciones)\n\t#Funcion ir_a\n\tdef ir_a(self):\n\t\tconjunto_conjuntos=[]\n\t\tcerraduras_revisadas=[]\n\t\tconjuntos_por_revisar=[]\n\t\tconjuntos_por_revisar.append([0])\n\t\tconjuntos_transiciones=[]\n\t\twhile len(conjuntos_por_revisar)>0:\n\t\t\ts=[]\n\t\t\t#print(\"Conjuntos por revisar: \",end=\"\")\n\t\t\t#print(conjuntos_por_revisar)\n\t\t\t#print(\"Cerraduras revisadas:\",end=\"\")\n\t\t\t#print(cerraduras_revisadas)\n\t\t\te=conjuntos_por_revisar.pop()\n\t\t\t#print(\"e:\")\n\t\t\t#print(e)\n\t\t\t#print(\"Cerradura epsilon\")\n\t\t\tfor var in e:\n\t\t\t\ts=list(set(s)|set(self.cerradura_e(var)))\n\t\t\tif s not in conjunto_conjuntos:\n\t\t\t\tconjunto_conjuntos.append(s)\n\t\t\t\tcerraduras_revisadas.append(e)\n\t\t\t#print(\"s:\")\n\t\t\t#print(s)\n\t\t\tfor simbolo in self.alfabeto:\n\t\t\t\tm=self.mover(s,simbolo)\n\t\t\t\tif m in cerraduras_revisadas or len(m) == 0:\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tconjuntos_por_revisar.append(m)\n\t\t\t\t\t#print(\"\\tm:\")\n\t\t\t\t\t#print(\"\\t\"+str(m))\n\t\t\t\tif [e,simbolo,m] not in conjuntos_transiciones:\n\t\t\t\t\tconjuntos_transiciones.append([e,simbolo,m])\n\t\tlista_finales=[]\n\n\t\t#Asignar los tokens a los nuevos estados del AFN:\n\t\tfor lista in conjunto_conjuntos:\n\t\t\tlista_finales.append(0)\n\t\t\ttoken=10\n\t\t\tfor final in self.estados_aceptacion:\n\t\t\t\tif final in lista:\n\t\t\t\t\tlista_finales.pop()\n\t\t\t\t\tlista_finales.append(token)\n\t\t\t\ttoken+=10\n\t\t#print(lista_finales)\n\t\treturn(self.crear_AFD(cerraduras_revisadas,conjuntos_transiciones,lista_finales,self.alfabeto))\n\t#Crear un nuevo AFD\n\tdef crear_AFD(self,cerraduras,transiciones,finales,alfabeto):\n\t\t#print(\"CREANDO AFD\")\n\t########################################################\n\t\t#Cambiando los indices de los conjuntos:\n\t\tlista_temp=[]\n\t\tindice_temp=0\n\t\ta=0\n\t\tb=0\n\t\tfor x in range(len(finales)):\n\t\t\tfor posicion in range(len(finales)-1):\n\t\t\t\tif cerraduras[posicion]>cerraduras[posicion+1]:\n\t\t\t\t\ta=cerraduras[posicion+1]\n\t\t\t\t\tcerraduras[posicion+1]=cerraduras[posicion]\n\t\t\t\t\tcerraduras[posicion]=a\n\n\t\t\t\t\tb=finales[posicion+1]\n\t\t\t\t\tfinales[posicion+1]=finales[posicion]\n\t\t\t\t\tfinales[posicion]=b\n\t\tfor lista in cerraduras:\n\t\t\tlista_temp.append(indice_temp)\n\t\t\tfor transicion in transiciones:\n\t\t\t\tif lista in transicion:\n\t\t\t\t\tif lista == transicion[0]:\n\t\t\t\t\t\ttransicion.pop(0)\n\t\t\t\t\t\ttransicion.insert(0,indice_temp)\n\t\t\t\t\tif lista == transicion[2]:\n\t\t\t\t\t\ttransicion.pop(2)\n\t\t\t\t\t\ttransicion.insert(2,indice_temp)\n\t\t\tindice_temp-=1\n\t\tfor lista in transiciones:\n\t\t\tlista[0]*=-1\n\t\t\tlista[2]*=-1\n\t\tdic_AFD={}\n\t\tfor lista in transiciones:\n\t\t\tif not isinstance(lista[2],list):\n\t\t\t\tif dic_AFD.get(lista[0]) == None:\n\t\t\t\t\tdic_AFD.setdefault(lista[0],[[lista[1],lista[2]]])\n\t\t\t\telse:\n\t\t\t\t\tconjunto_ids = dic_AFD.get(lista[0])\n\t\t\t\t\tconjunto_ids.append([lista[1],lista[2]])\n\t\tcerraduras=[]\n\t\tfor elemento in lista_temp:\n\t\t\tcerraduras.append(elemento*-1)\n\t\tnuevo_AFD=AFD(cerraduras,dic_AFD,finales,self.alfabeto)\n\n\n\t\treturn nuevo_AFD\n\t#Funcion mover\n\tdef mover(self,conjunto_epsilon,simbolo):\n\t\tlista_resultado=[]\n\t\tfor var in 
conjunto_epsilon:\n\t\t\tif(self.estados.get(var).transiciones.get(simbolo)!=None):\n\t\t\t\tfor elemento in self.estados.get(var).transiciones.get(simbolo):\n\t\t\t\t\tlista_resultado.append(elemento)\n\t\treturn lista_resultado\n\t#Funcion cerradura epsilon\n\tdef cerradura_e(self,var):\n\t\tlista_temp=[]\n\t\tlista_temp.append(var)\n\t\tnueva_lista=[]\n\t\tnueva_lista2=[]\n\t\twhile len(lista_temp) != 0:\n\t\t\tvar=lista_temp.pop()\n\t\t\tif var not in nueva_lista:\n\t\t\t\tnueva_lista.append(var)\n\t\t\t\tif(self.estados.get(var).transiciones.get('ε')!=None):\n\t\t\t\t\tnueva_lista2=self.estados.get(var).transiciones.get('ε')\n\t\t\t\t\tfor elemento in nueva_lista2:\n\t\t\t\t\t\tlista_temp.append(elemento)\n\n\t\treturn nueva_lista\n\t#Recorrer los numeros de los estados finales.\n\tdef recorrer_finales(self,posiciones):\n\t\tfor posicion in range(len(self.estados_aceptacion)):\n\t\t\t#print(\"Cambiando el estado final de \"+str(elemento)+\" a \"+str(elemento+posiciones))\n\t\t\tself.estados_aceptacion[posicion]+=posiciones\n\t#Unir varios AFN a un solo inicio:\n\tdef union_especial(self,lista_AFN):\n\t\tfor AFNx in lista_AFN:\n\t\t\tfor simbolo in AFNx.alfabeto:\n\t\t\t\tif simbolo not in self.alfabeto:\n\t\t\t\t\tself.alfabeto.append(simbolo)\n\t\tposicion=0\n\t\tself.agregar_estado(-1)\n\t\tself.anadir_transicion(-1,'ε',0)\n\t\tself.estados=self.recorrer_estados(self.estados,1)\n\t\tself.recorrer_finales(1)\n\t\tfor key in list(self.estados.keys()):\n\t\t\tself.estados.get(key).actualizar_transiciones(1)\n\t\tposicion+=len(list(self.estados))\n\t\t#print(\"Recorriendo \"+str(posicion))\n\t\tfor AFN in lista_AFN:\n\t\t\tself.anadir_transicion(0,'ε',posicion)\n\t\t\tAFN.estados=AFN.recorrer_estados(AFN.estados,posicion)\n\t\t\tAFN.recorrer_finales(posicion)\n\t\t\tfor key in list(AFN.estados.keys()):\n\t\t\t\tAFN.estados.get(key).actualizar_transiciones(posicion)\n\t\t\tposicion+=len(list(AFN.estados))\n\n\t\tfor AFN in lista_AFN:\n\t\t\tfor estado in AFN.estados_aceptacion:\n\t\t\t\tself.estados_aceptacion.append(estado)\n\t\t\tself.estados.update(AFN.estados)\n","sub_path":"AFN.py","file_name":"AFN.py","file_ext":"py","file_size_in_byte":10527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"114644551","text":"class AdvancedArithmetic(object):\n def divisorSum(self, n):\n raise NotImplementedError\n\nclass Calculator(AdvancedArithmetic):\n def divisorSum(self, n):\n sum = 0\n for i in range(1, n+1):\n if n % i == 0:\n sum = sum +i\n return sum \n\ncal = Calculator()\nprint(cal.divisorSum(6))\nprint(\"I implemented: \" + type(cal).__bases__[0].__name__)\n\n\n# class A:\n# def __init__(self):\n# print('A')\n\n\n# class B(A):\n# def __init__(self):\n# super().__init__()\n# print('B')\n\n\n# class C(A):\n# def __init__(self):\n# super().__init__()\n# print('C')\n\n\n# class D(B, C):\n# def __init__(self):\n# super().__init__()\n# print('D')\n\n\n# d = D()\n# print(type(d).__bases__[0].__name__)\n# print(type(d).__mro__)","sub_path":"divisor_sum.py","file_name":"divisor_sum.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"149245919","text":"import boto.ec2, sys, time\n\nrunningInstance = []\n\ndef getConnectionInstance(instanceRegion): \n try: \n conn = boto.ec2.connect_to_region(instanceRegion,aws_access_key_id='',aws_secret_access_key='')\n return conn\n except:\n return None\n\ndef startInstance(noOfInstance,instanceRegion):\n aws = 
getConnectionInstance(instanceRegion)\n if not aws is None: \n global runningInstance \n try:\n print ('Initializing '+str(noOfInstance)+' EC2 instance(s) in '+instanceRegion+' region ...')\n for i in range(noOfInstance):\n reservation = aws.run_instances('ami-9ff7e8af',instance_type = 't2.micro')\n runningInstance.append(reservation.instances[0])\n print ('Initialized '+str(runningInstance[-1])+' State: '+runningInstance[-1].state)\n except:\n print (sys.exc_info()[1])\n while True:\n try: \n task = int(input('Enter:\\n1 to terminate all instance(s)\\n2 to terminate a specific instance only\\n0 to exit\\n')) \n if task == 0:\n sys.exit(0)\n elif task == 1:\n stopInstance(noOfInstance,'us-west-2') \n elif task == 2:\n terminationId = input('Enter instacnce ID: ')\n stopInstance(noOfInstance,'us-west-2',terminationId)\n time.sleep(2)\n else:\n raise ValueError\n except ValueError as e:\n print ('Invalid value provided')\n continue \n except:\n print (sys.exc_info()[1])\n break \n else:\n print ('Error establishing connection to AWS')\n sys.exit(0)\n \ndef stopInstance(noOfInstance,instanceRegion,*args):\n global runningInstance \n if len(args) == 0:\n try:\n print ('Initializing termination sequence for running instance(s) ...')\n for instance in runningInstance:\n print ('Terminating '+str(instance))\n instance.terminate() \n time.sleep(2)\n except:\n print (sys.exc_info()[1]) \n else:\n try: \n x = 0\n aws = getConnectionInstance(instanceRegion)\n for arg in args:\n if arg == 'stop':\n reservations = aws.get_all_reservations() \n for reservation in reservations:\n if reservation.instances[0].state == 'running' or reservation.instances[0].state == 'pending':\n try:\n print ('Terminating '+str(reservation.instances[0]))\n reservation.instances[0].terminate()\n x += 1\n except:\n print (sys.exc_info()[1]) \n if x == 0:\n print ('Not enough instances available')\n else: \n print ('Initiating termination sequence for Instance:'+str(arg))\n aws.terminate_instances(instance_ids=[arg]) \n print ('Instance terminated')\n except:\n print (sys.exc_info()[1])\n\n\n\n\n\nnoOfInstance = int(sys.argv[1])\ninstanceRegion = sys.argv[2]\nactivity = sys.argv[3]\n\nif activity == \"start\":\n startInstance(noOfInstance, instanceRegion)\nelif activity == \"stop\":\n stopInstance(noOfInstance,instanceRegion,activity)","sub_path":"launch_instance.py","file_name":"launch_instance.py","file_ext":"py","file_size_in_byte":3675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"449793491","text":"#!/usr/bin/env python3\n\n\"\"\"\n peer.py - A Peer acts as a Server and a Client\n Author:\n - Nidesh Chitrakar (nideshchitrakar@bennington.edu)\n - Hoanh An (hoanhan@bennington.edu)\n Date: 11/15/2017\n\"\"\"\n\nimport zerorpc\n\nfrom chord_instance import ChordInstance\nfrom const import *\nfrom utils import *\nfrom threading import Thread\n\nclass Server(Thread):\n def __init__(self):\n Thread.__init__(self)\n\n def run(self):\n try:\n print(\"SERVER_THREAD IS RUNNING\")\n s = zerorpc.Server(my_chord_instance)\n s.bind(\"tcp://{0}:{1}\".format(my_IP, default_port))\n s.run()\n except KeyboardInterrupt:\n print(\"Exit using KeyboardInterrupt\")\n\nclass Client(Thread):\n def __init__(self):\n Thread.__init__(self)\n\n def run(self):\n try:\n print(\"CLIENT_THREAD IS RUNNING\")\n your_IP = input(\"Enter IP to join: \")\n c = zerorpc.Client()\n c.connect(\"tcp://{0}:{1}\".format(your_IP, default_port))\n\n if c.is_alive():\n instance_list = 
deserialize(c.get_instance_list())\n\n # append to a local list and join the first instance (can be anyone in the list)\n instance_list.append(my_chord_instance)\n my_chord_instance.join(instance_list[0])\n\n # update the instance list locally\n my_chord_instance.set_instance_list(serialize(instance_list))\n\n # update other instance list using RPC as well\n for instance in my_chord_instance.instance_list:\n if instance.IP_ADDRESS != my_IP:\n temp_client = zerorpc.Client()\n temp_client.connect(\"tcp://{0}:{1}\".format(instance.IP_ADDRESS, default_port))\n temp_client.set_instance_list(serialize(instance_list))\n\n except KeyboardInterrupt:\n print(\"Exit using KeyboardInterrupt\")\n\nif __name__ == '__main__':\n my_IP = get_my_IP()\n\n # temporary instance list, use to startup a chord instance\n # real instance list is an attribute in chord instance\n instance_list = []\n\n my_chord_instance = ChordInstance(my_IP, default_port)\n instance_list.append(my_chord_instance)\n\n server = Server()\n client = Client()\n\n server.start()\n client.start()\n","sub_path":"ChordServer (alt.)/peer.py","file_name":"peer.py","file_ext":"py","file_size_in_byte":2358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"294857181","text":"# Copyright 2014 MongoDB, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport logging\n\nlogger = logging.getLogger('giza.content.options.tasks')\n\nfrom giza.tools.files import expand_tree, verbose_remove, safe_create_directory\nfrom giza.tools.strings import hyph_concat\nfrom giza.content.options.inheritance import OptionDataCache\nfrom giza.content.options.views import render_options\nfrom giza.config.content import new_content_type\nfrom giza.core.task import Task\n\ndef register_options(conf):\n conf.system.content.add(name='options', definition=new_content_type(name='option', task_generator=option_tasks, conf=conf))\n\ndef write_options(option, fn, conf):\n content = render_options(option, conf)\n content.write(fn)\n logger.info('wrote options file: ' + fn)\n\ndef option_tasks(conf):\n o = OptionDataCache(conf.system.content.options.sources, conf)\n o.create_output_dir()\n\n tasks = []\n for dep_fn, option in o.content_iter():\n output_fn = os.path.join(conf.system.content.options.fn_prefix,\n hyph_concat(option.directive, option.program, option.name) + '.rst')\n\n t = Task(job=write_options,\n args=(option, output_fn, conf),\n description='generating option file \"{0}\" from \"{1}\"'.format(output_fn, dep_fn),\n target=output_fn,\n dependency=[dep_fn])\n tasks.append(t)\n\n logger.info(\"added tasks for {0} option generation tasks\".format(len(tasks)))\n return tasks\n\ndef option_clean(conf, app):\n register_options(conf)\n\n for fn in conf.system.options.sources:\n task = app.add('task')\n task.job = verbose_remove\n task.args = [fn]\n task.description = 'removing 
{0}'.format(fn)\n","sub_path":"giza/giza/content/options/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"503701948","text":"from random import randint\r\nszyfr=['a','ą','b','c','ć','d','e','ę','f','g','h','i','j','k','l','ł','m','n','ń','o','ó','p','q','r','s','ś','t','u','v','w','x','y','z','ż','ź']\r\nprint('1. Zaszyfruj')\r\nprint('2. Odszyfruj')\r\nz=int(input())\r\ne=''\r\nrand=randint(1,9)\r\nif z==1:\r\n b=input('Wprowadź wyraz do zaszyfrowania ')\r\n b=b.lower()\r\n c=list(b)\r\n for d in range(0,len(c)):\r\n temp=szyfr.index(c[d])\r\n if temp>=34-rand:\r\n temp-=35\r\n e=e+szyfr[temp+rand]\r\n print(str(rand)+str(e))\r\nelif z==2:\r\n b=input('Wprowadź wyraz do odszyfrowania ')\r\n b=b.lower()\r\n c=list(b)\r\n for d in range(1,len(c)):\r\n temp=szyfr.index(c[d])\r\n e=e+szyfr[temp-int(c[0])]\r\n print(str(e))\r\n","sub_path":"04.py","file_name":"04.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"558824539","text":"# encoding=utf-8\n\nimport game\nimport os\nimport pygame\nimport traceback\nimport numpy \nfrom numpy.fft import fft \nfrom math import log10 \nimport math\nfrom random import randint\nimport copy\nimport game.globals as globals\nimport mutagen.oggvorbis\n\n\nclass Radio(game.Entity):\n def __init__(self):\n super(Radio, self).__init__((globals.WIDTH, globals.HEIGHT))\n # set up the mixer\n \n try: \n pygame.mixer.quit()\n except: \n pass\n \n freq = 44100\t # audio CD quality\n bitsize = -16\t# unsigned 16 bit\n channels = 2\t # 1 is mono, 2 is stereo\n buffer = 2048\t# number of samples (experiment to get right sound)\n if config.SOUND_ENABLED:\n pygame.mixer.init(freq, bitsize, channels, buffer)\n self.osc = Oscilloscope() \n self.osc.open(self)\n self.paused = True\n self.loaded = False\n self.spectrum = None \n self.filename = \"\"\n \n def play_rnd(self):\n if config.SOUND_ENABLED:\n files = load_files()\n file = files[randint(0,len(files)-1)]\n self.filename = file\n pygame.mixer.music.load(file)\n self.spectrum = LogSpectrum(file,force_mono=True) \n pygame.mixer.music.play()\n self.loaded = True\n self.paused = False\n \n def play(self):\n if self.loaded and config.SOUND_ENABLED:\n self.paused = False\n pygame.mixer.music.unpause()\n else:\n self.play_rnd()\n \n def stop(self):\n self.paused = True\n pygame.mixer.music.pause()\n\n def update(self, *args, **kwargs):\n super(Radio, self).update(*args, **kwargs)\n\n def render(self, *args, **kwargs):\n if not self.paused :\n f,p = None,[0 for i in range(21)]\n start = pygame.mixer.music.get_pos() / 1000.0\n try:\n f,p = self.spectrum.get_mono(start-0.001, start+0.001)\n except:\n pass\n self.osc.update(start*50,f,p)\t\n if self.osc:\n self.blit(self.osc.screen, (550, 150))\n \n metadata = mutagen.File(filename, easy = True)\n \n selectFont = pygame.font.Font('monofonto.ttf', 24)\n basicFont = pygame.font.Font('monofonto.ttf', 22)\n \n text = selectFont.render(game.Entity.name, True, (105, 251, 187), (0, 0, 0))\n \n #text = selectFont.render(\" - Random Play Radio \", True, (105, 251, 187), (0, 0, 0))\n \n self.blit(text, (75, 75))\n text = basicFont.render(\" 'r' selects a random song \", True, (105, 251, 187), (0, 0, 0))\n self.blit(text, (75, 100))\n text = basicFont.render(\" 'p' to play 's' to stop \", True, (105, 251, 187), (0, 0, 0))\n self.blit(text, (75, 120))\n \n if 
self.filename:\n text = selectFont.render(\" %s \" % metadata[\"ARTIST\"] + ' - ' + metadata[\"TITLE\"], True, (105, 251, 187), (0, 0, 0))\n \n #text = selectFont.render(u\" %s \" % self.filename[self.filename.rfind(os.sep)+1:], True, (105, 251, 187), (0, 0, 0))\n self.blit(text, (75, 200))\n \n super(Radio, self).update(*args, **kwargs)\n\nclass Oscilloscope:\n \n def __init__(self): \n # Constants\n self.WIDTH, self.HEIGHT = 210, 200\n self.TRACE, self.AFTER, self.GREY = (80, 255, 100),(20, 155, 40),(20, 110, 30)\n self.embedded = False\n \n def open(self, screen=None):\n # Open window\n pygame.init()\n if screen:\n '''Embedded'''\n self.screen = pygame.Surface((self.WIDTH, self.HEIGHT), 0)\n self.embedded = True\n else:\n '''Own Display'''\n self.screen = pygame.display.set_mode((self.WIDTH, self.HEIGHT), 0)\n \n # Create a blank chart with vertical ticks, etc\n self.blank = numpy.zeros((self.WIDTH, self.HEIGHT, 3))\n # Draw x-axis\n self.xaxis = self.HEIGHT/2\n self.blank[::, self.xaxis] = self.GREY\n self.blank[::, self.HEIGHT - 2] = self.TRACE\n self.blank[::, self.HEIGHT - 1] = self.TRACE\n self.blank[::50, self.HEIGHT - 4] = self.TRACE\n self.blank[::50, self.HEIGHT - 3] = self.TRACE\n self.blank[self.WIDTH - 2, ::] = self.TRACE\n self.blank[self.WIDTH - 1, ::] = self.TRACE\n self.blank[self.WIDTH - 3, ::40] = self.TRACE\n self.blank[self.WIDTH - 4, ::40] = self.TRACE\n \n # Draw vertical ticks\n vticks = [-80, -40, +40, +80]\n for vtick in vticks: self.blank[::5, self.xaxis + vtick] = self.GREY # Horizontals\n for vtick in vticks: self.blank[::50, ::5] = self.GREY\t\t\t # Verticals\n \n # Draw the 'blank' screen.\n pygame.surfarray.blit_array(self.screen, self.blank)\t # Blit the screen buffer\n pygame.display.flip()\t\t\t\t\t\t\t\t\t # Flip the double buffer\n \n \n def update(self,time,frequency,power):\n try:\n pixels = copy.copy(self.blank)\n offset = 1\n for x in range(self.WIDTH):\n offset = offset - 1\n if offset < -1:\n offset = offset + 1.1\t\t \n try:\n pow = power[int(x/10)]\n log = math.log10( pow )\n offset = ((pow / math.pow(10, math.floor(log))) + log)*1.8\n except:\n pass\n try: \n y = float(self.xaxis) - (math.sin((float(x)+float(time))/5.0)*2.0*offset) \n pixels[x][y] = self.TRACE\n pixels[x][y-1] = self.AFTER\n pixels[x][y+1] = self.AFTER\n if abs(y) > 120:\n pixels[x][y-2] = self.AFTER\n pixels[x][y+2] = self.AFTER\n except: \n pass\n pygame.surfarray.blit_array(self.screen, pixels)\t # Blit the screen buffer\n if not self.embedded:\n pygame.display.flip() \n except Exception:\n _, err, _ = sys.exc_info()\n print(traceback.format_exc())\n\ndef play_pygame(file):\n if config.SOUND_ENABLED:\n clock = pygame.time.Clock()\n # set up the mixer\n freq = 44100\t # audio CD quality\n bitsize = -16\t# unsigned 16 bit\n channels = 2\t # 1 is mono, 2 is stereo\n buffer = 2048\t# number of samples (experiment to get right sound)\n if config.SOUND_ENABLED:\n pygame.mixer.init(freq, bitsize, channels, buffer)\n \n while not pygame.mixer.get_init():\n clock.tick(50)\n \n pygame.mixer.music.load(file)\n s = LogSpectrum(file,force_mono=True) \n osc = Oscilloscope() \n osc.open()\n \n f = None\n p = None\n running = True\n paused = False\n pygame.mixer.music.play()\n \n while pygame.mixer.music.get_busy() and running : \n if not paused:\n start = pygame.mixer.music.get_pos() / 1000.0\n try:\n f,p = s.get_mono(start-0.001, start+0.001)\n except:\n pass\n osc.update(start*50,f,p)\t\t\t \n pygame.time.wait(50)\n \n for event in pygame.event.get():\n if (event.type == pygame.KEYUP) 
or (event.type == pygame.KEYDOWN):\n if (event.key == pygame.K_UP):\n pygame.mixer.music.pause()\n paused = True\n elif (event.key == pygame.K_DOWN):\n pygame.mixer.music.unpause()\n paused = False\n elif event.type == pygame.QUIT:\n running = False\n pygame.mixer.quit()\n \nif __name__ == \"__main__\":\n try:\n files = load_files()\n if files and config.SOUND_ENABLED:\n play_pygame(files[randint(0,len(files)-1)])\n except Exception:\n _, err, _ = sys.exc_info()\n print(traceback.format_exc())\n","sub_path":"game/radio.py","file_name":"radio.py","file_ext":"py","file_size_in_byte":8136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"157963265","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport codecs\n\n\nclass Phone2idx(object):\n \"\"\"Convert from phone to index.\n Args:\n vocab_file_path (string): path to the vocabulary file\n remove_list (list): phones to neglect\n \"\"\"\n\n def __init__(self, vocab_file_path, remove_list=[]):\n # Load the vocabulary file\n self.map_dict = {}\n vocab_count = 0\n with codecs.open(vocab_file_path, 'r', 'utf-8') as f:\n for line in f:\n p = line.strip()\n if p in remove_list:\n continue\n self.map_dict[p] = vocab_count\n vocab_count += 1\n\n # Add \n self.map_dict['>'] = vocab_count\n\n def __call__(self, str_phone):\n \"\"\"\n Args:\n str_phone (string): string of space-divided phones\n Returns:\n indices (np.ndarray): phone indices\n \"\"\"\n # Convert phone strings into the corresponding indices\n phone_list = str_phone.split(' ')\n indices = list(map(lambda x: self.map_dict[x], phone_list))\n\n return np.array(indices)\n\n\nclass Idx2phone(object):\n \"\"\"Convert from index to phone.\n Args:\n vocab_file_path (string): path to the vocabulary file\n remove_list (list): phones to neglect\n \"\"\"\n\n def __init__(self, vocab_file_path, remove_list=[]):\n # Load the vocabulary file\n self.map_dict = {}\n vocab_count = 0\n with codecs.open(vocab_file_path, 'r', 'utf-8') as f:\n for line in f:\n p = line.strip()\n if p in remove_list:\n continue\n self.map_dict[vocab_count] = p\n vocab_count += 1\n\n # Add \n self.map_dict[vocab_count] = '>'\n\n def __call__(self, indices):\n \"\"\"\n Args:\n indices (list): phone indices\n Returns:\n str_phone (string): a sequence of phones\n \"\"\"\n # Convert phone indices to the corresponding strings\n phone_list = list(map(lambda x: self.map_dict[x], indices))\n str_phone = ' '.join(phone_list)\n\n return str_phone\n","sub_path":"utils/io/labels/phone.py","file_name":"phone.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"62862770","text":"#!/usr/bin/env python\n#\n# Copyright (c) 2017 10X Genomics, Inc. 
All rights reserved.\n# \n# This is a no op module that has the same interface as the normalization module\n# except that it does nothing\n#\n\nimport longranger.cnv.coverage_matrix as coverage_matrix\nimport pandas as pd\n\n__MRO__ = \"\"\"\nstage DO_NOTHING_ABOUT_GC_BIAS(\n in h5 raw_singlecell_profiles,\n in h5 tracks,\n in string reference_path,\n in float linear,\n in float quadratic,\n # \n out h5 normalized_singlecell_profiles, \n #\n src py \"stages/copy_number_processor/normalize_gc_bias\",\n) \n\"\"\"\n\ndef main(args, outs):\n raw_profiles, mask = coverage_matrix.load_matrix(\n args.raw_singlecell_profiles, args.reference_path)\n chromosomes = coverage_matrix.list_primary_contigs(\n args.raw_singlecell_profiles, args.reference_path)\n print(chromosomes)\n\n bin_size = coverage_matrix.get_bin_size(args.raw_singlecell_profiles)\n tracks = pd.HDFStore(args.tracks, 'r')\n coverage_matrix.store_matrix(\n file_name=outs.normalized_singlecell_profiles,\n chroms=chromosomes,\n profiles=raw_profiles,\n tracks=tracks,\n window_size=bin_size)\n tracks.close()\n\n","sub_path":"mro/stages/copy_number_processor/do_nothing_about_gc_bias/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"244963746","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport http.cookiejar\nfrom lxml import etree\nfrom bs4 import BeautifulSoup\nfrom urllib.request import urlopen\nimport urllib.request, urllib.parse, urllib.error\n\n\n# 1、网站登录操作\nlogin_url = 'https://www.amazon.co.jp/ap/signin?_encoding=UTF8&openid.assoc_handle=jpflex&openid.claimed_id=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0%2Fidentifier_select&openid.identity=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0%2Fidentifier_select&openid.mode=checkid_setup&openid.ns=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0&openid.ns.pape=http%3A%2F%2Fspecs.openid.net%2Fextensions%2Fpape%2F1.0&openid.pape.max_auth_age=0&openid.return_to=https%3A%2F%2Fwww.amazon.co.jp%2F%3Fref_%3Dnav_ya_signin'\nvalues = {'email': '44059346@qq.com', 'password': '1qaz2wsx@19', 'submit': 'Login'}\npostdata = urllib.parse.urlencode(values).encode()\nuser_agent = r'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'\nheaders = {'User-Agent': user_agent, 'Connection': 'keep-alive'}\n\ncookie_filename = 'cookie.txt'\ncookie = http.cookiejar.MozillaCookieJar(cookie_filename)\nhandler = urllib.request.HTTPCookieProcessor(cookie)\nopener = urllib.request.build_opener(handler)\nrequest = urllib.request.Request(login_url, postdata, headers)\n\ntry:\n response = opener.open(request)\n page = response.read().decode()\nexcept urllib.error.URLError as e:\n print(e.code, ':', e.reason)\n \ncookie.save(ignore_discard = True, ignore_expires = True)\n\nprint(\"Cookie info: \")\nfor item in cookie:\n print('Name = ' + item.name)\n print('Value = ' + item.value)\n\n\n# 2、数据获取操作\nget_url = \"https://www.amazon.co.jp/%E5%85%AB%E6%B5%B7%E5%B1%B1-%E5%85%AB%E6%B5%B7%E5%B1%B1-%E7%B4%94%E7%B1%B3%E5%90%9F%E9%86%B8-%EF%BC%88%E6%96%B0%E6%BD%9F%EF%BC%89-1-8L-1%E6%9C%AC/dp/B002LA0NUO/ref=sr_1_1?s=food-beverage&ie=UTF8&qid=1480987112&sr=1-1\"\nget_request = urllib.request.Request(get_url, headers=headers)\nget_response = opener.open(get_request)\n\nbsobj = BeautifulSoup(get_response.read().decode(), \"html.parser\")\n\nprint(\"\\nProduct info: \")\nfor el in bsobj.find_all(\"span\", {\"id\": \"productTitle\"}):\n 
print(el.get_text())\n\n# id=\"brand\" 产品品牌\nfor el in bsobj.find_all(\"a\",{\"id\":\"brand\"}):\n print(el.get_text())\n# id=\"acrCustomerReviewText\" 评论数量\nfor el in bsobj.find_all(\"span\",{\"id\":\"acrCustomerReviewText\"}):\n print(el.get_text())\n# id=\"id=\"priceblock_ourprice\"\" 售价 \nfor el in bsobj.find_all(\"span\",{\"id\":{\"priceblock_ourprice\",\"priceb\"}}):\n print(el.get_text())\n\n#id=\"price-shipping-message\" 配送费\nfor el in bsobj.find_all(\"span\",{\"id\":{\"price-shipping-message\"}}):\n print(el.get_text())\n\n # id=\"wayfinding-breadcrumbs_feature_div\" 进入路径\nfor el in bsobj.find_all(\"a\",{\"class\":\"a-link-normal a-color-tertiary\"}):\n print(el.get_text())\n# id=\"productTitle\" 产品标题\n\n\n# id=\"price-shipping-message\" 送货运费\nfor el in bsobj.find_all(\"span\",{\"id\":\"price-shipping-message\"}):\n print(el.get_text())\n \n# class=\"a-size-medium a-color-success\" span 剩余多少\n\nfor el in bsobj.find_all(\"span\",{\"class\":\"a-size-medium a-color-success\"}):\n print(el.get_text())\n\n# id=\"ddmDeliveryMessage\" div 是否直接送达该邮政号码\nfor el in bsobj.find_all(\"div\",{\"id\":\"ddmDeliveryMessage\"}):\n print(el.get_text())\n\n# id=\"merchant-info\" div 快递单位\nfor el in bsobj.find_all(\"div\",{\"id\":\"merchant-info\"}):\n print(el.get_text())\n","sub_path":"Amazon/AmazonJPS.py","file_name":"AmazonJPS.py","file_ext":"py","file_size_in_byte":3454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"110081379","text":"### OSX\n\n### Models [0 to N]\n\n\nimport bpy\n\n\n\nfor i in range(106):\n# for i in range(30):\n\n\n number = \"%03d\"%(i)\n tmp_name = \"obj_\" + number\n\n path = \"/Users/iMac-2008/Documents/3D_Morphing_Models_MetaBall/Text_Pipe_DANCING/FBX_Plane/\" + number + \".fbx\"\n print(path)\n\n\n\n ### Import FBX\n bpy.ops.import_scene.fbx(filepath = path)\n\n\n ### get Scene\n scene = bpy.context.selected_objects[0]\n scene.name = \"scene\"\n scene.scale = (1, 1 ,1)\n scene.rotation_euler = (0,0,0)\n\n obj = bpy.context.selected_objects[1]\n obj.name = tmp_name\n obj.scale = (1, 2.5 ,1)\n obj.rotation_euler = (0, 0, 0)\n\n\n ### Get material\n mat_tmp = bpy.data.materials.get(\"NormalVec\")\n\n ### Assign it to object\n if obj.data.materials:\n # assign to 1st material slot\n obj.data.materials[0] = mat_tmp\n else:\n # no slots\n obj.data.materials.append(mat_tmp)\n\n\n ### Modifier\n bpy.context.scene.objects.active = obj\n\n\n ##### SUBSURF\n # bpy.ops.object.modifier_add(type='SUBSURF')\n # bpy.data.objects[tmp_name].modifiers[\"Subsurf\"].render_levels = 2\n #bpy.ops.object.modifier_apply()\n\n\n\n ### KeyFrame Assign\n obj.location = (0, 0, 20)\n obj.keyframe_insert(data_path = \"location\", frame=i)\n obj.location = (0, 0, 0)\n obj.keyframe_insert(data_path = \"location\", frame=i+1)\n obj.location = (0, 0, 20)\n obj.keyframe_insert(data_path = \"location\", frame=i+2)","sub_path":"Text_Pipe_DANCING/import_plane.py","file_name":"import_plane.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"149668430","text":"# Imports\n\nfrom webpage import Webpage\n\nimport html\n\n# Constants\n# Functions\n# Classes\n\nclass WebpageToSelectWifi(object):\n def __init__(self, action_to_rescan = None, action_to_skip_step = None):\n self.__wifis = []\n self.__connected_services = []\n self.__action_to_rescan = action_to_rescan\n self.__action_to_skip_step = action_to_skip_step\n\n def append_wifi_with_name(self,\n name,\n 
action_when_selected,\n action_params = []):\n link = html.link_with_name(name,\n action = action_when_selected,\n params = action_params)\n\n self.__wifis.append(link)\n\n def append_connected_service_with_name(self, name):\n self.__connected_services.append(name)\n\n def render(self):\n body = ''\n\n if self.__wifis:\n body += html.paragraph_with_text('Select WIFI:')\n body += html.ordered_list_with_elements(self.__wifis)\n\n if self.__action_to_rescan:\n body += html.form_with_text('Re-scan',\n action = self.__action_to_rescan)\n\n if self.__connected_services:\n body += html.paragraph_with_text(\n 'Already connected to: %s' %\n ', '.join(self.__connected_services))\n\n if self.__action_to_skip_step:\n body += html.form_with_text('Skip',\n action = self.__action_to_skip_step)\n\n return Webpage(body = body).render()\n","sub_path":"webpage_to_select_wifi.py","file_name":"webpage_to_select_wifi.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"636214140","text":"from selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom Web_Sz.PageLocators.myapi_locators import MyApiLocators as mal\nfrom Web_Sz.Common.basepage import BasePage\n\n\nclass IndexPage(BasePage):\n\n\n\n def isExist_logout_ele(self):\n #找到退出按钮就返回TRue\n try:\n WebDriverWait(self.driver,10).until(EC.visibility_of_element_located((By.XPATH,'//a[@class=\"account-logout\"]')))\n return True\n except:\n return False\n\n #点击个人api\n def user_centor(self,men,women):\n #点击我的-个人中心\n # WebDriverWait(self.driver,20).until(EC.visibility_of_element_located(mal.user_centor))\n doc = '点击api功能-测试生肖'\n self.wait_eleVisible(mal.user_centor,doc)\n # self.driver.find_element(*mal.user_centor).click()\n self.click_element(mal.user_centor,doc)\n #点击我的api进入\n # WebDriverWait(self.driver,20).until(EC.visibility_of_element_located(mal.data_centor))\n self.wait_eleVisible(mal.data_centor, doc)\n # self.driver.find_element(*mal.data_centor).click()\n self.click_element(mal.data_centor, doc)\n # self.driver.find_element(*mal.user_centor_api).click()\n self.click_element(mal.user_centor_api, doc)\n #点击生肖配对接口\n # WebDriverWait(self.driver,20).until(EC.visibility_of_element_located(mal.shengxiao_but))\n self.wait_eleVisible(mal.shengxiao_but, doc)\n # self.driver.find_element(*mal.shengxiao_but).click()\n self.click_element(mal.shengxiao_but, doc)\n #获取窗口句柄\n handles = self.driver.window_handles\n #切换到最新的窗口句柄\n self.driver.switch_to.window(handles[-1])\n # WebDriverWait(self.driver,20).until(EC.visibility_of_element_located(mal.shengxiao_men_but))\n self.wait_eleVisible(mal.shengxiao_men_but, doc)\n # self.driver.find_element(*mal.shengxiao_men_but).send_keys(men)\n self.input_element(mal.shengxiao_men_but,text=men)\n # self.driver.find_element(*mal.shengxiao_women_but).send_keys(women)\n self.input_element(mal.shengxiao_women_but, text=women)\n # self.driver.find_element(*mal.ceshi_but).click()\n self.click_element(mal.ceshi_but,doc)\n # WebDriverWait(self.driver,20).until(EC.visibility_of_element_located(mal.res_data))\n self.wait_eleVisible(mal.res_data, doc)\n # res = self.driver.find_element(*mal.res_data).text\n res = self.get_element(mal.res_data,doc)\n 
print(res)\n\n\n\n\n\n\n\n","sub_path":"Web_Sz/PageObjects/index_page.py","file_name":"index_page.py","file_ext":"py","file_size_in_byte":2672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"64572987","text":"import matplotlib.pyplot as plt\r\nimport numpy\r\nfrom numpy.ma import arange\r\n\r\n#chart comparing likert easiness responses for with glasses and without, of participants who used the glasses first\r\n\r\n#data setup\r\n#numpy.nan is preferred to 0 or null to keep the chart clean\r\nwglasses = [3.5, 5, 4, 2, 2, 2.5, 2, 1]\r\nwoglasses = [5, 3, 4, 4, 3, 4, 5, 4]\r\nx = arange(8)\r\npnames = ['P2','P3','P5','P6','P10','P12','P14','P16']\r\n\r\n#plot the with glasses and then without\r\nplt.scatter(x, wglasses, c='b', label='with glasses')\r\nplt.scatter(x, woglasses, c='r', marker='x', label='without glasses')\r\n\r\n\r\n#formatting\r\nplt.gca().invert_yaxis()\r\nplt.grid()\r\nplt.axes().set_axisbelow(True)\r\nplt.xticks(arange(8))\r\nplt.axes().set_xticklabels(pnames)\r\nplt.ylabel('Likert Rating')\r\nplt.xlabel('Participants')\r\nplt.title('Participants with Glasses as Second Condition')\r\n\r\nplt.show()\r\n","sub_path":"glassesFirstUsability.py","file_name":"glassesFirstUsability.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"601656672","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nINTEL CONFIDENTIAL\nCopyright 2017-2020 Intel Corporation.\nThis software and the related documents are Intel copyrighted materials, and\nyour use of them is governed by the express license under which they were\nprovided to you (License).Unless the License provides otherwise, you may not\nuse, modify, copy, publish, distribute, disclose or transmit this software or\nthe related documents without Intel's prior written permission.\n\nThis software and the related documents are provided as is, with no express or\nimplied warranties, other than those that are expressly stated in the License.\n\"\"\"\n\nfrom enum import Enum\nfrom cryptography.hazmat.primitives.ciphers import Cipher, algorithms\nfrom cryptography.hazmat.backends import default_backend\n\nfrom .IComponent import IComponent\nfrom ..LibException import LibException, ComponentException\nfrom ..utils import validate_file, hashed_key_printer\nfrom ..structures import AesEncryption\nfrom ..LibConfig import LibConfig\n\n\nclass AesKeyComponent(IComponent):\n\n\n \"\"\"\n encryptionTypes maps AES key length to value that should be put into generated binary - don't change\n \"\"\"\n encryptionTypes = {None: 0,\n AesEncryption.KeyLength.Aes128: 1,\n AesEncryption.KeyLength.Aes256: 2, }\n\n class ComponentProperty(Enum):\n Key = \"key\"\n EncryptionType = \"encryption_type\"\n Enabled = \"enabled\"\n\n key_length = None\n\n def __init__(self, xml_node, **kwargs):\n super().__init__(xml_node, **kwargs)\n\n self._key = None\n self._key_length = None\n\n @property\n def key(self):\n if self._key is None:\n self._parse_key()\n return self._key\n\n @property\n def key_length(self):\n if self._key_length is None:\n self._parse_key()\n return self._key_length\n\n def _parse_string_value(self, value):\n self._set_value(value)\n self._key = None # lazy reloading\n self._key_length = None # lazy reloading\n return self.value\n\n def _parse_basic_attributes(self, xml_node):\n super()._parse_basic_attributes(xml_node)\n self._parse_legacy_attribute(xml_node)\n\n @staticmethod\n def 
_parse_key_from_file(path):\n with open(path, 'rb') as file:\n return file.read()\n\n def _parse_key(self):\n if not self._is_enabled():\n self._key = bytes([])\n return\n\n validate_file(self.value)\n\n self._key = self._parse_key_from_file(self.value)\n self._key_length = self._calc_key_length(self._key)\n\n if LibConfig.isVerbose:\n print(self.name)\n hashed_key_printer(self._key, None)\n print(\"\")\n\n def _calc_key_length(self, key):\n return AesEncryption.get_key_length_type(len(key) * 8, self.is_legacy)\n\n def _get_property(self, component_property, _=False):\n self._check_error()\n if component_property == self.ComponentProperty.Key:\n return self.key\n if component_property == self.ComponentProperty.EncryptionType:\n return self.encryptionTypes[self.key_length]\n if component_property == self.ComponentProperty.Enabled:\n return self._is_enabled()\n\n def _should_omit_parsing(self, xml_node):\n # We never want to skip parsing AesKeyComponent but we need initialisation\n # which is done in this method\n super()._should_omit_parsing(xml_node)\n return False\n\n def get_encrypted_data_size(self, data_size, encryption_mode_name):\n try:\n padding = AesEncryption.get_padding_instance(encryption_mode_name)\n except LibException as e:\n raise ComponentException(str(e), self.name)\n\n encrypted_data_size = padding.get_encrypted_data_size(data_size)\n return encrypted_data_size\n\n def encrypt(self, data, encryption_mode_name, iv=None):\n self._check_error()\n mode = AesEncryption.get_mode_instance(encryption_mode_name, iv)\n padding = AesEncryption.get_padding_instance(encryption_mode_name)\n cipher = Cipher(algorithms.AES(self.key),\n mode,\n backend=default_backend())\n encryptor = cipher.encryptor()\n\n try:\n data = bytes(padding.preencrypt(data))\n encrypted_data = encryptor.update(data) + encryptor.finalize()\n encrypted_data = padding.postencrypt(encrypted_data)\n except Exception as e:\n raise ComponentException(str(e), self.name)\n\n return encrypted_data\n\n def _copy_to(self, dst):\n super()._copy_to(dst)\n dst._key = self._key\n dst._key_length = self._key_length\n","sub_path":"Intel/EagleStreamRpPkg/Tool/FTool/SPS/Tools/FlashImageTool/FITm_Python_Version/library/tool/components/AesKeyComponent.py","file_name":"AesKeyComponent.py","file_ext":"py","file_size_in_byte":4737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"628754523","text":"from dict_hash import sha256\nfrom .utils import create_dict\nimport os\nimport numpy as np\n\n\ndef test_dict_hash():\n path = sha256(create_dict())\n assert os.path.exists(path)\n os.remove(path)\n\n\ndef test_dict_hash_with_approximation():\n d = create_dict()\n d[\"this_is_big\"] = [np.zeros((10000, 100))]\n path = sha256(d)\n assert os.path.exists(path)\n os.remove(path)\n","sub_path":"tests/test_2_dict_hash.py","file_name":"test_2_dict_hash.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"462535108","text":"import numpy as np\r\nimport MyEnum as en\r\nimport math\r\nimport sys\r\n\r\nclass Layer:\r\n def __init__(self, N, D, actFunc = en.ActivationFunction.SIGMOID):\r\n self.D = D # number of inputs (col number)\r\n self.N = N # number of neurons for the current layer (row number)\r\n self.W = np.zeros((self.N,self.D))\r\n self.B = np.zeros((self.N,1))\r\n self.GradW = np.zeros((self.N, self.D))\r\n self.GradB = np.zeros((self.N, 1))\r\n self.S = np.zeros((self.N, 1))\r\n self.A = 
np.zeros((self.N, 1))\r\n self.Fprime = np.zeros((self.N, 1))\r\n self.Delta = np.zeros((self.N, 1))\r\n self.actFunc = actFunc\r\n #initialize random weights and bias\r\n self.W = 0.01*np.random.randn(self.N,self.D) \r\n self.B = 0.01*np.random.randn(self.N,1)\r\n\r\n def Evaluate(self, indata):\r\n self.S = np.dot(self.W, indata.reshape(len(indata),1))\r\n self.S = np.reshape(self.S,(len(self.S),1))\r\n self.S = self.S + self.B\r\n if self.actFunc == en.ActivationFunction.SIGMOID:\r\n self.Sigmoid_ActivationFunction()\r\n else:\r\n self.Softmax_ActivationFunction()\r\n self.Fprime = np.multiply(self.A ,(1.0 - self.A))\r\n return self.A\r\n\r\n def Sigmoid_ActivationFunction(self):\r\n self.A = 1 + np.exp(-1.0*self.S)\r\n self.A = np.reciprocal(self.A)\r\n\r\n def Softmax_ActivationFunction(self):\r\n self.A = np.exp(self.S) # Exponent of the summation for all neurons\r\n #print(self.A)\r\n sum = np.sum(self.A, keepdims = True) # sum the exponent of the summations from each neurons\r\n #print(sum)\r\n self.A = self.A/sum\r\n\r\n\r\n\r\n","sub_path":"MatrixFormBackPropPython/Layer.py","file_name":"Layer.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"648878123","text":"# -*- coding: utf-8 -*-\nimport pandas as pd\nfrom config import get_data\n\n\n__all__ = ['radiosondelist']\n\n\ndef radiosondelist(minimal=True, with_igra=False):\n \"\"\" Read Radiosonde List Predefined\n\n Returns\n -------\n DataFrame\n \"\"\"\n table = pd.read_csv(get_data('radiosondeslist.csv'), sep=\";\", index_col=0)\n for icol in table.columns:\n if table[icol].dtype == 'object':\n table.loc[table[icol].isnull(), icol] = ''\n\n if minimal:\n return table[['lon', 'lat', 'alt', 'name']]\n elif with_igra:\n return table[['lon', 'lat', 'alt', 'name', 'id_igra']]\n else:\n return table\n\n\ndef wmo2igra(ident):\n ident = str(ident) # make sure\n # igra2wmo = None\n igra2wmo = pd.read_json(get_data('igra2wmo.json'), dtype=False) # sa strings\n if igra2wmo.wmo.str.contains(ident).any():\n return igra2wmo[igra2wmo.wmo.str.contains(ident)].index.tolist()[0]\n return None\n\n\ndef igra2wmo(ident):\n ident = str(ident) # make sure\n igra2wmo = pd.read_json(get_data('igra2wmo.json'), dtype=False) # sa strings\n if igra2wmo.index.str.contains(ident).any():\n return igra2wmo[igra2wmo.index.str.contains(ident)].wmo.tolist()[0]\n return None\n","sub_path":"raso/radiosondelist.py","file_name":"radiosondelist.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"451115409","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport collections\nimport functools\nimport os\nimport operator\nimport threading\nimport multiprocessing as mp\nimport numpy as np\n\nShmInfo = collections.namedtuple(\n 'ShmInfo', ['name', 'shape', 'dtype']\n)\n\ndef create_shm(tensor_infos, name_suffix = None) :\n shm_names = []\n shms = []\n shm_arrs = []\n for info in tensor_infos :\n shm_name = '{}_{}_{}'.format(\n info.name, os.getpid(), threading.currentThread().ident,\n )\n if name_suffix is not None :\n shm_name = '{}_{}'.format(shm_name, name_suffix)\n shm_size = \\\n functools.reduce(operator.mul, info.shape) * \\\n np.dtype(info.dtype).itemsize\n\n shm = mp.shared_memory.SharedMemory(\n name = shm_name, create = True, size=shm_size)\n shm_arr = np.ndarray(info.shape, dtype=info.dtype, buffer=shm.buf)\n\n shm_names.append(shm_name)\n shms.append(shm)\n 
shm_arrs.append(shm_arr)\n\n return shm_names, shms, shm_arrs\n\n\ndef get_shm(names, tensor_infos) :\n shms = []\n shm_arrs = []\n assert len(names) == len(tensor_infos)\n for name, info in zip(names, tensor_infos) :\n shm = mp.shared_memory.SharedMemory(name = name, create=False)\n shm_arr = np.ndarray(info.shape, dtype=info.dtype, buffer=shm.buf)\n\n shms.append(shm)\n shm_arrs.append(shm_arr)\n\n return shms, shm_arrs\n\ndef close_shm(shms, shm_arrs, unlink) :\n for arr in shm_arrs :\n del arr\n \n try :\n for shm in shms :\n shm.close()\n if unlink :\n shm.unlink()\n except :\n pass\n","sub_path":"SimpleDBI/shm_utils.py","file_name":"shm_utils.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"427399211","text":"#Returns factorial of the argument \"number\"\n\nnumber = int(input(\"please enter a non-negative integer to take the factorial of:\"))\n\nproduct = 1\n\nfor i in range(number):\n product = product * (i+1)\n\nprint (product)\n \n","sub_path":"Factorial Taker copy.py","file_name":"Factorial Taker copy.py","file_ext":"py","file_size_in_byte":220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"465175978","text":"#!/usr/bin/env python3\nimport os\nimport time\n\nimport numpy as np\nimport cv2\nimport tensorflow as tf\n\nfrom darkflow.net.build import TFNet\n\nimages = '../sample_img/'\nuse_camera = False\n\n\ndef _iterate_files():\n while 1:\n for subdir, dirs, files in os.walk(images):\n for file in files:\n yield os.path.join(subdir, file)\n\n\ntfnet = TFNet({\"model\": \"tiny-yolo-voc.cfg\", \"load\": \"tiny-yolo-voc.weights\"})\n\nwith tfnet.graph.as_default():\n layers = tfnet.darknet.layers\n\n def _get_var(layer, name):\n return tf.get_variable(\n 'layer_%d_%s' % (layer, name),\n shape=layers[layer].wshape[name],\n initializer=layers[layer].w[name],\n )\n\n inpt = tf.placeholder(tf.float32, shape=(1, 416, 416, 3))\n\n conv1 = tf.nn.conv2d(\n inpt, layers[0].w['kernel'],\n strides=[1, 1, 1, 1], padding='SAME')\n\n batch1 = tf.nn.batch_normalization(\n conv1,\n _get_var(0, 'moving_mean'),\n _get_var(0, 'moving_variance'),\n offset=layers[0].w['biases'],\n scale=_get_var(0, 'gamma'),\n variance_epsilon=1e-5,\n )\n\n leaky1 = tf.nn.leaky_relu(batch1, alpha=.1)\n\n maxpool1 = tf.nn.max_pool(\n leaky1,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME',\n )\n\n conv2 = tf.nn.conv2d(\n maxpool1, layers[3].w['kernel'],\n strides=[1, 1, 1, 1], padding='SAME')\n\n batch2 = tf.nn.batch_normalization(\n conv2,\n _get_var(3, 'moving_mean'),\n _get_var(3, 'moving_variance'),\n offset=layers[3].w['biases'],\n scale=_get_var(3, 'gamma'),\n variance_epsilon=1e-5,\n )\n\n leaky2 = tf.nn.leaky_relu(batch2, alpha=.1)\n\n maxpool2 = tf.nn.max_pool(\n leaky2,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME',\n )\n\n conv3 = tf.nn.conv2d(\n maxpool2, layers[6].w['kernel'],\n strides=[1, 1, 1, 1], padding='SAME')\n\n batch3 = tf.nn.batch_normalization(\n conv3,\n _get_var(6, 'moving_mean'),\n _get_var(6, 'moving_variance'),\n offset=layers[6].w['biases'],\n scale=_get_var(6, 'gamma'),\n variance_epsilon=1e-5,\n )\n\n leaky3 = tf.nn.leaky_relu(batch3, alpha=.1)\n\n maxpool3 = tf.nn.max_pool(\n leaky3,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME',\n )\n\n conv4 = tf.nn.conv2d(\n maxpool3, layers[9].w['kernel'],\n strides=[1, 1, 1, 1], padding='SAME')\n\n batch4 = tf.nn.batch_normalization(\n conv4,\n 
_get_var(9, 'moving_mean'),\n _get_var(9, 'moving_variance'),\n offset=layers[9].w['biases'],\n scale=_get_var(9, 'gamma'),\n variance_epsilon=1e-5,\n )\n\n leaky4 = tf.nn.leaky_relu(batch4, alpha=.1)\n\n maxpool4 = tf.nn.max_pool(\n leaky4,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME',\n )\n\n conv5 = tf.nn.conv2d(\n maxpool4, layers[12].w['kernel'],\n strides=[1, 1, 1, 1], padding='SAME')\n\n batch5 = tf.nn.batch_normalization(\n conv5,\n _get_var(12, 'moving_mean'),\n _get_var(12, 'moving_variance'),\n offset=layers[12].w['biases'],\n scale=_get_var(12, 'gamma'),\n variance_epsilon=1e-5,\n )\n\n leaky5 = tf.nn.leaky_relu(batch5, alpha=.1)\n\n maxpool5 = tf.nn.max_pool(\n leaky5,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME',\n )\n\n conv6 = tf.nn.conv2d(\n maxpool5, layers[15].w['kernel'],\n strides=[1, 1, 1, 1], padding='SAME')\n\n batch6 = tf.nn.batch_normalization(\n conv6,\n _get_var(15, 'moving_mean'),\n _get_var(15, 'moving_variance'),\n offset=layers[15].w['biases'],\n scale=_get_var(15, 'gamma'),\n variance_epsilon=1e-5,\n )\n\n leaky6 = tf.nn.leaky_relu(batch6, alpha=.1)\n\n maxpool6 = tf.nn.max_pool(\n leaky6,\n ksize=[1, 2, 2, 1],\n strides=[1, 1, 1, 1],\n padding='SAME',\n )\n\n conv7 = tf.nn.conv2d(\n maxpool6, layers[18].w['kernel'],\n strides=[1, 1, 1, 1], padding='SAME')\n\n batch7 = tf.nn.batch_normalization(\n conv7,\n _get_var(18, 'moving_mean'),\n _get_var(18, 'moving_variance'),\n offset=layers[18].w['biases'],\n scale=_get_var(18, 'gamma'),\n variance_epsilon=1e-5,\n )\n\n leaky7 = tf.nn.leaky_relu(batch7, alpha=.1)\n\n conv8 = tf.nn.conv2d(\n leaky7, layers[20].w['kernel'],\n strides=[1, 1, 1, 1], padding='SAME')\n\n batch8 = tf.nn.batch_normalization(\n conv8,\n _get_var(20, 'moving_mean'),\n _get_var(20, 'moving_variance'),\n offset=layers[20].w['biases'],\n scale=_get_var(20, 'gamma'),\n variance_epsilon=1e-5,\n )\n\n leaky8 = tf.nn.leaky_relu(batch8, alpha=.1)\n\n conv9 = tf.nn.conv2d(\n leaky8, layers[22].w['kernel'],\n strides=[1, 1, 1, 1], padding='SAME')\n\n biased9 = tf.nn.bias_add(conv9, layers[22].w['biases'])\n\n output = tf.identity(biased9)\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n if use_camera:\n camera = cv2.VideoCapture(0)\n else:\n files = _iterate_files()\n\n cv2.namedWindow('image', cv2.WINDOW_NORMAL)\n cv2.resizeWindow('image', 600, 600)\n\n while not use_camera or camera.isOpened():\n start_time = time.time()\n\n choice = cv2.waitKey(1)\n if choice == 27:\n break\n\n if use_camera:\n ok, orig_img = camera.read()\n if not ok:\n continue\n else:\n img_file = next(files)\n print(img_file)\n orig_img = cv2.imread(img_file)\n if orig_img is None:\n continue\n\n img = tfnet.framework.resize_input(orig_img)\n img = np.expand_dims(img, 0)\n h, w, _ = orig_img.shape\n\n out = sess.run(output, {inpt: img})[0]\n\n threshold = 0.5\n boxes = tfnet.framework.findboxes(out)\n for box in boxes:\n box = tfnet.framework.process_box(box, h, w, threshold)\n if not box:\n continue\n cv2.rectangle(\n orig_img, (box[0], box[2]), (box[1], box[3]), (0, 255, 0), 3)\n cv2.putText(orig_img, box[4], (box[0], box[2]), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)\n\n cv2.imshow('image', orig_img)\n\n print(time.time() - start_time)\n\n if not use_camera:\n time.sleep(1)\n","sub_path":"tensorflow/tiny-yolo.py","file_name":"tiny-yolo.py","file_ext":"py","file_size_in_byte":6682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} 
+{"seq_id":"476088923","text":"class Solution(object):\n def findOrder(self, numCourses, prerequisites):\n \"\"\"\n :type numCourses: int\n :type prerequisites: List[List[int]]\n :rtype: List[int]\n \"\"\"\n prerequisites = self.reverseGraph(self.genGraph(prerequisites, numCourses), numCourses)\n print(prerequisites)\n\n if self.hasDirectLoopCheck(prerequisites):\n print(\"loop pfound\")\n return []\n else:\n output = []\n markList1 = [False, ] * numCourses\n for i in range(numCourses):\n if markList1[i] is False:\n self.dfsHelper1(prerequisites, i, markList1, output)\n output.reverse()\n return output\n\n def genGraph(self, pairs, nodeNum):\n graph = []\n for i in range(nodeNum):\n graph.append([])\n for j in pairs:\n graph[j[0]].append(j[1])\n return graph\n\n\n\n def hasDirectLoopCheck(self, prerequisites):\n markList = [False, ]*len(prerequisites)\n pathSet = set([])\n res = False\n for i in range(len(markList)):\n if markList[i] is False:\n res = res or self.dfsHelper0(prerequisites, i, markList, pathSet)\n return res\n\n def dfsHelper0(self, graph, cur, markList, pathSet):\n # 用来检测图中是否存在有向环\n adj = graph[cur]\n if markList[cur] is True:\n return True if cur in pathSet else False\n else:\n markList[cur] = True\n pathSet.add(cur)\n res = False\n for n in adj:\n res = res or self.dfsHelper0(graph, n, markList, pathSet)\n pathSet.remove(cur)\n return res\n\n def dfsHelper1(self, graph, cur, markList, output):\n # 用来进行你后续序列的生成\n if markList[cur] is True:\n return\n else:\n markList[cur] = True\n adj = graph[cur]\n for n in adj:\n self.dfsHelper1(graph, n, markList, output)\n output.append(cur)\n return\n\n def reverseGraph(self, graphSrc, nodeNum):\n newGraph = []\n for i in range(nodeNum):\n newGraph.append([])\n for i in range(nodeNum):\n for dest in graphSrc[i]:\n newGraph[dest].append(i)\n return newGraph\n\n\nif __name__ == \"__main__\":\n s = Solution()\n lists = [[0, 1], [3, 1], [1, 3], [3, 2]]\n x = s.findOrder(4, lists)\n print(x)\n\n","sub_path":"exercise/leetcode/python_src/by2017_Sep/Leet210.py","file_name":"Leet210.py","file_ext":"py","file_size_in_byte":2506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"542569141","text":"from PyQt5 import Qt,QtCore\n\nimport Game\n\n\n\nclass SnakeGame(Game.Game):\n \"\"\"description of class\"\"\"\n\n def __init__(self,name):\n super().__init__(name)\n\n self.field_size = Qt.QPoint(0,0)\n\n self.step = 0\n self.timer = QtCore.QTimer()\n\n self.food_list = []\n self.border_list = []\n\n # ================ methodes ===================\n\n def set_field_size(self,w,h):\n self.field_size.setX(w)\n self.field_size.setY(h)\n\n\n def stop_game(self):\n self.timer.stop()\n return super().stop_game()\n","sub_path":"Game/SnakeGame.py","file_name":"SnakeGame.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"78119916","text":"# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport signal\nimport os\nimport unittest\nimport logging\nfrom simple.readwrite import SunnyDayTest\nfrom TestUtils import WALOG, ACCUMULO_HOME\n\nlog = logging.getLogger('test.auto')\n\nclass WriteAheadLog(SunnyDayTest):\n\n order = 25\n\n settings = SunnyDayTest.settings.copy()\n \n # roll the log at least once\n settings['tserver.walog.max.size'] = '2M'\n settings['gc.cycle.delay'] = 1\n settings['gc.cycle.start'] = 1\n\n # compact frequently\n settings['tserver.memory.maps.max'] = '200K'\n settings['tserver.compaction.major.delay'] = 1\n\n # split frequently\n tableSettings = SunnyDayTest.tableSettings.copy()\n tableSettings['test_ingest'] = { \n 'table.split.threshold': '750K',\n }\n\n def runTest(self):\n self.sleep(3)\n waitTime = self.waitTime()\n self.waitForStop(self.ingester, waitTime)\n log.info(\"Stopping tablet servers hard\")\n self.stop_accumulo(signal.SIGKILL)\n self.sleep(5)\n self.start_accumulo()\n h = self.runOn(self.masterHost(), [self.accumulo_sh(), \"gc\"])\n self.sleep(3)\n log.info(\"Verifying Ingestion\")\n self.waitForStop(self.verify(self.masterHost(),\n self.options.rows,\n size=self.options.size),\n waitTime)\n self.shutdown_accumulo()\n\nclass DiskFailure(SunnyDayTest):\n\n order = 25\n\n settings = SunnyDayTest.settings.copy()\n \n # compact frequently\n settings['tserver.port.search'] = 'true'\n settings['tserver.memory.maps.max'] = '200K'\n settings['tserver.compaction.major.delay'] = 1\n settings['tserver.logger.timeout'] = '5s'\n\n def start_accumulo_procs(self, safeMode=None):\n log.info(\"Starting normal accumulo\")\n SunnyDayTest.start_accumulo_procs(self, safeMode)\n log.info(\"Starting victim logger\")\n libpath = '%s/test/system/auto/fake_disk_failure.so' % ACCUMULO_HOME\n os.environ['LD_PRELOAD'] = libpath\n os.environ['DYLD_INSERT_LIBRARIES'] = libpath\n os.environ['DYLD_FORCE_FLAT_NAMESPACE'] = 'true'\n stop = self.start_logger(self.masterHost())\n del os.environ['LD_PRELOAD']\n del os.environ['DYLD_FORCE_FLAT_NAMESPACE']\n del os.environ['DYLD_INSERT_LIBRARIES']\n self.flagFile = os.getenv(\"HOME\") + \"/HOLD_IO_%d\" % stop.pid\n self.sleep(5)\n \n def runTest(self):\n self.sleep(3)\n waitTime = self.waitTime()\n log.info(\"Waiting for ingest to stop\")\n self.waitForStop(self.ingester, waitTime)\n\n log.info(\"Starting fake disk failure for logger\")\n fp = open(self.flagFile, \"w+\")\n fp.close()\n self.ingester = self.ingest(self.masterHost(),\n self.options.rows,\n self.options.rows,\n size=self.options.size)\n self.waitForStop(self.ingester, waitTime)\n \n log.info(\"Verifying Ingestion\")\n self.waitForStop(self.verify(self.masterHost(),\n self.options.rows * 2,\n size=self.options.size),\n waitTime)\n\n def tearDown(self):\n SunnyDayTest.tearDown(self)\n try:\n os.unlink(self.flagFile)\n except:\n pass\n \n\ndef suite():\n result = unittest.TestSuite()\n result.addTest(WriteAheadLog())\n result.addTest(DiskFailure())\n return 
result\n\n","sub_path":"bugs/accumulo/5594b2e0/test/system/auto/simple/wal.py","file_name":"wal.py","file_ext":"py","file_size_in_byte":4443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"97104413","text":"# coding=utf-8\nimport matplotlib.pyplot as plt\n\nnombres = {'Enrique': 2, 'Jose': 6, 'Alejandro': 1, 'Maria': 3,\n'Ivan': 2, 'Alberto': 3, 'Pablo': 1, 'Omar': 1, 'Guadalupe': 3,\n'Juan': 1}\n\n\n\"\"\"\nplt.bar( NOMBRE DEL AXIS X, ALTURA DE CADA COSA, aling=)\n\nplt.bar(lista, etiquetas) esta parte define la ??? no se xd lmao. Es como\nsi la parte de que tan alto sea la magnitud de cada elemento\n\n\n.keys regresa el nombre de cada elemnto del diccionario\n.values regresa el valor de cada elemento del diccionario \n\nasí que se puede ver como que la primera parte define el eje x\ny la segunda línea de código ve la parte de las y\n\"\"\"\n\n\nplt.bar(range(len(nombres)), nombres.values(), align='center')\nplt.xticks(range(len(nombres)), list(nombres.keys()))\n\nplt.show()","sub_path":"20feb/histo.py","file_name":"histo.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"423650307","text":"def swap(nums, i, j):\n temp = nums[i]\n nums[i] = nums[j]\n nums[j] = temp\n\ndef inplace_sort(nums, begin, end):\n if begin >= end:\n return\n i, j = begin, end\n while i < j:\n swap(nums, i, j)\n i += 1\n j -= 1\n\nclass Solution(object):\n\n def nextPermutation(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: void Do not return anything, modify nums in-place instead.\n \"\"\"\n n = len(nums)\n if n <= 1:\n return\n if nums[n - 2] < nums[n - 1]:\n swap(nums, n - 2, n - 1)\n return\n i = n - 2\n while i >= 0 and nums[i] >= nums[i + 1]:\n i -= 1\n if i < 0:\n nums.sort()\n return\n j = n - 1\n while nums[j] <= nums[i]:\n j -= 1\n swap(nums, i, j)\n inplace_sort(nums, i + 1, n - 1)\n\n\nif __name__ == \"__main__\":\n sol = Solution()\n x = [1, 2, 3]\n sol.nextPermutation(x)\n print(x)\n x = [3, 2]\n sol.nextPermutation(x)\n print(x)\n x = [2, 1, 3, 5, 4, 4, 3, 2]\n sol.nextPermutation(x)\n print(x)\n","sub_path":"leetcode/python/n31_Next_Permutation.py","file_name":"n31_Next_Permutation.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"426078369","text":"\"\"\"\nFlask web app connects to Mongo database.\nKeep a simple list of dated memoranda.\n\nRepresentation conventions for dates: \n - We use Arrow objects when we want to manipulate dates, but for all\n storage in database, in session or g objects, or anything else that\n needs a text representation, we use ISO date strings. These sort in the\n order as arrow date objects, and they are easy to convert to and from\n arrow date objects. (For display on screen, we use the 'humanize' filter\n below.) A time zone offset will \n - User input/output is in local (to the server) time. 
\n\"\"\"\n\nimport flask\nfrom flask import request\nfrom flask import url_for\nimport logging\nfrom bson import ObjectId\n# Date handling\nimport arrow\nfrom pymongo import MongoClient\n\nimport config\n\n###\n# Globals\n###\nCONFIG = config.configuration()\n\napp = flask.Flask(__name__)\napp.secret_key = CONFIG.SECRET_KEY\n\n# Mongo database\nMONGO_CLIENT_URL = \"mongodb://{}:{}@{}:{}/{}\".format(\n CONFIG.DB_USER,\n CONFIG.DB_USER_PW,\n CONFIG.DB_HOST,\n CONFIG.DB_PORT,\n CONFIG.DB)\n\ndbclient = MongoClient(MONGO_CLIENT_URL)\ndb = dbclient[str(CONFIG.DB)]\ncollection = db.dated_memos\n\napp.debug = CONFIG.DEBUG\nif CONFIG.DEBUG:\n app.logger.setLevel(logging.DEBUG)\n\n####\n# Database connection per server process\n###\n\n\n\n\n###\n# Pages\n###\n\n@app.route(\"/\")\n@app.route(\"/index\")\ndef index():\n app.logger.debug(\"Main page entry\")\n return flask.render_template('index.html')\n\n\n# Responses\n@app.route(\"/_get_memos\")\ndef send_memos():\n results = get_memos()\n if results:\n return flask.jsonify(results={\"memos\": results})\n else:\n return flask.jsonify(results={\"memos\": \"none\"})\n\n\n@app.route(\"/_post_memo\", methods=['POST'])\ndef post_memo():\n data = request.form\n date, memo = data['date'], data['memo']\n try:\n date_valid = arrow.get(date)\n insert_bson = {'date': date, 'memo': memo}\n tr_id = collection.insert_one(insert_bson).inserted_id\n except:\n tr_id = 0\n return flask.jsonify(results={'success': bool(tr_id)})\n\n\n@app.route(\"/_delete_memo\")\ndef delete_memo():\n data = request.args['id']\n success = collection.delete_one({'_id': ObjectId(data)}).deleted_count\n return flask.jsonify(result={'success': success})\n\n\n@app.errorhandler(404)\ndef page_not_found(error):\n app.logger.debug(\"Page not found\")\n return flask.render_template('page_not_found.html',\n badurl=request.base_url,\n linkback=url_for(\"index\")), 404\n\n\n#################\n#\n# Functions used within the templates\n#\n#################\n\n\ndef humanize_arrow_date(date):\n \"\"\"\n Date is internal UTC ISO format string.\n Output should be \"today\", \"yesterday\", \"in 5 days\", etc.\n Arrow will try to humanize down to the minute, so we\n need to catch 'today' as a special case. \n \"\"\"\n\n then = arrow.get(date).to('local')\n now = arrow.utcnow().to('local')\n if then.date() == now.date():\n human = \"Today\"\n else:\n human = then.humanize(now)\n if human == \"in a day\":\n human = \"Tomorrow\"\n return human\n\n\n#############\n#\n# Functions available to the page code above\n#\n##############\ndef get_memos():\n \"\"\"\n Returns all memos in the database, in a form that\n can be inserted directly in the 'session' object.\n \"\"\"\n records = []\n for record in collection.find():\n record[\"_id\"] = str(record[\"_id\"])\n record['human_date'] = humanize_arrow_date(record['date'])\n records.append(record)\n if records:\n return_record = sorted(records, key=lambda k: arrow.get(k['date']))\n else:\n return_record = records\n return return_record\n\n\nif __name__ == \"__main__\":\n app.run(port=CONFIG.PORT, host=\"0.0.0.0\")\n","sub_path":"memos/flask_main.py","file_name":"flask_main.py","file_ext":"py","file_size_in_byte":3790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"603584921","text":"#23.4. List Comprehensions\n#Python provides an alternative way to do map and filter operations, called a list comprehension. Many programmers find them easier to understand and write. 
List comprehensions are concise ways to create lists from other lists. The general syntax is:\n# [<transformer_expression> for <item> in <sequence> if <filtration_expression>]\n\n#where the if clause is optional. For example,\nthings = [2, 5, 9]\n\nyourlist = [value * 2 for value in things]\n\nprint(yourlist)\n#The transformer expression is value * 2. The item variable is value and the sequence is things. \n# This is an alternative way to perform a mapping operation. \n# As with map, each item in the sequence is transformed into an item in the new list. \n# Instead of the iteration happening automatically, \n# however, we have adopted the syntax of the for loop which may make it easier to understand.\n#Just as in a regular for loop, \n# the part of the statement for value in things says to execute some code once for each item in things. \n# Each time that code is executed, \n# value is bound to one item from things. \n# The code that is executed each time is the transformer expression, \n# value * 2, rather than a block of code indented underneath the for statement. \n# The other difference from a regular for loop is that each time the expression is evaluated, \n# the resulting value is appended to a list. That happens automatically, \n# without the programmer explicitly initializing an empty list or appending each item.\n#The if clause of a list comprehension can be used to do a filter operation. \n# To perform a pure filter operation, the expression can be simply the variable that is bound to each item.\n# For example, the following list comprehension will keep only the even numbers from the original list.\n\ndef keep_evens(nums):\n    new_list = [num for num in nums if num % 2 == 0]\n    return new_list\n\nprint(keep_evens([3, 4, 6, 7, 0, 1]))\n\n#You can also combine map and filter operations by chaining them together, \n# or with a single list comprehension.\nthings = [3, 4, 6, 7, 0, 1]\n#chaining together filter and map:\n# first, filter to keep only the even numbers\n# double each of them\nprint(list(map(lambda x: x*2, filter(lambda y: y % 2 == 0, things))))  # list() materializes the lazy map object for printing\n\n# equivalent version using list comprehension\nprint([x*2 for x in things if x % 2 == 0])\n\n\n#2. The for loop below produces a list of numbers greater than 10. \n# Below the given code, use list comprehension to accomplish the same thing. \n# Assign it to the variable lst2. Only one line of code is needed.\n\nL = [12, 34, 21, 4, 6, 9, 42]\nlst = []\nfor x in L:\n    if x > 10:\n        lst.append(x)\nprint(lst)\n\nlst2 = [num for num in L if num > 10]\n\n\n#3. Write code to assign to the variable compri all the values of the key name in any of the sub-dictionaries in the dictionary tester. 
Do this using a list comprehension.\n\n\ntester = {'info': [{\"name\": \"Lauren\", 'class standing': 'Junior', 'major': \"Information Science\"},{'name': 'Ayo', 'class standing': \"Bachelor's\", 'major': 'Information Science'}, {'name': 'Kathryn', 'class standing': 'Senior', 'major': 'Sociology'}, {'name': 'Nick', 'class standing': 'Junior', 'major': 'Computer Science'}, {'name': 'Gladys', 'class standing': 'Sophomore', 'major': 'History'}, {'name': 'Adam', 'major': 'Violin Performance', 'class standing': 'Senior'}]}\ncompri = [d['name'] for d in tester['info']]\nprint(compri)","sub_path":"python3programming/course_3/23_4_ List Comprehensions.py","file_name":"23_4_ List Comprehensions.py","file_ext":"py","file_size_in_byte":3396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"417787985","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\n\nurlpatterns = [\n url(r'^backend/', include('backend.urls')),\n url(r'^userInfo/', include('userInfo.urls')),\n url(r'^admin/', include(admin.site.urls)),\n]\n","sub_path":"surround-django-project/surround/surround/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"8086148","text":"from django.shortcuts import render\nfrom congener_specific.models import InputForm\nfrom congener_specific.compute import main\nimport os\n\ndef congener_specific(request):\n os.chdir(os.path.dirname(__file__))\n result = None\n if request.method == 'POST':\n form = InputForm(request.POST)\n if form.is_valid():\n form2 = form.save(commit=False)\n result = main(form2.feed_intake, form2.exposure_time, form2.depletion_time,\n form2.c1, form2.c2,form2.c3, form2.c4,\n form2.c5, form2.c6,form2.c7, form2.c8,\n form2.c9, form2.c10,form2.c11, form2.c12,\n form2.c13, form2.c14, form2.c15,\n form2.c16, form2.c17,form2.c18, form2.c19,\n form2.c20, form2.c21,form2.c22, form2.c23,\n form2.c24, form2.c25,form2.c26, form2.c27,\n form2.c28, form2.c29)\n result = result.replace('static/', '')\n \n else:\n form = InputForm()\n context = {'form': form,'result': result}\n return render(request,'congener_specific.html', context)\n\n","sub_path":"congener_specific/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"157248342","text":"import logging\n\nfrom django.core.management.base import BaseCommand\nfrom django.db import transaction\n\nfrom convictions_data.models import Disposition\nfrom convictions_data.statute import (get_iucr, IUCRLookupError,\n ILCSLookupError, StatuteFormatError)\n\nfrom pprint import pprint\nimport json\n\n# can't find a handler?\n#logger = logging.getLogger(__name__)\n\n\ndef append_or_create(dict, chrgdesc, category):\n if category:\n try:\n categories = dict[chrgdesc]\n if category not in categories:\n dict[chrgdesc].append(category)\n except KeyError:\n dict[chrgdesc] = [category]\n else:\n # warn if there's no IUCR category for this disposition\n assert False\n\nclass Command(BaseCommand):\n help = \"Map charge descriptions to iucr categories.\"\n\n def handle(self, *args, **options):\n\n chrgdesc_to_category = {}\n\n for disposition in Disposition.objects.all():\n\n chrgdesc = disposition.ammndchrgdescr if \\\n disposition.ammndchrgdescr else disposition.chrgdesc\n category = 
disposition.iucr_category\n\n            case_number = disposition.case_number\n            statute = disposition.final_statute if \\\n                disposition.final_statute else disposition.statute\n            chrgdisp = disposition.chrgdisp\n            chrgdispdate = disposition.chrgdispdate\n\n            try:\n                append_or_create(chrgdesc_to_category, chrgdesc, category)\n            except AssertionError:\n                # print('No IUCR category for disposition: {} {} {} {}'\n                #       .format(case_number, statute, chrgdispdate, chrgdisp))\n                pass\n\n        print('num total: ', len(chrgdesc_to_category))\n        with open('chrgdesc_to_category__all.json', 'w') as f:\n            json.dump(chrgdesc_to_category, f)\n\n        chrgdesc_to_category = {x: chrgdesc_to_category[x] for x in chrgdesc_to_category.keys() if len(chrgdesc_to_category[x]) > 1}\n        \n        with open('chrgdesc_to_category__multiples.json', 'w') as f:\n            print('num with multiple: ', len(chrgdesc_to_category))\n            json.dump(chrgdesc_to_category, f)\n\n\n\n\n","sub_path":"convictions_data/management/commands/chrgdesc2category.py","file_name":"chrgdesc2category.py","file_ext":"py","file_size_in_byte":2196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"304005868","text":"# Goes through list and greps out queries and prints them all pretty\n# CSV Source: https://github.com/fivethirtyeight/russian-troll-tweets/\n\nimport os\nimport re\nimport csv\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom operator import itemgetter\n\nprint('Troll Tweet Grep:\\n')\n\"\"\"\nMakes a word-occurrence chart based on search string\n\nInput: query - str() - search query\n\"\"\"\nquery = str(input('Please enter search string: '))\n\n# collections\nourlist = next(os.walk('tweets/'))[2]\nweight_dict = {}\ntweetlist = []\nauthorlist = []\nword_list = []\ngarbage_list = list(\n    ['about', 'all', 'among', 'and', 'are', 'ass', 'because', 'become', 'but', 'can', 'could', 'don', 'dont', 'down',\n     'for', 'from', 'fuck', 'fucked', 'fuckin', 'fucking', 'fucks', 'get', 'give', 'got', 'had', 'has', 'have', 'her',\n     'hers', 'him', 'his', 'how', 'http', 'https', 'into', 'just', 'know', 'like', 'may', 'might', 'must', 'not', 'now',\n     'off', 'one', 'out', 'say', 'shall', 'she', 'shit', 'should', 'some', 'still', 'that', 'the', 'their', 'their,',\n     'them', 'then', 'there', 'these', 'they', 'this', 'three', 'two', 'up', 'want', 'was', 'wasnt', 'were', 'what',\n     'when', 'where', 'who', 'why', 'will', 'with', 'without', 'would', 'you', 'your', 'than','lol','video','look'])\ngarbage_list.append(query)\n\n# used for getting a sorted copy of the above text after you update it :)\n# print(sorted(set(garbage_list)))\n\n\n# Retrieving results section:\nfor tweetfile, index in zip(ourlist, range(0, len(ourlist))):\n    print('Now searching through ' + str(tweetfile) + '...')\n    with open('./tweets/' + str(tweetfile), encoding=\"utf8\") as fin:\n        dr = csv.DictReader(fin)\n        for row in dr:\n            if query.lower() in row['content'].lower():\n                tweetlist.append(row['content'].lower())\n                authorlist.append(row['author'].lower())\nprint(str(len(tweetlist)) + ' results found.')\ngarbage_list.extend(authorlist)  # extend, not append: each author name must be an individual stop-word entry\n\n\"\"\"ROWS FOR QUERYING YOURSELF\n    row['external_author_id'], \n    row['author'], \n    row['content'], \n    row['region'],\n    row['language'], \n    row['publish_date'], \n    row['harvested_date'], \n    row['following'],\n    row['followers'], \n    row['updates'], \n    row['post_type'], \n    row['account_type'],\n    row['retweet'], \n    row['account_category'], \n    row['new_june_2018'])\n    \"\"\"\n\n# Processing results section:\n# for tweet, author in zip(tweetlist, 
authorlist):\n#    print('Troll account name: ' + str(author))\n#    print('Tweet: \\n' + str(tweet) + '\\n')\n\n# turn all lists of all tweets into a big string\ntweetlist = str(tweetlist)\n\n# remove all whitespaces, and turn each word back into an array\ntweetlist = re.findall(r'\\w+', tweetlist)\n\nfor word in tweetlist:\n    # print(word)\n    ALREADY_IN_LIST = False\n    try:\n        if word.lower() not in weight_dict and len(word) > 2 and word.lower() not in garbage_list:\n            weight_dict.update({word: 0})\n        elif word.lower() in weight_dict:\n            ALREADY_IN_LIST = True\n            weight_dict[word] += 1\n    except KeyError as e:\n        # print('keyerror on: ' + str(word))\n        # if ALREADY_IN_LIST:\n        #     print(str(word) + ' is already in the list')\n        continue\n\nweight_dict = dict(sorted(weight_dict.items(), key=itemgetter(1), reverse=True))\nMAX_PLOTTED_VALS = 20\nbar_width = 0.30\ny = weight_dict.values()\nx = weight_dict.keys()\nmax_x_range = np.arange(MAX_PLOTTED_VALS)\nmax_y = int(max(y)) if y else None\nplt.xticks(np.arange(MAX_PLOTTED_VALS), list(x)[:MAX_PLOTTED_VALS], rotation=45)  # labels must match the 20 tick positions\nplt.ylabel('Most common words with \\\"' + str(query) + '\\\" in it')\nplt.xlabel('Words')\nplt.bar(np.arange(MAX_PLOTTED_VALS) + bar_width, list(y)[:MAX_PLOTTED_VALS], width=bar_width, facecolor='#9999ff',\n        edgecolor='white')\nfig = plt.gcf()\nfig.canvas.set_window_title(str(MAX_PLOTTED_VALS) + ' most common words with \\\"' + str(query) + '\\\" in it')\nplt.show()\n","sub_path":"TrollTweetSearch.py","file_name":"TrollTweetSearch.py","file_ext":"py","file_size_in_byte":3972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"430964242","text":"import glob\nimport numpy as np\nimport sklearn\nimport sklearn.model_selection\n\n# we have kinetic / commute mapping - lag times: 1 and 10 - 4 sets of trajs\n\n# let's first make splits by filename, then translate them to numbers for all 3: dihedrals, distances, log. 
distances\n\n#shuffle_split = sklearn.model_selection.ShuffleSplit(n_splits=10, test_size=0.5)\n#split_train = [[],[],[],[],[],[],[],[],[],[]]\n#split_test = [[],[],[],[],[],[],[],[],[],[]]\n\n# 11707 has runs up to run168, 11709 up to run3\n# 11707\n#for run in range(0,169):\n# filenames_ = [x for x in glob.glob('/cbio/jclab/home/rafal.wiewiora/repos/MSM_play/set8_apo_11707_11709_FINAL/data_cut_start/11707/run%d-*.h5' % run) if 'run104-clone4' not in x]\n# if filenames_:\n# splits = np.array(list(shuffle_split.split(filenames_)))\n# for index, split in enumerate(splits):\n# for filename in split[0]:\n# split_train[index].append(filenames_[filename].split('/')[-2] + '/' + filenames_[filename].split('/')[-1])\n# for filename in split[1]:\n# split_test[index].append(filenames_[filename].split('/')[-2] + '/' + filenames_[filename].split('/')[-1])\n\n# 11709\n#for run in range(0,4):\n# filenames_ = glob.glob('/cbio/jclab/home/rafal.wiewiora/repos/MSM_play/set8_apo_11707_11709_FINAL/data_cut_start/11709/run%d-*.h5' % run)\n# if filenames_:\n# splits = np.array(list(shuffle_split.split(filenames_)))\n# for index, split in enumerate(splits):\n# for filename in split[0]:\n# split_train[index].append(filenames_[filename].split('/')[-2] + '/' + filenames_[filename].split('/')[-1])\n# for filename in split[1]:\n# split_test[index].append(filenames_[filename].split('/')[-2] + '/' + filenames_[filename].split('/')[-1])\n \n#np.save('splits_train_filenames', split_train)\n#np.save('splits_test_filenames', split_test)\n\nsplit_train = np.load('splits_train_filenames.npy')\nsplit_test = np.load('splits_test_filenames.npy')\n\n# dihedrals\n\n#featurize_glob = glob.glob('/cbio/jclab/home/rafal.wiewiora/repos/MSM_play/set8_apo_11707_11709_FINAL/data_cut_start_noH/*/*.h5')\nfeaturize_glob = np.load('dih_featurize_glob.npy')\ntica_glob = glob.glob('/data/chodera/rafal.wiewiora/set8_apo_11707_11709/data_cut_start_noH_stride10_featurized/dih/*.npy')\n\ndihedrals_glob_dict = dict()\nfor i in range(len(tica_glob)):\n dihedrals_glob_dict[featurize_glob[int(tica_glob[i].split('/')[-1][:-4])].split('/')[-2] + '/' + featurize_glob[int(tica_glob[i].split('/')[-1][:-4])].split('/')[-1]] = i\n\nsplit_train_dih = [[],[],[],[],[],[],[],[],[],[]]\nsplit_test_dih = [[],[],[],[],[],[],[],[],[],[]]\n\nfor index, split in enumerate(split_train):\n for filename in split:\n split_train_dih[index].append(dihedrals_glob_dict[filename])\n\nfor index, split in enumerate(split_test):\n for filename in split:\n split_test_dih[index].append(dihedrals_glob_dict[filename])\n\nnp.save('splits_train_dih', split_train_dih)\nnp.save('splits_test_dih', split_test_dih)\n\n# distances\ntica_glob = glob.glob('/data/chodera/rafal.wiewiora/set8_apo_11707_11709/data_cut_start_noH_stride10_featurized/dist_cross/*/*.npy')\n\ndist_glob_dict = dict()\nfor i in range(len(tica_glob)):\n dist_glob_dict[tica_glob[i].split('/')[-2] + '/' + tica_glob[i].split('/')[-1][:-3] + 'h5'] = i\n\nsplit_train_dist = [[],[],[],[],[],[],[],[],[],[]]\nsplit_test_dist = [[],[],[],[],[],[],[],[],[],[]]\n\nfor index, split in enumerate(split_train):\n for filename in split:\n split_train_dist[index].append(dist_glob_dict[filename])\n\nfor index, split in enumerate(split_test):\n for filename in split:\n split_test_dist[index].append(dist_glob_dict[filename])\n\nnp.save('splits_train_dist', split_train_dist)\nnp.save('splits_test_dist', split_test_dist)\n\n# logistic distances\ntica_glob = 
glob.glob('/data/chodera/rafal.wiewiora/set8_apo_11707_11709/data_cut_start_noH_stride10_featurized/dist_cross_soft/*/*.npy')\n\ndistlog_glob_dict = dict()\nfor i in range(len(tica_glob)):\n distlog_glob_dict[tica_glob[i].split('/')[-2] + '/' + tica_glob[i].split('/')[-1][:-3] + 'h5'] = i\n\nsplit_train_distlog = [[],[],[],[],[],[],[],[],[],[]]\nsplit_test_distlog = [[],[],[],[],[],[],[],[],[],[]]\n\nfor index, split in enumerate(split_train):\n for filename in split:\n split_train_distlog[index].append(distlog_glob_dict[filename])\n\nfor index, split in enumerate(split_test):\n for filename in split:\n split_test_distlog[index].append(distlog_glob_dict[filename])\n\nnp.save('splits_train_distlog', split_train_distlog)\nnp.save('splits_test_distlog', split_test_distlog)\n","sub_path":"APO/hyperparameter_selection/feat_choice_scoring_new_scheme_2/split_data_lilac.py","file_name":"split_data_lilac.py","file_ext":"py","file_size_in_byte":4632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"205622899","text":"import sys\nimport re\nimport numpy as np\n\n# hardcoded rule groups for psl files for this dataset\nrule_group_indices = [[[0], [1], [2]],\n [[3], [4]],\n [[5], [6]],\n [[7], [8]],\n [[9, 10], [11, 12], [13, 14]],\n [[15, 16], [17, 18]]]\n\n\ndef main(argv):\n input_psl_file_path = argv[0]\n pruning_method = argv[1]\n output_psl_file_path = argv[2]\n\n # open file\n with open(input_psl_file_path, 'r') as original_psl_file:\n with open(output_psl_file_path, 'w') as pruned_psl_file:\n\n dropped_rule_indices = []\n\n original_lines = original_psl_file.readlines()\n original_psl_file.seek(0)\n\n # find all rules to prune using specified pruning method\n for rule_group in rule_group_indices:\n sub_group_maximums = []\n for sub_group in rule_group:\n # get maximum value in subgroup\n max_value = 0\n for line_number in sub_group:\n line = original_lines[line_number]\n value = float(re.findall(\"\\d+\\.\\d+\", line)[0])\n if value > max_value:\n max_value = value\n\n sub_group_maximums.append(max_value)\n\n # use heuristic/prune method to find dropped rule indices\n if pruning_method == 'TakeTopPruning':\n top_sub_group_index = np.argmax(sub_group_maximums)\n for sub_group_index, sub_group in enumerate(rule_group):\n if sub_group_index != top_sub_group_index:\n dropped_rule_indices = dropped_rule_indices + [index for index in sub_group]\n elif pruning_method == 'DropBottomPruning':\n bottom_sub_group_index = np.argmin(sub_group_maximums)\n for sub_group_index, sub_group in enumerate(rule_group):\n if sub_group_index == bottom_sub_group_index:\n dropped_rule_indices = dropped_rule_indices + [index for index in sub_group]\n\n # copy rules that are not in dropped_rule_indices list\n for line_index, line in enumerate(original_lines):\n if line_index not in dropped_rule_indices:\n pruned_psl_file.write(line)\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","sub_path":"datasets/lastfm/cli/psl_rule_prune.py","file_name":"psl_rule_prune.py","file_ext":"py","file_size_in_byte":2479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"641467117","text":"__author__ = 'bloe'\n\nfrom dynamite.GENERATOR.Configuration.ConfigurationHelper import ConfigurationHelper\nfrom dynamite.GENERATOR.Configuration.Phase import Phase\nimport uuid\n\nclass ConfigurationResource:\n\n GENERATE_UUID_KEYWORD = \"generate\"\n NO_UUID_KEYWORD = \"none\"\n\n service_type = None\n instance_uuid = None\n 
instance_name = None\n write_interval = None\n metric_name = None\n metric_value_template = None\n phases = []\n\n def __init__(self):\n self.service_type = \"\"\n self.instance_uuid = \"\"\n self.instance_name = \"\"\n self.metric_name = \"\"\n self.metric_value_template = \"\"\n self.write_interval = 0\n self.phases = []\n\n @classmethod\n def generate_uuid(cls):\n generated_uuid = uuid.uuid4()\n return str(generated_uuid)\n\n @classmethod\n def from_dictionary(cls, global_write_interval, dictionary):\n resource = ConfigurationResource()\n resource.write_interval = ConfigurationHelper.dict_value_or_default(dictionary, \"write_interval\", global_write_interval)\n resource.metric_name = ConfigurationHelper.dict_value_or_fail(\n dictionary,\n \"metric_name\",\n \"Metric name missing in resource configuration!\"\n )\n resource.metric_value_template = ConfigurationHelper.dict_value_or_default(\n dictionary,\n \"metric_value_template\",\n None\n )\n resource.service_type = ConfigurationHelper.dict_value_or_fail(\n dictionary,\n \"service_type\",\n \"Service type missing in resource configuration!\"\n )\n resource.instance_uuid = ConfigurationHelper.dict_value_or_default(\n dictionary,\n \"instance_uuid\",\n cls.GENERATE_UUID_KEYWORD\n )\n if resource.instance_uuid == cls.GENERATE_UUID_KEYWORD:\n resource.instance_uuid = cls.generate_uuid()\n elif resource.instance_uuid == cls.NO_UUID_KEYWORD:\n resource.instance_uuid = \"\"\n\n resource.instance_name = ConfigurationHelper.dict_value_or_default(\n dictionary,\n \"instance_name\",\n None\n )\n phases_dictionary = ConfigurationHelper.dict_value_or_fail(\n dictionary,\n \"phases\",\n \"There are no phases defined for a resource!\"\n )\n previous_phase = None\n for phase_dictionary in phases_dictionary:\n phase = Phase.from_dictionary(phase_dictionary, previous_phase=previous_phase)\n previous_phase = phase\n resource.phases.append(phase)\n\n return resource\n","sub_path":"dynamite/GENERATOR/Configuration/ConfigurationResource.py","file_name":"ConfigurationResource.py","file_ext":"py","file_size_in_byte":2640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"423622125","text":"from django.contrib import admin\nfrom .models import Order, OrderLineItem\n\n\nclass OrderLineItemAdminInline(admin.TabularInline):\n \"\"\"\n Add/edit line items in admin from inside Order model\n \"\"\"\n # Access OrderLineItem (see inlines var in OrderAdmin below)\n model = OrderLineItem\n # lineitem_total not to be edited\n readonly_fields = ('lineitem_total',)\n\n\nclass OrderAdmin(admin.ModelAdmin):\n \"\"\"\n Admin interface for orders\n \"\"\"\n # Access OrderLineItem from OrderAdmin interface in Admin\n inlines = (OrderLineItemAdminInline,)\n\n # Fields caculated by model method, read only so they cannot be edited\n readonly_fields = ('order_number', 'date',\n 'delivery_cost', 'order_total',\n 'grand_total', 'original_cart',\n 'stripe_pid',)\n\n # Specify order of fields in admin interface\n fields = ('order_number', 'user_profile', 'date', 'full_name',\n 'email', 'phone_number', 'country',\n 'postcode', 'town_or_city', 'street_address1',\n 'street_address2', 'county', 'delivery_cost',\n 'order_total', 'grand_total', 'original_cart',\n 'stripe_pid',)\n\n # Fields displayed in admin\n list_display = ('order_number', 'date', 'full_name',\n 'order_total', 'delivery_cost',\n 'grand_total',)\n\n # Fields order by date from newest to oldest\n ordering = ('-date',)\n\n\n# Register Order, OrderAdmin models 
(OrderLineItem through OrderAdmin)\nadmin.site.register(Order, OrderAdmin)\n","sub_path":"checkout/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"156003877","text":"def shellSort(arr):\r\n gap = int((len(arr)/2))\r\n while gap > 0:\r\n for i in range (gap, len(arr)):\r\n temp = arr[i]\r\n j = i\r\n while j >= gap and arr[j - gap] > temp:\r\n arr[j] = arr[j-gap]\r\n j -= gap\r\n arr[j] = temp\r\n gap /= 2 \r\n gap = int(gap)\r\nstring = input().split(\" \")\r\ndata = [int(item) for item in string]\r\nshellSort(data)\r\nprint(data)","sub_path":"Sorting/shell_sort.py","file_name":"shell_sort.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"522283851","text":"\"\"\"\nGiven a binary tree, return the postorder traversal of its nodes' values.\n\nFollow up: Recursive solution is trivial, could you do it iteratively?\n\"\"\"\nfrom TreeNode import *\nclass Solution(object):\n def postorderTraversal(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[int]\n \"\"\"\n def Travel(root, res):\n if root:\n Travel(root.left, res)\n Travel(root.right, res)\n res.append(root.val)\n\n res = []\n Travel(root, res)\n return res\n\n def postorderTraversal2(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[int]\n \"\"\"\n traversal, stack = [], [root]\n while stack:\n node = stack.pop()\n if node:\n # pre-order, right first\n traversal.append(node.val)\n stack.append(node.left)\n stack.append(node.right)\n\n # reverse result\n return traversal[::-1]\n\n\nA = [1,None,2,3] # 321\nroot = TreeNode().BuildTree(A)\nprint(Solution().postorderTraversal2(root))\n","sub_path":"145BinTreePostOrder.py","file_name":"145BinTreePostOrder.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"267466917","text":"import json\n\nimport pymysql\nfrom flask import Flask, request, Response\n\napp = Flask(__name__)\napp.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024\n\n\ndef conn():\n return pymysql.connect(host='localhost',\n user='root',\n db='tinhte',\n charset='utf8',\n cursorclass=pymysql.cursors.DictCursor)\n\n\n@app.route('/getArticle')\ndef getArticle():\n after = request.args.get('after')\n before = request.args.get('before')\n limit = request.args.get('limit')\n\n connection = conn()\n cursor = connection.cursor()\n\n if before:\n cursor.execute(\n \"SELECT * FROM article WHERE id < %s ORDER BY id DESC LIMIT %s\" % (str(before), str(limit)))\n elif after:\n cursor.execute(\"SELECT * FROM article WHERE article.id > %s LIMIT %s\" % (str(after), str(limit)))\n else:\n cursor.execute(\"SELECT * FROM article LIMIT %s\" % str(limit))\n articles = cursor.fetchall()\n if before and articles:\n articles.reverse()\n for item in articles:\n cursor.execute(\"SELECT * FROM author WHERE id = %s\" % item['id_author'])\n author = cursor.fetchone()\n item['author'] = author\n\n cursor.execute(\"SELECT * FROM article_detail WHERE id = %s\" % item['id_detail'])\n detail = cursor.fetchone()\n item['detail'] = detail\n\n return Response(json.dumps(articles), mimetype='application/json')\n\n\nif __name__ == '__main__':\n app.run(host='192.168.1.84', 
debug=True)\n","sub_path":"practice11/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"101459733","text":"\"\"\"\nUsing the \"Chain of Responsibility\" pattern, put together a shopping list for baking pancakes.\nWe need to look through the fridge and check, one by one, whether we have the required ingredients:\n    2 eggs\n    300 grams of flour\n    0.5 l of milk\n    100 grams of sugar\n    10 ml of sunflower oil\n    120 grams of butter\n\nIn the end we should get a list of the missing ingredients.\n\"\"\"\n\nclass BaseHandler:\n    def __init__(self):\n        self.next = None\n\n    def set_next(self, handler):\n        self.next = handler\n        return self.next\n\n\nclass Fridge:\n    def __init__(self, eggs=0, flour=0, milk=0.0, sugar=0, sunflower_oil=0, butter=0):\n        self.eggs = eggs\n        self.flour = flour\n        self.milk = milk\n        self.sugar = sugar\n        self.sunflower_oil = sunflower_oil\n        self.butter = butter\n\n\nclass Eggs(BaseHandler):\n    def handle(self, fridge: Fridge):\n        if fridge.eggs < 2:\n            print(f'Need {2-fridge.eggs} eggs')\n        if self.next:\n            self.next.handle(fridge)\n\n\nclass Flour(BaseHandler):\n    def handle(self, fridge: Fridge):\n        if fridge.flour < 300:\n            print(f'Need {300-fridge.flour} grams of flour')\n        if self.next:\n            self.next.handle(fridge)\n\n\nclass Milk(BaseHandler):\n    def handle(self, fridge: Fridge):\n        if fridge.milk < 0.5:\n            print(f'Need {0.5-fridge.milk} liters of milk')\n        if self.next:\n            self.next.handle(fridge)\n\n\nclass Sugar(BaseHandler):\n    def handle(self, fridge: Fridge):\n        if fridge.sugar < 100:\n            print(f'Need {100-fridge.sugar} grams of sugar')\n        if self.next:\n            self.next.handle(fridge)\n\n\nclass Oil(BaseHandler):\n    def handle(self, fridge: Fridge):\n        if fridge.sunflower_oil < 10:\n            print(f'Need {10-fridge.sunflower_oil} ml of sunflower oil')\n        if self.next:\n            self.next.handle(fridge)\n\n\nclass ButterHandler(BaseHandler):\n    def handle(self, fridge: Fridge):\n        if fridge.butter < 120:\n            print(f'Need {120-fridge.butter} grams of butter')\n        if self.next:\n            self.next.handle(fridge)\n","sub_path":"10-Design_Patterns/hw/4-Chain_Of_Responsibility/chain_of_responsibility.py","file_name":"chain_of_responsibility.py","file_ext":"py","file_size_in_byte":2370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"463250806","text":"'''\nThis module defines the functions for actually calculating the Raman spectra including\nthe actual peak intensities given the Polarizability Tensor (aka alpha_tensor), electric\nfield as a function of z (distance from the gold surface), and center of vibration for a\ngiven frequency.\n'''\n# import time\nimport numpy as np\nimport efields\nimport euler_rotate\nimport math\n# import plot_data\n\n\ndef sparse_intensity(rotation_vector, peaks_to_compare,\n                     center_of_vibration, alpha_tensor,\n                     a_tensor, z_position, frequencies):\n    rotation_matrix = euler_rotate.find_rotation_matrix(rotation_vector)\n    # for num, peaks in enumerate(peaks_to_compare):\n    z_centers = np.array([(euler_rotate.vector_rotate(\n        center_of_vibration[peaks, :],\n        rotation_matrix)[2]) for peaks in peaks_to_compare])\n\n    new_alpha = np.array([euler_rotate.tensor_rotate(alpha_tensor[peaks, :, :],\n                         rotation_matrix) for peaks in peaks_to_compare])\n\n    new_a_zzz = np.array([euler_rotate.tensor_rotate3_zzz(a_tensor[peaks],\n                         rotation_matrix) for peaks in peaks_to_compare])\n\n    a_component = np.array([1./3.*(new_alpha[i, 0, 0]+ new_alpha[i, 1, 1]+\n                                   new_alpha[i, 2, 2])\n                            for i in 
range(len(peaks_to_compare))])\n gamma_component = np.array([((new_alpha[i, 0, 0] - new_alpha[i, 1, 1])**2 +\n (new_alpha[i, 1, 1] - new_alpha[i, 2, 2])**2 +\n (new_alpha[i, 2, 2] - new_alpha[i, 0, 0])**2 +\n 6*(new_alpha[i, 0, 1]**2 +\n new_alpha[i, 1, 2]**2 +\n new_alpha[i, 2, 0]**2))*0.5\n for i in range(len(peaks_to_compare))])\n\n full_alpha = (45*a_component**2 + 7*gamma_component)/45\n\n new_alpha_zz = np.array([new_alpha[i, 2, 2] for i in\n range(len(peaks_to_compare))])\n\n # full_alpha = [0, 0, 0]\n #\n # new_alpha_zz = np.array([euler_rotate.tensor_rotate(\n # alpha_tensor[peaks, :, :],\n # rotation_matrix)[2, 2] for peaks in peaks_to_compare])\n\n new_z_centers = z_centers + z_position\n # print 'Centers: ', new_z_centers[0]\n # print 'Frequencies used: ', frequencies[peaks_to_compare]\n intensity_alpha = calculate_intensities(25, peaks_to_compare,\n frequencies, full_alpha,\n new_alpha_zz, new_a_zzz,\n new_z_centers)\n return intensity_alpha\n\ndef run_full_intensity(rotation_vector,\n center_of_vibration, alpha_tensor,\n z_position, frequencies):\n\n rotation_matrix = euler_rotate.find_rotation_matrix(rotation_vector)\n\n z_centers = np.array(\n [(euler_rotate.vector_rotate(center_of_vibration[peaks, :],\n rotation_matrix)[2]) for peaks in range(len(frequencies))])\n\n new_alpha_zz = np.array(\n [euler_rotate.tensor_rotate(alpha_tensor[peaks, :, :],\n rotation_matrix)[2, 2] for peaks in range(len(frequencies))])\n\n new_z_centers = z_centers + z_position\n # print 'Centers: ', z_centers\n # print 'Frequencies used: ', frequencies[peaks_to_compare]\n intensity_alpha = calculate_full_intensities(25, frequencies,\n new_alpha_zz, new_z_centers)\n return intensity_alpha\n\ndef calculate_intensities(dia, peaks_to_compare, wavenumber, full_alpha,\n new_alpha_zz, new_a_zzz, z_cent):\n '''\n This function actually calculates the Raman intensities.\n '''\n\n omega = 1\n\n intensity_alpha = np.zeros([len(peaks_to_compare)])\n intensity_a = np.zeros([len(peaks_to_compare)])\n intensity = np.zeros([len(peaks_to_compare)])\n # print z_centers.shape\n # print len(peaks_to_compare)\n\n for w_num, peak in enumerate(peaks_to_compare):\n # print w_num\n # grabs the tensor matrix of the correct frequency\n alpha_temp = new_alpha_zz[w_num] # Grab the ZZ component\n a_temp = new_a_zzz[w_num]\n z_location = z_cent[w_num]/10\n intensity_alpha[w_num] = (efields.e_field(dia, z_location,\n wavenumber[peak])**2\n *efields.e_field(dia, z_location, 0)**2\n *omega**4 *(alpha_temp)**2)/full_alpha[w_num]\n\n intensity_a[w_num] = (efields.grad_efield(dia, z_location,\n wavenumber[peak])**2\n *efields.grad_efield(dia, z_location, 0)**2\n *omega**4 *(a_temp)**2)\n\n cross_term = (math.sqrt(intensity_alpha[w_num])\n *math.sqrt(intensity_a[w_num])\n /math.sqrt(full_alpha[w_num]))\n\n intensity[w_num] = (1./4.*intensity_alpha[w_num] +\n 1./36.*intensity_a[w_num] -\n 1./6.*cross_term)\n return intensity #, ramanshifts, lorentzian_alpha\n\ndef calculate_full_intensities(dia, wavenumbers, new_alpha_zz, z_cent):\n '''\n This function actually calculates the Raman intensities.\n '''\n omega = 1\n\n intensity_alpha = np.zeros([len(wavenumbers)])\n # print z_centers.shape\n # print len(peaks_to_compare)\n for num, wavenum in enumerate(wavenumbers):\n # print num\n # grabs the tensor matrix of the correct frequency\n alpha_temp = new_alpha_zz[num] # Grab the ZZ component\n z_location = z_cent[num]/10\n intensity_alpha[num] = (efields.e_field\n (dia, z_location,\n wavenum)**2\n *efields.e_field(dia, z_location, 0)**2\n 
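# field enhancement at the Raman-shifted line times enhancement at the incident line (shift 0)\n                                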
*omega**4 *(alpha_temp)**2)\n\n\n return intensity_alpha\n\ndef calculate_raman_spectrum(wavenumber, intensity_alpha):\n '''\n Currently not used. This function uses the intensities from the alpha\n tensor to create a sum of lorentzian peaks of fixed width to generate\n a Raman spectrum.\n '''\n\n width = 10.\n ramanshifts = range(400, 1801)\n lorentzian_alpha = np.zeros(len(ramanshifts))\n\n for index, wave in enumerate(wavenumber):\n lorentzian_alpha_intensity = np.zeros(len(ramanshifts))\n for r_num, shift in enumerate(ramanshifts):\n x_value = (wave - shift)/width\n lorentzian_alpha_intensity[r_num] = intensity_alpha[index]/(1 + x_value**2)\n lorentzian_alpha += lorentzian_alpha_intensity\n\n return lorentzian_alpha, ramanshifts\n","sub_path":"src/calculate_spectrum.py","file_name":"calculate_spectrum.py","file_ext":"py","file_size_in_byte":6709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"363215893","text":"from collections import UserDict\nimport json\nfrom os import listdir\nfrom os.path import exists, join, splitext\n\nfrom ..utils.exceptions import UnknownCodelistError\n\n\nclass Codelist(UserDict):\n def __init__(self, slug, path, version):\n self.slug = slug\n self.version = version\n self.filepath = join(path, slug + '.json')\n if not exists(self.filepath):\n raise UnknownCodelistError('Codelist not found')\n\n def __iter__(self):\n for k in self.all().keys():\n yield k\n\n def __len__(self):\n return len(self.all())\n\n def __repr__(self):\n return '<{} ({} v{})>'.format(\n self.__class__.__name__,\n self.slug,\n self.version)\n\n def _load(self):\n if hasattr(self, 'data'):\n return\n with open(self.filepath) as f:\n j = json.load(f)\n self._url = j['metadata']['url']\n self._name = j['metadata']['name']\n self._description = j['metadata']['description']\n self._complete = j['attributes']['complete']\n self.data = {x['code']: x['name'] for x in j['data']}\n\n @property\n def url(self):\n self._load()\n return self._url\n\n @property\n def name(self):\n self._load()\n return self._name\n\n @property\n def description(self):\n self._load()\n return self._description\n\n @property\n def complete(self):\n self._load()\n return self._complete\n\n def all(self):\n self._load()\n return self.data\n\n def get(self, code):\n return self.all().get(code)\n\n def __getitem__(self, code):\n return self.all()[code]\n\n\ndef get(slug, path=None, version='latest'):\n if not path:\n path = join('__pyandicache__', 'standard', 'codelists')\n if version == 'latest':\n major = max(listdir(path))\n path = join(path, major)\n else:\n major = version.split('.')[0]\n path = join(path, major)\n return Codelist(slug, path, major)\n\n\ndef all(path=None, version='latest'):\n if not path:\n path = join('__pyandicache__', 'standard', 'codelists')\n if version == 'latest':\n major = max(listdir(path))\n path = join(path, major)\n else:\n major = version.split('.')[0]\n path = join(path, major)\n return [Codelist(splitext(slug)[0], path, major) for slug in listdir(path)]\n","sub_path":"pyandi/standard/codelist.py","file_name":"codelist.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"390386210","text":"# -*- coding: utf-8 -*-\n\nfrom iprofile import texts\nfrom iprofile.core.decorators import icommand\nfrom iprofile.core.models import ICommand\nfrom iprofile.profiles.models import Profile\nfrom slugify import slugify\nimport 
click\n\n\n@icommand(help=texts.HELP_DELETE, short_help=texts.HELP_DELETE)\n@click.argument('profile', required=False)\n@click.option('--no-input', is_flag=True, help=texts.HELP_NO_INPUT)\n@click.option('-p', '--project', required=False, help=texts.HELP_PROJECT_OPT)\nclass Delete(ICommand):\n\n def run(self, **options):\n name = options.get('profile')\n no_input = options.get('no_input')\n project = options.get('project')\n\n if not (name and slugify(name)):\n deleted = 0\n confirm_text = texts.INPUT_CONFIRM_DELETE_ALL\n if not (no_input or click.confirm(confirm_text)):\n return\n project_path = project or self.settings.get('path')\n for profile_name in self.list_profiles(project_path):\n if ':' in profile_name:\n profile_name, project = profile_name.split(':')\n if self.delete(profile_name, project=project, delete_all=True):\n deleted += 1\n if deleted > 0:\n click.echo()\n self.green(texts.LOG_QTT_DELETED.format(\n deleted, 's' if deleted != 1 else ''))\n else:\n self.red(texts.ERROR_NO_PROFILES_TO_DELETE)\n else:\n confirm_text = texts.INPUT_CONFIRM_DELETE.format(name)\n if not (no_input or click.confirm(confirm_text)):\n return\n self.delete(name, project=project)\n\n def delete(self, name, project=None, delete_all=False):\n profile = Profile(name, project=project)\n\n if not profile.exists():\n self.red(texts.ERROR_PROFILE_DOESNT_EXIST.format(name))\n return\n\n profile.delete()\n delete_text = texts.LOG_DELETE_PROFILE.format(name)\n if delete_all:\n self.pgreen(delete_text)\n else:\n self.green(delete_text)\n\n return True\n","sub_path":"iprofile/cli/delete.py","file_name":"delete.py","file_ext":"py","file_size_in_byte":2130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"619185217","text":"import os\nimport pickle\nimport pathlib\n\nimport numpy as np\nimport pandas as pd\nimport redis\nimport xgboost as xgb\nimport yaml\nfrom flask_restful import Resource, fields, marshal_with, reqparse\n\nfrom api import api\nfrom api.tasks.ml_model import feature_engineering, make_new_dataframe, xgboost_model\nfrom config import basedir\nfrom log import logger\n\n# from webargs.fields import Int, Str\n# from webargs.flaskparser import use_kwargs\n\n\nlog = logger(__name__)\n\nmessage_fields = {\n \"company_name\": fields.String,\n \"start_date\": fields.String,\n \"end_date\": fields.String,\n \"category\": fields.String,\n \"city\": fields.String,\n \"state\": fields.String,\n \"country\": fields.String,\n \"price\": fields.Integer,\n \"product_id\": fields.Integer,\n \"product_name\": fields.String,\n}\n\n\nclass DefaultResource(Resource):\n \"\"\"Handle default route.\"\"\"\n\n def get(self):\n \"\"\"Get request for home page or response.\"\"\"\n return {\n \"status\": \"success\",\n \"data\": {\"msg\": \"Welcome to Linode Sales Training System- ADPS\"},\n }\n\n\nclass MakePredictions(Resource):\n \"\"\"Make new predictions\"\"\"\n\n # @marshal_with(message_fields)\n def post(self):\n\n \"\"\"Make new predictions.\n\n Handle POST to make new predictions\n Please state_date & end_date format should be like this \n (year-month-day (2020-03-01))\n ---\n parameters:\n - name: Sales forecast form\n in: body\n required: true\n schema:\n required:\n - start_date\n - company_name\n - end_date\n - category\n - city\n - state\n - country\n - price\n - product_id\n properties:\n start_date:\n type: string\n required: true\n description: start date for prediction\n end_date:\n type: string\n description: end date for prediction\n company_name:\n 
type: string\n required: true\n description: company name\n category:\n type: string\n required: true\n description: product category\n city:\n type: string\n required: true\n description: the city store is located\n state:\n type: string\n description: The state store is located\n country:\n type: string\n required: true\n description: the country store is located\n price:\n type: integer\n required: true\n description: the price of product to be forecasted\n product_id:\n type: string\n required: true\n description: the product_id of product to be forecasted\n product_name:\n type: string\n required: false\n description: the product_name of product to be forecasted\n responses:\n 400:\n description: Fails when there is no pickled model of trained sales ML for the company\n schema:\n type: object\n description: A dictionary containing error message\n properties:\n status:\n type: string\n description: Response status(fail/error)\n data:\n type: object\n description: Response content\n properties:\n msg:\n type: string\n description: The reason the endpoint failed.\n 200:\n description: Returns a list of forecasted sales, quantity and product variables\n schema:\n type: object\n properties:\n status:\n type: string\n description: Status of the response\n data:\n type: object\n properties:\n date:\n type: array\n items:\n type: string\n product_name:\n type: array\n items:\n type: string\n product_id:\n type: array\n items:\n type: number\n city:\n type: array\n items:\n type: string\n state:\n type: array\n items:\n type: string\n country:\n type: array\n items:\n type: string\n forecasted_sales:\n type: array\n items:\n type: number\n quantity_predicted:\n type: array\n items:\n type: number\n \"\"\"\n parser = reqparse.RequestParser()\n parser.add_argument(\n \"company_name\",\n type=str,\n required=True,\n help=\"company name cannot be blank!\",\n )\n parser.add_argument(\n \"start_date\", type=str, required=True, help=\"start date cannot be blank!\"\n )\n parser.add_argument(\n \"end_date\", type=str, required=True, help=\"end date cannot be blank!\"\n )\n parser.add_argument(\n \"category\", type=str, required=True, help=\"category cannot be blank!\"\n )\n parser.add_argument(\n \"city\", type=str, required=True, help=\"city cannot be blank!\"\n )\n parser.add_argument(\n \"state\", type=str, required=True, help=\"state cannot be blank!\"\n )\n parser.add_argument(\n \"country\", type=str, required=True, help=\"country cannot be blank!\"\n )\n parser.add_argument(\n \"price\", type=int, required=True, help=\"price cannot be blank!\"\n )\n parser.add_argument(\n \"product_id\", type=int, required=True, help=\"product_id cannot be blank!\"\n )\n parser.add_argument(\n \"product_name\",\n type=str,\n required=False,\n help=\"product name cannot be blank!\",\n )\n args = parser.parse_args()\n\n start_date = args[\"start_date\"]\n company_name = args[\"company_name\"]\n end_date = args[\"end_date\"]\n category = args[\"category\"]\n city = args[\"city\"]\n state = args[\"state\"]\n country = args[\"country\"]\n price = args[\"price\"]\n product_id = args[\"product_id\"]\n product_name = args[\"product_name\"]\n\n # create a new dataframe from the values passed in json\n datasets = make_new_dataframe(\n start_date,\n end_date,\n product_name,\n product_id,\n category,\n city,\n state,\n country,\n price,\n )\n\n train_data = datasets.copy()\n # apply feature engineering and make predictions\n train_data = feature_engineering(datasets)\n train_data = train_data[\n [\n \"year\",\n \"day\",\n 
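# engineered date parts plus the encoded product/location columns the model expects\n                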
\"weekofyear\",\n \"month\",\n \"product_id\",\n \"city\",\n \"category\",\n \"country\",\n \"state\",\n ]\n ]\n\n train_data = xgb.DMatrix(train_data)\n\n # check if pickle model file exist before running background training and calling files from AWS\n filename = os.path.join(basedir, \"models\", f\"pickled_model_{company_name}\")\n path = pathlib.Path(filename)\n if path.exists():\n model = open(filename, \"rb\")\n model = pickle.load(model)\n results = model.predict(train_data)\n datasets[\"forecasted_sales\"] = results\n datasets[\"quantity_predicted\"] = round(\n datasets[\"forecasted_sales\"] / datasets[\"price\"]\n )\n\n # Datetime are not JSON serializabe, so we change dates to string\n datasets[\"date\"] = datasets[\"date\"].dt.strftime(\"%Y-%b-%d\")\n results = datasets.to_dict(orient=\"list\")\n\n log.info(\"succesfully made new predictions\")\n return {\"status\": \"success\", \"data\": results}, 200\n\n else:\n \"\"\"A function that calls the files uploaded to AWS S3 bucket.\n But for now we would be working with local files until such \n options are available.\n If the company_name is passed in json, it is used to filter out the \n the datasets from AWS S3 bucket\n \"\"\"\n\n def load_files_from_aws(company_name):\n \"\"\"Function that loads files from AWS S3 Bucket\"\"\"\n pass\n\n ords = os.path.join(basedir, \"data/orders.csv\")\n trans = os.path.join(basedir, \"data/transactions.csv\")\n prods = os.path.join(basedir, \"data/products.csv\")\n\n # background task for model_training\n background_task = xgboost_model.delay(ords, trans, prods, company_name)\n\n log.warning(\n \"failed to make new predictions ,sales model is still running,retry requests in few minute time\"\n )\n return {\n \"status\": \"fail\",\n \"data\": {\n \"msg\": \"failed to make new predictions\" +\n \"sales model is still running,retry requests in few minute time\"\n }\n },400\n \n\n\napi.add_resource(DefaultResource, \"/\", endpoint=\"home\")\napi.add_resource(MakePredictions, \"/prediction/\", endpoint=\"prediction\")\n","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"437994920","text":"# Dependencies\nimport pandas as pd\nimport numpy as np\nimport requests\nimport time\n\npd.options.display.max_rows = 999\npd.options.display.max_columns = 999\n\nstart_time = time.time()\n\nykey = \"Ii7Pa9IZug_H12vEjc6Z8Q7PVKDIkFXZDYLKJR9xdtTWkgw75dqoWkhpaHQ__jFcmvrmytLdF3plPxFDfw6cJqe5ugr-HOUirTiSPMFI7aYZ1W9mvifY2AJY7Sk5XXYx\"\n\nyelp_url = 'https://api.yelp.com/v3/businesses/search'\nyelp_headers = {'Authorization': 'Bearer %s' % ykey}\n\ncity = 'Los Angeles, CA'\n\n# Columns of DataFrame\n\nid_list = []\nname_list = []\nis_closed_list = []\nreview_count_list = []\ncategories_list = []\nrating_list = []\nlatitude_list = []\nlongitude_list = []\nprice_list = []\naddress_list = []\ncity_list = []\nzip_code_list = []\nstate_list = []\ncategories_all_list = []\n\nrestaurant_count = 0\n\nfor offset in range(0, 1000, 20):\n# print(f'Retrieving restaurants in {city}')\n yelp_params = {'location': city, 'term': 'restaurants', 'limit' : '20', 'offset' : offset}\n yelp_response = requests.get(yelp_url, yelp_params, headers = yelp_headers)\n yelp_data = yelp_response.json()\n# print(yelp_data)\n restaurant_data = yelp_data['businesses']\n\n for restaurant in restaurant_data:\n categories_all_list_sub = []\n for category in restaurant['categories']: \n 
{"seq_id":"437994920","text":"# Dependencies\nimport pandas as pd\nimport numpy as np\nimport requests\nimport time\n\npd.options.display.max_rows = 999\npd.options.display.max_columns = 999\n\nstart_time = time.time()\n\nykey = \"Ii7Pa9IZug_H12vEjc6Z8Q7PVKDIkFXZDYLKJR9xdtTWkgw75dqoWkhpaHQ__jFcmvrmytLdF3plPxFDfw6cJqe5ugr-HOUirTiSPMFI7aYZ1W9mvifY2AJY7Sk5XXYx\"\n\nyelp_url = 'https://api.yelp.com/v3/businesses/search'\nyelp_headers = {'Authorization': 'Bearer %s' % ykey}\n\ncity = 'Los Angeles, CA'\n\n# Columns of DataFrame\n\nid_list = []\nname_list = []\nis_closed_list = []\nreview_count_list = []\ncategories_list = []\nrating_list = []\nlatitude_list = []\nlongitude_list = []\nprice_list = []\naddress_list = []\ncity_list = []\nzip_code_list = []\nstate_list = []\ncategories_all_list = []\n\nrestaurant_count = 0\n\nfor offset in range(0, 1000, 20):\n# print(f'Retrieving restaurants in {city}')\n    yelp_params = {'location': city, 'term': 'restaurants', 'limit' : '20', 'offset' : offset}\n    yelp_response = requests.get(yelp_url, yelp_params, headers = yelp_headers)\n    yelp_data = yelp_response.json()\n# print(yelp_data)\n    restaurant_data = yelp_data['businesses']\n\n    for restaurant in restaurant_data:\n        categories_all_list_sub = []\n        for category in restaurant['categories']: \n            categories_all_list_sub.append(category['title'])\n        \n        for category in restaurant['categories']: \n            id_list.append(restaurant['id'])\n            name_list.append(restaurant['name'])\n            is_closed_list.append(restaurant['is_closed'])\n            review_count_list.append(restaurant['review_count'])\n            categories_list.append(category['title'])\n            rating_list.append(restaurant['rating'])\n            latitude_list.append(restaurant['coordinates']['latitude'])\n            longitude_list.append(restaurant['coordinates']['longitude'])\n            address_list.append(restaurant['location']['address1'])\n            city_list.append(restaurant['location']['city'])\n            zip_code_list.append(restaurant['location']['zip_code'])\n            state_list.append(restaurant['location']['state'])\n\n            categories_all_list.append(', '.join(categories_all_list_sub))\n\n            try:\n                price_list.append(restaurant['price'])\n\n            except KeyError:\n                price_list.append('')\n\n        restaurant_count += 1\n        print(f'Restaurant #{restaurant_count} data retrieved.')\n\n\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n\nrestaurants_df = pd.DataFrame({'ID': id_list, 'Name': name_list, 'Is_Closed': is_closed_list, 'Review_Count': review_count_list, 'Categories_All': categories_all_list, 'Categories': categories_list, 'Rating': rating_list, 'Latitude': latitude_list, 'Longitude': longitude_list, 'Price' : price_list, 'Address': address_list, 'City': city_list, 'Zip_Code' : zip_code_list, 'State': state_list})\nrestaurants_df.drop_duplicates(keep = False, inplace = True)\nrestaurants_df.dropna(subset = ['Address'], inplace = True)\nrestaurants_df.dropna(subset = ['Price'], inplace = True)\nrestaurants_df = restaurants_df[restaurants_df.Price != '']\nrestaurants_df['Price'].replace({'$$$$$': 5, '$$$$': 4, '$$$': 3, '$$': 2, '$': 1}, inplace = True)\nrestaurants_df['Price'] = pd.to_numeric(restaurants_df['Price'])\nrestaurants_df['Categories'].replace(['American (New)', 'American (Traditional)'], 'American', inplace = True)\nrestaurants_df['Categories'].replace(['New Mexican Cuisine'], 'Mexican', inplace = True)\n\n# new_cat_list was not defined in the original script; assuming it is the list\n# of distinct (cleaned) category names present in the data:\nnew_cat_list = sorted(restaurants_df['Categories'].unique())\n\ncat_list_df = pd.DataFrame(columns=new_cat_list)\n\n# NOTE: the merge below assumes 'Southern' is one of the category columns\nnew_df = pd.merge(restaurants_df, cat_list_df, how='left', left_on = restaurants_df.ID, right_on = cat_list_df.Southern)\n\nfor i in new_cat_list:\n    new_df.loc[(new_df.Categories.str.contains(i)==True), i] = 1\n\nnew_df.drop(axis = 1, columns = ['Categories', 'key_0'], inplace = True)\n\nnew_df.drop_duplicates(subset = 'ID', inplace = True)\n\nnew_df.fillna(0, inplace = True)\n\nmodeling_df = new_df.drop(axis = 1, columns = ['ID', 'Name', 'Categories_All', 'Is_Closed', 'Latitude', 'Longitude', 'Address', 'City', 'State'])\n\nmodeling_df = pd.get_dummies(modeling_df)\n\nL = list(modeling_df.columns)\n\nfor i in range(3):\n    L.pop(0)\n\nmodeling_df[L] = modeling_df[L].astype('category')\n\nX = modeling_df.drop(axis = 1, columns = ['Review_Count', 'Rating'])\ny = modeling_df[['Rating']]\n\nfrom sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.5)\n\nprint(X_train.shape, y_train.shape)\nprint(X_test.shape, y_test.shape)\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\nfrom sklearn.ensemble import RandomForestRegressor\n\nrf = RandomForestRegressor(n_jobs = -1, oob_score=True, verbose=0, max_depth=3)\nrf = rf.fit(X_train, y_train)\n\nuser_df = X.drop(X.index)\nuser_df['Price'] = [0]\nuser_df.fillna(0, inplace = True)\n\n# Inputs from user\nuser_zipcode = '91604'\nuser_categories = ['Mexican', 'Bars']\nuser_price = 
2\n\nuser_df.Price = user_price\n\nzip_column = [col for col in user_df.columns if user_zipcode in col]\nuser_df[zip_column] = 1\n\nfor category in user_categories:\n cat_column = [col for col in user_df.columns if category == col]\n user_df[cat_column] = 1\n\n# Prediction\nuser_prediction = np.round(rf.predict(user_df) * 2) / 2\nuser_prediction = user_prediction[0] #4\n\n# Prediction to display\nuser_prediction\n\n\n\n\n\nmap_df = restaurants_df[restaurants_df.Is_Closed == False].drop(axis = 1, columns = ['ID', 'Is_Closed', 'Categories'])\n\nmap_df.drop_duplicates(inplace = True)\n\nmap_df_display = map_df[(map_df.Zip_Code == user_zipcode) & (map_df.Categories_All.str.contains('|'.join(user_categories)))]\n\nmap_df_display['Price'].replace({4 : '$$$$', 3: '$$$', 2 : '$$', 1 : '$'}, inplace = True)","sub_path":"Project3.py","file_name":"Project3.py","file_ext":"py","file_size_in_byte":5814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"346964250","text":"import sys\nimport csv\nimport math\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom batch1 import get_system_model\nfrom missions.didymos import DidymosSystemModel\nfrom missions.rosetta import RosettaSystemModel\n\ntry:\n from sklearn.gaussian_process import GaussianProcessRegressor\n from sklearn.gaussian_process import GaussianProcessClassifier\n from sklearn.gaussian_process.kernels import RBF, WhiteKernel\nexcept:\n print('Requires scikit-learn, install using \"conda install scikit-learn\"')\n sys.exit()\n\nfrom settings import *\nfrom algo import tools\n\nEASY_LIMITS = ((80, 180), (0, 12), (50, 270), (0.8, 1))\nFAIL_ERRS = {\n 'rel shift error (m/km)': 200,\n 'altitude error': 2000,\n 'dist error (m/km)': 200,\n 'lat error (m/km)': 200,\n 'rot error': 25,\n}\nMAX_ROTATION_ERR = 7\n\n# read logfiles\ndef read_data(sm, logfile, predictors, target):\n X, y, rot_err, labels = [], [], [], []\n \n with open(logfile, newline='') as csvfile:\n rad = sm.asteroid.mean_radius * 0.001\n data = csv.reader(csvfile, delimiter='\\t')\n first = True\n for row in data:\n if len(row)>10:\n if first:\n first = False\n prd_i = [row.index(p) for p in predictors if p not in ('distance', 'visible')]\n trg_i = row.index(target)\n rot_i = row.index('rot error')\n pos_i = [row.index(p+' sc pos') for p in ('x','y','z')]\n lbl_i = row.index('iter')\n else:\n row = np.array(row)\n try:\n pos = row[pos_i].astype(np.float)\n except ValueError as e:\n print('Can\\'t convert cols %s to float on row %s' % (pos_i, row[0]))\n raise e\n distance = np.sqrt(np.sum(pos**2))\n xt = abs(pos[2])*math.tan(math.radians(sm.cam.x_fov)/2)\n yt = abs(pos[2])*math.tan(math.radians(sm.cam.y_fov)/2)\n\n #xm = np.clip((xt - (abs(pos[0])-rad))/rad/2, 0, 1)\n #ym = np.clip((yt - (abs(pos[1])-rad))/rad/2, 0, 1)\n xm = 1 - (max(0, pos[0]+rad - xt) + max(0, rad-pos[0] - xt))/rad/2\n ym = 1 - (max(0, pos[1]+rad - yt) + max(0, rad-pos[1] - yt))/rad/2\n\n X.append(np.concatenate((\n row[prd_i].astype(np.float),\n [distance],\n [xm*ym],\n )))\n \n # err m/km\n tmp = row[trg_i].astype(np.float) if len(row)>trg_i else float('nan')\n y.append(tmp)\n rot_err.append(row[rot_i].astype(np.float))\n labels.append(row[lbl_i])\n \n X = np.array(X)\n \n # for classification of fails\n yc = np.isnan(y)\n rot_err = np.array(rot_err)\n if True:\n yc = np.logical_or(yc, np.isnan(rot_err))\n if MAX_ROTATION_ERR > 0:\n I = np.logical_not(yc)\n rot_err[I] = np.abs(tools.wrap_degs(rot_err[I]))\n yc[I] = np.logical_or(yc[I], rot_err[I] > 
MAX_ROTATION_ERR)\n\n # for regression\n yr = np.array(y)\n #yr[np.isnan(yr)] = FAIL_ERRS[target] # np.nanmax(yr)\n\n if target == 'rot error':\n yr = np.abs(tools.wrap_degs(yr))\n\n return X, yc, yr, labels\n\n\nif __name__ == '__main__':\n if len(sys.argv)<2:\n print('USAGE: python analyze-log.py [gpr|1d|easy] [shift|alt|dist|lat|orient]')\n sys.exit()\n \n mode = sys.argv[2]\n if len(sys.argv) > 3:\n sc = 1\n if sys.argv[3] == 'shift':\n target = 'rel shift error (m/km)'\n elif sys.argv[3] == 'alt':\n target = 'altitude error'\n sc = 1000\n elif sys.argv[3] == 'dist':\n target = 'dist error (m/km)'\n elif sys.argv[3] == 'lat':\n target = 'lat error (m/km)'\n elif sys.argv[3] == 'orient':\n target = 'rot error'\n else:\n assert False, 'unknown target: %s' % sys.argv[3]\n\n predictors = (\n 'sol elong', # solar elongation\n 'total dev angle', # total angle between initial estimate and actual relative orientation\n 'distance', # distance of object\n 'visible', # esimate of % visible because of camera view edge\n )\n predictor_labels = (\n 'Solar Elongation (deg)',\n 'Initial orientation error (deg)',\n 'Distance (km)',\n 'In camera view (%)',\n )\n target = target or 'rel shift error (m/km)' #'shift error km' #if not one_d_only else 'dist error'\n\n data = []\n for logfile in sys.argv[1].split(\" \"):\n mission = logfile.split('-')[0]\n sm = get_system_model(mission)\n\n # read data\n X, yc, yr, labels = read_data(sm, os.path.join(LOG_DIR, logfile), predictors, target)\n X[:, 1] = np.abs(tools.wrap_degs(X[:, 1]))\n data.append((logfile, X, yc, yr*sc, labels))\n\n\n if mode in ('1d', 'easy'):\n n_groups = 6\n #yr = yr/1000\n #idxs = (0, 1, 2, 3)\n idxs = (2,)\n for idx in idxs:\n fig, axs = plt.subplots(len(data), 1, figsize=(20, 18), sharex=True)\n for i, (logfile, X, yc, yr, labels) in enumerate(data):\n if mode == 'easy':\n q997 = np.percentile(np.abs(yr), 99.7)\n tmp = tuple((X[:, k] >= EASY_LIMITS[k][0], X[:, k] <= EASY_LIMITS[k][1]) for k in idxs if k!=idx)\n\n # concatenate above & take logical and, also remove worst 0.3%\n I = np.logical_and.reduce(sum(tmp, ()) + (np.logical_or(np.abs(yr) < q997, yr == FAIL_ERRS[target]),))\n else:\n I = np.ones((X.shape[0],), dtype='bool')\n\n xmin, xmax = np.min(X[I, idx]), np.max(X[I, idx])\n ax = axs[i] if len(data) > 1 else axs\n line, = ax.plot(X[I, idx], yr[I], 'x')\n\n if n_groups:\n # calc means and stds in bins\n\n #x = [1/v for v in np.linspace(1/xmin, 1/xmax, n_groups+1)]\n x = np.linspace(xmin, xmax, n_groups + 1)\n y_grouped = [yr[np.logical_and.reduce((\n I,\n np.logical_not(yc),\n X[:, idx] > x[i],\n X[:, idx] < x[i+1],\n ))] for i in range(n_groups)]\n #means = [np.percentile(yg, 50) for yg in y_grouped]\n means = np.array([np.mean(yg) for yg in y_grouped])\n #stds = np.subtract([np.percentile(yg, 68) for yg in y_grouped], means)\n stds = np.array([np.std(yg) for yg in y_grouped])\n x = x.reshape((-1, 1))\n stds = stds.reshape((-1, 1))\n means = means.reshape((-1, 1))\n xstep = np.concatenate((x, x), axis=1).flatten()[1:-1]\n sstep = np.concatenate((stds, stds), axis=1).flatten()\n mstep = np.concatenate((means, means), axis=1).flatten()\n ax.plot(xstep, sstep, '-')\n ax.plot(xstep, mstep, '-')\n # bar_width = (xmax - xmin)/n_groups * 0.2\n # rects1 = ax.bar((x[1:] + x[:-1]) * 0.5, stds, width=bar_width, bottom=means-stds/2,\n # alpha=0.4, color='b', yerr=stds, error_kw={'ecolor': '0.3'}, label='error')\n\n else:\n # filtered means, stds\n xt = np.linspace(xmin, xmax, 100)\n\n if False:\n # exponential weight\n weight_fun = lambda 
d: 0.01**abs(d/(xmax-xmin))\n else:\n # gaussian weight\n from scipy.stats import norm\n from scipy.interpolate import interp1d\n interp = interp1d(xt-xmin, norm.pdf(xt-xmin, 0, (xmax-xmin)/10))\n weight_fun = lambda d: interp(abs(d))\n\n if False:\n # use smoothed mean for std calc\n yma = tools.smooth1d(X[I, idx], X[I, idx], yr[I], weight_fun)\n else:\n # use global mean for std calc (fast)\n yma = np.mean(yr[I])\n\n ym = tools.smooth1d(xt, X[I, idx], yr[I], weight_fun)\n ystd = tools.smooth1d(xt, X[I, idx], (yr[I] - yma)**2, weight_fun) ** (1/2)\n\n ax.plot(xt, ym, '-')\n ax.plot(xt, ystd, '-')\n\n ax.set_title('%s: %s by %s' % (logfile, target, predictor_labels[idx]))\n ax.set_xlabel(predictor_labels[idx])\n ax.set_ylabel(target)\n ax.set_yticks(range(-200, 201, 50))\n ax.hlines(range(-200, 201, 10), xmin, xmax, '0.95', '--')\n ax.hlines(range(-200, 201, 50), xmin, xmax, '0.7', '-')\n plt.setp(ax.get_xticklabels(), rotation='vertical', fontsize=14)\n plt.setp(ax.get_yticklabels(), fontsize=14)\n tools.hover_annotate(fig, ax, line, np.array(labels)[I])\n\n #ax.set_xticks((x[1:] + x[:-1]) * 0.5)\n #ax.set_xticklabels(['%.2f-%.2f' % (x[i], x[i+1]) for i in range(n_groups)])\n #ax.legend()\n\n # operation zones for didymos mission\n if mission[:4]=='didy' and idx==2:\n ax.set_xticks(np.arange(0.1, 10.5, 0.2))\n if i==0:\n ax.axvspan(1.1, 1.3, facecolor='cyan', alpha=0.3)\n ax.axvspan(3.8, 4.2, facecolor='orange', alpha=0.3)\n elif i==1:\n ax.axvspan(0.15, 0.3, facecolor='pink', alpha=0.5)\n ax.axvspan(1.1, 1.3, facecolor='cyan', alpha=0.3)\n elif i == 2:\n ax.axvspan(3.8, 4.2, facecolor='orange', alpha=0.3)\n elif i==3:\n ax.axvspan(1.1, 1.3, facecolor='cyan', alpha=0.3)\n ax.axvspan(2.8, 5.2, facecolor='orange', alpha=0.3)\n\n plt.tight_layout()\n while(plt.waitforbuttonpress() == False):\n pass\n\n elif mode == '2d':\n\n # 0: solar elong\n # 1: initial deviation angle\n # 2: distance\n # 3: ratio in view\n idxs = tuple(range(4))\n pairs = (\n # (2, 0),\n # (1, 3),\n (2, 3),\n (0, 1),\n )\n\n titles = ['ORB', 'AKAZE', 'SURF', 'SIFT']\n nd = len(data)\n r, c = {\n 1: (1, 1),\n 2: (1, 2),\n 3: (3, 1),\n 4: (2, 2),\n }[nd]\n fig, axs = plt.subplots(r, c*len(pairs), figsize=(32, 18))\n\n for j, (logfile, X, yc, yr, labels) in enumerate(data):\n for i, (i0, i1) in enumerate(pairs):\n ax = axs.flatten()[j*len(pairs) + i]\n\n # filter out difficult regions of axis that are not shown\n tmp = tuple((X[:, k] >= EASY_LIMITS[k][0], X[:, k] <= EASY_LIMITS[k][1]) for k in idxs if k not in (i0, i1))\n I = np.logical_and.reduce(sum(tmp, ()))\n\n # add some offset if ratio in view is one so that they dont all stack in same place\n offsets = (X[I, 3] == 1) * np.random.uniform(0, 0.2, (np.sum(I),))\n off0 = 0 if i0 != 3 else offsets\n off1 = 0 if i1 != 3 else offsets\n\n line = ax.scatter(X[I, i0] + off0, X[I, i1] + off1, s=60, c=yc[I], cmap=plt.cm.Paired, alpha=0.5) #edgecolors=(0, 0, 0))\n ax.tick_params(labelsize=18)\n ax.set_xlabel(predictors[i0], fontsize=22)\n ax.set_ylabel(predictors[i1], fontsize=22)\n tools.hover_annotate(fig, ax, line, np.array(labels)[I])\n\n if i==0:\n col, row = j%c, j//c\n fig.text(0.26+col*0.5, 0.96-row*0.5, titles[j], fontsize=30, horizontalalignment='center')\n # ax.set_xbound(xmin, xmax)\n # ax.set_ybound(ymin, ymax)\n\n plt.tight_layout()\n plt.subplots_adjust(top=0.94, hspace=0.3, wspace=0.25)\n plt.show()\n\n elif mode == 'gpr':\n logfile, X, yc, yr, labels = data[0]\n\n pairs = (\n (0, 1),\n (0, 2),\n (1, 2),\n # (0,3),(1,3),(2,3),\n )\n for pair in pairs:\n xmin, xmax 
= np.min(X[:, pair[0]]), np.max(X[:, pair[0]])\n            ymin, ymax = np.min(X[:, pair[1]]), np.max(X[:, pair[1]])\n            xx, yy = np.meshgrid(np.linspace(xmin, xmax, 50), np.linspace(ymin, ymax, 50))\n\n            kernel = 0.01 * RBF(length_scale=((xmax - xmin) * 2, (ymax - ymin) * 2))\n            if False:\n                y = yc\n                # fit hyper parameters\n                kernel += 0.1 * WhiteKernel(noise_level=0.001)\n                gpc = GaussianProcessClassifier(kernel=kernel, warm_start=True).fit(X[:, pair], yc)\n                # hyper parameter results\n                res = gpc.kernel_, gpc.log_marginal_likelihood(gpc.kernel_.theta)\n                # classify on each grid point\n                P = gpc.predict_proba(np.vstack((xx.ravel(), yy.ravel())).T)[:, 1]\n            else:\n                y = yr\n                # fit hyper parameters\n                kernel += 4.0 * WhiteKernel(noise_level=4.0)\n                gpr = GaussianProcessRegressor(kernel=kernel, alpha=0, normalize_y=True).fit(X[:, pair], yr)\n                # hyper parameter results\n                res = gpr.kernel_, gpr.log_marginal_likelihood(gpr.kernel_.theta)\n                # regress on each grid point\n                P = gpr.predict(np.vstack((xx.ravel(), yy.ravel())).T)\n\n            P = P.reshape(xx.shape)\n\n            # plot the classifier/regressor output over the grid\n            fig = plt.figure(figsize=(8, 8))\n            if True:\n                print('%s' % ((np.min(P), np.max(P), np.min(y), np.max(y)),))\n                image = plt.imshow(P, interpolation='nearest', extent=(xmin, xmax, ymin, ymax),\n                                   aspect='auto', origin='lower', cmap=plt.cm.PuOr_r)\n                plt.scatter(X[:, pair[0]], X[:, pair[1]], s=30, c=y, cmap=plt.cm.Paired, edgecolors=(0, 0, 0))\n                cb = plt.colorbar(image)\n                ax = fig.gca()\n            else:\n                from mpl_toolkits.mplot3d import Axes3D\n                from matplotlib.colors import Normalize\n\n                ax = fig.gca(projection='3d')\n                scalarMap = plt.cm.ScalarMappable(norm=Normalize(vmin=np.min(P), vmax=np.max(P)),\n                                                  cmap=plt.cm.PuOr_r)\n                ax.plot_surface(xx, yy, P, rstride=1, cstride=1, facecolors=scalarMap.to_rgba(P), antialiased=True)\n\n            cb.ax.tick_params(labelsize=18)\n            ax.tick_params(labelsize=18)\n            plt.xlabel(predictors[pair[0]], fontsize=22)\n            plt.ylabel(predictors[pair[1]], fontsize=22)\n            plt.axis([xmin, xmax, ymin, ymax])\n            # plt.title(\"%s\\n Log-Marginal-Likelihood:%.3f\" % res, fontsize=12)\n            plt.tight_layout()\n            plt.show()\n\n    elif mode == '3d':\n        from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 unused import\n\n        logfile, X, yc, yr, labels = data[0]\n        xmin, xmax = np.min(X[:, 0]), np.max(X[:, 0])\n        ymin, ymax = np.min(X[:, 1]), np.max(X[:, 1])\n        zmin, zmax = np.min(X[:, 2]), np.max(X[:, 2])\n\n        fig = plt.figure(figsize=(20, 20))\n        ax = fig.add_subplot(111, projection='3d')\n        ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=yr, cmap=plt.cm.Paired, edgecolors=(0, 0, 0))\n\n        # cb = plt.colorbar(image)\n        # cb.ax.tick_params(labelsize=18)\n        ax.tick_params(labelsize=18)\n        ax.set_xlabel(predictors[0], fontsize=22)\n        ax.set_ylabel(predictors[1], fontsize=22)\n        ax.set_zlabel(predictors[2], fontsize=22)\n        ax.set_xbound(xmin, xmax)\n        ax.set_ybound(ymin, ymax)\n        ax.set_zbound(zmin, zmax)\n\n        plt.tight_layout()\n        plt.show()\n    else:\n        assert False, 'wrong mode'\n\n    #plt.waitforbuttonpress()\n","sub_path":"src/analyze-log.py","file_name":"analyze-log.py","file_ext":"py","file_size_in_byte":16381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
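The 1d mode of the script above smooths means and deviations with tools.smooth1d, which is project-internal code. Its Gaussian-weight usage reduces to a Nadaraya-Watson weighted average; the sketch below is an independent NumPy reimplementation on synthetic data, not the project's implementation:

```python
import numpy as np


def smooth1d(xt, x, y, weight_fun):
    """Weighted running mean of y(x), evaluated at the query points xt."""
    out = np.empty(len(xt))
    for i, t in enumerate(xt):
        w = weight_fun(x - t)               # kernel weight per sample
        out[i] = np.sum(w * y) / np.sum(w)
    return out


rng = np.random.default_rng(0)
x = np.sort(rng.uniform(0, 10, 200))
y = np.sin(x) + rng.normal(0, 0.3, x.size)
xt = np.linspace(0, 10, 100)

bw = (x.max() - x.min()) / 10               # same "range / 10" bandwidth as the script
gauss = lambda d: np.exp(-0.5 * (d / bw) ** 2)
ym = smooth1d(xt, x, y, gauss)                                  # smoothed mean
ystd = np.sqrt(smooth1d(xt, x, (y - y.mean()) ** 2, gauss))     # smoothed std (global mean)
```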
{"seq_id":"586992165","text":"class Solution:\n    def allPathsSourceTarget(self, graph):\n        \"\"\"\n        :type graph: List[List[int]]\n        :rtype: List[List[int]]\n        \"\"\"\n        total = []\n        target = len(graph) - 1\n\n        def helper(node, sofar):\n            # a path is complete once it reaches the target node\n            if node == target:\n                total.append(sofar)\n                return\n            for spot in graph[node]:\n                helper(spot, sofar + [spot])\n\n        helper(0, [0])\n        return total\n","sub_path":"Leetcode/0797_all_paths_source_to_target.py","file_name":"0797_all_paths_source_to_target.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
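The recursive depth-first enumeration in the record above has a direct iterative counterpart. The stack-based variant below is an added illustration under the same problem statement (all paths from node 0 to node len(graph)-1 in a DAG), not part of the original submission:

```python
def all_paths_source_target(graph):
    """Enumerate every path from node 0 to node len(graph)-1 in a DAG."""
    target = len(graph) - 1
    stack = [(0, [0])]
    total = []
    while stack:
        node, path = stack.pop()
        if node == target:
            total.append(path)
            continue
        for nxt in graph[node]:
            stack.append((nxt, path + [nxt]))
    return total


assert sorted(all_paths_source_target([[1, 2], [3], [3], []])) == [[0, 1, 3], [0, 2, 3]]
```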
{"seq_id":"535586232","text":"try:\n\tfrom inits.inits import Inits\nexcept:\n\tfrom inits import Inits\n\n\nclass GeneralData(object):\n\t\"\"\"Extracts vendor/model/part data from dmidecode-style text dumps.\"\"\"\n\n\tdata=Inits()\n\t# data=s.find_data(\"hardinfo.txt\", \"Processor\", 1)\n\n\t# returns vendor data\n\tdef vendor(self):\n\t\tdata=self.data.find_data('bios.txt', 'Vendor: ')\n\n\t\tif \"AMERICAN MEGATRENDS\" in data:\n\t\t\tdata=self.data.find_data('baseboard.txt', 'Manufacturer: ')\n\t\t\tdata=data.replace('TEK COMPUTER', '')\n\n\t\treturn data.replace('-', ' ').replace(' INC.', '')\n\n\n\t# returns LAPTOP, DESKTOP or SERVER\n\tdef hardware_type(self):\n\t\tdata=self.data.find_data('chassis.txt', 'Type: ')\n\n\t\tif (\"SERVER\" in data) or (\"RACK MOUNT\" in data):\n\t\t\treturn \"SERVER\"\n\n\t\tif (\"NOTEBOOK\" in data) or (\"PORTABLE\" in data):\n\t\t\treturn \"LAPTOP\"\n\n\t\tif (\"DESKTOP\" in data) or (\"MINI TOWER\" in data):\n\t\t\treturn \"DESKTOP\"\n\n\t\treturn data\n\n\n\t# returns the system model name\n\tdef model(self):\n\n\t\tif \"SERVER\" in self.hardware_type():\n\t\t\tdata=self.data.find_data('system.txt', 'Product Name: ')\n\t\t\treturn data\n\n\n\t\tif (\"DESKTOP\" in self.hardware_type()):\n\t\t\tdata=self.data.find_data('system.txt', 'Product Name: ')\n\t\t\tdata=data.replace('WORKSTATION', '')\n\t\t\tdata=data.replace('HP ', '')\n\n\t\t\tif \"PRODUCT NAME\" in data:\n\t\t\t\tdata=self.data.find_data('baseboard.txt', 'Product Name: ')\n\t\t\t\tif 'P8Z68-V' in data:\n\t\t\t\t\tdata=data.replace(' GEN3', '')\n\n\t\t\treturn data\n\n\n\t\tif (\"LAPTOP\" in self.hardware_type()):\n\t\t\tdata=self.data.find_data('system.txt', 'Product Name: ')\n\t\t\tdata=data.replace('HP ', '')\n\t\t\treturn data\n\n\t\treturn self.hardware_type()+\"*** SAVE TO DE-BUG LOG ***\"\n\n\n\t# returns the system part number (SKU)\n\tdef part_type(self):\n\n\t\tdata=self.data.find_data('system.txt', 'Product Name: ')\n\n\n\t\tif \"SERVER\" in self.hardware_type():\n\t\t\tif (\"POWEREDGE 1950\" in data) or (\"POWEREDGE R610\" in data):\n\t\t\t\treturn \"DATA UNAVAILABLE\"\n\t\t\treturn data\n\n\n\t\tif (\"DESKTOP\" in self.hardware_type()):\n\n\t\t\tif 'PRODUCT NAME' in data:\n\t\t\t\tif 'P8Z68-V' in self.model():\n\t\t\t\t\treturn \"DATA UNAVAILABLE\"\n\n\t\t\tif 'HP' in data:\n\t\t\t\tdata=self.data.find_data('system.txt', 'SKU Number: ')\n\t\t\t\treturn data\n\n\t\t\tif 'RAMPAGE III GENE' in self.model():\n\t\t\t\tdata=self.data.find_data('baseboard.txt', 'Type: ')\n\t\t\t\treturn data\n\n\n\t\t\treturn data\n\n\n\t\tif (\"LAPTOP\" in self.hardware_type()):\n\n\t\t\tif 'ELITEBOOK' in data:\n\t\t\t\tdata=self.data.find_data('system.txt', 'SKU Number: ')\n\t\t\t\treturn data\n\n\t\t\tif ('LATITUDE E' in data) or ('INSPIRON' in data):\n\t\t\t\tdata=self.data.find_data('baseboard.txt', 'Product Name: ')\n\t\t\t\treturn data\n\n\n\t\t\treturn data\n\n\n\t\treturn self.hardware_type()+\"*** SAVE TO DE-BUG LOG ***\"\n\n\n\tdef form_factor(self):\n\t\ttry:\n\t\t\tdata=self.data.find_data('chassis.txt', 'Height: ')\n\t\t\tdata=data.replace(\" \", \"\")\n\t\t\treturn \"SERVER - \"+data\n\t\texcept:\n\t\t\treturn \"DATA NOT AVAILABLE\"\n\n\tdef etho_data(self):\n\t\ttry:\n\t\t\tdata=self.data.find_data('hardinfo.txt', 'Ethernet controller', 1)\n\t\t\treturn str(\" \".join(data[1:]))\n\t\texcept:\n\t\t\treturn \"DATA NOT AVAILABLE\"\n\n\n# g=GeneralData()\n# print(g.vendor())\n# print(g.hardware_type())\n# print(g.form_factor())\n# print(g.model())\n# print(g.part_type())\n# print(g.etho_data())\n","sub_path":"inits/general_data.py","file_name":"general_data.py","file_ext":"py","file_size_in_byte":3118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"62549656","text":"from flask import Flask, render_template, request\napp = Flask(__name__)\nimport mlab\nfrom models.bike import Bike\nmlab.connect()\n@app.route('/')\ndef index():\n    return render_template('index.html')\n\n@app.route(\"/new_bike\", methods=[\"POST\",\"GET\"])\ndef newbike():\n    if request.method == \"GET\":\n        return render_template(\"add_bike.html\")\n    elif request.method == \"POST\":\n        form = request.form\n        model = form[\"model\"]\n        daily = form[\"daily\"]\n        image = form[\"image\"]\n        year = form[\"year\"]\n        new_bike = Bike(model=model,daily=daily,image=image,year=year)\n        \n        new_bike.save()\n        return(\"hello\")\n    \n\nif __name__ == \"__main__\":\n    app.run( debug=True)\n    ","sub_path":"homework1/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"213070405","text":"from sklearn.svm import SVR\nfrom sklearn.model_selection import GridSearchCV\nfrom .BaseModel import BaseModel\nimport numpy as np\n\n\nclass Svr(BaseModel):\n    def __init__(self) -> None:\n\n        self.param_grid = {\n            #'C': np.logspace(-3, 2, 6),\n            \"kernel\": [\"linear\"],\n            #'degree': np.logspace(2, 3, 4),\n            #'gamma' : np.logspace(-3, 2, 6)\n            \"epsilon\": [0.5],  # , 0.1, 1.5]\n        }\n        self.grid_search = GridSearchCV(SVR(), self.param_grid, cv=5)\n        self.model = 0\n\n    def fit(self, dataset, dataset_train):\n        X = np.concatenate((dataset[0], dataset_train[0]), axis=0)\n        y = np.concatenate((dataset[1], dataset_train[1]), axis=0)\n\n        self.grid_search.fit(X, y)\n        self.model = self.grid_search.best_estimator_\n","sub_path":"models/svr.py","file_name":"svr.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"240619182","text":"def accept(*args):\n    print(type(args))\n    print(args)\n\naccept('abs')\naccept(1,2,3,4,5,6)\naccept([7,84,651])\n\n\ndef custom_max_num(*numbers):\n    greatest=numbers[0]\n    for number in numbers:\n        if number>greatest:\n            greatest=number\n    return greatest\n\nprint(custom_max_num(78))\nprint(custom_max_num(78,22, 89, 56, 10, 111, 78, 56, 186))\n\n\ndef product(a, b):\n    return a*b\n\nnums1=[45, 78]\nnums2=(56, 12)\n\nprint(product(*nums1))\nprint(product(*nums2))","sub_path":"tuples/tuples2.py","file_name":"tuples2.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
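The Svr wrapper in the record above delegates model selection to scikit-learn's GridSearchCV. A compact usage sketch on synthetic data (the X/y here are made up) shows the fit-then-keep-best-estimator flow it encapsulates:

```python
import numpy as np
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVR

rng = np.random.default_rng(0)
X = rng.normal(size=(80, 3))
y = X @ np.array([1.0, -2.0, 0.5]) + rng.normal(0, 0.1, size=80)

# Same search space as the Svr class above: linear kernel, fixed epsilon.
search = GridSearchCV(SVR(), {"kernel": ["linear"], "epsilon": [0.5]}, cv=5)
search.fit(X, y)
best_model = search.best_estimator_   # what Svr.fit stores in self.model
print(search.best_params_, best_model.score(X, y))
```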
{"seq_id":"464677761","text":"# queries the IRSA allwise multi-epoch catalog for each DR14 quasar and writes the light curves to csv files\r\n# does not create a file if the entire set of w1 or w2 values is null\r\n\r\nfrom astropy.io import fits\r\nimport csv\r\n\r\nimport requests\r\n\r\ndef WISE(req, RA, DEC): \r\n    url = 'https://irsa.ipac.caltech.edu/cgi-bin/Gator/nph-query?spatial=cone&catalog=allwise_p3as_mep&objstr=' + RA + '+' + DEC + '&size=5&outfmt=1'\r\n\r\n    ret = requests.get(url).text\r\n    # split into lines and get rid of all headers; data starts at line 72\r\n    retCutLines = ret.split(\"\\n\")[72:]\r\n    #print(retCutLines)\r\n    mjd = []\r\n    w1 = []\r\n    w1sig = []\r\n    w2 = []\r\n    w2sig = []\r\n    for x in range(0, len(retCutLines)-1):\r\n        retCutData = retCutLines[x].split()\r\n        mjd.append(retCutData[9])\r\n        w1.append(retCutData[10])\r\n        w1sig.append(retCutData[11])\r\n        w2.append(retCutData[13])\r\n        w2sig.append(retCutData[14])\r\n\r\n    if(checkNull(w1) == 0) or (checkNull(w2) == 0): #there were no w1 or w2 values for this object\r\n        return\r\n\r\n    makeCSV(mjd, w1, w1sig, w2, w2sig, req)\r\n\r\n\r\n\r\ndef dr14(): # iterate over the DR14 quasar catalogue\r\n    cat = fits.open(\"../DR14Q_v3_1.fits\")\r\n    #for entry in len(cat[1].data['RA'])/100.0: #get them all\r\n    for entry in range(10000, 20000): #change these range values to say which ones to download\r\n        RA = cat[1].data['RA'][entry]\r\n        DEC = cat[1].data['DEC'][entry]\r\n        print(entry)\r\n\r\n        RA_DEC_str = \"\" + str(RA) + \"_\" + str(DEC) #this is just parsing it\r\n        WISE(RA_DEC_str, str(RA), str(DEC)) \r\n\r\ndef makeCSV(data1, data2, data3, data4, data5, obj):\r\n    outfile = open('./csvfiles/'+obj+'.csv', 'w') #creates/opens the csv file for writing\r\n    wr = csv.writer(outfile)\r\n    wr.writerow(data1)\r\n    wr.writerow(data2)\r\n    wr.writerow(data3)\r\n    wr.writerow(data4)\r\n    wr.writerow(data5)\r\n    print(\"done file for obj \" + obj)\r\n\r\ndef checkNull(arr):\r\n    val = 0\r\n    for i in range(len(arr)):\r\n        if arr[i] != 'null':\r\n            val = val + 1 #keep track of how many non-null entries we find\r\n    return val\r\n\r\n\r\n#WISE('294.1256623_-22.4012492', '294.1256623', '-22.4012492')\r\ndr14()\r\n\r\n","sub_path":"db_finding.py","file_name":"db_finding.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"289697493","text":"\nfrom abc import ABC, abstractmethod\nimport types\nimport time\n\nfrom .driver import STOP_ITERATION\nfrom ..backend import Backend\n\nclass Instruction(ABC):\n    \"\"\"\n    An instruction that updates an operation in a lazy DAG.\n    \"\"\"\n\n    @abstractmethod\n    def evaluate(self, thread, index_range, batch_index, values, context):\n        \"\"\"\n        Evaluates an instruction.\n\n        Parameters\n        ----------\n\n        thread : the thread that is currently executing\n        index_range : the index range of the executing program.\n        batch_index : the index of the current split batch.\n        values : a global value map holding the inputs.\n        context : map holding execution state (arg ID -> value).\n\n        \"\"\"\n        pass\n\nclass Split(Instruction):\n    \"\"\"\n    An instruction that splits the inputs to an operation.\n    \"\"\"\n\n    def __init__(self, target, ty, backend, batch_size):\n        \"\"\"\n        A Split instruction takes an argument and split type and applies\n        the splitter on the argument.\n\n        Parameters\n        ----------\n\n        target : the arg ID that will be split.\n        ty : the split type.\n        backend : the backend the instruction is executed on.\n        batch_size : the batch size of the instruction split.\n        \"\"\"\n        self.target = target\n        self.ty = ty\n        self.splitter = None\n        self.backend = backend\n        self.batch_size = batch_size\n        self.index_to_split = None\n\n    def __str__(self):\n        return \"({}:{}) v{} = split {}:{}\".format(\n            self.backend.value, self.batch_size, self.target, self.target, self.ty)\n\n    def evaluate(self, thread, index_range, batch_index, values, context):\n        \"\"\" Returns values from the split. 
\"\"\"\n start = 0 + self.batch_size * batch_index\n end = start + self.batch_size\n\n from ..dag import Operation\n value = values[self.target]\n if isinstance(value, Operation):\n value = value.value\n value = self.ty.split(index_range[0], index_range[1], value)\n\n num_elements = self.ty.elements(value)\n if num_elements is not None:\n end = min(end, num_elements)\n if self.splitter is None:\n # First time - check if the splitter is actually a generator.\n result = self.ty.split(start, end, value)\n if isinstance(result, types.GeneratorType):\n self.splitter = result\n result = next(self.splitter)\n else:\n self.splitter = self.ty.split\n else:\n if isinstance(self.splitter, types.GeneratorType):\n result = next(self.splitter)\n else:\n result = self.splitter(start, end, value)\n\n if isinstance(result, str) and result == STOP_ITERATION:\n return STOP_ITERATION\n\n context[self.target].append(result)\n\nclass Merge(Instruction):\n \"\"\"\n An instruction that merges the outputs of an operation.\n \"\"\"\n\n def __init__(self, target, ty, backend, batch_size):\n \"\"\"\n A merge instruction that merges all the values for the target in the\n context. Only inserted in a program prior to changing the batch size.\n\n Parameters\n ----------\n target : the target to merge\n ty : the split type of the target\n backend : the backend on which the merge is executed\n batch_size : the eventual batch size\n \"\"\"\n self.target = target\n self.ty = ty\n self.backend = backend\n self.batch_size = batch_size\n\n def __str__(self):\n return \"({}:{}) v{} = merge {}:{}\".format(\n self.backend.value, self.batch_size, self.target, self.target, self.ty)\n\n def evaluate(self, _thread, _index_range, _batch_index, _values, context):\n raise Exception('this is not called since the pipeline can only have a single batch size')\n\nclass Call(Instruction):\n \"\"\" An instruction that calls an SA-enabled function. 
\"\"\"\n def __init__(self, target, func, args, kwargs, annotation, tys, backend, batch_size, finalized):\n self.target = target\n # Function to call.\n self.func = func\n # Arguments: list of targets.\n self.args = args\n # Keyword arguments: Maps { name -> target }\n self.kwargs = kwargs\n # Return split type.\n self.ty = annotation.return_type\n # Argument split types.\n self.tys = tys\n # The backend the instruction is executed on.\n self.backend = backend\n # The batch size of the instruction split.\n self.batch_size = batch_size\n # Compute estimator.\n if annotation.estimator is None:\n self.estimator = None\n else:\n self.estimator = [annotation.estimator]\n # Whether the backend needs to be finalized.\n self.finalized = finalized\n\n def __str__(self):\n args = \", \".join(map(lambda a: \"v\" + str(a), self.args))\n kwargs = list(map(lambda v: \"{}=v{}\".format(v[0], v[1]), self.kwargs.items()))\n arguments = \", \".join([args] + kwargs)\n return \"({}:{}) {}call {}({}):{}\".format(\n self.backend.value,\n self.batch_size,\n \"\" if self.target is None else \"v{} = \".format(self.target),\n self.func.__name__,\n arguments,\n str(self.ty)\n )\n\n def get_args(self, context):\n return [ context[target][-1] for target in self.args ]\n\n def get_kwargs(self, context):\n return dict([ (name, context[target][-1]) for (name, target) in self.kwargs.items() ])\n\n def _finalize_backend(self, context):\n if self.finalized:\n return\n self.finalized = True\n if self.backend == Backend.CPU:\n return\n\n cpu_cost = 0\n gpu_cost = 0\n\n # Check if estimators exist\n if self.estimator is None:\n return\n for target, ty in self.tys.items():\n if ty.estimator is None:\n return\n\n # Add compute estimate\n values = []\n tys = []\n for target, ty in self.tys.items():\n values.append(context[target][-1])\n tys.append(ty)\n cpu_cost += self.estimator[0](tys, values, Backend.CPU)\n gpu_cost += self.estimator[0](tys, values, Backend.GPU)\n\n # Add transfer estimate\n for target, ty in self.tys.items():\n backend = ty.backend(context[target][-1])\n if backend == Backend.CPU:\n gpu_cost += ty.estimator(context[target][-1], Backend.GPU)\n elif backend == Backend.GPU:\n cpu_cost += ty.estimator(context[target][-1], Backend.CPU)\n\n if cpu_cost < gpu_cost:\n self.backend = Backend.CPU\n\n\n def _transfer_args_kwargs(self, context):\n # Transfer each argument to the operation backend\n for target, ty in self.tys.items():\n value = context[target][-1]\n if ty.backend(value) != self.backend:\n context[target][-1] = ty.to(value, self.backend)\n\n def evaluate(self, _thread, _index_range, _batch_index, _values, context):\n \"\"\"\n Evaluates a function call by gathering arguments and calling the\n function.\n\n \"\"\"\n self._finalize_backend(context)\n self._transfer_args_kwargs(context)\n args = self.get_args(context)\n kwargs = self.get_kwargs(context)\n result = self.func(*args, **kwargs)\n if self.target is not None:\n context[self.target].append(result)\n\n def remove_target(self):\n self.target = None\n\nclass To(Instruction):\n def __init__(self, target, ty, backend):\n self.target = target\n self.ty = ty\n self.backend = backend\n\n def __str__(self):\n return \"({}) v{} = to_{}:{}\".format(\n self.backend.value, self.target, self.backend.value, str(self.ty))\n\n def evaluate(self, _thread, _index_range, _batch_index, _values, context):\n old_value = context[self.target][-1]\n new_value = self.ty.to(old_value, self.backend)\n context[self.target][-1] = 
new_value\n","sub_path":"pycomposer/sa/annotation/vm/instruction.py","file_name":"instruction.py","file_ext":"py","file_size_in_byte":8133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"562551254","text":"#!/usr/bin/python\nimport sys\nimport cv2\nimport numpy as np # NumPy's arrays are not flexible like Python lists, you can store only same data type in each column.\n\n\n#directory of the yolo model\nwith open(\"yolo/coco.names\", 'r') as f:\n classes = [line.strip() for line in f.readlines()] #The strip() method returns a copy of the string with both leading and trailing characters removed.\nnet = cv2.dnn.readNet(\"yolo/yolov3.weights\", \"yolo/yolov3.cfg\") #?\n\n# function to get the output layer names in the architecture\ndef get_output_layers(net): \n layer_names = net.getLayerNames() \n output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n return output_layers\n\ndef draw_prediction(img, class_id, confidence, x, y, x_plus_w, y_plus_h,classes,COLORS):\n label = str(classes[class_id])\n color = COLORS[class_id]\n cv2.rectangle(img, (x,y), (x_plus_w,y_plus_h), color, 2)\n cv2.putText(img, label, (x-10,y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)\n\ndef read_frame(file): #load and return picture from the selected file \n image = cv2.imread(file)\n return image\n\ndef detect(classes,net,image,class_of_interest): #image= the image that method read_frame returned \n Width = image.shape[1]\n Height = image.shape[0]\n scale = 0.00392 \n \n # generate different colors for different classes\n COLORS = np.random.uniform(0, 255, size=(len(classes), 3)) #Samples are uniformly distributed over the half-open interval [low, high) (includes low, but excludes high). In other words, any value within the given interval is equally likely to be drawn by uniform.\n #classes = everything from the file we opened above \n \n # create input blob\n blob = cv2.dnn.blobFromImage(image, scale, (416,416), (0,0,0), True, crop=False)#[blobFromImage] creates 4-dimensional blob from image. Optionally resizes and crops image from center, subtract mean values, scales values by scalefactor, swap Blue and Red channels.\n #(image we want to preprocess before passing it through our deep neural network for classification, we can optionally scale our images by some factor,spatial size that the Convolutional Neural Network expects, mean subtraction values, OpenCV assumes images are in BGR channel order; however, the `mean` value assumes we are using RGB order. 
To resolve this discrepancy we can swap the R and B channels in image by setting this value to `True` \n \n net.setInput(blob) #Sets the new value for the layer output blob.\n \n # run inference through the network and gather predictions from output layers #?\n outs = net.forward(get_output_layers(net))\n \n #initialization \n class_ids = []\n confidences = []\n boxes = []\n \n #confidence of detecet objects 0.1 - 1 -> 1% - 100%\n conf_threshold = 0.7\n nms_threshold = 0.4\n \n # for each detetion from each output layer \n for out in outs:\n for detection in out:\n scores = detection[5:] #5 till the end \n class_id = np.argmax(scores) #Returns the indices of the maximum values along an axis.\n confidence = scores[class_id]\n if confidence > 0.5:\n center_x = int(detection[0] * Width)\n center_y = int(detection[1] * Height)\n w = int(detection[2] * Width)\n h = int(detection[3] * Height)\n x = center_x - w / 2\n y = center_y - h / 2\n class_ids.append(class_id)\n confidences.append(float(confidence))\n boxes.append([x, y, w, h])\n indices = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold, nms_threshold)\n results = []\n \n # go through the detections remaining after nms and draw bounding box\n for i in indices:\n i = i[0]\n box = boxes[i]\n x = box[0]\n y = box[1]\n w = box[2]\n h = box[3]\n draw_prediction(image, class_ids[i], confidences[i], round(x), round(y), round(x+w), round(y+h),classes,COLORS)\n results.append({'class':classes[class_ids[i]] , 'confidence':confidences[i]})\n \n number_of(class_of_interest,results) #?\n preview(image)\n save_file(image)\n\ndef number_of(for_detection,results):\n counter = 0\n for i in results:\n if i['class'] == for_detection:\n counter+=1\n print(('Detected {} items of class {}.').format(counter,for_detection)) \n \ndef preview(image):\n cv2.imshow(\"object detection\", image) #cv2. imshow() method is used to display an image in a window. The window automatically fits to the image size. \n cv2.waitKey() #You can create as many windows as you wish, but with different window names. cv2. waitKey() is a keyboard binding function. ... The function waits for specified milliseconds for any keyboard event.\n\ndef save_file(image):\n cv2.imwrite(\"object-detection.jpg\", image) #cv2. imwrite() method is used to save an image to any storage device. 
This will save the image according to the specified format in current working directory.\n cv2.destroyAllWindows()\n\n \nif __name__ == \"__main__\": #main\n #default directory of the image\n directory_of_image = \"images/cl2.jpg\"\n #default class, read yolo/coco.names to select a available class \n class_of_interest = \"person\" #?\n\n #check for arguments\n if len(sys.argv) > 2:\n directory_of_image = sys.argv[1]\n class_of_interest = sys.argv[2]\n elif len(sys.argv) > 1:\n directory_of_image = sys.argv[1]\n \n frame = read_frame(directory_of_image) #load and return picture from the selected file \n detect(classes,net,frame,class_of_interest)\n \n \n \n \n \n","sub_path":"yolo_images.py","file_name":"yolo_images.py","file_ext":"py","file_size_in_byte":5698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"458813723","text":"## simple demo script for showing how to connect to an sqlite DB \n# from Python, and run a simple SQL query \n\n# import the python library for SQLite \nimport sqlite3\ndef getdata():\n # connect to the database file, and create a connection object\n db_connection = sqlite3.connect('restaurants.db')\n\n # create a database cursor object, which allows us to perform SQL on the database. \n db_cursor = db_connection.cursor()\n\n # run a first query \n #db_cursor.execute(\"SELECT from neighborhoods\")\n db_cursor.execute(\"\"\"SELECT * from restaurants\n INNER JOIN neighborhoods ON restaurants.NEIGHBORHOOD_ID=neighborhoods.ID\n WHERE neighborhoods.NAME=\"Kreuzberg\"\n \"\"\")\n\n # store the result in a local variable. \n # this will be a list of tuples, where each tuple represents a row in the table\n list_restaurants = db_cursor.fetchall()\n\n db_connection.close()\n #print(\"list_restaurants contents:\")\n return(list_restaurants)\n\n \n","sub_path":"restaurants/database_connect.py","file_name":"database_connect.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"387616377","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport webbrowser\nfrom pprint import pprint\n\nclass DoubanLoginSpider(scrapy.Spider):\n name = 'douban_login'\n allowed_domains = ['douban.com']\n start_urls = ['https://www.douban.com']\n login_url = \"https://www.douban.com/accounts/login\"\n setting_url = 'https://www.douban.com/settings/notification'\n headers = {\n \"User-Agent\":\"User-Agent Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11\",\n }\n captcha_solution = None\n\n def login_parse(self, response):\n x = response.css(\"div.item.item-captcha img::attr(src)\").extract()[0]\n if (x):\n print(x)\n print(\"验证码:\")\n webbrowser.open_new_tab(x)\n self.captcha_solution = str(input())\n #print(len(self.captcha_solution),self.captcha_solution)\n formdata = {\n 'source': 'index_nav',\n \"form_email\": \"13697252729\",\n \"form_password\": \"451325378+\",\n \"captcha-solution\": self.captcha_solution,\n \"login\": \"登录\"\n }\n return [scrapy.FormRequest.from_response(response, formdata=formdata,callback=self.parse_login)]\n\n def start_requests(self):\n yield scrapy.Request(self.login_url,headers=self.headers,callback=self.login_parse)\n\n def parse(self,response):\n pass\n\n def parse_login(self,response):\n if '登录豆瓣' in response.text:\n print(\"No!!!!!\")\n else:\n print(\"Yes!\")\n yield scrapy.Request(url=self.setting_url,callback=self.parse_setting)\n\n def 
parse_setting(self,response):\n if (response.url == self.setting_url):\n ck = response.css(\"div[style='display:none;'] input::attr(value)\").extract()[0]\n fromdata={\n \"ck\": ck,\n \"request\": \"request_forward\",\n \"dm\": \"dm_forward\",\n \"recv_weekly_rec\": \"sns:notification:recv_weekly_rec\",\n \"market\": \"market_no_forward\",\n \"notification\": \"notification_noforward\",\n \"email_submit\": \"更新邮件提醒设置\"\n }\n yield scrapy.FormRequest(self.setting_url,formdata=fromdata,callback=self.parse_edit)\n\n def parse_edit(self,response):\n if \"设置更新成功\" in response.text:\n print(\"chenggong!\")\n else:\n print(\"shibai!!!\")\n\n\n\n","sub_path":"spiders/douban_login.py","file_name":"douban_login.py","file_ext":"py","file_size_in_byte":2459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"519199076","text":"#coding:utf-8\r\nimport tensorflow as tf \r\nimport numpy as np \r\n\r\n\r\n#creat data\r\nx_data = np.random.rand(100).astype(np.float32)\r\ny_data = x_data*0.1+0.3\r\n\r\n###create tensorflow structure start###\r\nWeights = tf.Variable(tf.random_uniform([1],-1.0,1.0))\r\nbias = tf.Variable(tf.zeros([1]))\r\n\r\ny=Weights*x_data+bias\r\n\r\nloss = tf.reduce_mean(tf.square(y-y_data))\r\n\r\noptimizer=tf.train.GradientDescentOptimizer(0.5)\r\ntrain=optimizer.minimize(loss)\r\n\r\ninit = tf.global_variables_initializer()\r\n\r\ngpu_options = tf.GPUOptions(allow_growth=True)\r\nsess=tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\r\nsess.run(init)\r\n\r\nfor step in range(201):\r\n\tsess.run(train)\r\n\tif step % 20 ==0:\r\n\t\tprint(step, sess.run(Weights), sess.run(bias))\r\n","sub_path":"MFTensorflowLearning/firstexam.py","file_name":"firstexam.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"629076234","text":"# no.of possible ways of making an amount, from given coins\r\namt = int(input('amount: '))\r\ncoin =list(map(int,input().split()))\r\n\r\ndp=[[0 for _ in range(amt+1)] for _ in range(len(coin)+1)]\r\n\r\ndp[0][0]=1\r\nfor i in range(1,len(coin)+1):\r\n\r\n dp[i][0]=1\r\n\r\n for j in range(1,amt+1):\r\n cv=coin[i-1]\r\n\r\n woc=dp[i-1][j]\r\n wc=0\r\n if(cv<=j):\r\n wc=dp[i][j-cv]\r\n dp[i][j]=woc+wc\r\n\r\nprint(dp)\r\n \r\n'''\r\nhttps://github.com/bephrem1/backtobackswe/blob/master/Dynamic%20Programming%2C%20Recursion%2C%20%26%20Backtracking/changeMakingProblem2.java\r\n'''\r\n","sub_path":"coins_allways_dynamic.py","file_name":"coins_allways_dynamic.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"225092610","text":"# Dilirici Radu\n# Grupa 232\n\nimport time\nimport copy\n\n# directiile posibile. prima pozite este pentru y, a doua pentru x\nl_directii = [[-1, 0], [0, 1], [1, 0], [0, -1]]\n\n\nclass Joc:\n \"\"\"\n Clasa care defineste jocul. 
Se va schimba de la un joc la altul.\n \"\"\"\n NR_COLOANE = 22\n NR_LINII = 13\n SIMBOLURI_JUC = ['1', '2']\n SIMBOLURI_BOMBE = ['!', '@']\n JMIN = None\n JMAX = None\n BMIN = None\n BMAX = None\n\n def __init__(self, tabla=None, vieti_jmin=None, vieti_jmax=None):\n if tabla is not None:\n self.matr = tabla\n self.vieti_jmin = vieti_jmin\n self.vieti_jmax = vieti_jmax\n else:\n # harta e putin modificata pentru a putea vedea mai rapid rezultatele\n self.matr = [['#','#','#','#','#','#','#','#','#','#','#','#','#','#','#','#','#','#','#','#','#','#'],\n ['#','1',' ',' ',' ',' ','#',' ',' ',' ',' ',' ',' ',' ','#',' ',' ',' ',' ',' ',' ','#'],\n ['#',' ','#',' ',' ',' ','#','#','#',' ',' ',' ','#','#','#','#',' ','#','#','#','#','#'],\n ['#',' ','#',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ','#'],\n ['#',' ',' ','2',' ','p',' ','#',' ',' ','p',' ',' ','#',' ',' ','#','#','#',' ',' ','#'],\n ['#','#','#','#','#','#','#','#','#','#',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ','#'],\n ['#',' ','#',' ',' ',' ',' ',' ','#',' ',' ',' ','#','#','#','#',' ',' ',' ',' ','#','#'],\n ['#',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ','p',' ',' ',' ',' ',' ',' ','#'],\n ['#',' ','#','#','#','#','#','#','#',' ',' ',' ','#','#','#','#','#','#','#',' ',' ','#'],\n ['#',' ',' ',' ',' ','#',' ',' ','#',' ',' ',' ','#','p',' ',' ',' ',' ',' ',' ',' ','#'],\n ['#',' ','#','#','#','#',' ',' ','#',' ',' ',' ','#','#','#',' ','#','#','#',' ',' ','#'],\n ['#',' ',' ',' ',' ','#',' ',' ',' ',' ','#',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ','#'],\n ['#','#','#','#','#','#','#','#','#','#','#','#','#','#','#','#','#','#','#','#','#','#']]\n self.vieti_jmin = 1\n self.vieti_jmax = 1\n\n def final(self, jucator):\n # returnam simbolul jucatorului castigator daca nu mai exista mutari posibile (sau remiza)\n # sau 'False' daca nu s-a terminat jocul\n # In cazul in care jucatorul e blocat (nu se mai poate misca in nicio casuta) consideram ca a pierdut\n\n if self.vieti_jmin <= 0 and self.vieti_jmax <= 0:\n return 'remiza'\n elif self.vieti_jmin <= 0:\n return self.JMAX\n elif self.vieti_jmax <= 0:\n return self.JMIN\n elif len(self.mutari(jucator)) == 0: # nu se mai poate misca\n return self.JMIN if jucator == self.JMAX else self.JMAX\n else:\n return False\n\n def pozitie_caracter(self, c):\n for i in range(self.NR_LINII):\n for j in range(self.NR_COLOANE):\n if self.matr[i][j] == c:\n return i, j\n return None\n\n def explozie(self, pozitie):\n # Explozia afecteaza ambii jucatori\n # Returneaza si daca a murit vreun jucator\n bomba = self.matr[pozitie[0]][pozitie[1]]\n bomba_opusa = self.BMAX if bomba == self.BMIN else self.BMIN\n for directie in l_directii:\n i, j = pozitie[0], pozitie[1]\n while self.matr[i][j] != '#':\n if self.matr[i][j] == self.JMIN:\n if self.vieti_jmin > 0:\n self.vieti_jmin -= 1\n elif self.matr[i][j] == self.JMAX:\n if self.vieti_jmax > 0:\n self.vieti_jmax -= 1\n elif self.matr[i][j] == bomba_opusa:\n self.explozie((i, j)) # bombele care explodeaza detoneaza si bombele la care ajung explozia\n else: # bombele distrug si puterile (vietile in plus)\n self.matr[i][j] = '*' # marcam explozia pe harta ca sa se vada ce s-a intamplat\n\n i += directie[0]\n j += directie[1]\n\n if self.vieti_jmin <= 0 or self.vieti_jmax <= 0:\n return True\n else:\n return None\n\n def curata_explozie(self):\n for i in range(self.NR_LINII):\n self.matr[i] = [x if x != '*' else ' ' for x in self.matr[i]]\n\n def muta_jucator(self, jucator, directie, foloseste_bomba):\n 
self.curata_explozie()\n poz_juc = self.pozitie_caracter(jucator)\n if poz_juc is None:\n return False\n poz_noua = (poz_juc[0] + directie[0], poz_juc[1] + directie[1])\n bomba = self.BMAX if jucator == self.JMAX else self.BMIN\n poz_bomba = self.pozitie_caracter(bomba)\n\n if self.matr[poz_noua[0]][poz_noua[1]] not in [' ', 'p']: # mutarea nu este valida\n if poz_noua == poz_bomba: # daca jucatorul e blocat de propria bomba, o poate declansa si sa mearga acolo\n if foloseste_bomba:\n if self.explozie(poz_bomba):\n return True\n if self.matr[poz_juc[0]][poz_juc[1]] == jucator:\n self.matr[poz_juc[0]][poz_juc[1]] = ' '\n self.matr[poz_noua[0]][poz_noua[1]] = jucator\n return True\n return False\n\n if foloseste_bomba:\n if poz_bomba is None: # daca nu am pus bomba, o putem acum\n self.matr[poz_juc[0]][poz_juc[1]] = bomba\n else: # altfel, o detonam\n if self.explozie(poz_bomba): # daca a murit cineva nu mai misca\n return True # motivul e sa se vada mai bine ce s-a intamplat. rezultatul e acelasi\n if self.matr[poz_juc[0]][poz_juc[1]] == jucator:\n self.matr[poz_juc[0]][poz_juc[1]] = ' '\n elif self.matr[poz_juc[0]][poz_juc[1]] == jucator:\n self.matr[poz_juc[0]][poz_juc[1]] = ' ' # lasa liber in urma jucatorului\n\n if self.matr[poz_noua[0]][poz_noua[1]] == 'p': # adauga viata\n if jucator == self.JMIN:\n self.vieti_jmin += 1\n else:\n self.vieti_jmax += 1\n self.matr[poz_noua[0]][poz_noua[1]] = jucator\n\n return True\n\n def mutari(self, jucator):\n l_mutari = []\n\n for directie in l_directii:\n # mutam jucatorul in directia respectiva mai intai fara folosirea bombei, iar apoi cu\n # in total sunt 4 (directiile) * 2 (folosire bomba) = 8 mutari posibile\n config_noua = Joc(copy.deepcopy(self.matr), self.vieti_jmin, self.vieti_jmax)\n if config_noua.muta_jucator(jucator, directie, False):\n l_mutari.append(config_noua)\n\n config_noua = Joc(copy.deepcopy(self.matr), self.vieti_jmin, self.vieti_jmax)\n if config_noua.muta_jucator(jucator, directie, True):\n l_mutari.append(config_noua)\n\n return l_mutari\n\n def puncte(self, jucator):\n nr = 0\n if jucator == self.JMIN: # vietile in plus sunt foarte importante\n nr += 500 * self.vieti_jmin\n else:\n nr += 500 * self.vieti_jmax\n\n bomba = self.BMAX if jucator == self.JMAX else self.BMIN # la scor adaugam cate casute acopera bomba pusa\n poz_bomba = self.pozitie_caracter(bomba) # ca sa incurajam computer-ul sa le foloseasca\n\n if poz_bomba is not None:\n nr += 1\n for directie in l_directii:\n i, j = poz_bomba[0] + directie[0], poz_bomba[1] + directie[1]\n while self.matr[i][j] != '#':\n nr += 1\n i += directie[0]\n j += directie[1]\n\n return nr\n\n def fct_euristica(self):\n return self.puncte(Joc.JMAX) - self.puncte(Joc.JMIN)\n\n def estimeaza_scor(self, adancime, jucator):\n t_final = self.final(jucator)\n if t_final == Joc.JMAX:\n return 999 + adancime\n elif t_final == Joc.JMIN:\n return -999 - adancime\n elif t_final == 'remiza':\n return 0\n else:\n return self.fct_euristica()\n\n def __str__(self):\n sir = ''\n for line in self.matr:\n for c in line:\n sir += c\n sir += '\\n'\n return sir\n\n\nclass Stare:\n \"\"\"\n Clasa folosita de algoritmii minimax si alpha-beta\n Are ca proprietate tabla de joc\n Functioneaza cu conditia ca in cadrul clasei Joc sa fie definiti JMIN si JMAX (cei doi jucatori posibili)\n De asemenea cere ca in clasa Joc sa fie definita si o metoda numita mutari() care ofera lista cu\n configuratiile posibile in urma mutarii unui jucator\n \"\"\"\n\n ADANCIME_MAX = None\n\n def __init__(self, tabla_joc, 
j_curent, adancime, parinte=None, scor=None):\n self.tabla_joc = tabla_joc\n self.j_curent = j_curent\n\n # adancimea in arborele de stari\n self.adancime = adancime\n\n # scorul starii (daca e finala) sau al celei mai bune stari-fiice (pentru jucatorul curent)\n self.scor = scor\n\n # lista de mutari posibile din starea curenta\n self.mutari_posibile = []\n\n # cea mai buna mutare din lista de mutari posibile pentru jucatorul curent\n self.stare_aleasa = None\n\n def jucator_opus(self):\n if self.j_curent == Joc.JMIN:\n return Joc.JMAX\n else:\n return Joc.JMIN\n\n def mutari(self):\n l_mutari = self.tabla_joc.mutari(self.j_curent)\n juc_opus = self.jucator_opus()\n l_stari_mutari = [Stare(mutare, juc_opus, self.adancime - 1, parinte=self) for mutare in l_mutari]\n\n return l_stari_mutari\n\n def __str__(self):\n sir = str(self.tabla_joc) + \"(Juc curent: \" + self.j_curent + \")\\n\"\n return sir\n\n\n\"\"\" Algoritmul MinMax \"\"\"\n\n\ndef min_max(stare):\n if stare.adancime == 0 or stare.tabla_joc.final(stare.j_curent):\n stare.scor = stare.tabla_joc.estimeaza_scor(stare.adancime, stare.j_curent)\n return stare\n\n # calculez toate mutarile posibile din starea curenta\n stare.mutari_posibile = stare.mutari()\n\n # aplic algoritmul minimax pe toate mutarile posibile (calculand astfel subarborii lor)\n mutari_scor = [min_max(mutare) for mutare in stare.mutari_posibile]\n\n if stare.j_curent == Joc.JMAX:\n # daca jucatorul e JMAX aleg starea-fiica cu scorul maxim\n stare.stare_aleasa = max(mutari_scor, key=lambda x: x.scor)\n else:\n # daca jucatorul e JMIN aleg starea-fiica cu scorul minim\n stare.stare_aleasa = min(mutari_scor, key=lambda x: x.scor)\n\n stare.scor = stare.stare_aleasa.scor\n return stare\n\n\ndef alpha_beta(alpha, beta, stare):\n if stare.adancime == 0 or stare.tabla_joc.final(stare.j_curent):\n stare.scor = stare.tabla_joc.estimeaza_scor(stare.adancime, stare.j_curent)\n return stare\n\n if alpha >= beta:\n return stare # este intr-un interval invalid deci nu o mai procesez\n\n stare.mutari_posibile = stare.mutari()\n\n if stare.j_curent == Joc.JMAX:\n scor_curent = float('-inf')\n\n for mutare in stare.mutari_posibile:\n # calculeaza scorul\n stare_noua = alpha_beta(alpha, beta, mutare)\n\n if scor_curent < stare_noua.scor:\n stare.stare_aleasa = stare_noua\n scor_curent = stare_noua.scor\n if alpha < stare_noua.scor:\n alpha = stare_noua.scor\n if alpha >= beta:\n break\n\n elif stare.j_curent == Joc.JMIN:\n scor_curent = float('inf')\n\n for mutare in stare.mutari_posibile:\n stare_noua = alpha_beta(alpha, beta, mutare)\n\n if scor_curent > stare_noua.scor:\n stare.stare_aleasa = stare_noua\n scor_curent = stare_noua.scor\n\n if beta > stare_noua.scor:\n beta = stare_noua.scor\n if alpha >= beta:\n break\n\n stare.scor = stare.stare_aleasa.scor\n return stare\n\n\ndef afis_daca_final(stare_curenta, jucator):\n final = stare_curenta.tabla_joc.final(jucator)\n if final:\n if final == \"remiza\":\n print(\"Remiza!\")\n else:\n print(\"A castigat \" + final)\n\n return True\n\n return False\n\n\ndef main():\n # initializare algoritm\n raspuns_valid = False\n while not raspuns_valid:\n tip_algoritm = input(\"Algorimul folosit? 
(raspundeti cu 1 sau 2)\\n 1.Minimax\\n 2.Alpha-beta\\n \")\n if tip_algoritm in ['1', '2']:\n raspuns_valid = True\n else:\n print(\"Nu ati ales o varianta corecta.\")\n\n # initializare ADANCIME_MAX\n raspuns_valid = False\n while not raspuns_valid:\n n = input(\"Adancime maxima a arborelui: \")\n if n.isdigit():\n Stare.ADANCIME_MAX = int(n)\n raspuns_valid = True\n else:\n print(\"Trebuie sa introduceti un numar natural nenul.\")\n\n # initializare jucatori\n [s1, s2] = Joc.SIMBOLURI_JUC.copy() # lista de simboluri posibile\n [b1, b2] = Joc.SIMBOLURI_BOMBE.copy()\n raspuns_valid = False\n while not raspuns_valid:\n Joc.JMIN = str(input(\"Doriti sa jucati cu {} sau cu {}? \".format(s1, s2))).upper()\n Joc.BMIN = b1 if Joc.JMIN == s1 else b2\n if Joc.JMIN in Joc.SIMBOLURI_JUC:\n raspuns_valid = True\n else:\n print(\"Raspunsul trebuie sa fie {} sau {}.\".format(s1, s2))\n Joc.JMAX = s1 if Joc.JMIN == s2 else s2\n Joc.BMAX = b1 if Joc.JMAX == s1 else b2\n\n # initializare tabla\n tabla_curenta = Joc()\n print(\"Tabla initiala\")\n print(str(tabla_curenta))\n\n # creare stare initiala\n stare_curenta = Stare(tabla_curenta, Joc.SIMBOLURI_JUC[0], Stare.ADANCIME_MAX)\n\n print(\"Intructiuni:\\n\"\n \"Introduceti directia (w a s d) si apasati 'Enter'.\\n\"\n \"Daca doriti sa plasati/detonati o bomba puneti un 'space' inainte de directie.\\n\"\n \"Puteti avea o singura bomba plasata la un moment dat.\\n\"\n \"Puterile 'p' va protejeaza de o explozie. Acestea se pot cumula.\\n\"\n \"Bombele jucatorului 1 sunt marcare cu '!', iar cele ale jucatorului 2 cu '@'.\\n\"\n \"Daca o bomba este atinsa de o explozie, explodeaza si aceasta.\")\n\n directie = [0, 0]\n while True:\n if stare_curenta.j_curent == Joc.JMIN:\n\n # testez daca jocul a ajuns intr-o stare finala\n # si afisez un mesaj corespunzator in caz ca da\n if afis_daca_final(stare_curenta, Joc.JMIN):\n break\n\n # muta jucatorul\n raspuns_valid = False\n while not raspuns_valid:\n try:\n user_input = input(\"directie =\\n\")\n\n if user_input not in [' w', ' a', ' s', ' d', 'w', 'a', 's', 'd']:\n print(\"Input incorect\")\n print(\"Introduceti directia (w a s d).\\n\"\n \"Inainte de directie puteti pune un spatiu daca vreti sa lasati o bomba\")\n continue\n\n if len(user_input) == 1: # jucatorul doar s-a miscat\n dir_input = user_input[0]\n foloseste_bomba = False\n else: # jucatorul a pus si o bomba (sau a activat-o)\n dir_input = user_input[1]\n foloseste_bomba = True\n if dir_input == 'w':\n directie[0] = -1\n directie[1] = 0\n elif dir_input == 'a':\n directie[0] = 0\n directie[1] = -1\n elif dir_input == 's':\n directie[0] = 1\n directie[1] = 0\n elif dir_input == 'd':\n directie[0] = 0\n directie[1] = 1\n\n if not stare_curenta.tabla_joc.muta_jucator(Joc.JMIN, directie, foloseste_bomba):\n print(\"Pozitia este ocupata\")\n continue\n\n raspuns_valid = True # in acest moment deja am mutat jucatorul\n\n except ValueError:\n print(\"Eroare la citire input.\")\n\n # afisarea starii jocului in urma mutarii utilizatorului\n print(\"\\nTabla dupa mutarea jucatorului\")\n print(str(stare_curenta))\n\n # S-a realizat o mutare. 
Schimb jucatorul cu cel opus\n stare_curenta.j_curent = stare_curenta.jucator_opus()\n\n # --------------------------------\n else: # jucatorul e JMAX (calculatorul)\n\n if afis_daca_final(stare_curenta, Joc.JMAX):\n break\n\n # preiau timpul in milisecunde de dinainte de mutare\n t_inainte = int(round(time.time() * 1000))\n if tip_algoritm == '1':\n stare_actualizata = min_max(stare_curenta)\n else:\n stare_actualizata = alpha_beta(-5000, 5000, stare_curenta)\n if not stare_actualizata.stare_aleasa:\n print(\"Eroare\")\n break\n\n stare_curenta.tabla_joc = stare_actualizata.stare_aleasa.tabla_joc\n print(\"Tabla dupa mutarea calculatorului\")\n print(str(stare_curenta))\n\n # preiau timpul in milisecunde de dupa mutare\n t_dupa = int(round(time.time() * 1000))\n print(\"Calculatorul a \\\"gandit\\\" timp de \" + str(t_dupa - t_inainte) + \" milisecunde.\")\n print(f\"Vieti {Joc.JMIN}: {stare_curenta.tabla_joc.vieti_jmin}\")\n print(f\"Vieti {Joc.JMAX}: {stare_curenta.tabla_joc.vieti_jmax}\")\n print()\n\n # S-a realizat o mutare. Schimb jucatorul cu cel opus\n stare_curenta.j_curent = stare_curenta.jucator_opus()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Year 2/Sem 2/IA/minmax_alphabeta/bomberman.py","file_name":"bomberman.py","file_ext":"py","file_size_in_byte":18108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"610810924","text":"from unittest import TestCase\n\nimport pydicom\n\nfrom dicom_parser.utils.siemens.csa.data_element import CsaDataElement\nfrom dicom_parser.utils.siemens.csa.header import CsaHeader\nfrom dicom_parser.utils.siemens.private_tags import SIEMENS_PRIVATE_TAGS\nfrom tests.fixtures import TEST_RSFMRI_IMAGE_PATH\nfrom tests.utils.siemens.csa.fixtures import (ARRAY_PATTERNS, LISTED_KEYS,\n NON_ARRAY_PATTERNS, RAW_ELEMENTS,\n VALUES)\n\n\nclass CsaDataElementTestCase(TestCase):\n @classmethod\n def setUpClass(cls):\n dcm = pydicom.dcmread(TEST_RSFMRI_IMAGE_PATH)\n tag = SIEMENS_PRIVATE_TAGS[\"CSASeriesHeaderInfo\"]\n cls.series_header_info = dcm.get(tag).value\n cls.csa_header = CsaHeader(cls.series_header_info)\n cls.csa_data_element = CsaDataElement(cls.csa_header.raw_elements[0])\n\n def test_init_splits_to_key_and_value(self):\n first_key = [\"Version\"]\n first_value = \"51130001\"\n self.assertListEqual(self.csa_data_element.key, first_key)\n self.assertEqual(self.csa_data_element.value, first_value)\n element_100 = CsaDataElement(self.csa_header.raw_elements[100])\n key_100 = [\"GRADSPEC\", \"GPAData[0]\", \"EddyCompensationY\", \"TimeConstant[1]\"]\n value_100 = \"0.917683601379\"\n self.assertListEqual(element_100.key, key_100)\n self.assertEqual(element_100.value, value_100)\n\n def test_clean_part(self):\n raw_part_names = \"sSliceArray\", \"lTrackingBackgroundSuppr\", \"alRecoveryDuration\"\n expected = \"SliceArray\", \"TrackingBackgroundSuppr\", \"RecoveryDuration\"\n for i, raw_part_name in enumerate(raw_part_names):\n clean_part = self.csa_data_element.clean_part(raw_part_name)\n self.assertEqual(clean_part, expected[i])\n\n def test_key_to_list(self):\n key_sample = [raw_element.split(\"\\t\")[0] for raw_element in RAW_ELEMENTS]\n for key, expected in zip(key_sample, LISTED_KEYS):\n result = self.csa_data_element.key_to_list(key)\n self.assertListEqual(result, expected)\n\n def test_split(self):\n for i, raw_element in enumerate(RAW_ELEMENTS):\n data_element = CsaDataElement(raw_element)\n key, value = data_element.split()\n self.assertListEqual(key, LISTED_KEYS[i])\n 
self.assertEqual(value, VALUES[i])\n\n def test_search_array_pattern(self):\n for array_pattern in ARRAY_PATTERNS:\n result = self.csa_data_element.search_array_pattern(array_pattern)\n self.assertIsInstance(result, int)\n for non_array_pattern in NON_ARRAY_PATTERNS:\n result = self.csa_data_element.search_array_pattern(non_array_pattern)\n self.assertIsNone(result)\n\n","sub_path":"tests/utils/siemens/csa/test_csa_data_element.py","file_name":"test_csa_data_element.py","file_ext":"py","file_size_in_byte":2791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"51716881","text":"from django.shortcuts import render\nfrom django.http import JsonResponse\nfrom .models import TODO\n\n# Create your views here.\ndef home(request):\n return render(request, 'todo.html')\n\ndef todo_list(request):\n todos = TODO.objects.all()\n return JsonResponse({'todos': list(todos.values())})\n\n\ndef todo_create(request):\n if request.method == 'POST':\n todo_name = request.POST.get('todo_name')\n todo = TODO.objects.filter(name=todo_name)\n\n if todo.exists():\n return JsonResponse({'status': 'error'})\n \n todo = TODO.objects.create(name=todo_name)\n return JsonResponse({'todo_name': todo.name, 'status': 'created'})\n\n\ndef todo_edit(request):\n if request.method == 'POST':\n todo_name = request.POST.get('todo_name')\n new_todo_name = request.POST.get('new_todo_name')\n completed = request.POST.get('completed')\n edited_todo = TODO.objects.get(name=todo_name)\n\n if completed:\n if completed == '0':\n edited_todo.completed = False\n edited_todo.save()\n return JsonResponse({'status': 'updated'})\n elif completed == '1':\n edited_todo.completed = True\n edited_todo.save()\n return JsonResponse({'status': 'updated'})\n\n if TODO.objects.filter(name=new_todo_name).exists():\n return JsonResponse({'status': 'error'})\n \n edited_todo.name = new_todo_name\n edited_todo.save()\n\n context ={\n 'new_todo_name': new_todo_name,\n 'status': 'updated'\n }\n return JsonResponse(context)\n\ndef todo_delete(request):\n if request.method == 'POST':\n todo_name = request.POST.get('todo_name')\n TODO.objects.filter(name=todo_name).delete()\n return JsonResponse({'status': 'deleted'})","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"213398232","text":"from celery import shared_task\n\n\n@shared_task\ndef run_scraper():\n from .scraper import scrape_results\n\n scrape_results()\n\n\ndef assign_winners():\n from operator import itemgetter\n from .scraper import scrape_results\n from apps.races.models import Race\n\n results = scrape_results(upload=False)\n\n contest_headers = results['contest_headers']\n cand_headers = results['cand_headers']\n\n idx_total_votes = contest_headers.index('vote_tot')\n idx_cand_votes = cand_headers.index('vote_cnt')\n idx_cand_name = cand_headers.index('name')\n\n for key, val in results['contests'].items():\n meta, cands = itemgetter('meta', 'cands')(val)\n\n total_votes = meta[idx_total_votes]\n\n race = Race.objects.get(cboe_results_id=key)\n\n # does the candidate with the most votes have >50%?\n sorted_cands = sorted(cands, reverse=True,\n key=itemgetter(idx_cand_votes))\n top_cand = sorted_cands[0]\n has_winner = top_cand[idx_cand_votes] / total_votes > .5\n\n # get top cand if has_winner, else top two\n top_cands = [top_cand] if has_winner else sorted_cands[:2]\n\n top_cand_names = list(\n 
map(lambda x: x[idx_cand_name].replace('*', ''), top_cands))\n\n top_cand_objs = race.candidates.filter(\n full_name__in=top_cand_names)\n\n # winner? set the top cand to elected\n if has_winner:\n cand_obj = top_cand_objs.first()\n cand_obj.status = 'elected'\n cand_obj.save(publish=False)\n\n # no winner? set the top cands to runoff\n else:\n for cand_obj in top_cand_objs:\n cand_obj.status = 'runoff'\n cand_obj.save(publish=False)\n\n race.save(publish=True)\n","sub_path":"apps/scrape/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"162350404","text":"# Copyright (c) 2019, MD2K Center of Excellence\n# - Nasir Ali \n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport json\n\nimport pandas as pd\n\n\nfrom pyspark.sql.functions import pandas_udf, PandasUDFType\nfrom pyspark.sql.types import StructField, StructType, StringType, FloatType, TimestampType, IntegerType\n\nschema = StructType([\n StructField(\"timestamp\", TimestampType()),\n StructField(\"localtime\", TimestampType()),\n StructField(\"user\", StringType()),\n StructField(\"version\", IntegerType()),\n StructField(\"incentive\", FloatType()),\n StructField(\"total_incentive\", FloatType()),\n StructField(\"ema_id\", StringType()),\n StructField(\"data_quality\", FloatType())\n])\n\n@pandas_udf(schema, PandasUDFType.GROUPED_MAP)\ndef get_ema_incentive_features(user_data):\n all_vals = []\n for index, row in user_data.iterrows():\n ema = row[\"incentive\"]\n if not isinstance(ema, dict):\n ema = json.loads(ema)\n\n incentive = ema[\"incentive\"]\n total_incentive = ema[\"totalIncentive\"]\n ema_id = ema[\"emaId\"]\n data_quality = ema[\"dataQuality\"]\n\n\n all_vals.append([row[\"timestamp\"],row[\"localtime\"], row[\"user\"],1,incentive,total_incentive,ema_id,data_quality])\n\n return pd.DataFrame(all_vals,columns=['timestamp','localtime', 'user', 
'version','incentive','total_incentive','ema_id','data_quality'])\n","sub_path":"cerebralcortex/algorithms/ema/ema_incentive_features.py","file_name":"ema_incentive_features.py","file_ext":"py","file_size_in_byte":2643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"550719582","text":"import config\nimport platform\nimport random\nimport string\nimport time\nimport requests\nimport json\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.common.exceptions import TimeoutException, NoSuchElementException, ElementNotVisibleException, WebDriverException\nfrom selenium.webdriver.common.keys import Keys\n\n\nclass Dex(object):\n def __init__(self, driver):\n self.driver = driver\n\n @staticmethod\n def random_email():\n step = random.randint(9, 14)\n list1 = []\n for _ in range(0, step):\n x = random.choice(string.ascii_lowercase + string.digits)\n list1.append(x)\n list_string = ''.join(map(str, list1))\n d1 = random.choice(['@gmail', '@yahoo', '@outlook', '@aol', '@fakemail', '@mail', '@test2', '@test3', '@test'])\n d2 = random.choice(['.com', '.org', '.ro', '.es', '.it', '.ru', '.net', '.pt', '.uk', '.de', '.pl', '.gg'])\n return \"QA_\" + list_string + d1 + d2\n\n @staticmethod\n def random_name():\n list1 = []\n for _ in range(0, 7):\n x = random.choice(string.ascii_uppercase + string.digits)\n list1.append(x)\n list_string = ''.join(map(str, list1))\n return 'QA_' + list_string\n \n\n @staticmethod\n def url_split(url: str, steps: int):\n final_list = []\n try:\n for i in range(1, steps + 1):\n url_section0 = url.split('/')[-i]\n url_section = '/' + url_section0\n final_list.insert(0, url_section)\n final_string = ''.join(map(str, final_list))\n return final_string\n except IndexError:\n print('url_split -> IndexError: List index out of range')\n raise\n\n\n\n def driver_setup(self, screens=1):\n if screens == 2:\n self.driver.set_window_position(-1920, 80)\n #self.driver.set_window_size(1800, 500)\n self.driver.maximize_window()\n pass\n\n\n def response_json(self, url, write=False, filename=\"data_file\"):\n # test_URL = \"https://jsonplaceholder.typicode.com/photos/5000\"\n response = requests.get(url)\n data = response.json()\n data_string = (json.dumps(data, indent=4))\n if write == True:\n with open(filename + \".json\", \"w\") as write_file:\n json.dump(data, write_file, indent=4)\n return data_string\n\n\n def dataLayer(self):\n data_layer = self.driver.execute_script(''' return dataLayer; ''')\n return data_layer\n\n\n def click(self, elem):\n try:\n elem.click()\n except (WebDriverException, NoSuchElementException) as _:\n time.sleep(1)\n ActionChains(self.driver).move_to_element(elem).perform()\n try:\n elem.click()\n except (WebDriverException, NoSuchElementException) as _:\n time.sleep(1)\n self.driver.execute_script(\"arguments[0].scrollIntoView();\", elem)\n self.driver.execute_script(\"scrollBy(0, -74);\")\n try:\n elem.click()\n except (WebDriverException, NoSuchElementException) as _:\n time.sleep(1)\n self.driver.execute_script('arguments[0].click();', elem)\n # print('DEBUG: arguments[0].click() was executed.')\n\n\n\n\n def find_element(self, sel):\n a01 = (sel[0] + sel[1])\n if a01 in ('//', '(/', '(('):\n elem = self.driver.find_element(By.XPATH, sel)\n else:\n elem = 
self.driver.find_element(By.CSS_SELECTOR, sel)\n return elem\n \n def find_multiple_elements(self, sel):\n a01 = (sel[0] + sel[1])\n if a01 in ('//', '(/', '(('):\n elem = self.driver.find_elements(By.XPATH, sel)\n else:\n elem = self.driver.find_elements(By.CSS_SELECTOR, sel)\n return elem\n\n\n\n def wait_for_present(self, selector, time=30):\n a01 = (selector[0] + selector[1])\n if a01 in ('//', '(/', '(('):\n try:\n element_present = EC.presence_of_all_elements_located((By.XPATH, selector))\n WebDriverWait(self.driver, time).until(element_present)\n except TimeoutException:\n print('Timed out waiting for XPATH Selector: \"' + selector + '\" to become present.')\n print('')\n raise\n else:\n try:\n element_present = EC.presence_of_all_elements_located((By.CSS_SELECTOR, selector))\n WebDriverWait(self.driver, time).until(element_present)\n except TimeoutException:\n print('Timed out waiting for CSS Selector: \"' + selector + '\" to become present.')\n print('')\n raise\n \n\n\n def wait_for_visible(self, selector, time=30):\n a01 = (selector[0] + selector[1])\n if a01 in ('//', '(/', '(('):\n try:\n element_visible = EC.visibility_of_all_elements_located((By.XPATH, selector))\n WebDriverWait(self.driver, time).until(element_visible)\n except TimeoutException:\n print('Timed out waiting for XPATH Selector: \"' + selector + '\" to become present.')\n print('')\n raise\n else:\n try:\n element_visible = EC.visibility_of_all_elements_located((By.CSS_SELECTOR, selector))\n WebDriverWait(self.driver, time).until(element_visible)\n except TimeoutException:\n print('Timed out waiting for CSS Selector: \"' + selector + '\" to become present.')\n print('')\n raise\n \n def wait_for_clickable(self, selector, time=30):\n a01 = (selector[0] + selector[1])\n if a01 in ('//', '(/', '(('):\n try:\n element_clickable = EC.element_to_be_clickable((By.XPATH, selector))\n EC.visibility_of_all_elements_located\n WebDriverWait(self.driver, time).until(element_clickable)\n except TimeoutException:\n print('Timed out waiting for XPATH Selector: \"' + selector + '\" to become present.')\n print('')\n raise\n else:\n try:\n element_clickable = EC.element_to_be_clickable((By.CSS_SELECTOR, selector))\n WebDriverWait(self.driver, time).until(element_clickable)\n except TimeoutException:\n print('Timed out waiting for CSS Selector: \"' + selector + '\" to become present.')\n print('')\n raise\n\n def switch_to_tab(self, index):\n self.driver.switch_to.window(self.driver.window_handles[index])\n \n def close_current_tab(self):\n self.driver.close()\n\n def open_in_new_tab(self, link):\n actions = ActionChains(self.driver)\n if platform.system() == 'Windows':\n actions.key_down(Keys.CONTROL)\n elif platform.system() == 'Darwin':\n actions.key_down(Keys.COMMAND)\n actions.move_to_element(link)\n actions.click()\n if platform.system() == 'Windows':\n actions.key_up(Keys.CONTROL)\n elif platform.system() == 'Darwin':\n actions.key_up(Keys.COMMAND)\n actions.perform()\n\n self.switch_to_tab(1)\n\n\n\n\n\n\"\"\" JavaScript commands \"\"\"\nclass JS(object):\n def __init__(self, driver):\n self.driver = driver\n\n def click(self, element):\n self.driver.execute_script('arguments[0].click();', element)\n\n def scroll_into_view(self, element):\n self.driver.execute_script(\"arguments[0].scrollIntoView();\", element)\n\n def scroll(self):\n self.driver.execute_script(\"scrollBy(0, -74);\")\n\n def scroll_by(self, x=0, y=-74):\n self.driver.execute_script(\"scrollBy(\" + str(x) + \",\" + str(y) + \");\")\n \n def scroll_to(self, 
x=0, y=0):\n self.driver.execute_script(\"window.scrollTo(\" + x + \", \" + y + \")\")\n\n\n\n\n\n\n\"\"\" Decorators used to repeat a function X times (use @repeat(X) just before declaring the function) \"\"\"\ndef repeat(times):\n def repeatHelper(f):\n def callHelper(*args):\n for _ in range(0, times):\n f(*args)\n return callHelper\n return repeatHelper\n\n\n\"\"\" Similar behavior to 'repeat' \"\"\"\ndef retry(howmany):\n def tryIt(func):\n def f():\n attempts = 0\n while attempts < howmany:\n try:\n return func()\n except:\n attempts += 1\n return f\n return tryIt\n","sub_path":"dex_library/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":8805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"358043103","text":"import plotly\nfrom plotly.graph_objs import Scatter, Layout\nimport pandas as pd\nimport numpy as np\nimport sys\n\nif len(sys.argv) == 1:\n print(\"please enter file to plot\")\n sys.exit()\n\ndf = pd.read_csv(sys.argv[1],error_bad_lines=False)\ndf.head()\n\nplotly.offline.plot({\n \"data\": [Scatter(y=df[\"X\"],name='X')\n ,Scatter(y=df[\"Y\"],name='Y')\n ,Scatter(y=df[\"Z\"],name='Z')\n ],\n \"layout\": Layout(title=\"Accelerometer\")\n})","sub_path":"data_read_and_plot/rchart.py","file_name":"rchart.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"276534123","text":"import os\nimport responses\nfrom copy import deepcopy\nfrom nextcode import config, Client\nfrom nextcode.services import BaseService\nfrom nextcode.session import ServiceSession\nfrom nextcode.exceptions import (\n InvalidToken,\n InvalidProfile,\n ServiceNotFound,\n ServerError,\n)\nfrom nextcode.utils import decode_token, check_resp_error\nfrom tests import BaseTestCase, REFRESH_TOKEN, ACCESS_TOKEN, AUTH_URL, AUTH_RESP\n\ncfg = config.Config()\n\nSERVICE_URL = \"https://test.wuxinextcode.com/service\"\nROOT_RESP = {\n \"endpoints\": {\n \"health\": SERVICE_URL + \"/health\",\n \"documentation\": SERVICE_URL + \"/documentation\",\n },\n \"current_user\": {\"email\": \"testuser\"},\n \"service_name\": \"myservice\",\n \"build_info\": {\"version\": \"1.0.0\"},\n}\n\n\nclass ServicesTest(BaseTestCase):\n @responses.activate\n def setUp(self):\n super(ServicesTest, self).setUp()\n self.client = Client(api_key=REFRESH_TOKEN)\n\n @responses.activate\n def test_base_service(self):\n responses.add(responses.POST, AUTH_URL, json=AUTH_RESP)\n responses.add(responses.GET, SERVICE_URL, json=ROOT_RESP)\n svc = BaseService(self.client, service_path=\"service\")\n svc.status()\n svc.status(force=True)\n self.assertEqual(\n \"\",\n repr(svc),\n )\n _ = svc.build_info\n _ = svc.app_info\n _ = svc.endpoints\n\n @responses.activate\n def test_localhost(self):\n os.environ[\"SERVICE_IN_ROOT\"] = \"1\"\n responses.add(responses.POST, AUTH_URL, json=AUTH_RESP)\n responses.add(responses.GET, \"http://localhost/\", json=ROOT_RESP)\n client = Client(api_key=REFRESH_TOKEN)\n client.profile.root_url = \"http://localhost\"\n _ = BaseService(client, service_path=\"service\")\n client.profile.root_url = \"\"\n with self.assertRaises(InvalidProfile):\n _ = BaseService(client, service_path=\"service\")\n client.profile.root_url = \"http://localhost\"\n client.profile.skip_auth = True\n _ = BaseService(client, service_path=\"service\")\n del 
os.environ[\"SERVICE_IN_ROOT\"]\n","sub_path":"tests/test_services.py","file_name":"test_services.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"25004948","text":"# -*- coding: utf-8 -*-\n# @project : just_to_eat\n# @file : 数字序列.py\n# @time : 2020-03-31\n\n'''\n时间限制:C/C++ 2秒,其他语言4秒\n\n空间限制:C/C++ 256M,其他语言512M\n\n一个由若干个取值范围在[1,2^31-1]的整数构成长度为N的数字序列,其中N<=5,000,000;求该数字序列上一段最小的连续区间的长度,要求该区间内正好包含了所有不同的数字,如果存在多个这样的区间,按照出现的顺序有序输出所有的区间起始和结束位置,序列的位置编号从1到N,其中最小的区间长度不会超过10,000。\n\n输入描述:\n第一行:N\n第2至N+1行:每行1个数\n\n输出描述:\n第一行:最小的区间长度 区间个数X (以空格进行分隔)\n第二行:X个区间的起始和结束位置,按照出现的顺序有序输出,多个区间之间以空格分隔,每个区间的输出格式如下所示:[start,end],最后以换行结尾\n\n输入例子1:\n10\n1\n1\n3\n4\n6\n6\n5\n1\n3\n3\n\n输出例子1:\n6 3\n[2,7] [3,8] [4,9]\n'''\nfrom collections import defaultdict\n\nclass Solution():\n def min_list(self, N, number_list):\n d = set()\n res = []\n for i in number_list:\n res.append(i)\n if i not in d:\n d.add(i)\n total = []\n min_length = len(res)\n temp = defaultdict(int)\n start, end = 0, 1\n temp[res[0]] = 1\n temp_len = 1\n while (end < N + 1):\n if temp_len == len(d):\n if end - start < min_length:\n total = [[start + 1, end]]\n min_length = end - start\n elif end - start == min_length:\n total.append([start + 1, end])\n if temp[res[start]] == 1:\n temp_len -= 1\n temp[res[start]] -= 1\n start += 1\n else:\n if end < N and temp[res[end]] == 0:\n temp_len += 1\n if end < N:\n temp[res[end]] += 1\n end += 1\n\n print(min_length, len(total))\n for i in total:\n print('[' + str(i[0]) + ',' + str(i[1]) + ']', end=' ')\n print(end='\\n')\n\nif __name__ == \"__main__\":\n solution = Solution()\n solution.min_list(10, [1, 1, 3, 4, 6, 6, 5, 1, 3, 3])\n","sub_path":"practice/数字序列.py","file_name":"数字序列.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"613499153","text":"import urllib.request\nfrom bs4 import BeautifulSoup\n\nclass DataReader:\n def __init__(self, url):\n self.url = url\n\n def read_data(self):\n source = urllib.request.urlopen(self.url)\n raw_text = source.read()\n output = raw_text.decode('utf-8')\n source.close()\n return output\n\nclass DataParser:\n def __init__(self, page_source):\n self.page_source = page_source\n\n def parse_data(self):\n soup = BeautifulSoup(self.page_source, 'lxml')\n teachers_list = []\n for con in soup.find_all('div', {'class': 'names-list'}):\n for teachers in con.find_all('li'):\n teachers_list.append(teachers)\n new_str = []\n for field in teachers_list:\n field = str(field)\n tmp = field[field.find('
  • ') + 4: field.find('
  • ')]\n if not tmp.find('') == -1:\n tmp = tmp[tmp.find('>', 4) + 1: tmp.find('')]\n if not tmp.find('') == -1:\n tmp = tmp[tmp.find('') + 8 : tmp.find('')]\n tmp = tmp.strip()\n if tmp:\n new_str.append(tmp)\n return new_str\n\nobj = DataReader('http://www.jedynka.zgora.pl/?show=kadra')\n\nsite_content = DataParser(obj.read_data())\ncn = site_content.parse_data()\n\nfor i in cn:\n print(i)","sub_path":"webscrapping.py","file_name":"webscrapping.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"40609429","text":"\"\"\"A setuptools based setup module.\n\n\"\"\"\n\n# Always prefer setuptools over distutils\nfrom setuptools import setup, find_packages\n# To use a consistent encoding\nfrom codecs import open\nfrom os import path\n\nhere = path.abspath(path.dirname(__file__))\n\n# Get the long description from the README file\nwith open(path.join(here, 'README.rst'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='momobot',\n\n # Versions should comply with PEP440. For a discussion on single-sourcing\n # the version across setup.py and the project code, see\n # https://packaging.python.org/en/latest/single_source_version.html\n version='0.0.1',\n\n description=\"\"\"\n A Python project based on Slackbot allowing to create and manage polls\n \"\"\",\n long_description=long_description,\n\n # The project's main homepage.\n url='https://github.com/axelroy/slackMomoBot',\n\n # Author details\n author='Ramseyer Sylvain & Roy Axel',\n author_email='sylvain.ramseyer@gmail.com & royaxe@gmail.com',\n\n # Choose your license\n license='MIT',\n\n # See https://pypi.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n # How mature is this project? Common values are\n # 3 - Alpha\n # 4 - Beta\n # 5 - Production/Stable\n 'Development Status :: 3 - Alpha',\n\n # Pick your license as you wish (should match \"license\" above)\n 'License :: OSI Approved :: MIT License',\n\n # Specify the Python versions you support here. In particular, ensure\n # that you indicate whether you support Python 2, Python 3 or both.\n 'Programming Language :: Python :: 3.5',\n ],\n\n # What does your project relate to?\n keywords='polls, bot, slackbot',\n\n # You can just specify the packages manually nhere if your project is\n # simple. Or you can use find_packages().\n packages=find_packages(exclude=[\"source\"]),\n\n # Alternatively, if you want to distribute just a my_module.py, uncomment\n # this:\n # py_modules=[\"my_module\"],\n\n # List run-time dependencies here. These will be installed by pip when\n # your project is installed. 
For an analysis of \"install_requires\" vs pip's\n # requirements files see:\n # https://packaging.python.org/en/latest/requirements.html\n install_requires=['aiohttp', 'websockets'],\n extras_requires={\n 'test': ('pytest',),\n 'doc': ('Sphinx', 'sphinx_rtd_theme'),\n },\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"41793573","text":"import scrapy\nimport logging\n\n\nclass SidehustleproSpider(scrapy.Spider):\n name = 'sidehustlepro'\n start_urls = ['http://www.sidehustlepro.co/podcast-episodes/']\n counter = 1\n\n def parse(self, response):\n titles = response.xpath('//article/div/div/h3/a/text()')\n for title in titles:\n yield {self.counter:title.get()}\n self.counter += 1\n\n next_page = response.css('.next::attr(href)').get()\n if next_page is not None:\n self.log('next page ' + next_page, logging.CRITICAL)\n yield scrapy.Request(next_page, callback=self.parse)","sub_path":"proj/spiders/sidehustlepro.py","file_name":"sidehustlepro.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"82333039","text":"\r\n\"\"\"\r\nOriginal code by Rob van Putten\r\nChanges by Nynke ter Heide:\r\n - only final ML-model, stripped from intermediate steps\r\n - addition of GEF name to df\r\n - print and plot feature coefficients\r\n - soil pressure as feature replaced by depth\r\n\"\"\"\r\n\r\nimport sys, os, math\r\nimport pandas as pd\r\nsys.path.append(\"C:\\\\Anaconda3\\\\Lib\\\\site-packages\") #using windows.. so have to 'hack' a little\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.linear_model import SGDClassifier\r\nfrom bbgeolib.objects.gef import get_from_file\r\nfrom bbgeolib.tools.gef import get_unit_weight_from_cpt, get_soilstress_from_cpt\r\nimport numpy as np\r\n\r\nfiles = [\r\n 'gefs\\\\E06-1441.GEF',\r\n 'gefs\\\\E06-1443.GEF',\r\n 'gefs\\\\E06-1444.GEF',\r\n 'gefs\\\\E06-1445.GEF',\r\n 'gefs\\\\E07-1107.GEF',\r\n 'gefs\\\\E07-1109.GEF',\r\n 'gefs\\\\E07-1110.GEF',\r\n 'gefs\\\\E07-1112.GEF',\r\n 'gefs\\\\E07-1113.GEF'\r\n] \r\n\r\nsoils = {\r\n 'E06-1441':'99,OB,-0.6,KS,-4.0,HV,-6.0,KS,-6.7,WZ, -8.8,KM,-10.5,BV,-10.6,PZ,-13.3,KM,-14.3,PZ',\r\n 'E06-1443':'99,OB,-0.9,KS,-4.0,HV,-6.2,KS,-6.9,WZ, -9.3,KM,-11.1,BV,-11.4,PZ,-13.7,KM,-15.1,PZ',\r\n 'E06-1444':'99,OB,-1.0,KS,-3.9,HV,-5.9,KS,-6.2,WZ,-10.4,KM,-11.2,BV,-11.9,PZ,-14.2,KM',\r\n 'E06-1445':'99,OB,-1.0,KS,-4.1,HV,-6.0,KS,-6.2,WZ,-10.5,KM,-11.2,BV,-12.5,PZ,-14.6,KM,-15.7,PZ',\r\n 'E07-1107':'99,OB,-0.6,KS,-3.2,HV,-6.2,KS,-6.9,WZ, -9.8,KM,-11.6,BV,-12.3,PZ,-14.7,KM',\r\n 'E07-1109':'99,OB,-0.4,KS,-2.7,HV,-5.8,KS,-6.1,WZ,-10.2,KM,-12.0,BV,-12.4,PZ,-14.7,KM,-15.7,PZ',\r\n 'E07-1110':'99,OB,-0.5,KS,-3.5,HV,-6.4,KS,-6.8,WZ, -9.9,KM,-11.2,BV,-11.6,KM,-17.7,PZ',\r\n 'E07-1112':'99,OB,-0.9,KS,-3.6,HV,-6.4,KS,-6.8,WZ, -9.9,KM,-10.9,BV,-11.2,KM,-12,BV,-12.6,PZ,-14.6,KM,-17.2,PZ',\r\n 'E07-1113':'99,OB,-0.9,KS,-4.2,HV,-6.2,KS,-6.4,WZ, -9.8,KM,-10.5,BV,-10.8,KM,-11.9,BV,-12.3,PZ,-14.6,KM,-17,PZ'\r\n}\r\n\r\n# functon to extract data from gef files\r\ndef create_input_with_soilstress():\r\n dfs = [] \r\n for file in files:\r\n gef = get_from_file(file)\r\n name = os.path.basename(gef.filename).split('.')[0]\r\n df = gef.as_dataframe()\r\n df = 
get_soilstress_from_cpt(gef)\r\n df['soilname'] = 'unknown'\r\n args = soils[name].split(',')\r\n soilnames = args[1::2]\r\n tops = [float(a) for a in args[0::2]]\r\n bottoms = tops[1:]\r\n bottoms.append(-99)\r\n for soilname, top, bottom in zip(soilnames,tops,bottoms): \r\n df.loc[(df['depth']<=top) & (df['depth']>=bottom), 'soilname']=soilname\r\n df = df[['depth', 'qc','fs', 'sv', 'soilname']]\r\n df['GEFname'] = name\r\n dfs.append(df)\r\n \r\n return pd.concat(dfs) \r\n\r\n# extract data from gef files\r\ndf3 = create_input_with_soilstress()\r\n\r\n# transform features qc and fs to log scale\r\ndf3['qc'] = df3['qc'].apply(lambda x: math.log(x))\r\ndf3['fs'] = df3['fs'].apply(lambda x: math.log(x))\r\n\r\n# normalise the features to a value between 0 and 1 (min = 0, max = 1)\r\ndf3['qcn']=(df3['qc']-df3['qc'].min())/(df3['qc'].max()-df3['qc'].min())\r\ndf3['fsn']=(df3['fs']-df3['fs'].min())/(df3['fs'].max()-df3['fs'].min())\r\ndf3['depth']=(df3['depth']-df3['depth'].min())/(df3['depth'].max()-df3['depth'].min())\r\n\r\n# remove noise\r\ndf3 = df3[df3['fsn']>0.1]\r\n\r\n# make soil classes numeric\r\nle = LabelEncoder()\r\ndf3['class'] = le.fit_transform(df3['soilname'])\r\nsoillabels = le.inverse_transform([0,1,2,3,4,5,6])\r\n\r\n# split in features (X) and target (y)\r\nX = df3[['qcn', 'fsn', 'depth']].values\r\ny = df3['class'].values\r\n\r\nclf = SGDClassifier(loss=\"hinge\", penalty=\"l2\", max_iter=1000, tol=1e-4, class_weight= 'balanced', shuffle = True)\r\ndf3['sgdclassifier'] = clf.fit(X,y).predict(X)\r\nprint(\"Accuracy SGDClassifier:\", accuracy_score(y, df3['sgdclassifier']))\r\n\r\n# print the coefficients of the features\r\nprint(clf.coef_)\r\n\r\n# to df and add labels for clarity and plotting\r\ncoefs = pd.DataFrame(clf.coef_, columns = ['feature_coef_1', 'feature_coef_2', 'feature_coef_3'])\r\ncoefs['soilnames'] = soillabels\r\ncoefs = pd.wide_to_long(coefs, stubnames = 'feature_coef_', i='soilnames', j = 'feature')\r\ncoefs = coefs.reset_index()\r\ncoefs['feature_name'] = np.select(condlist = [(coefs['feature'] == 1), (coefs['feature'] == 2), (coefs['feature'] == 3)], \r\n choicelist = ['qcn', 'fsn', 'depth'], default='empty')\r\n\r\n# plot the coefficients\r\nimport seaborn as sns\r\nsns.set(style='whitegrid')\r\n_ = sns.barplot(x='soilnames', y = 'feature_coef_', hue = 'feature_name', data = coefs)\r\n_ = plt.legend(loc='upper right', bbox_to_anchor=(1.2, 1))\r\n_ = plt.tight_layout()\r\n_ = plt.ylabel('feature coefficient')\r\n_ = plt.xlabel('soil class')\r\nplt.savefig(\"feature_coefficient_SGDclassifier_depth.jpg\", dpi = 300)\r\n\r\n\r\n","sub_path":"geo_AI_SGDclassifier_depth.py","file_name":"geo_AI_SGDclassifier_depth.py","file_ext":"py","file_size_in_byte":4843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"446263220","text":"from utils import parse_color\nfrom command import Command\nfrom error import ParseError, ExecutionError\nfrom jsonschema import Draft7Validator\n\n\nclass TurnOff(Command):\n\n def __init__(self):\n super().__init__()\n arguments_schema = {\n \"$schema\": \"https://json-schema.org/schema#\",\n \"type\": \"object\",\n \"properties\": {\n \"section_id\": {\n \"type\": \"string\",\n }\n },\n }\n self.validator = Draft7Validator(arguments_schema)\n\n def validate_arguments(self):\n errors = [e for e in self.validator.iter_errors(self.args)]\n if len(errors) > 0:\n raise ParseError(errors)\n\n def exec(self):\n section_id = self.args['section_id'] if 'section_id' in self.args else 
None\n self.controller.turn_off(section_id)\n","sub_path":"src/commands/turn_off.py","file_name":"turn_off.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"304898058","text":"#!/usr/bin/python3\n\nfrom messageAPI import FloodProxy\n\ndef main():\n print(\"Creating Flood Proxy.\")\n fb = FloodProxy()\n print(\"Running Flood Proxy.\")\n fb.run()\n print(\"This comes after a true loop and should not be visible.\")\n print(\"Error in the Flood Proxy app.\")\n\nif __name__ == '__main__':\n main()\n","sub_path":"floodproxy.py","file_name":"floodproxy.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"119204919","text":"while 1:\n a=''\n x=input()\n if len(x)<1:break\n b=''\n for i in x:\n if i!=\" \":a=a+str(i.isupper()*1)\n if len(a)==5:\n b+=(chr(ord('A')+int(a,2)))\n a=''\n print(b)","sub_path":"solutions/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"125820058","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport re\nimport prettytable\nfrom datetime import datetime\nfrom collections import OrderedDict\nfrom .utils import requests_get, exit_after_echo,colored\n\nQUERY_URL = 'https://kyfw.12306.cn/otn/lcxxcx/query'\nFROM_STATION_NOT_FOUND = 'from station not found'\nTO_STATION_NOT_FOUND = 'to station not found'\nNO_RESPONSE = 'no response'\nINVALID_DATE = 'invalid date'\nTABLE_HEADER = ('车次','车站','时间','历时','商务','一等','二等','软卧','硬卧','软座','硬座','无座')\n\n\nclass TrainsCollection():\n def __init__(self, rows, opt):\n self._rows = rows\n self._opt = opt\n\n def _during_time(self,lishi):\n hours,minutes = lishi.split(':')\n if hours == '00':\n return '%s分钟'%minutes\n return '%s小时%s分钟'%(hours,minutes)\n\n\n def trains(self):\n for row in self._rows:\n train_code = row.get('station_train_code')[0].lower()\n if not self._opt or train_code in self._opt: \n train = (row.get('station_train_code'),\n colored.green(row.get('from_station_name'))+'\\n'+colored.red(row.get('to_station_name')),\n colored.green(row.get('start_time'))+'\\n'+colored.red(row.get('arrive_time')),\n self._during_time(row.get('lishi')),\n row.get('swz_num'),# 商务座\n row.get('zy_num'), # 一等座\n row.get('ze_num'), # 二等座\n row.get('rw_num'), # 软 卧\n row.get('yw_num'), # 硬 卧\n row.get('rz_num'), # 软 座\n row.get('yz_num'), # 硬 座\n row.get('wz_num')) # 无 座\n yield train\n\n def preety_print(self):\n table = prettytable.PrettyTable(header=False)\n table.add_row(TABLE_HEADER)\n for train in self.trains():\n table.add_row(train)\n print(table)\n\n\nclass TrainTicketsQuery():\n def __init__(self, from_station, to_station, date, opts=None):\n\n self.from_station = from_station\n self.to_station = to_station\n self.date = date\n self.opts = opts\n\n def stations(self):\n filepath = os.path.join(\n os.path.dirname(__file__),\n 'datas', 'stations.dat'\n )\n d = {}\n with open(filepath, 'r', encoding='utf-8') as f:\n for line in f.readlines():\n name, telecode = line.split()\n d.setdefault(name, telecode)\n return d\n\n def _from_station_telecode(self):\n code = self.stations().get(self.from_station)\n if not code:\n exit_after_echo(FROM_STATION_NOT_FOUND)\n return code\n\n def _to_station_telecode(self):\n code = self.stations().get(self.to_station)\n if not code:\n exit_after_echo(TO_STATION_NOT_FOUND)\n return code\n\n def 
_parser_date(self):\n result = ''.join(re.findall('\\d',self.date))\n l = len(result)\n \n # 处理类似6.1 、6.21 、0621之类的��入\n if l in (2,3,4):\n year = str(datetime.today().year)\n return year + result\n\n # 处理类似2016.6.1 、2016.6.21、2016.06.21之类的输入\n if l in (6,7,8):\n return result\n return ''\n\n def _valid_date(self):\n date = self._parser_date()\n if not date:\n exit_after_echo(INVALID_DATE)\n\n try:\n date = datetime.strptime(date,'%Y%m%d')\n except ValueError:\n exit_after_echo(INVALID_DATE)\n \n # 火车票预售期为50天\n offset = date - datetime.today()\n \n if offset.days not in range(-1,50):\n exit_after_echo(INVALID_DATE)\n\n return datetime.strftime(date, '%Y-%m-%d')\n\n def _build_params(self):\n d = OrderedDict()\n d['purpose_codes'] = 'ADULT'\n d['queryDate'] = self._valid_date()\n d['from_station'] = self._from_station_telecode()\n d['to_station'] = self._to_station_telecode()\n return d\n\n def query(self):\n params = self._build_params()\n\n r = requests_get(QUERY_URL, params=params, verify=False)\n try:\n rows = r.json()['data']['datas']\n except KeyError:\n rows = []\n except TypeError:\n exit_after_echo(NO_RESPONSE)\n\n return TrainsCollection(rows, self.opts)\n\n\ndef query(params):\n \"\"\"`params` is a list, contains `from`, `to`, `date`.\"\"\"\n return TrainTicketsQuery(*params).query()\n","sub_path":"iquery/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"303747858","text":"import os\nimport time\nfrom datetime import datetime\n\nfrom .attachments import Comment, Owner, Wall\nfrom .groups import Group\nfrom .post import post\nfrom .uniobj import detect\nfrom .users import User\nfrom .utils import pathjoin\nfrom .vkscript import VkScript\n\n_preloader = None\n\n\ndef get(return_banned=False, start_time=None, end_time=None,\n max_photos=5, sources=None, count=50, ignore_ads=True,\n start_from=None, preloadComments=(0, 0)):\n \"\"\" Возвращает массив объектов Wall \"\"\"\n global _preloader\n if type(preloadComments) != tuple:\n raise TypeError(\"preloadComments must be tuple\" +\n \"(count, thread_count)\")\n j = {\"return_banned\": int(return_banned),\n \"filters\": \"post\",\n \"count\": count,\n \"max_photos\": int(max_photos),\n \"ignore_ads\": int(ignore_ads)}\n\n if start_time is not None:\n if isinstance(start_time, datetime):\n start_time = start_time.timestamp()\n j[\"start_time\"] = start_time\n\n if end_time is not None:\n if isinstance(end_time, datetime):\n start_time = end_time.timestamp()\n j[\"end_time\"] = end_time\n\n if sources is not None:\n j[\"source_ids\"] = \",\".join([str(detect(s)) for s in sources])\n\n if start_from is not None:\n j[\"start_from\"] = start_from\n\n result = []\n next_from = 0\n # Подгружаем комменты\n if preloadComments != (0, 0):\n if _preloader is None:\n _preloader = VkScript.loadInternal(\"newsfeedCommentsPreload\")\n # За один запрос мы можем получить максимум 24 поста,\n # так что делаем цикл со счётчиком на случай, если\n # потребуется сделать это в несколько вызовов\n while count > 0:\n _c = min(24, count)\n resp = _preloader.execute(params=str(j), count=_c,\n c_count=preloadComments[0],\n ct_count=preloadComments[1])\n # Сохраняем время последнего поста, а также\n # присваиваем в j, для последующих итераций\n next_from = resp.get(\"next_from\", None)\n j[\"start_from\"] = next_from\n \n for i in resp[\"items\"]:\n w = Wall.fromJSON(i[0])\n if i[1] is None:\n continue\n w.comments._saved = 
[Comment.fromJSON(c, w) for c in i[1]]\n w.comments._start = 0\n w.comments._end = _c\n result.append(w)\n count -= _c\n else:\n resp = post(\"newsfeed.get\", j)\n for item in resp[\"items\"]:\n if item[\"type\"] == Newsfeed.POST:\n result.append(Wall.fromJSON(item))\n next_from = resp.get(\"next_from\", None)\n return (result, next_from)\n\n\nclass Newsfeed:\n \"\"\" Нужен для непрерывного получения обновлений из ленты. \n Метод `getNewer` будет возвращать новые посты каждый раз, \n а `getOlder` - листать ленту вниз с текущего момента. \"\"\"\n POST = \"post\"\n PHOTO = \"photo\"\n PHOTO_TAG = \"photo_tag\"\n WALL_PHOTO = \"wall_photo\"\n FRIEND = \"friend\"\n NOTE = \"note\"\n AUDIO = \"audio\"\n VIDEO = \"video\"\n\n def __init__(self, return_banned=False, start_time=None,\n end_time=None, max_photos=None):\n self.return_banned = return_banned\n self.end_time = end_time\n self.max_photos = max_photos\n\n self._start_from = None\n self._last_time = time.time()\n\n def reset(self):\n \"\"\" Возвращает ленту к самому свежему посту \n\n `getNewer` и `getOlder` теперь будут \n возвращать посты с этого места.\"\"\"\n self._start_from = None\n self._last_time = time.time()\n\n def getNewer(self, sources=None, count=50,\n ignore_ads=True, preloadComments=(0, 0)):\n \"\"\" Возвращает массив новых постов со времени последнего вызова \n `preloadComments` - Ожидается `(кол-во, кол-во ответов)` \n Если было указано, то в `comments` каждого коммента \n будут комменты \"\"\"\n result = []\n while len(result) < count:\n t = time.time()\n posts = get(sources=sources,\n count=min(count, 100),\n ignore_ads=ignore_ads,\n preloadComments=preloadComments,\n start_time=self._last_time,\n end_time=None)\n self._last_time = t\n if len(posts[0]) == 0:\n break\n result += posts[0]\n return result\n\n def getOlder(self, sources=None, count=50,\n ignore_ads=True, preloadComments=(0, 0)):\n \"\"\" Листает новостную ленту вниз, каждый вызов более старые посты \n `preloadComments` - Ожидается `[кол-во, кол-во ответов]` \n `source` - источники \n `count` - Количество, макс 100. 
\n `ignore_ads` - Не возвращать посты `marked_as_ads` \"\"\"\n result = []\n while len(result) < count:\n posts = get(sources=sources,\n count=min(count, 100),\n ignore_ads=ignore_ads,\n preloadComments=preloadComments,\n start_from=self._start_from)\n self._start_from = posts[1]\n self._last_time = posts[0][-1].date\n result += posts[0]\n return result\n\n\ndef _getgidsanduids(sources):\n uids = []\n gids = []\n for s in sources:\n t = type(s)\n if t == User:\n uids += str(s.id)\n elif t == Group:\n gids += str(g.id)\n elif t == Owner:\n if s.id > 0:\n uids += str(s.id)\n else:\n gids += str(abs(s.id))\n elif t == int:\n if s > 0:\n uids += str(s)\n else:\n gids += str(abs(s))\n return (\",\".join(uids), \",\".join(gids))\n\n\ndef addBan(sources):\n \"\"\" Запрещает показывать новости от указанных групп или пользователей \n `sources` - `User`, `Group`, `Owner` или ID источника(ов) \"\"\"\n if not isinstance(sources, (list, tuple)):\n sources = [sources]\n uids, gids = _getgidsanduids(sources)\n return post(\"newsfeed.addBan\", {\"user_ids\": uids,\n \"group_ids\": gids})\n\n\ndef deleteBan(sources):\n \"\"\" Разрешает показывать новости от указанных групп или пользователей \n `sources` - `User`, `Group`, `Owner` или ID источника(ов) \"\"\"\n if not isinstance(sources, (list, tuple)):\n sources = [sources]\n uids, gids = _getgidsanduids(sources)\n return post(\"newsfeed.deleteBan\", {\"user_ids\": uids,\n \"group_ids\": gids})\n\n\n__all__ = [\"Newsfeed\", \"get\"]\n","sub_path":"build/lib/vkApi/newsfeed.py","file_name":"newsfeed.py","file_ext":"py","file_size_in_byte":7571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"70475064","text":"from util.data_container import ExhaustionTestContainer\nfrom util.tests import ExhaustionTester\nfrom util.plot import Plotter\n\ntester = ExhaustionTester()\n\ndictionary = tester.dao.load('results/van_der_pol/2020-10-25-04-19-43-exhaustion-test/data.json')\n\ndata_container = ExhaustionTestContainer()\ndata_container.test_T = 0.5\ndata_container.load_results(dictionary)\n\nplotter = Plotter()\ntester.plot_graphs(data_container, plotter)\nplotter.show()\n","sub_path":"exhaustion_test_plot.py","file_name":"exhaustion_test_plot.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"306451575","text":"from bs4 import BeautifulSoup\r\nimport urllib.request\r\nimport os\r\nimport json\r\nfrom hashlib import sha1\r\nfrom queue import Queue\r\nfrom threading import Thread\r\nimport getpass\r\nimport ctypes\r\nimport winreg\r\nimport random\r\nuser = getpass.getuser()\r\nWORKER_THREADS = 40\r\n\r\nq = Queue()\r\n\r\n\r\ndef worker():\r\n while True:\r\n resData = q.get()\r\n if resData is None:\r\n break\r\n link = resData[0]\r\n Type = resData[1]\r\n DIR = resData[2]\r\n try:\r\n req = urllib.request.Request(link)\r\n req.add_header('User-Agent', \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36\")\r\n res = urllib.request.urlopen(req)\r\n raw_img = res.read()\r\n filename = str(sha1(link.encode()).hexdigest()) + \".\" + Type\r\n if len(Type) != 0 and res.info().get(\"Content-Type\") != \"text/html\":\r\n f = open(os.path.join(DIR, filename), 'wb')\r\n f.write(raw_img)\r\n f.close()\r\n except Exception as e:\r\n print(\"could not load : \" + link)\r\n print(e)\r\n q.task_done()\r\n\r\n\r\ndef get_soup(url, header):\r\n return 
BeautifulSoup(urllib.request.urlopen(urllib.request.Request(url, headers=header)), 'html.parser')\r\n\r\n\r\nquery = input('What wallpapers do you want?\\n')\r\nquery = query.split()\r\nquery = '+'.join(query)\r\nurl = \"https://www.google.com/search?q=\" + query + \"&source=lnms&tbm=isch\"\r\nDIR = 'C://Users//' + user + '//Pictures//Wallpapers'\r\nif not os.path.exists(DIR):\r\n os.makedirs(DIR)\r\n\r\nheader = {'User-Agent': \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36\"\r\n }\r\nsoup = get_soup(url, header)\r\nif not os.path.exists(DIR):\r\n os.mkdir(DIR)\r\nDIR = os.path.join(DIR, query.split()[0])\r\nif not os.path.exists(DIR):\r\n os.mkdir(DIR)\r\nfor i in range(WORKER_THREADS):\r\n t = Thread(target=worker)\r\n t.daemon = True\r\n t.start()\r\nfor a in soup.find_all(\"div\", {\"class\": \"rg_meta\"}):\r\n link, Type = json.loads(a.text)[\"ou\"], json.loads(a.text)[\"ity\"]\r\n q.put([link, Type, DIR])\r\n\r\nq.join()\r\nprint('Downloaded to ' + str(DIR))\r\n\r\ncurrentuser = getpass.getuser()\r\nsourcefolder = (DIR)\r\n\r\n\r\nfor folder, subfolders, files in os.walk(sourcefolder):\r\n for file in files:\r\n if \".jpg\" in file:\r\n filePath = os.path.join(os.path.abspath(folder), file)\r\n source = random.choice(os.listdir(sourcefolder))\r\n sourcePath = os.path.join(os.path.abspath(sourcefolder), source)\r\n regKey = winreg.OpenKeyEx(winreg.HKEY_CURRENT_USER, \"Control Panel\\\\Desktop\", 0, winreg.KEY_SET_VALUE)\r\n winreg.SetValueEx(regKey, \"WallpaperStyle\", 0, winreg.REG_SZ, \"0\")\r\n winreg.SetValueEx(regKey, \"TileWallpaper\", 0, winreg.REG_SZ, \"0\")\r\n ctypes.windll.user32.SystemParametersInfoW(20, 0, sourcePath)\r\n","sub_path":"BrandonsWallpaperGrabber.py","file_name":"BrandonsWallpaperGrabber.py","file_ext":"py","file_size_in_byte":2965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"486276352","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nTo crawl the stock market and put in mysql\n\"\"\"\n\n__author__ = 'MNI'\n__version__ = '1.0'\n\nfrom pathlib import Path\nimport sys\nimport logging\nfrom time import time\nfrom selenium import webdriver\nfrom selenium.webdriver.firefox.options import Options\nimport mysql.connector\nfrom time import sleep\nfrom datetime import datetime\n\n\ndef logger():\n path_logger = f\"{Path(__file__).absolute()}\"\n logging.basicConfig(stream=sys.stdout,\n format='%(asctime)s : %(levelname)s : %(message)s',\n datefmt='%d-%m-%Y %I:%M:%S %p')\n logging.root.setLevel(level=logging.INFO)\n logging.info(f\"Running {path_logger}\")\n return logging.getLogger(path_logger)\n\n\ndef driver():\n options = Options()\n options.headless = True\n return webdriver.Firefox(executable_path=f\"{Path.cwd()}/geckodriver\", options=options)\n\n\ndef data(parts):\n return [x.text for x in run_driver.find_element_by_id('DataTables_Table_0_wrapper')\n .find_elements_by_class_name(parts)]\n\n\ndef max_page():\n return int([x.text for x in run_driver.find_element_by_class_name(\"pagination\")\n .find_elements_by_tag_name(\"li\")][-2])\n\n\n# get current time - 01-01-2019 23:59:59\ndef current_time():\n dt = datetime.now()\n dt = dt.strftime(\"%Y-%m-%dT%H:%M:%S\")\n return dt\n\n\n# get current time (epoch)\ndef current_epoch():\n dt = datetime.now()\n dt = int(dt.strftime(\"%s\")) * 1000\n return dt\n\n\nif __name__ == \"__main__\":\n t0 = time()\n log = logger()\n\n # set mysql parameters\n mydb = mysql.connector.connect(\n host=\"localhost\",\n 
user=\"nafizizzat\",\n passwd=\"mni123#@!\",\n database=\"stocks\"\n )\n mycursor = mydb.cursor()\n\n # set driver parameters\n run_driver = driver()\n url = \"https://www.bursamalaysia.com/market_information/shariah_compliant_equities_prices?per_page=50\"\n open_url = run_driver.get(url)\n\n page = 1\n while True:\n # stop if reach max page\n if page == max_page():\n log.info(f\"all {max_page()} collected\")\n break\n else:\n # scrap web page\n log.info(max_page())\n\n current_url = run_driver.current_url\n new_url = run_driver.get(f\"{url}&page={page}\")\n scrap_page = (data('odd') + data('even'))\n\n for scrap_rows in scrap_page:\n # clean data\n clean_rows = scrap_rows.replace(\"\\n\", \" \").replace(\"\\\"\", \"\").replace(\",\", \"\")\\\n .replace(\"[\", \"\").replace(\"]\", \"\").replace(\"+\", \"\")\n each_row = clean_rows.split()\n if len(each_row) == 18:\n del each_row[4]\n elif len(each_row) == 16:\n each_row.insert(4, \"-\")\n elif len(each_row) == 15:\n each_row.insert(4, \"-\")\n each_row.insert(4, \"-\")\n each_row.extend([current_time(),current_time(),current_epoch()])\n print(each_row)\n\n # sql transfer row by row\n sql_string = \"\"\"INSERT INTO stocks.stocks_raw\n (no,name,syariah,code,dividend_type,rem,last_done,lacp,chg,\n chg_percentage,volume,buy_volume,buy,sell,sell_volume,high,low,\n time_created,time_updated,time_epoch)\n VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\"\"\n mycursor.execute(sql_string, each_row)\n mydb.commit()\n print(mycursor.rowcount, \"record inserted.\")\n\n page += 1\n sleep(.5)\n continue\n\n run_driver.close()\n t1 = time()\n timer = t1 - t0\n print(timer)\n","sub_path":"crawl_market.py","file_name":"crawl_market.py","file_ext":"py","file_size_in_byte":3776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"227806123","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n#The MIT License (MIT)\n#\n#Copyright (c) <2013-2014> \n#\n#Permission is hereby granted, free of charge, to any person obtaining a copy\n#of this software and associated documentation files (the \"Software\"), to deal\n#in the Software without restriction, including without limitation the rights\n#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n#copies of the Software, and to permit persons to whom the Software is\n#furnished to do so, subject to the following conditions:\n#\n#The above copyright notice and this permission notice shall be included in\n#all copies or substantial portions of the Software.\n#\n#THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n#THE SOFTWARE.\n#\n\"\"\"\nThis module contains the pyFlakes checker mode\n\"\"\"\nimport _ast\nfrom pyqode.core import CheckerMode, CheckerMessage, logger\nfrom pyqode.core import MSG_STATUS_ERROR, MSG_STATUS_WARNING\nfrom pyqode.core import CHECK_TRIGGER_TXT_SAVED\n\n\ndef pyflakesAnalysisProcess(q, codeString, filename, fileEncoding):\n \"\"\"\n Function executed in background process to run the pyflakes on the\n codeString.\n \"\"\"\n msgs = []\n # First, compile into an AST and handle syntax errors.\n if not codeString or not fileEncoding:\n return\n try:\n tree = compile(codeString.encode(fileEncoding),\n filename, \"exec\", _ast.PyCF_ONLY_AST)\n except SyntaxError as value:\n msg = value.args[0]\n (lineno, offset, text) = value.lineno, value.offset, value.text\n # If there's an encoding problem with the file, the text is None\n if text is None:\n # Avoid using msg, since for the only known case, it\n # contains a bogus message that claims the encoding the\n # file declared was unknown.s\n logger.warning(\"%s: problem decoding source\" % filename)\n else:\n msgs.append(\n CheckerMessage(msg, MSG_STATUS_ERROR, lineno))\n else:\n # Okay, it's syntactically valid. Now check it.\n from pyflakes import checker, messages\n msg_types = {messages.UnusedImport: MSG_STATUS_WARNING,\n messages.RedefinedWhileUnused: MSG_STATUS_WARNING,\n messages.RedefinedInListComp: MSG_STATUS_WARNING,\n messages.ImportShadowedByLoopVar: MSG_STATUS_WARNING,\n messages.ImportStarUsed: MSG_STATUS_WARNING,\n messages.UndefinedName: MSG_STATUS_ERROR,\n messages.DoctestSyntaxError: MSG_STATUS_ERROR,\n messages.UndefinedExport: MSG_STATUS_ERROR,\n messages.UndefinedLocal: MSG_STATUS_ERROR,\n messages.DuplicateArgument: MSG_STATUS_WARNING,\n messages.Redefined: MSG_STATUS_WARNING,\n messages.LateFutureImport: MSG_STATUS_WARNING,\n messages.UnusedVariable: MSG_STATUS_WARNING}\n w = checker.Checker(tree, filename)\n w.messages.sort(key=lambda msg: msg.lineno)\n for warning in w.messages:\n msg = warning.message % warning.message_args\n line = warning.lineno\n status = msg_types[type(warning)]\n msgs.append(CheckerMessage(msg, status, line))\n q.put(msgs)\n\n\nclass PyFlakesCheckerMode(CheckerMode):\n \"\"\"\n This checker mode runs pyflakes on the fly to check your python syntax.\n \"\"\"\n DESCRIPTION = \"Check python code using pyFlakes\"\n IDENTIFIER = \"pyFlakesCheckerMode\"\n\n def __init__(self):\n CheckerMode.__init__(self, pyflakesAnalysisProcess,\n delay=1200,\n clearOnRequest=False)\n\n def _onInstall(self, editor):\n \"\"\"\n Checks for pyflakes support. 
Auto disable if pyflakes could not be\n imported\n \"\"\"\n CheckerMode._onInstall(self, editor)\n try:\n import pyflakes\n except ImportError:\n logger.warning(\"Cannot import PyFlakes, PyFlakesCheckerMode \"\n \"disabled\")\n self.enabled = False\n","sub_path":"pyqode/python/modes/pyflakes_checker.py","file_name":"pyflakes_checker.py","file_ext":"py","file_size_in_byte":4578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"236830983","text":"from automator import BlackJackGameAutomator\n\n\ndef main():\n num_hands = 5\n automator = BlackJackGameAutomator(num_hands)\n automator.initialize_deck()\n success = automator.deal_initial()\n if not success:\n raise Exception('Error, Out of cards.')\n else:\n print('-- Initial --')\n automator.print_hands_and_score()\n black_jacks = automator.get_black_jacks()\n if black_jacks:\n print('Blackjack at ')\n for idx in black_jacks:\n print(idx, ', ', end='')\n print()\n else:\n success = automator.play_all_hands()\n if not success:\n raise Exception('Error, Out of cards.')\n else:\n print('\\n-- Completed Game --')\n automator.print_hands_and_score()\n winners = automator.get_winners()\n if winners:\n print('Winners: ', end='')\n for idx in winners:\n print('{}, '.format(idx), end='')\n print()\n else:\n print('Draw. All players have busted.')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"OOD/BlackJack/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"274028391","text":"from collections import Counter\n\nfrom django.shortcuts import render_to_response\n\ncounter_show = Counter()\ncounter_click = Counter()\n\n\ndef index(request):\n landing_name = request.GET.get('from-landing')\n counter_click[landing_name] += 1\n\n return render_to_response('index.html')\n\ndef landing(request):\n dict_renders = {'original': 'landing.html',\n 'test': 'landing_alternate.html'}\n ab_argument = request.GET.get('ab-test-arg')\n counter_show[ab_argument] += 1\n\n return render_to_response(dict_renders.get(ab_argument, ''))\n\n\ndef stats(request):\n test_conversion = counter_click['test'] / counter_show['test'] if counter_show['test'] else 0\n original_conversion = counter_click ['original'] / counter_show['original'] if counter_show['original'] else 0\n\n return render_to_response('stats.html', context={\n 'test_conversion': test_conversion,\n 'original_conversion': original_conversion,\n })\n","sub_path":"request-handling/landing/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"84230550","text":"from mininet.net import Mininet\nfrom lib.router import Router\nfrom lib.switch import Switch\nfrom lib.host import Host\nfrom lib.device import Device\nfrom mininet.cli import CLI\nfrom mininet.log import setLogLevel, info\nimport sys\ndef main():\n net = Mininet()\n a1 = Router('1', [{'interface': 'eth0', 'ip': '10.1.1.1', 'netmask': '255.255.255.0', 'ext':True}, {'interface': 'eth1', 'ip': '12.1.1.1', 'netmask': '255.255.255.0'}], net, '122.1.1.4')\n a2 = Router('2', [{'interface': 'eth0', 'ip': '10.1.1.2', 'netmask': '255.255.255.0', 'ext':True}, {'interface': 'eth1', 'ip': '13.1.1.1', 'netmask': '255.255.255.0'}], net, '122.1.2.4')\n s1 = Switch('1', [{'interface': 'eth0', 'ip': '0.0.0.0', 'netmask': '0.0.0.0', 'netw': '12.1.1.0'}, {'interface': 'eth1', 
'ip': '0.0.0.0', 'netmask': '255.255.255.0'}], net, '122.1.1.5')\n s2 = Switch('2', [{'interface': 'eth0', 'ip': '0.0.0.0', 'netmask': '0.0.0.0', 'netw': '13.1.1.0'}, {'interface': 'eth1', 'ip': '0.0.0.0', 'netmask': '255.255.255.0'}], net, '122.1.2.5')\n h1 = Host('1', [{'interface': 'eth0', 'ip': '12.1.1.200', 'netmask': '255.255.255.0'}], net, '122.1.1.200')\n h2 = Host('2', [{'interface': 'eth0', 'ip': '13.1.1.200', 'netmask': '255.255.255.0'}], net, '122.1.2.200')\n a1.connect_devices(a1, a2, net, a1.ext_int, a2.ext_int)\n s1.connect_devices(s1, a1, net, s1.interfacelist[0]['eth0'],a1.interfacelist[0]['eth1'])\n s2.connect_devices(s2, a2, net, s2.interfacelist[0]['eth0'],a2.interfacelist[0]['eth1'])\n h1.connect_devices(h1, s1, net, h1.interfacelist[0]['eth0'],s1.interfacelist[0]['eth1'])\n h2.connect_devices(h2, s2, net, h2.interfacelist[0]['eth0'],s2.interfacelist[0]['eth1'])\n net.build()\n a1.initialise()\n a1.set_up_default_ext_route(a2.interfaces[0]['ip'])\n a2.initialise()\n a2.set_up_default_ext_route(a1.interfaces[0]['ip'])\n h1.initialise()\n h2.initialise()\n h1.set_up_route('ip r a default via '+a1.interfaces[1]['ip'])\n h2.set_up_route('ip r a default via '+a2.interfaces[1]['ip'])\n s1.initialise()\n s2.initialise()\n net.start()\n CLI(net)\n net.stop()\n\n\n\nif __name__ == '__main__':\n setLogLevel('debug')\n main()\n","sub_path":"Basic/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"353671681","text":"import cv2 as cv\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nimport matplotlib.pyplot as plt\nimport win32com.client as comclt\nfrom win32gui import GetWindowText, GetForegroundWindow\nimport pyautogui\n\n\nshell = comclt.Dispatch(\"WScript.Shell\")\n\ncap = cv.VideoCapture(0)\nbg = cv.createBackgroundSubtractorMOG2(100, 20)\ndetect = False\nright_sided = True\ncount = 264\ncnt = 0\npre = 1\ndelay = -1\n\nfeature = []\ntarget = []\n\nfor i in range(count):\n feature.append(cv.imread('hand_five' + str(i) + '.jpg',cv.IMREAD_GRAYSCALE))\n target.append(0)\n# for i in range(count):\n# feature.append(cv.imread('hand_thumbs_up' + str(i) + '.jpg',cv.IMREAD_GRAYSCALE))\n# target.append(1)\nfor i in range(count):\n feature.append(cv.imread('hand_nothing' + str(i) + '.jpg',cv.IMREAD_GRAYSCALE))\n target.append(1)\nh,w = feature[0].shape\n\nfeature = np.asarray(feature)\nprint(feature.shape)\ntarget = np.asarray(target)\nfeature = feature / 255\n\nmodel = keras.Sequential([\n keras.layers.Flatten(input_shape=(h,w)),\n keras.layers.Dense(16, activation=tf.nn.relu),\n keras.layers.Dense(16, activation=tf.nn.relu),\n keras.layers.Dense(2, activation=tf.nn.softmax)\n])\n\nmodel.compile(optimizer=tf.train.AdamOptimizer(), \n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\nmodel.fit(feature, target, epochs=2)\n\nwhile(True):\n _, frame = cap.read()\n height, width, _ = frame.shape\n lr = -0.001\n fgmask = bg.apply(frame, learningRate=lr)\n kernel = np.ones((4, 4), np.uint8)\n ret, fgmask = cv.threshold(fgmask, 0, 255, cv.THRESH_BINARY)\n fgmask = cv.erode(fgmask, kernel, iterations=1)\n fgmask = cv.dilate(fgmask, kernel, iterations=1)\n fgmask = cv.erode(fgmask, kernel, iterations=1)\n fgmask = cv.dilate(fgmask, kernel, iterations=3)\n fgmask = cv.erode(fgmask, kernel, iterations=1)\n if right_sided:\n cropped_fgmask = fgmask[0:height // 4 * 3, 0:width // 5 * 2]\n cropped_frame = frame[0:height // 4 * 3, 0:width 
// 5 * 2]\n else:\n cropped_fgmask = fgmask[0:height // 4 * 3, width // 5 * 2:width]\n cropped_frame = frame[0:height // 4 * 3, width // 5 * 2:width]\n _, contours, hierarchy = cv.findContours(\n cropped_fgmask, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)\n black = np.zeros((height // 4 * 3, width // 5 * 2, 3))\n cv.drawContours(black, contours, -1, (255, 255, 255), 3)\n cv.imshow('frame', black)\n \n black = np.uint8(black)\n black=cv.cvtColor(black,code=cv.COLOR_RGB2GRAY)\n predictions = model.predict(np.asarray([black]))\n cur = np.argmax(predictions[0])\n if delay == -1:\n if pre == 0:\n if cur == 0:\n count = count + 1\n if count > 30:\n if GetWindowText(GetForegroundWindow()) == \"Killing Floor 2 (64-bit, DX11) v1070\":\n shell.SendKeys(\"u\")\n if GetWindowText(GetForegroundWindow()) == \"BattleBlock Theater\":\n pyautogui.keyDown('ctrlleft')\n count = 0\n delay = 0\n else:\n if count > 30:\n if GetWindowText(GetForegroundWindow()) == \"Killing Floor 2 (64-bit, DX11) v1070\":\n shell.SendKeys(\"u\")\n if GetWindowText(GetForegroundWindow()) == \"BattleBlock Theater\":\n pyautogui.keyDown('ctrlleft')\n count = 0\n delay = 0\n elif count > 7:\n if GetWindowText(GetForegroundWindow()) == \"Killing Floor 2 (64-bit, DX11) v1070\":\n shell.SendKeys(\"i\")\n if GetWindowText(GetForegroundWindow()) == \"BattleBlock Theater\":\n pyautogui.keyDown('e')\n count = 0\n delay = 0\n else:\n if cur == 0:\n count = count + 1\n else:\n delay = delay + 1\n if delay == 20:\n pyautogui.keyUp('e')\n if delay == 45:\n pyautogui.keyUp('ctrlleft')\n #pyautogui.keyUp('e')\n delay = -1\n pre = cur\n k = cv.waitKey(1)\n if k == 27:\n break\n\ncap.release()\ncv.destroyAllWindows()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"346951342","text":"# CONFIG\n# ---------\ntoken = \"\" # To find this, it's harder than it used to be. 
Please Google the process.\nprefix = \"~\" # This will be used at the start of commands.\n# ----------\n\n# Import the needed libs.\nfrom discord.ext import commands\nimport discord\nimport random\n\nprint(\"Loading..\")\n\n# Declare the bot, pass it a prefix and let it know to only listen to itself.\nintents = discord.Intents().all()\n\nbot = commands.Bot(command_prefix=prefix, self_bot=True, intents=intents)\nbot.remove_command(\"help\")\n\n\n@bot.event\nasync def on_ready():\n    print(\"Ready to be innocent.\")\n\n\n@bot.command()\nasync def kall(ctx):\n    \"\"\"\n    Kicks every member in a server\n    \"\"\"\n    await ctx.message.delete()\n    for user in list(ctx.guild.members):\n        try:\n            await ctx.guild.kick(user)\n            print(f\"{user.name} has been kicked from {ctx.guild.name}\")\n\n        except:\n            print(f\"{user.name} has FAILED to be kicked from {ctx.guild.name}\")\n\n    print(\"Action Completed: kall\")\n\n\n@bot.command()\nasync def ball(ctx):\n    \"\"\"\n    Bans every member in a server\n    \"\"\"\n    await ctx.message.delete()\n    for user in list(ctx.guild.members):\n        try:\n            await ctx.guild.ban(user)\n            print(f\"{user.name} has been banned from {ctx.guild.name}\")\n\n        except:\n            print(f\"{user.name} has FAILED to be banned from {ctx.guild.name}\")\n\n    print(\"Action Completed: ball\")\n\n\n@bot.command()\nasync def rall(ctx, rename_to):\n    \"\"\"\n    Renames every member in a server\n    \"\"\"\n    await ctx.message.delete()\n    for user in ctx.guild.members:\n        try:\n            await user.edit(nick=rename_to)\n            print(f\"{user.name} has been renamed to {rename_to} in {ctx.guild.name}\")\n\n        except:\n            print(f\"{user.name} has NOT been renamed to {rename_to} in {ctx.guild.name}\")\n\n    print(\"Action Completed: rall\")\n\n\n@bot.command()\nasync def mall(ctx, *, message):\n    \"\"\"\n    Messages every member in a server\n    \"\"\"\n    await ctx.message.delete()\n    for user in ctx.guild.members:\n        try:\n            await user.send(message)\n            print(f\"{user.name} has received the message.\")\n\n        except:\n            print(f\"{user.name} has NOT received the message.\")\n\n    print(\"Action Completed: mall\")\n\n\n@bot.command()\nasync def dall(ctx, *conditions):\n    \"\"\"\n    Can perform multiple actions that involve mass deleting\n    \"\"\"\n    conditions = [condition.lower() for condition in conditions]\n    if \"channels\" in conditions:\n        for channel in list(ctx.guild.channels):\n            try:\n                await channel.delete()\n                print(f\"{channel.name} has been deleted in {ctx.guild.name}\")\n\n            except:\n                print(f\"{channel.name} has NOT been deleted in {ctx.guild.name}\")\n\n        print(\"Action Completed: dall channels\")\n\n    elif \"roles\" in conditions:\n        for role in list(ctx.guild.roles):\n            try:\n                await role.delete()\n                print(f\"{role.name} has been deleted in {ctx.guild.name}\")\n\n            except:\n                print(f\"{role.name} has NOT been deleted in {ctx.guild.name}\")\n\n        print(\"Action Completed: dall roles\")\n\n    elif \"emojis\" in conditions:\n        for emoji in list(ctx.guild.emojis):\n            try:\n                await emoji.delete()\n                print(f\"{emoji.name} has been deleted in {ctx.guild.name}\")\n\n            except:\n                print(f\"{emoji.name} has NOT been deleted in {ctx.guild.name}\")\n\n        print(\"Action Completed: dall emojis\")\n\n    elif \"all\" in conditions:\n        for channel in list(ctx.guild.channels):\n            try:\n                await channel.delete()\n                print(f\"{channel.name} has been deleted in {ctx.guild.name}\")\n\n            except:\n                print(f\"{channel.name} has NOT been deleted in {ctx.guild.name}\")\n\n        for role in list(ctx.guild.roles):\n            try:\n                await role.delete()\n                print(f\"{role.name} has been deleted in {ctx.guild.name}\")\n\n            except:\n                
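# deleting a role can fail here too (e.g. @everyone or roles ranked above the bot), so the failure is just logged\n                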
print(f\"{role.name} has NOT been deleted in {ctx.guild.name}\")\n\n for emoji in list(ctx.guild.emojis):\n try:\n await emoji.delete()\n print(f\"{emoji.name} has been deleted in {ctx.guild.name}\")\n\n except:\n print(f\"{emoji.name} has NOT been deleted in {ctx.guild.name}\")\n\n print(\"Action Completed: dall all\")\n\n\n@bot.command()\nasync def destroy(ctx):\n \"\"\"\n Outright destroys a server\n \"\"\"\n await ctx.message.delete()\n for emoji in list(ctx.guild.emojis):\n try:\n await emoji.delete()\n print(f\"{emoji.name} has been deleted in {ctx.guild.name}\")\n\n except:\n print(f\"{emoji.name} has NOT been deleted in {ctx.guild.name}\")\n\n for channel in list(ctx.guild.channels):\n try:\n await channel.delete()\n print(f\"{channel.name} has been deleted in {ctx.guild.name}\")\n\n except:\n print(f\"{channel.name} has NOT been deleted in {ctx.guild.name}\")\n\n for role in list(ctx.guild.roles):\n try:\n await role.delete()\n print(f\"{role.name} has been deleted in {ctx.guild.name}\")\n\n except:\n print(f\"{role.name} has NOT been deleted in {ctx.guild.name}\")\n\n for user in list(ctx.guild.members):\n try:\n await ctx.guild.ban(user)\n print(f\"{user.name} has been banned from {ctx.guild.name}\")\n\n except:\n print(f\"{user.name} has FAILED to be banned from {ctx.guild.name}\")\n\n print(\"Action Completed: destroy\")\n\n# == NEW COMMANDS == #\n@bot.command()\nasync def populate(ctx, name, amount=10):\n await ctx.message.delete()\n \"\"\"Create some channels...\"\"\"\n for r in range(amount):\n await ctx.message.guild.create_text_channel(name)\n print(\"Action Completed: populate\")\n\n@bot.command()\nasync def name(ctx, name):\n await ctx.message.delete()\n await ctx.guild.edit(name=name)\n print(\"Action Completed: name\")\n\n@bot.command()\nasync def spam(ctx, name, message, amount=10):\n await ctx.message.delete()\n for r in range(amount):\n channel = await ctx.message.guild.create_text_channel(name)\n await channel.send(message)\n print(\"Action Completed: spam\")\n\n\n\nbot.run(token, bot=False) # Starts the bot by passing it a token and telling it it isn't really a bot. \n","sub_path":"innocent.py","file_name":"innocent.py","file_ext":"py","file_size_in_byte":6422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"585007417","text":"# ------------------------------------------------------------\n# Copyright (c) 2017-present, SeetaTech, Co.,Ltd.\n#\n# Licensed under the BSD 2-Clause License.\n# You should have received a copy of the BSD 2-Clause License\n# along with the software. 
If not, See,\n#\n# \n#\n# ------------------------------------------------------------\n\n\ndef ToFillerArgs(FillerParamerer):\n kwargs = \\\n {'value' : FillerParamerer.value,\n 'low': FillerParamerer.min,\n 'high': FillerParamerer.max,\n 'mean': FillerParamerer.mean,\n 'std': FillerParamerer.std}\n return kwargs\n\n","sub_path":"Dragon/python/dragon/vm/caffe/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"438302643","text":"from RPi.GPIO import setup, setmode, BOARD, OUT, HIGH, LOW, output, cleanup\nfrom time import sleep\nimport math\n\n###################\n# This class encapsulates the logic for the pulse width modulation\n# required to move the motors.\n# See car.py for an example of how the functions are used\n\n\nclass Motors:\n\n # Set up the Pi's pins to command the motors\n def __init__(self):\n\n # Time duration of pulse \n self.pulseWidth = 1.7 / 1000000\n\n # Time in between pulses\n self.pauseMiliseconds = (20-1.7) / 1000000\n\n # Motor GPIO numbers (BOARD mode)\n self.leftMotor = 16\n self.rightMotor = 36\n\n # Set the way the GPIO pins on the Pi are indexed\n setmode(BOARD)\n\n # Set up the pins\n setup(self.leftMotor, OUT)\n setup(self.rightMotor, OUT)\n\n\n # Sends a single pulse to both left and right motors \n def straight(self):\n \n # Send pulse to motors\n output(self.leftMotor, HIGH)\n output(self.rightMotor, HIGH)\n sleep(self.pulseWidth) \n output(self.leftMotor, LOW)\n output(self.rightMotor, LOW)\n\n # Sleep in between pulses\n sleep(self.pauseMiliseconds)\n \n\n\n # Sends a single pulse to the right motor, turning the car left\n def turnRight(self):\n\n # Send pulse to one motor\n output(self.leftMotor, HIGH)\n sleep(self.pulseWidth)\n output(self.leftMotor, LOW)\n \n # Sleep in between pulses\n sleep(self.pauseMiliseconds) \n \n\n # Sends a single pulse to the left motor, turning the car right\n def turnLeft(self):\n\n # Send pulse to one motor\n output(self.rightMotor, HIGH)\n sleep(self.pulseWidth)\n output(self.rightMotor, LOW)\n\n # Sleep in between pulses\n sleep(self.pauseMiliseconds)\n \n\n # Clean up the Pi's GPIO pins\n def cleanUp(self):\n cleanup()\n","sub_path":"motor.py","file_name":"motor.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"378560736","text":"'''\nInput: a List of integers\nReturns: a List of integers\n'''\ndef product_of_all_other_numbers(arr): # O(n)\n # products = create an array that will hold the products\n products = []\n # set total to the value of the first element of the array\n total = arr[0]\n\n # multiply the total by the other values in the input array\n for value in arr[1:]:\n total = total * value\n \n # for each value in the original array\n for value in arr:\n # divide total by the value\n # append product to products\n products.append(total/value)\n \n return products\n\n\nif __name__ == '__main__':\n # Use the main function to test your implementation\n arr = [1, 2, 3, 4, 5]\n # arr = [2, 6, 9, 8, 2, 2, 9, 10, 7, 4, 7, 1, 9, 5, 9, 1, 8, 1, 8, 6, 2, 6, 4, 8, 9, 5, 4, 9, 10, 3, 9, 1, 9, 2, 6, 8, 5, 5, 4, 7, 7, 5, 8, 1, 6, 5, 1, 7, 7, 8]\n\n print(f\"Output of product_of_all_other_numbers: 
{product_of_all_other_numbers(arr)}\")\n","sub_path":"product_of_all_other_numbers/product_of_all_other_numbers.py","file_name":"product_of_all_other_numbers.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"271203069","text":"import discord\r\nimport socket\r\nimport threading\r\nimport json\r\nimport time\r\nimport requests\r\n\r\nimport subprocess\r\nimport os\r\n\r\nimport configparser\r\nimport codecs\r\n\r\nimport webview\r\nfrom urllib.parse import urlparse\r\nfrom urllib.parse import parse_qs\r\n\r\nimport traceback\r\nimport math\r\n\r\n# https://discordapp.com/developers/applications/645192983490330657/oauth\r\n# https://discordapp.com/api/oauth2/authorize?client_id=645192983490330657&permissions=0&scope=bot\r\n# https://developers.facebook.com/docs/graph-api/reference/user/live_videos/?locale=zh_TW\r\n# todo: 寫 Error Log 檔\r\n\r\n# ======================================================\r\n# 初始化設定\r\n# ======================================================\r\nconfig = configparser.ConfigParser()\r\nconfig.read_file(codecs.open('conf.ini', 'r', 'UTF-8'))\r\n# 初始 FB 登入用的睡眠設定\r\nsleep_time = int(config['process_variables']['sleep_time'])\r\n# 會常常用到的常數設定\r\ndiscord_channel_id = config['env']['discord_channel_id']\r\ndiscord_author_id = config['env']['discord_author_id']\r\n# 啟動設定\r\nis_merge_twitch = int(config['process_variables']['is_merge_twitch'])\r\nis_merge_facebook = int(config['process_variables']['is_merge_facebook'])\r\nis_merge_discord = int(config['process_variables']['is_merge_discord'])\r\nis_write_log = int(config['process_variables']['is_write_log'])\r\nis_remove_old_session_file = int(config['process_variables']['is_remove_old_session_file'])\r\n# 常數設定\r\nmessage_from_discord = 0\r\nmessage_from_twitch = 1\r\nmessage_from_FB = 2\r\n\r\nhostname = socket.gethostname()\r\naddress = socket.gethostbyname(hostname)\r\naddress = '127.0.0.1'\r\nport = int(config['process_variables']['port'])\r\nprint('IP: ' + str(address))\r\n\r\nif is_remove_old_session_file == 1 and os.path.isfile('./tmp/sess_b9jc4kekq6idnvsjdo7sf57t3f'):\r\n os.remove('./tmp/sess_b9jc4kekq6idnvsjdo7sf57t3f')\r\n\r\nproc = subprocess.Popen('php -S {}:{}'.format(address, port), bufsize=0)\r\n# proc.kill()\r\n# proc.communicate()\r\n\r\npost_data = {\"op_code\": \"add_message\"}\r\napi_url = 'http://{}:{}/'.format(address, port)\r\nrequests_object = requests.Session()\r\n\r\nlock = threading.Lock()\r\ndef write_message (nick_name, message, source):\r\n message = message.replace(\"'\", '\"')\r\n lock.acquire ()\r\n post_data['data'] = [nick_name, message, source]\r\n post_data['data'] = json.dumps(post_data['data'])\r\n req = requests_object.post(api_url, data=post_data)\r\n post_data['data'] = None\r\n if req.status_code != 200:\r\n print ('post fail')\r\n # print (req.text)\r\n lock.release ()\r\n\r\ndef print_log(message):\r\n if is_write_log == 0:\r\n return\r\n print(message)\r\n\r\n# ======================================================\r\n# Discord 擷取聊天室訊息\r\n# ======================================================\r\n# 由 discord 提供 module 產生 class 並修改監聽事件\r\nbot_client = discord.Client()\r\n# 當成功登入時 印出準備完成,可供測試\r\n@bot_client.event\r\nasync def on_ready ():\r\n print ('Discord Bot 已登入 開始擷取聊天室訊息 可輸入訊息測試')\r\n # channel = bot_client.get_channel (645193836062310413)\r\n # print (channel)\r\n\r\n# 收到訊息時,確認訊息來源頻道\r\n@bot_client.event\r\nasync def on_message (message):\r\n # 不是目標來源的訊息略過\r\n if 
message.channel.id != int(discord_channel_id):\r\n print_log('訊息來源頻道ID: {} 與預期頻道ID: {} 不同'.format(message.channel.id, discord_channel_id))\r\n return\r\n \r\n # 確認訊息\r\n text = message.content\r\n # 訊息若為關閉 Bot 且來源為指定使用者\r\n if text == '!clean_token' and message.author.id == discord_author_id:\r\n clean_data = {'data':'', \"op_code\": 'clean_session'}\r\n req = requests_object.post(api_url, data=clean_data)\r\n if req.status_code != 200:\r\n print_log ('請求清除聊天室失敗')\r\n print_log('請求清除聊天室訊息, 收到回復')\r\n print_log (req.text)\r\n return\r\n if text == '!close_bot' and message.author.id == discord_author_id:\r\n print_log ('接收到關閉指令,關閉 Bot')\r\n await bot_client.close()\r\n write_message (message.author.name, text, message_from_discord)\r\n print_log ('{}: {}'.format (message.author.name, text))\r\n\r\n# ======================================================\r\n# Twitch 擷取聊天室訊息\r\n# ======================================================\r\n\r\ndef start_twitch_bot ():\r\n server = 'irc.chat.twitch.tv'\r\n port = 6667\r\n nickname = config['env']['twitch_nickname']\r\n token = config['env']['twitch_token']\r\n channel = '#{}'.format(config['env']['connect_twitch_channel'])\r\n\r\n sock = socket.socket()\r\n sock.connect((server, port))\r\n sock.send(f\"PASS {token}\\r\\n\".encode('utf-8'))\r\n sock.send(f\"NICK {nickname}\\r\\n\".encode('utf-8'))\r\n sock.send(f\"JOIN {channel}\\r\\n\".encode('utf-8'))\r\n sock.send(f\"CAP REQ :twitch.tv/tags\\r\\n\".encode('utf-8'))\r\n\r\n send_message_title = f'PRIVMSG {channel} :'\r\n message_title_len = len(send_message_title)\r\n find_nickname_str = ';display-name='\r\n find_nickname_str_len = len(find_nickname_str)\r\n\r\n find_account_str = 'user-type= :'\r\n find_account_str_len = len(find_account_str)\r\n \r\n message_end_str = \"\\r\\n\"\r\n try:\r\n while True:\r\n display_name_start_index = -1\r\n display_name_end_index = -1\r\n resp = sock.recv(2048).decode('utf-8')\r\n print_log('收到 Twitch 訊息')\r\n print_log(resp)\r\n # 是心跳包就回復\r\n if resp.startswith('PING'):\r\n # sock.send(\"PONG :tmi.twitch.tv\\n\".encode('utf-8'))\r\n sock.send(\"PONG\\n\".encode('utf-8'))\r\n # 長度奇怪,忽略\r\n elif len(resp) <= 0:\r\n print_log ('訊息內容長度有問題 忽略本次訊息')\r\n retry_times = 1\r\n print('等候 {} 秒後重試'.format(math.pow(2, retry_times)))\r\n time.sleep(math.pow(2, retry_times))\r\n continue\r\n # \r\n message = resp.find (send_message_title)\r\n if message == -1:\r\n print_log ('訊息中未發現 目標頻道名稱 忽略本次訊息')\r\n retry_times = 1\r\n print('等候 {} 秒後重試'.format(math.pow(2, retry_times)))\r\n time.sleep(math.pow(2, retry_times))\r\n continue\r\n display_name_start_index = resp.find (find_nickname_str) + find_nickname_str_len\r\n display_name_end_index = resp.find (';', display_name_start_index)\r\n display_name = resp[display_name_start_index:display_name_end_index]\r\n if len (display_name) <= 0:\r\n display_name = resp[find_account_str_len+find_account_str_len:resp.find ('!')]\r\n end_index = resp.find (message_end_str)\r\n message = resp[message+message_title_len:end_index]\r\n write_message (display_name, message, message_from_twitch)\r\n print_log ('{}: {}'.format (display_name, message))\r\n\r\n except KeyboardInterrupt:\r\n sock.close()\r\n exit()\r\n\r\napp_id = config['env']['app_id']\r\nredirect_uri = config['env']['redirect_uri']\r\nstate_param = config['env']['state_param']\r\nstate_param = '{st=state123abc,ds=123456789}'\r\nfb_webview = None\r\n\r\ndef start_parser_facebook_chat(fb_webview):\r\n time.sleep(int(config['process_variables']['sleep_time']))\r\n fb_url = 
fb_webview.get_current_url()\r\n fb_webview.destroy()\r\n print_log('獲得 FB 登入的 URL')\r\n print_log(fb_url)\r\n fb_url = urlparse(fb_url)\r\n fb_url = parse_qs(fb_url.fragment)\r\n print_log('解析後呈現')\r\n print_log(fb_url)\r\n access_token = fb_url['access_token'].pop()\r\n print_log('權杖Token (Access_token)')\r\n print_log(access_token)\r\n req = requests.get(f'https://graph.facebook.com/v6.0/me?fields=id,name&access_token={access_token}')\r\n if req.status_code > 200:\r\n print_log ('請求 FB 個人 ID 失敗')\r\n print_log(req)\r\n print_log(req.text)\r\n print('請求個人 ID 失敗 FB 擷取失敗')\r\n return\r\n is_facebook_page = int(config['env']['is_facebook_page'])\r\n me = json.loads(req.text)\r\n user_id = me['id']\r\n if is_facebook_page == 1:\r\n print_log ('請求粉專 ID 中')\r\n req = requests.get(f'https://graph.facebook.com/v6.0/{user_id}/accounts?access_token={access_token}')\r\n print_log('Test')\r\n if req.status_code > 200:\r\n print_log ('請求 FB 粉專 ID 失敗')\r\n print_log(req)\r\n print_log(req.text)\r\n print('請求粉專 ID 失敗 FB 擷取失敗')\r\n return\r\n print_log(req.text)\r\n me = json.loads(req.text)\r\n user_id = me['data'].pop(0)['id']\r\n retry_times = 0\r\n while True:\r\n req = requests.get(f'https://graph.facebook.com/v6.0/{user_id}/live_videos?access_token={access_token}')\r\n if req.status_code > 200:\r\n print_log ('請求 FB 粉專 ID 失敗')\r\n print_log(req)\r\n print_log(req.text)\r\n print('請求影片內容失敗 FB 擷取失敗')\r\n return\r\n print_log('獲得影片內容')\r\n print_log(req.text)\r\n live_video_node = json.loads(req.text)\r\n live_video_id = None\r\n # 這段程式碼待驗證\r\n for item in live_video_node['data']:\r\n if item['status'] == 'LIVE':\r\n live_video_id = item['id']\r\n break\r\n print_log ('找尋到的影片 ID')\r\n print_log(live_video_id)\r\n if live_video_id != None:\r\n break\r\n retry_times = retry_times + 1\r\n print('等候 {} 秒後重試'.format(math.pow(2, retry_times)))\r\n time.sleep(math.pow(2, retry_times))\r\n print('未找到正在實況中的影片內容,擷取失敗')\r\n \r\n req = requests.get(f'https://streaming-graph.facebook.com/{live_video_id}/live_comments?access_token={access_token}&fields=from,message', stream=True)\r\n print('開始擷取 FB 聊天室 可輸入訊息測試')\r\n print_log('開始嘗試擷取 FB 聊天室')\r\n for line in req.iter_lines():\r\n line = line.decode('utf-8')\r\n line = line[6:]\r\n if line:\r\n line = json.loads(line)\r\n write_message(line['from']['name'], line['message'], message_from_FB)\r\n print_log ('{}: {}'.format (line['from']['name'], line['message']))\r\n else:\r\n print_log('FB ping')\r\n \r\n\r\n# ======================================================\r\n# 整合處\r\n# ======================================================\r\nif is_merge_discord == 1:\r\n discord_bot = threading.Thread(target = bot_client.run, args = (config['env']['discord_bot_token'],) )\r\n discord_bot.start ()\r\nif is_merge_twitch == 1:\r\n twitch_bot = threading.Thread(target = start_twitch_bot, args = ())\r\n twitch_bot.start ()\r\nif is_merge_facebook == 1:\r\n login_url = f'https://www.facebook.com/v6.0/dialog/oauth?client_id={app_id}&redirect_uri={redirect_uri}&state={state_param}&response_type=token,granted_scopes'\r\n fb_webview = webview.create_window('Webview', login_url)\r\n facebook_chat_parser = threading.Thread(target = start_parser_facebook_chat, args = (fb_webview,))\r\n facebook_chat_parser.start ()\r\n webview.start()\r\n\r\n","sub_path":"LiveChatMerge.py","file_name":"LiveChatMerge.py","file_ext":"py","file_size_in_byte":11429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"102507792","text":"from 
django.conf.urls import url\nfrom django.urls import path, re_path\nfrom django.conf import settings\nfrom . import views\nfrom django.views.static import serve\n\nurlpatterns = [\n\n    path('', views.homeUI,name=\"homeUI\"),\n    path('uploadtxt/', views.uploadtxt, name=\"uploadtxt\"),\n    path('uploadtxt1/', views.uploadtxt1, name=\"uploadtxt1\"),\n    path('uploadtxt0/', views.uploadtxt0, name=\"uploadtxt0\"),\n    path('uploadtxt2/', views.uploadtxt2, name=\"uploadtxt2\"),\n    path('uploadtxt3/', views.uploadtxt3, name=\"uploadtxt3\"),\n    path('picshow/', views.picshow, name='picshow'),\n    path('aboutus/', views.aboutus,name=\"aboutus\"),\n    path('fun2/', views.fun2,name=\"fun2\"),\n    path('fun3/', views.fun3,name=\"fun3\"),\n    path('fun0/', views.fun0,name=\"fun0\"),\n    path('pagenumerror/', views.pagenumerror,name=\"pagenumerror\"),\n    path('finish/', views.finish,name=\"finish\"),\n    re_path('media/(?P<path>.*)$', serve, {'document_root': r'E:\\stitch\\static\\media'}),\n    # This means an image href such as “http://127.0.0.1:8888/media/图片存储文件夹/字母哥.jpg” is served from the local directory D:\\workspace\\upload_pic\\media\n]\n","sub_path":"yf/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"173352167","text":"from django.test import TestCase, RequestFactory\nfrom facecrop.views import index\nclass FaceCropTest(TestCase):\n    def setUp(self):\n        self.factory = RequestFactory()\n    def testViews(self):\n        with open('/home/kien/Downloads/birthday2.jpg', 'rb') as img:\n            result = self.factory.post('/facecrop/',{'img':img})\n        resp = index(result)\n        print(resp)\n","sub_path":"facecrop/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"578651869","text":"\n\nimport pika\nimport sys\n\nusername = \"shiwei\"\npwd = 'shiwei666666'\nuser_pwd = pika.PlainCredentials(username, pwd)\n\n# Create the connection\nconn = pika.BlockingConnection(pika.ConnectionParameters(\"localhost\", credentials=user_pwd))\n\n# Create a channel on the connection\nchannel = conn.channel()\n\nchannel.exchange_declare(exchange=\"direct_logs\", exchange_type=\"direct\")  # the parameter name changed; it used to be type\n\nresult = channel.queue_declare(exclusive=True,  # create a random queue; when the consumer disconnects from rabbitmq, the queue is deleted automatically.\n                               queue='',)\nqueue_name = result.method.queue  # the name assigned to the random queue.\n\nseverities = sys.argv[1:]\nif not severities:\n    sys.stderr.write(\"Usage: %s [info] [warning] [error]\\\\n\" % sys.argv[0])\n    sys.exit(1)\n\nfor severity in severities:  # bind the queue to the exchange once per severity,\n    channel.queue_bind(exchange='direct_logs',\n                       queue=queue_name,\n                       routing_key=severity,)\n\ndef callback(ch, method, properties, body):  # define the callback function that receives messages\n    print(\" [消费者] %r:%r\" % (method.routing_key, body))\n\nchannel.basic_consume(queue=queue_name,\n                      on_message_callback = callback,\n                      auto_ack=True,)  # after receiving a message, the consumer sends no acknowledgement back to rabbitmq.\n\nchannel.start_consuming()  # loop and wait for incoming messages.\n\n","sub_path":"RabbitMQ_Demo/发布_订阅_广播,一对多/direct_exchange/direct_receive_subscribe.py","file_name":"direct_receive_subscribe.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"362410395","text":"# -*- coding: utf-8 -*-\n\"\"\"\n-------------------------------------------------\n    FileName     : most_common_word.py\n    Author       : libins\n    Contact      : libins810@gmail.com\n    CreateDate   : 2020-04-25 23:25\n    SoftWare     : IntelliJ IDEA\n    Description  : 
Most common word [LeetCode problem 819]\n-------------------------------------------------\n\"\"\"\n\n\ndef most_common_word(paragraph, banned):\n    alphabet = {'A': 'a', 'B': 'b', 'C': 'c', 'D': 'd', 'E': 'e', 'F': 'f', 'G': 'g', 'H': 'h', 'I': 'i', 'J': 'j', 'K': 'k', 'L': 'l', 'M': 'm', 'N': 'n',\n                'O': 'o', 'P': 'p', 'Q': 'q', 'R': 'r', 'S': 's', 'T': 't', 'U': 'u', 'V': 'v', 'W': 'w', 'X': 'x', 'Y': 'y', 'Z': 'z', 'a': 'a', 'b': 'b',\n                'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h', 'i': 'i', 'j': 'j', 'k': 'k', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p',\n                'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u', 'v': 'v', 'w': 'w', 'x': 'x', 'y': 'y', 'z': 'z'}\n\n    words_count = {}\n\n    # Split out the words\n\n    def load_words(word):\n        if word in words_count:\n            words_count[word] += 1\n        else:\n            words_count[word] = 1\n        return \"\"\n\n    # Count the frequency of each word\n    word = \"\"\n    for i, a in enumerate(paragraph):\n        if a in alphabet and i == len(paragraph) - 1:\n            word += alphabet[a]\n            word = load_words(word)\n            continue\n        if a in alphabet:\n            word += alphabet[a]\n        elif len(word) > 0:\n            word = load_words(word)\n    # Remove the banned (stop) words\n    for bnd in banned:\n        if bnd in words_count:\n            words_count[bnd] = 0\n\n    max_cnt = 0\n    max_word = \"\"\n    # Get the word with the highest count\n    for word, cnt in words_count.items():\n        if cnt > max_cnt:\n            max_cnt = cnt\n            max_word = word\n\n    return max_word\n\n\nparagraph = \"Bob\"\nbanned = [\"\"]\nret = most_common_word(paragraph, banned)\nprint(ret)\n","sub_path":"most_common_word.py","file_name":"most_common_word.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"146708230","text":"def display_board(board):\n    a = board\n    print (' ___ ___ ___ ')\n    for i in range(0,3):\n        start = i * 3\n        end = start + 3\n        sliced_board = a[start:end]\n        print('|   |   |   |')\n        print(f'| {sliced_board[0]} | {sliced_board[1]} | {sliced_board[2]} |')\n        print ('|___|___|___|')\n\ndef get_indexOf_input(num):\n    option = [1,2,3,4,5,6,7,8,9]\n    if num in option:\n        return option.index(num)\n\ndef is_valid_name(name):\n    return name.isalpha()\n\ndef is_valid_marker(a):\n    return a in 'XO'\n\ndef set_players():\n    player1 = input(\"Player 1, enter name: \").lower().capitalize()\n    while not is_valid_name(player1):\n        player1 = input(\"Please enter a valid name. (Alphabet Only): \").lower().capitalize()\n        if is_valid_name(player1):\n            break\n    player1_marker = input(f\"{player1}, pick a marker, 'X' or 'O': \").upper()\n    while not is_valid_marker(player1_marker):\n        player1_marker = input(f\"{player1}, pick a marker, 'X' or 'O': \").upper()\n        if is_valid_marker(player1_marker):\n            break\n    player2 = input(\"Player 2, enter name: \").lower().capitalize()\n    while not is_valid_name(player2):\n        player2 = input(\"Please enter a valid name. 
(Alphabet Only): \").lower().capitalize()\n if is_valid_name(player2):\n break\n if player1_marker == 'X':\n player2_marker = 'O'\n else:\n player2_marker = 'X'\n\n return (player1,player1_marker,player2,player2_marker)\n\ndef is_winner(board):\n patterns = ['012','345','678','036','147','258','048','246']\n for item in patterns:\n if board[int(item[0])] == board[int(item[1])] == board[int(item[2])]:\n return True\n return False\n\ndef is_move_available(moves,input):\n return input in moves\n\n\ndef main():\n board_display = [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ']\n board_data = ['1','2', '3', '4', '5', '6', '7', '8', '9']\n moves = [1,2, 3, 4, 5, 6, 7, 8, 9]\n\n player1_name, player1_marker, player2_name, player2_marker = set_players()\n p1_turn = True\n p2_turn = False\n game_over = False\n display_board(board_display)\n\n while not game_over:\n while p1_turn:\n num = int(input(f'{player1_name} turn, place your marker: '))\n while not is_move_available(moves,num):\n num = int(input(f'{player1_name} , your move is already taken, place marker again: '))\n if is_move_available(moves,num):\n break\n index = get_indexOf_input(num)\n moves[index] = player1_marker\n board_display[index] = player1_marker\n board_data[index] = player1_marker\n p1_turn = False\n p2_turn = True\n display_board(board_display)\n if is_winner(board_data):\n print(f'{player1_name} wins')\n game_over = True\n p2_turn = False\n\n while p2_turn:\n num = int(input(f'{player2_name} turn, place your marker: '))\n while not is_move_available(moves, num):\n num = int(input(f'{player2_name} , your move is already taken, place marker again: '))\n if is_move_available(moves, num):\n break\n index = get_indexOf_input(num)\n moves[index] = player1_marker\n board_display[index] = player2_marker\n board_data[index] = player2_marker\n p1_turn = True\n p2_turn = False\n display_board(board_display)\n if is_winner(board_data):\n print(f'{player2_name} wins')\n game_over = True\n p1_turn = False\n\nmain()","sub_path":"tic_tac_toe.py","file_name":"tic_tac_toe.py","file_ext":"py","file_size_in_byte":3677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"228866596","text":"# -*- coding: utf-8 -*-\nimport os\nimport random\nimport string\nimport datetime\nimport urllib.parse\nimport psycopg2\nimport bcrypt\nimport configparser\n\nfrom psycopg2.extras import RealDictCursor\nfrom passlib.hash import sha256_crypt\n\nfrom flask import request\n\nfrom flask_restful import Resource, Api, reqparse\nfrom flask_jwt_extended import (\n JWTManager, jwt_required, create_access_token,\n get_jwt_identity, decode_token\n)\n\nfrom models.token_model import TokenModel\nfrom utilities.email_stmp import EmailTools\nfrom utilities.gmail_tools import GmailAPITools\n\nparser = reqparse.RequestParser()\n\n\ndef token_decode(token):\n de = decode_token(token)\n if 'identity' in de:\n iden = de['identity']\n return iden\n else:\n return False\n\n\nclass AuthModel:\n\n def __init__(self):\n try:\n config = configparser.ConfigParser()\n dirname = os.path.abspath(os.path.dirname(__file__))\n config_path = os.path.join(dirname, '../.config.ini')\n config.read(config_path)\n host = config.get('dbsettings', 'db_host')\n user = config.get('dbsettings', 'db_user')\n passwd = config.get('dbsettings', 'db_passwd')\n dbname = config.get('dbsettings', 'db_dbname')\n self.db = psycopg2.connect(database=dbname, user=user, password=passwd, host=host)\n except Exception as err:\n raise Exception('Could not connect to db 
', err)\n\n def challenge_user(self, username):\n try:\n cur = self.db.cursor(cursor_factory=RealDictCursor)\n query = \"\"\"SELECT * FROM systems_users AS u WHERE u.username = '%s';\"\"\" % username\n cur.execute(query)\n row = cur.fetchone()\n cur.close()\n if row:\n return row\n else:\n return None\n except psycopg2.Error as err:\n self.db.rollback()\n raise Exception(err)\n\n def update_user_password(self, id_user, n_password):\n try:\n cur = self.db.cursor(cursor_factory=RealDictCursor)\n cur.execute(\n \"UPDATE systems_users SET password = '{p}' WHERE id = {i};\".format(p=n_password, i=id_user))\n self.db.commit()\n cur.close()\n return True\n except psycopg2.Error as err:\n self.db.rollback()\n raise Exception(err)\n\n def challenge_email(self, email):\n try:\n cur = self.db.cursor(cursor_factory=RealDictCursor)\n query = \"SELECT id FROM systems_users WHERE email = '{e}';\".format(e=email)\n cur.execute(query)\n row = cur.fetchone()\n cur.close()\n if row:\n return row\n else:\n return None\n except psycopg2.Error as err:\n self.db.rollback()\n raise Exception(err)\n\n def add_token(self, id_user, token):\n try:\n n = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n cur = self.db.cursor(cursor_factory=RealDictCursor)\n query = \"\"\"INSERT INTO systems_users_tokens(id_user, token, process, created_at) VALUES ({i}, '{t}', '{p}', '{d}');\"\"\".format(i=id_user, t=token, p='login', d=n)\n cur.execute(query)\n self.db.commit()\n cur.close()\n except psycopg2.Error as err:\n self.db.rollback()\n raise Exception(err)\n\n def add_token_type(self, id_user, token, type):\n try:\n n = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n cur = self.db.cursor(cursor_factory=RealDictCursor)\n query = \"\"\"INSERT INTO systems_users_tokens(id_user, token, process, created_at) VALUES ({i}, '{t}', '{p}', '{d}');\"\"\".format(i=id_user, t=token, p=type, d=n)\n cur.execute(query)\n self.db.commit()\n cur.close()\n return True\n except psycopg2.Error as err:\n self.db.rollback()\n raise Exception(err)\n\n def add_recover_token(self, id_user, token):\n try:\n n = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n cur = self.db.cursor(cursor_factory=RealDictCursor)\n query = \"INSERT INTO systems_users_tokens(id_user, token, process, created_at) VALUES ({i}, '{t}', '{p}', '{d}');\".format(i=id_user, t=token, p='recovery', d=n)\n cur.execute(query)\n self.db.commit()\n cur.close()\n return True\n except psycopg2.Error as err:\n self.db.rollback()\n raise Exception(err)\n\n def add_user(self, email, name, last_name, username, password):\n try:\n d = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n cur = self.db.cursor(cursor_factory=RealDictCursor)\n query = \"INSERT INTO systems_users(email, name, last_name, created_at, role, username, password, profile_img) VALUES ('{e}', '{n}', '{l}', '{d}', '{r}', '{u}', '{p}', '{m}') RETURNING id;\".format(e=email, n=name, l=last_name, d=d, r='user', u=username, p=password, m='/profiles/users/profile-default.jpg')\n cur.execute(query)\n self.db.commit()\n i = cur.fetchone()\n cur.close()\n return i['id']\n except psycopg2.Error as err:\n self.db.rollback()\n raise Exception(err)\n\n def add_user_data(self, id_user, institute):\n try:\n cur = self.db.cursor(cursor_factory=RealDictCursor)\n query = \"INSERT INTO systems_users_data(id_user, instituto) VALUES ('{i}', '{n}');\".format(i=id_user, n=institute)\n cur.execute(query)\n self.db.commit()\n cur.close()\n return True\n except psycopg2.Error as err:\n self.db.rollback()\n raise 
Exception(err)\n\n # def delete_recover_token(self, id_user):\n # try:\n # cur = self.db.connection.cursor()\n # cur.execute(\n # \"DELETE FROM tokens WHERE id_user = %s AND `type` = 'recovery';\" % id_user)\n # self.db.connection.commit()\n # cur.close()\n # except mysql.connector.Error as err:\n # raise Exception(err.message)\n\n def check_token_type(self, id_user, token, token_type):\n try:\n cur = self.db.cursor(cursor_factory=RealDictCursor)\n query = \"SELECT id, id_user FROM systems_users_tokens WHERE id_user = {i} AND token = '{t}' AND process = '{p}';\".format(i=id_user, t=token, p=token_type)\n cur.execute(query)\n row = cur.fetchone()\n cur.close()\n if row:\n return row\n return None\n except psycopg2.Error as err:\n self.db.rollback()\n raise Exception(err)\n\n def delete_token(self, id_token):\n try:\n cur = self.db.cursor(cursor_factory=RealDictCursor)\n query = \"DELETE FROM systems_users_tokens WHERE id = {i};\".format(i=id_token)\n cur.execute(query)\n self.db.commit()\n cur.close()\n except psycopg2.Error as err:\n self.db.rollback()\n raise Exception(err)\n\n def add_log_activity(self, id_user, ip, activity, params):\n try:\n cur = self.db.cursor(cursor_factory=RealDictCursor)\n sql = \"\"\"INSERT INTO systems_users_log (user_id, ip, activity, params, created_at) VALUES ({u}, '{i}', '{a}', '{p}', CURRENT_TIMESTAMP);\"\"\".format(u=id_user, i=ip, a=activity, p=params)\n cur.execute(sql)\n self.db.commit()\n cur.close()\n except psycopg2.Error as err:\n self.db.rollback()\n raise Exception(err)\n\n # def get_user_roles(self, id_user):\n # try:\n # cur = self.db.connection.cursor()\n # results = cur.execute(\n # \"SELECT role FROM user_roles WHERE id_user = %s;\" % id_user)\n # if results > 0:\n # rows = cur.fetchall()\n # cur.close()\n # return rows\n # cur.close()\n # return False\n # except mysql.connector.Error as err:\n # raise Exception(err.message)\n\n def __del__(self):\n self.db.close()\n\n\nclass Login(Resource):\n\n def __init__(self):\n self.model = AuthModel()\n\n def post(self):\n try:\n parser.add_argument('username', type=str)\n parser.add_argument('password', type=str)\n args = parser.parse_args()\n\n u = self.model.challenge_user(args['username'])\n if u: \n v = bcrypt.checkpw(args['password'].encode(), u['password'].encode())\n if v:\n self.model.delete_token(u['id'])\n \n landing = None\n if u['role'] == 'admin':\n landing = '/admin/dashboard'\n else:\n landing = 'https://simar.conabio.gob.mx'\n\n usr_data = {\n 'id_user': u['id'],\n 'email': u['email'],\n 'roles': u['role'],\n 'profile_img': u['profile_img'],\n }\n expires = datetime.timedelta(days=365)\n\n ip = request.remote_addr\n id_user = u['id']\n activity = \"session-ini\"\n params = args['username']\n\n self.model.add_log_activity(id_user, ip, activity, params)\n\n access_token = create_access_token(\n identity=usr_data, expires_delta=expires)\n\n self.model.add_token(u['id'], access_token)\n\n return {\"success\": True, \"landing\": landing, \"profile_img\": u['profile_img'], \"token\": access_token}\n\n else:\n raise Exception(\"Invalid password\")\n else:\n raise Exception(\"User not found\")\n except Exception as error: \n return {\"success\": False, \"message\": str(error)}\n\n\nclass Roles(Resource):\n\n def __init__(self):\n self.model = AuthModel()\n\n def post(self):\n try:\n token = str(request.headers.get(\n 'Authorization')).replace(\"Bearer \", \"\")\n\n iden = token_decode(token)\n if iden:\n roles = self.model.get_user_roles(iden['id_user'])\n r = []\n for key in roles:\n 
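# note: AuthModel.get_user_roles is commented out above, so the call that builds 'roles' will fail until that method is restored\n                    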
r.append(key['role'])\n\n return {\"success\": True, 'roles': r}\n else:\n raise Exception(\"Invalid token\")\n except Exception as error:\n return {\"success\": False, \"message\": str(error)}\n\n\nclass Logout(Resource):\n\n def __init__(self):\n self.model = AuthModel()\n\n def post(self):\n try:\n token = str(request.headers.get(\n 'Authorization')).replace(\"Bearer \", \"\")\n\n iden = token_decode(token)\n if iden:\n\n ip = request.remote_addr\n id_user = iden['id_user']\n activity = \"session-logout\"\n params = ''\n\n self.model.add_log_activity(id_user, ip, activity, params)\n\n self.model.delete_token(iden['id_user'])\n\n return {\"success\": True}\n except Exception as error:\n print(error)\n return {\"success\": False, \"message\": str(error)}\n\n\nclass Update(Resource):\n\n def __init__(self):\n self.model = AuthModel()\n\n def post(self):\n try:\n return {\"success\": True}\n except Exception as error:\n print(error)\n return {\"success\": False, \"message\": str(error)}\n\n\nclass Validate(Resource):\n\n def __init__(self):\n self.model = AuthModel()\n\n def post(self):\n try: \n parser.add_argument('token', type=str)\n args = parser.parse_args()\n\n token = args['token']\n\n iden = token_decode(token)\n if iden:\n\n if iden['roles'] == 'admin':\n landing = '/admin/dashboard'\n else:\n landing = 'https://simar.conabio.gob.mx'\n\n return {\n \"success\": True,\n \"landing\": landing,\n \"email\": iden['email'],\n \"profile_img\": iden['profile_img'],\n }\n else:\n raise Exception(\"Invalid token\") \n except Exception as error:\n print(error)\n return {\"success\": False, \"message\": str(error)}\n\n\nclass ValidateToken(Resource):\n\n def __init__(self):\n self.model = AuthModel()\n\n def post(self):\n try:\n parser.add_argument('token', type=str)\n parser.add_argument('type', type=str)\n args = parser.parse_args()\n iden = token_decode(args['token'])\n if 'token' in args:\n if args['token'] != '':\n if iden:\n r = self.model.check_token_type(iden['id_user'], args['token'], args['type'])\n if r:\n return {\"success\": True}\n else:\n raise Exception(\"Token has expired or it is not valid\")\n else:\n raise Exception(\"Invalid token\")\n raise Exception(\"No token\")\n except Exception as error:\n return {\"success\": False, \"message\": str(error)}\n\n\nclass RestoreToken(Resource):\n\n def __init__(self):\n self.model = AuthModel()\n\n def post(self):\n try:\n parser.add_argument('token', type=str)\n args = parser.parse_args()\n\n if 'token' in args:\n r = self.model.check_recover_token(args['token'])\n if r:\n recov = token_decode(args['token'])\n if recov['id_user'] == r['id_user']:\n return {\"success\": True}\n else:\n raise Exception(\"Invalid token\")\n else:\n raise Exception(\"Invalid token\")\n else:\n return {\"success\": False}\n except Exception as error:\n print(error)\n return {\"success\": False, \"message\": str(error)}\n\n\nclass Register(Resource):\n\n def __init__(self, base_url):\n self.model = AuthModel()\n self.base_url = base_url\n\n def randon_password(self, size=6, chars=string.ascii_uppercase + string.digits):\n return ''.join(random.choice(chars) for _ in range(size))\n\n def post(self):\n try:\n parser.add_argument('n_name', type=str)\n parser.add_argument('n_lastname', type=str)\n parser.add_argument('n_institute', type=str)\n parser.add_argument('n_email', type=str)\n parser.add_argument('n_usrnm', type=str)\n args = parser.parse_args()\n\n if args['n_name'] and args['n_lastname'] and args['n_institute'] and args['n_email'] and 
args['n_usrnm']:\n u = self.model.challenge_user(args['n_usrnm'])\n if u:\n raise Exception(\"Nombre de usuario ya registrado.\")\n e = self.model.challenge_email(args['n_email'])\n if e:\n raise Exception(\"Ya existe un usuario con ese email registrado.\")\n\n if not u and not e:\n\n pss = self.randon_password(size=10)\n pss_e = bcrypt.hashpw(str(pss).encode('utf-8'), bcrypt.gensalt())\n\n id_user = self.model.add_user(args['n_email'], args['n_name'], args['n_lastname'], args['n_usrnm'], pss_e.decode('utf-8'))\n if id_user:\n self.model.add_user_data(id_user, args['n_institute'])\n\n usr_data = {\n 'id_user': id_user,\n 'email': args['n_email'],\n 'timestamp': datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n }\n\n expires = datetime.timedelta(days=2)\n\n register_token = create_access_token(identity=usr_data, expires_delta=expires)\n\n ip = request.remote_addr\n id_user = id_user\n activity = \"session-register\"\n params = args['n_email']\n\n self.model.add_log_activity(id_user, ip, activity, params)\n\n r = self.model.add_token_type(id_user, register_token, 'register')\n if r:\n uri = urllib.parse.quote_plus(register_token)\n emt = GmailAPITools()\n emt.send_activation_email(args['n_email'], args['n_usrnm'], pss, self.base_url + \"activar/\" + uri)\n\n return {\"success\": True, \"message\": \"Hemos enviado un mensaje a su correo eletrónico. Complete el registro activando su usario activandolo.\"}\n else:\n raise Exception(\"No se pudo agregar token\")\n\n raise Exception(\"No se pudo agregar usuario\")\n\n raise Exception(\"Unknown exception found\")\n\n return {\"success\": False}\n\n except Exception as error:\n return {\"success\": False, \"message\": str(error)}\n\n\nclass ActivateUser(Resource):\n\n def __init__(self):\n self.model = AuthModel()\n\n def post(self):\n try:\n parser.add_argument('token', type=str)\n args = parser.parse_args()\n\n return {\"success\": False}\n\n except Exception as error:\n print(error)\n return {\"success\": False, \"message\": str(error)}\n\n\nclass ChangePassword(Resource):\n\n def __init__(self):\n self.model = AuthModel()\n\n def post(self):\n try:\n parser.add_argument('token', type=str)\n parser.add_argument('upwd', type=str)\n args = parser.parse_args()\n\n iden = token_decode(args['token'])\n\n if 'token' in args and 'upwd' in args:\n\n r = self.model.check_token_type(iden['id_user'], args['token'], 'recovery')\n if r:\n recov = token_decode(args['token'])\n if recov['id_user'] == r['id_user']:\n new_password = bcrypt.hashpw(str(args['upwd']).encode('utf-8'), bcrypt.gensalt())\n\n u = self.model.update_user_password(r['id_user'], new_password.decode('utf-8'))\n if u:\n\n ip = request.remote_addr\n id_user = iden['id_user']\n activity = \"session-password-change\"\n params = args['token']\n\n self.model.add_log_activity(id_user, ip, activity, params)\n\n self.model.delete_token(r['id'])\n return {\"success\": True}\n else:\n raise Exception(\"Unable to update password\")\n else:\n raise Exception(\"Invalid token\")\n else:\n raise Exception(\"Invalid token\")\n else:\n raise Exception(\"Invalid input!\")\n\n except Exception as error:\n print(error)\n return {\"success\": False, \"message\": str(error)}\n\n\nclass Recovery(Resource):\n\n def __init__(self, base_url):\n self.model = AuthModel()\n self.base_url = base_url\n\n def post(self):\n try:\n\n parser.add_argument('email', type=str)\n parser.add_argument('lang', type=str)\n args = parser.parse_args()\n\n if 'email' in args:\n u = self.model.challenge_email(args['email'])\n if 
u:\n usr_data = {\n 'id_user': u['id'],\n 'email': args['email'],\n 'timestamp': datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n }\n\n expires = datetime.timedelta(days=2)\n\n recover_token = create_access_token(\n identity=usr_data, expires_delta=expires)\n\n if recover_token:\n r = self.model.add_recover_token(\n u['id'], recover_token)\n if r:\n uri = urllib.parse.quote_plus(recover_token)\n rec_url = self.base_url + \"restablecer/\" + uri\n\n emt = GmailAPITools()\n emt.send_recovery_email(\n args['email'], rec_url)\n\n ip = request.remote_addr\n id_user = u['id']\n activity = \"session-recovery\"\n params = args['email']\n\n self.model.add_log_activity(id_user, ip, activity, params)\n\n if args['lang'] == 'es':\n m = \"Hemos enviado un email para restablecer su contraseña.\"\n else:\n m = \"We have sent an email to reset your password.\"\n\n return {\"success\": True, \"message\": m}\n\n else:\n raise Exception(\"Could not add token\")\n else:\n raise Exception(\"Email not found\")\n else:\n raise Exception(\"Invalid input!\")\n except Exception as error:\n print(error)\n return {\"success\": False, \"message\": str(error)}\n","sub_path":"models/auth_model.py","file_name":"auth_model.py","file_ext":"py","file_size_in_byte":22029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"253670","text":"# Script to chek if the examples build \n# As a side effect, it also checks if rgl builds\n\nimport os \nfrom subprocess import run\n\ndef check():\n run(['cargo', 'check'])\n\nos.chdir(\"Examples/\")\n\nfor dir in os.listdir(\".\"):\n os.chdir(dir + \"/\")\n print (\"\\n\" + \"=\" * 20)\n print (\"Checking example: \" + dir, end = \"\\n\\n\")\n check()\n os.chdir(\"..\")","sub_path":"check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"542595207","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom .models import mobile\n\n# Create your views here.\n\ndef index(request):\n return render(request, \"index.html\", {'delete':False})\n\ndef search(request):\n model_number_ip = request.POST.get('model_number', '')\n\n try:\n query = mobile.objects.get(mobileno_db=model_number_ip)\n mobileDetails = [query.mobileno_db,query.price_db,query.year_db,query.company_db,query.RAM_db,query.ROM_db]\n return render(request, \"single.html\", {\"data\":mobileDetails, 'delete':False})\n except:\n return HttpResponse(\"Model Number does not exist.\")\n\ndef add(request):\n return render(request, \"add.html\")\n\ndef addentry(request):\n model_number_ip = request.POST.get('model_number', '')\n price_ip = request.POST.get('price', '')\n year_ip = request.POST.get('year', '')\n company_ip = request.POST.get('company', '')\n RAM_ip = request.POST.get('RAM', '')\n ROM_ip = request.POST.get('ROM', '')\n\n query = mobile(mobileno_db=model_number_ip,price_db=price_ip,year_db=year_ip,company_db=company_ip,RAM_db=RAM_ip,ROM_db=ROM_ip)\n query.save()\n return render(request, \"index.html\")\n \ndef delete(request):\n return render(request, \"index.html\", {\"delete\":True})\n\ndef deletepage(request):\n model_number_ip = request.POST.get('model_number', '')\n\n try:\n query = mobile.objects.get(mobileno_db=model_number_ip)\n mobileDetails = [query.mobileno_db,query.price_db,query.year_db,query.company_db,query.RAM_db,query.ROM_db]\n return render(request, \"single.html\", {\"data\":mobileDetails, 
'delete':True})\n except:\n return HttpResponse(\"Model Number does not exist.\")\n\ndef deleteentry(request):\n model_number_ip = request.POST.get('entry', '')\n\n try:\n query = mobile.objects.get(mobileno_db=model_number_ip)\n query.delete()\n return render(request, \"index.html\", {'delete':False})\n except:\n return HttpResponse(\"Model Number does not exist.\")\n \ndef viewall(request):\n try:\n dbObj = mobile.objects.all()\n dbData = [str(element) for element in list(dbObj)]\n finalData = [element.split(\"*\") for element in dbData]\n\n return render(request, \"viewall.html\", {\"data\":finalData})\n\n except:\n return HttpResponse(\"No data exists.\")\n \n\n\n","sub_path":"django/MobileStore2/mobstore2/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"150797737","text":"\n\nfrom xai.brain.wordbase.nouns._overlay import _OVERLAY\n\n#calss header\nclass _OVERLAYS(_OVERLAY, ):\n\tdef __init__(self,): \n\t\t_OVERLAY.__init__(self)\n\t\tself.name = \"OVERLAYS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"overlay\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_overlays.py","file_name":"_overlays.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"130411554","text":"import pytest\nfrom user_sync.post_sync.manager import PostSyncData\n\n\n@pytest.fixture\ndef example_user():\n return {\n 'type': 'federatedID',\n 'username': 'user@example.com',\n 'domain': 'example.com',\n 'email': 'user@example.com',\n 'firstname': 'Example',\n 'lastname': 'User',\n 'groups': set(),\n 'country': 'US',\n }\n\n\ndef test_add_umapi_user(example_user):\n email_id = 'user@example.com'\n post_sync_data = PostSyncData()\n post_sync_data.update_umapi_data(None, email_id, [], [], **example_user)\n assert post_sync_data.umapi_data[None][email_id] == example_user\n\n\ndef test_add_groups(example_user):\n post_sync_data = PostSyncData()\n email_id = 'user@example.com'\n example_user['groups'] = {'group1', 'group2', 'group3'}\n groups_add = ['group3', 'group4', 'group5']\n post_sync_data.update_umapi_data(None, email_id, groups_add, [], **example_user)\n assert post_sync_data.umapi_data[None][email_id]['groups'] == example_user['groups'] | set(groups_add)\n\n\ndef test_remove_groups(example_user):\n post_sync_data = PostSyncData()\n email_id = 'user@example.com'\n example_user['groups'] = {'group1', 'group2', 'group3'}\n groups_remove = ['group1', 'group2']\n post_sync_data.update_umapi_data(None, email_id, [], groups_remove, **example_user)\n assert post_sync_data.umapi_data[None][email_id]['groups'] == example_user['groups'] - set(groups_remove)\n\n\ndef test_add_remove_groups(example_user):\n post_sync_data = PostSyncData()\n email_id = 'user@example.com'\n example_user['groups'] = {'group1', 'group2', 'group3', 'group4', 'group5'}\n groups_add = ['group6']\n groups_remove = ['group1', 'group2']\n post_sync_data.update_umapi_data(None, email_id, groups_add, groups_remove, **example_user)\n delta_groups = example_user['groups'] | set(groups_add)\n delta_groups -= set(groups_remove)\n assert post_sync_data.umapi_data[None][email_id]['groups'] == delta_groups\n","sub_path":"tests/test_post_sync.py","file_name":"test_post_sync.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} 
+{"seq_id":"214269098","text":"from typing import Union\n\nimport networkx as nx\nimport numpy as np\nimport torch\nfrom gensim import downloader as api\nfrom gensim.models import Word2Vec\n\nfrom transformers import BertTokenizer, BertModel\n\n\nclass BaseNodeFeatureBuilder:\n    def __call__(self, node_id: int, node_attrs: dict, graph: nx.DiGraph) -> Union[int, float, np.ndarray]:\n        raise NotImplementedError()\n\n\nclass DebugFeatureBuilder(BaseNodeFeatureBuilder):\n    def __init__(self):\n        print('WARNING: You are using the debugging feature builder, '\n              'which will result in the node target class being the node feature!!')\n\n    def __call__(self, node_id: int, node_attrs: dict, graph: nx.DiGraph) -> Union[int, float, np.ndarray]:\n        return node_attrs['class_one_hot']\n\n\nclass IdNodeFeatureBuilder(BaseNodeFeatureBuilder):\n    def __call__(self, node_id: int, node_attrs: dict, graph: nx.DiGraph) -> Union[int, float, np.ndarray]:\n        return node_id\n\n\nclass PosFeatureBuilder(BaseNodeFeatureBuilder):\n    def __init__(self, mode):\n        self._mode = mode\n\n    def __call__(self, node_id: int, node_attrs: dict, graph: nx.DiGraph) -> Union[int, float, np.ndarray]:\n        if self._mode == 'coarse-pos':\n            return node_attrs['coarse_pos_tags_encoded']\n        elif self._mode == 'fine-pos':\n            return node_attrs['fine_pos_tags_encoded']  # was a copy of the coarse branch; key name assumed\n        raise ValueError(f'Unknown mode: \"{self._mode}\"')\n\n\nclass BertFeatureBuilder(BaseNodeFeatureBuilder):\n    def __init__(self):\n        self._tokenizer = BertTokenizer.from_pretrained('bert-base-cased')\n        self._model = BertModel.from_pretrained('bert-base-cased')\n\n    def _get_unknown_vector(self):\n        return np.zeros(768)\n\n    def __call__(self, node_id: int, node_attrs: dict, graph: nx.DiGraph) -> Union[int, float, np.ndarray]:\n        if 'text' not in node_attrs or node_attrs['text'] == '':\n            return self._get_unknown_vector()\n        text = node_attrs['text']\n        tokens = self._tokenizer.tokenize(text, add_special_tokens=True)\n        token_ids = self._tokenizer.convert_tokens_to_ids(tokens)\n        print(f'{tokens} = {token_ids}')\n        input_ids = torch.tensor(token_ids).unsqueeze(0)\n        outputs = self._model(input_ids)\n        # pooled output is a tensor of (1, embedding_size)\n        pooled_output = 1\n        embedding: torch.Tensor = outputs[pooled_output]\n        return embedding.detach().numpy().squeeze()\n\n\nclass Word2VecFeatureBuilder(BaseNodeFeatureBuilder):\n    SUPPORTED_MODELS = ['fasttext-wiki-news-subwords-300',\n                        'conceptnet-numberbatch-17-06-300',\n                        'word2vec-ruscorpora-300',\n                        'word2vec-google-news-300',\n                        'glove-wiki-gigaword-50',\n                        'glove-wiki-gigaword-100',\n                        'glove-wiki-gigaword-200',\n                        'glove-wiki-gigaword-300',\n                        'glove-twitter-25',\n                        'glove-twitter-50',\n                        'glove-twitter-100',\n                        'glove-twitter-200']\n\n    def __init__(self, model_name):\n        self.model: Word2Vec = api.load(model_name)\n        self.model.init_sims(replace=True)\n\n    def __call__(self, node_id: int, node_attrs: dict, graph: nx.DiGraph) -> Union[int, float, np.ndarray]:\n        if 'text' in node_attrs:\n            text = node_attrs['text']\n            if text != '':  # value comparison; 'is not' on a string literal is fragile and warns in Python 3.8+\n                return self._build_w2v_vector(text)\n        return self._get_none_vector()\n\n    @staticmethod\n    def _post_process_vector(vec):\n        return np.append(vec, 1.0)\n\n    def _get_unknown_vector(self):\n        return np.zeros(self.model.vector_size)\n\n    def _get_none_vector(self):\n        return np.ones(self.model.vector_size)\n\n    @staticmethod\n    def _normalize_vector(vec: np.ndarray):\n        vec = vec / np.linalg.norm(vec)\n        vec += 1.\n        vec /= 2.\n        return vec\n\n    def _build_w2v_vector(self, text):\n        tokens = text.split(' ')\n        ret = []\n        for token in tokens:\n            if 
token in self.model:\n                # vec = np.random.random(self.model.vector_size)\n                vec = self.model.wv.word_vec(token, use_norm=True).copy()\n                # vec = self._normalize_vector(vec)\n                ret.append(vec)\n            else:\n                print()\n                print(f'WARN: Word not in corpus: {token}')\n\n        if len(ret) == 0:\n            # every single token in this node is unknown, treat as if it had no text at all\n            # TODO: experiment with a different \"special\" vector, so the model can distinguish between empty and unknown\n            return self._get_unknown_vector()\n        else:\n            ret = np.array(ret)\n            ret = np.mean(ret, axis=0)\n\n        if np.isnan(ret).any():\n            print(f'WARN: Found a nan value in embedding: {ret}')\n\n        # if np.any(np.where(ret < 0)):\n        #     print(f'WARN: Found negative value in embedding: {ret}')\n\n        return ret\n\n\nclass ConcatFeatureBuilder(BaseNodeFeatureBuilder):\n    def __init__(self):\n        self._w2v_feature_builder = Word2VecFeatureBuilder('word2vec-google-news-300')\n        self._id_feature_builder = IdNodeFeatureBuilder()\n\n    def __call__(self, node_id: int, node_attrs: dict, graph: nx.DiGraph) -> Union[int, float, np.ndarray]:\n        # NOTE: the original body was a bare 'return' (None); concatenating both sub-features is the assumed intent\n        w2v_feature = self._w2v_feature_builder(node_id, node_attrs, graph)\n        id_feature = self._id_feature_builder(node_id, node_attrs, graph)\n        return np.append(w2v_feature, id_feature)\n","sub_path":"ucca4bpm/data/feature_builder.py","file_name":"feature_builder.py","file_ext":"py","file_size_in_byte":5377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"368641166","text":"#!/usr/bin/env python3\n#\n# @author Jian Zhao \n#\n# @description \n#\n\n# range\nfor i in range(1,10):\n    print(i)\n\n# while\na = 1\nwhile a < 10:\n    print(a)\n    a+=1\n\n# no \"do while\" in python!\n","sub_path":"python/basic/loop.py","file_name":"loop.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"315528218","text":"import functools\nimport torch\nfrom torch import nn\n\n\nclass ConvBlock(nn.Module):\n    def __init__(self, in_planes, out_planes, **kwargs):\n        super(ConvBlock, self).__init__()\n        self.conv = nn.Sequential(\n            nn.Conv2d(in_planes, out_planes, bias=False, **kwargs),\n            nn.BatchNorm2d(out_planes),\n            nn.ReLU(inplace=True),\n        )\n\n    def forward(self, x):\n        return self.conv(x)\n\n\nclass StemBlock(nn.Module):\n    \"\"\"\n    StemBlock used in PeleeNet\n    According to Pelee paper, it is motivated by\n    Inception-v4 Szegedy et al. (2017) and DSOD Shen et al. 
(2017)\n This is used before the first dense layer\n \"\"\"\n\n def __init__(self, k=32):\n super(StemBlock, self).__init__()\n self.conv1 = ConvBlock(3, k, kernel_size=3, stride=2, padding=1)\n self.left_conv1 = ConvBlock(k, k//2, kernel_size=1, stride=1)\n self.left_conv2 = ConvBlock(k//2, k, kernel_size=3, stride=2, padding=1)\n self.right = nn.MaxPool2d(kernel_size=2, stride=2)\n self.conv_last = ConvBlock(k*2, k, kernel_size=1, stride=1)\n\n def forward(self, x):\n \"\"\"\n x: input image of shape [batch, 3, 224, 224]\n \"\"\"\n x = self.conv1(x) # [batch, 32, 112, 112]\n left = self.left_conv1(x) # [batch, 16, 112, 112]\n left = self.left_conv2(left) # [batch, 32, 112, 112]\n right = self.right(x) # [batch, 32, 112, 112]\n x = torch.cat((left, right), dim=1) # [batch, 64, 112, 112]\n x = self.conv_last(x) # [batch, 32, 56, 56]\n return x\n\n\nclass DenseLayer(nn.Module):\n \"\"\"\n Two-way dense layer suggested by the paper\n \"\"\"\n def __init__(self, in_planes, growth_rate, bottleneck_width):\n \"\"\"\n bottleneck_width is usally 1, 2, or 4\n \"\"\"\n super(DenseLayer, self).__init__()\n\n inter_channel = bottleneck_width * growth_rate / 2\n inter_channel = bottleneck_width * growth_rate // 2 # will be k/2, k, 2k depending on bottleneck_width = 1,2,4\n\n # Left side\n self.cb1_a = ConvBlock(in_planes, inter_channel, kernel_size=1, stride=1)\n self.cb1_b = ConvBlock(inter_channel, growth_rate//2, kernel_size=3, stride=1, padding=1)\n\n\n # Right side\n self.cb2_a = ConvBlock(in_planes, inter_channel, kernel_size=1, stride=1)\n self.cb2_b = ConvBlock(inter_channel, growth_rate//2, kernel_size=3, stride=1, padding=1)\n self.cb2_c = ConvBlock(growth_rate//2, growth_rate//2, kernel_size=3, stride=1, padding=1)\n\n def forward(self, x):\n cb1_a_out = self.cb1_a(x)\n cb1_b_out = self.cb1_b(cb1_a_out)\n\n cb2_a_out = self.cb2_a(x)\n cb2_b_out = self.cb2_b(cb2_a_out)\n cb2_c_out = self.cb2_c(cb2_b_out)\n\n out = torch.cat((x, cb1_b_out, cb2_c_out), 1)\n\n return out\n\n\nclass DenseBlock(nn.Module):\n def __init__(self, in_planes, no_dense_layers, growth_rate, bottleneck_width):\n super(DenseBlock, self).__init__()\n layers = [DenseLayer(in_planes+growth_rate*i, growth_rate, bottleneck_width) for i in range(no_dense_layers)]\n self.block = nn.Sequential(*layers)\n\n def forward(self, x):\n return self.block(x)\n\n\nclass TransitionLayer(nn.Module):\n def __init__(self, inp, oup, last=False):\n super(TransitionLayer, self).__init__()\n conv = ConvBlock(inp, oup, kernel_size=1, stride=1)\n if not last:\n self.layer = nn.Sequential(conv, nn.AvgPool2d(kernel_size=2, stride=2))\n else:\n self.layer = conv\n\n def forward(self, x):\n return self.layer(x)\n\n\ndef _pelee(growth_rate, dense_layers, bottleneck_widths, num_layers):\n layers = [StemBlock(k=64)]\n filters = 64\n for i in range(num_layers):\n next_filters = filters + growth_rate * dense_layers[i]\n layers.append(\n nn.Sequential(\n DenseBlock(filters, dense_layers[i], growth_rate, bottleneck_widths[i]),\n TransitionLayer(next_filters, next_filters, last=False)\n #TransitionLayer(next_filters, next_filters, last=(i == num_layers-1))\n )\n )\n filters += growth_rate * dense_layers[i]\n return layers\n\n\ndef wrapped_partial(func, *args, **kwargs):\n partial_func = functools.partial(func, *args, **kwargs)\n functools.update_wrapper(partial_func, func)\n return partial_func\n\n\npelee = wrapped_partial(\n _pelee,\n growth_rate=48,\n dense_layers=[3, 4, 8, 6],\n bottleneck_widths=[1, 2, 4, 4],\n 
num_layers=4\n)\n","sub_path":"detector_back/worker/detector/detector/common/models/nets/pelee.py","file_name":"pelee.py","file_ext":"py","file_size_in_byte":4620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"97037811","text":"\"\"\"StaticCodeAnalyzer URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom sca.views import (AddProjectView, DeleteProjectView, UpdateProjectView, ProjectView, ProjectsView,\n AddReportView, DeleteReportView, UpdateReportView, ReportView, ReportsView,\n AddFileView, DeleteFileView, UpdateFileView, FileView, FilesView,\n AddPersonView, DeletePersonView, UpdatePersonView, PersonView, PersonsView)\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n \n url(r'^projects/$', ProjectsView.as_view(), name='projects'),\n url(r'^project/(?P(\\d)+)/$', ProjectView.as_view(), name='project'),\n url(r'^add_project/$', AddProjectView.as_view(), name='add-project'),\n url(r'^delete_project/(?P(\\d)+)/$', DeleteProjectView.as_view(), name='delete-project'),\n url(r'^update_project/(?P(\\d)+)/$', UpdateProjectView.as_view(), name='update-project'),\n \n url(r'^reports/$', ReportsView.as_view(), name='reports'),\n url(r'^report/(?P(\\d)+)/$', ReportView.as_view(), name='report'),\n url(r'^add_report/$', AddReportView.as_view(), name='add-report'),\n url(r'^delete_report/(?P(\\d)+)/$', DeleteReportView.as_view(), name='delete-report'),\n url(r'^update_report/(?P(\\d)+)/$', UpdateReportView.as_view(), name='update-report'),\n\n url(r'^files/$', FilesView.as_view(), name='files'),\n url(r'^file/(?P(\\d)+)/$', FileView.as_view(), name='file'),\n url(r'^add_file/$', AddFileView.as_view(), name='add-file'),\n url(r'^delete_file/(?P(\\d)+)/$', DeleteFileView.as_view(), name='delete-file'),\n url(r'^update_file/(?P(\\d)+)/$', UpdateFileView.as_view(), name='update-file'),\n\n url(r'^persons/$', PersonsView.as_view(), name='persons'),\n url(r'^person/(?P(\\d)+)/$', PersonView.as_view(), name='person'),\n url(r'^add_person/$', AddPersonView.as_view(), name='add-person'),\n url(r'^delete_person/(?P(\\d)+)/$', DeletePersonView.as_view(), name='delete-person'),\n url(r'^update_person/(?P(\\d)+)/$', UpdatePersonView.as_view(), name='update-person'),\n]\n","sub_path":"StaticCodeAnalyzer/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"496464682","text":"# Simple HTTP example\n#\n# A simple example using the HTTP plugin that shows the retrieval of a\n# single page via HTTP. 
The resulting page is written to a file.\n#\n# More complex HTTP scripts are best created with the TCPProxy.\n\nfrom net.grinder.script.Grinder import grinder\nfrom net.grinder.script import Test\nfrom net.grinder.plugin.http import HTTPRequest\n\nimport data_urls\nimport jarray\nimport random\n\ntest1 = Test(1, \"Request random dataset\")\nreq = HTTPRequest()\nreq.setReadResponseBody(False)\nchunk = 1024*512\n\nproperties = grinder.properties.getPropertySubset('dapbench.')\n\ndataset_list = properties['datasets']\n\ndef streamed_get(url):\n    buf = jarray.zeros(chunk, 'b')\n    total = 0\n    resp = req.GET(url)\n    stream = resp.getInputStream()\n    ret = 0\n    while ret != -1:\n        ret = stream.read(buf)\n        if ret != -1:  # read() returns -1 at end of stream; do not count it as bytes\n            total += ret\n\n    return total\nstreamed_get = test1.wrap(streamed_get)\n\nclass TestRunner:\n    def __call__(self):\n        grinder.logger.output('Selecting dataset from %s' % dataset_list)\n        dataset_url = random.choice(data_urls.load_dataset_list(dataset_list))\n        grinder.logger.output('Downloading %s' % dataset_url)\n        result = streamed_get(dataset_url)\n        grinder.logger.output('Transferred %d bytes' % result)\n    \n","sub_path":"grinder/basic_download.py","file_name":"basic_download.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"515313602","text":"import time\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom TrialsOfNeuralVocalRecon.data_processing.data_generators import Reconstruction_Generator, Prediction_Generator, \\\n    Random_Generator, CPC_Generator\n\n\ndef timeStructured():\n    named_tuple = time.localtime()  # get struct_time\n    time_string = time.strftime(\"%Y-%m-%d-%H-%M-%S\", named_tuple)\n    return time_string\n\n\ndef plot_predictions(sound2sound, spike2sound, generator_test_snd2snd, generator_test_spk2snd, ex):\n    # test spike to sound\n\n    batch_spk_test, batch_snd_test = generator_test_spk2snd.__getitem__()\n\n    predicted_sound = spike2sound.predict_on_batch(batch_spk_test)\n    one_spike, one_sound, one_predicted_sound = batch_spk_test[0], batch_snd_test[0], predicted_sound[0]\n\n    fig, axs = plt.subplots(3)\n    fig.suptitle('Vertically stacked subplots')\n    axs[0].plot(one_spike)\n    axs[0].set_title('spike')\n    axs[1].plot(one_sound)\n    axs[1].set_title('sound')\n    axs[2].plot(one_predicted_sound)\n    axs[2].set_title('predicted sound')\n\n    fig_path = 'data/spk2snd.pdf'\n    fig.savefig(fig_path, bbox_inches='tight')\n    ex.add_artifact(fig_path)\n\n    # test sound to sound\n    batch_snd_input, batch_snd_output = generator_test_snd2snd.__getitem__()\n\n    predicted_sound = sound2sound.predict_on_batch(batch_snd_input)\n    one_sound_input, one_sound_output, one_predicted_sound = batch_snd_input[0], batch_snd_output[0], predicted_sound[0]\n\n    fig, axs = plt.subplots(3)\n    fig.suptitle('Vertically stacked subplots')\n    axs[0].plot(one_sound_input)\n    axs[0].set_title('input sound')\n    axs[1].plot(one_sound_output)\n    axs[1].set_title('output sound')\n\n    axs[2].plot(one_predicted_sound)\n    axs[2].set_title('predicted sound')\n\n    fig_path = 'data/snd2snd.pdf'\n    fig.savefig(fig_path, bbox_inches='tight')\n    ex.add_artifact(fig_path)\n\n\ndef plot_losses(n2n_lh, k2n_lh, n2n_lh_cpc, k2n_lh_cpc, ex):\n    # plot training losses\n    fig, ax = plt.subplots(figsize=(8, 8))\n    ax.plot(n2n_lh, label='n2n_lh')\n    ax.plot(k2n_lh, label='k2n_lh')\n    ax.plot(n2n_lh_cpc, label='n2n_lh_cpc')\n    ax.plot(k2n_lh_cpc, label='k2n_lh_cpc')\n    ax.set_title('model loss')\n    ax.set_ylabel('loss')\n    ax.set_xlabel('epoch')\n    ax.legend()\n\n    random_string = 
''.join([str(r) for r in np.random.choice(10, 4)])\n plot_filename = 'data/{}_train_losses.pdf'.format(random_string)\n fig.savefig(plot_filename, bbox_inches='tight')\n ex.add_artifact(plot_filename)\n\n\ndef plot_final_bar_plot(list_terms, final_scores, plot_filename):\n # compare the performance of the different approaches\n y_pos = np.arange(len(list_terms))\n\n fig, ax = plt.subplots(figsize=(8, 8))\n\n ax.bar(y_pos, final_scores, align='center', alpha=0.5)\n # ax.xticks(y_pos, list_terms)\n ax.set_xticklabels(list_terms, fontdict=None, minor=False)\n\n ax.set_ylabel('MSE')\n ax.set_title('training type')\n\n fig.savefig(plot_filename, bbox_inches='tight')\n\ndef get_innvestigate_analyzers():\n file = open('innvestigate_analyzers.txt', 'r')\n analyzers = []\n for line in file:\n if ':' in line:\n idx = line.index(':')\n analyzer = line[:idx].replace(' ', '').replace(',', '').replace('\\\"', '')\n analyzers.append(analyzer)\n return analyzers\n\ndef getData(\n sound_shape=(3, 1),\n spike_shape=(3, 1),\n data_type='real_prediction',\n batch_size=128,\n terms=3, predict_terms=3):\n filepath_noisy_spikes_train = './data/spikes_noisy_train.h5'\n filepath_noisy_spikes_test = './data/spikes_noisy_test.h5'\n filepath_stim_noisy_train = './data/input_noisy_train.h5'\n filepath_stim_noisy_test = './data/input_noisy_test.h5'\n filepath_stim_clean_train = './data/input_clean_train.h5'\n filepath_stim_clean_test = './data/input_clean_test.h5'\n\n\n filepath_spikes_train = './data/spikes_train_windowed_normalized.h5'\n filepath_spikes_test = './data/spikes_test_windowed_normalized.h5'\n filepath_stim_train = './data/input_train_windowed_normalized.h5'\n filepath_stim_test = './data/input_test_windowed_normalized.h5'\n\n if data_type == 'real_prediction':\n\n generator_train_spk2snd = Prediction_Generator(\n sound_shape=sound_shape,\n spike_shape=spike_shape,\n filepath_input=filepath_spikes_train,\n filepath_output=filepath_stim_train,\n batch_size=batch_size)\n generator_test_spk2snd = Prediction_Generator(\n sound_shape=sound_shape,\n spike_shape=spike_shape,\n filepath_input=filepath_spikes_test,\n filepath_output=filepath_stim_test,\n batch_size=batch_size)\n\n generator_train_snd2snd = Prediction_Generator(\n sound_shape=sound_shape,\n spike_shape=spike_shape,\n filepath_input=filepath_stim_train,\n filepath_output=filepath_stim_train,\n batch_size=batch_size)\n generator_test_snd2snd = Prediction_Generator(\n sound_shape=sound_shape,\n spike_shape=spike_shape,\n filepath_input=filepath_stim_test,\n filepath_output=filepath_stim_test,\n batch_size=batch_size)\n\n elif data_type == 'denoising':\n\n generator_train_spk2snd = Prediction_Generator(filepath_input=filepath_noisy_spikes_train,\n filepath_output=filepath_stim_clean_train,\n batch_size=batch_size)\n generator_test_spk2snd = Prediction_Generator(filepath_input=filepath_noisy_spikes_test,\n filepath_output=filepath_stim_clean_test,\n batch_size=batch_size)\n\n generator_train_snd2snd = Prediction_Generator(filepath_input=filepath_stim_noisy_train,\n filepath_output=filepath_stim_clean_train,\n batch_size=batch_size)\n generator_test_snd2snd = Prediction_Generator(filepath_input=filepath_stim_noisy_test,\n filepath_output=filepath_stim_clean_test,\n batch_size=batch_size)\n\n\n elif data_type == 'real_reconstruction':\n\n generator_train_spk2snd = Reconstruction_Generator(filepath_input=filepath_spikes_train,\n filepath_output=filepath_stim_train,\n batch_size=batch_size)\n generator_test_spk2snd = 
Reconstruction_Generator(filepath_input=filepath_spikes_test,\n filepath_output=filepath_stim_test,\n batch_size=batch_size)\n\n generator_train_snd2snd = Reconstruction_Generator(filepath_input=filepath_stim_train,\n filepath_output=filepath_stim_train,\n batch_size=batch_size)\n generator_test_snd2snd = Reconstruction_Generator(filepath_input=filepath_stim_test,\n filepath_output=filepath_stim_test,\n batch_size=batch_size)\n\n elif data_type == 'cpc_prediction':\n\n generator_train_spk2snd = CPC_Generator(\n sound_shape=sound_shape,\n spike_shape=spike_shape,\n filepath_input=filepath_spikes_train,\n filepath_output=filepath_stim_train,\n batch_size=batch_size,\n terms=terms,\n predict_terms=predict_terms)\n generator_test_spk2snd = CPC_Generator(\n sound_shape=sound_shape,\n spike_shape=spike_shape,\n filepath_input=filepath_spikes_test,\n filepath_output=filepath_stim_test,\n batch_size=batch_size,\n terms=terms,\n predict_terms=predict_terms)\n\n generator_train_snd2snd = CPC_Generator(\n sound_shape=sound_shape,\n spike_shape=spike_shape,\n filepath_input=filepath_stim_train,\n filepath_output=filepath_stim_train,\n batch_size=batch_size,\n terms=terms,\n predict_terms=predict_terms)\n generator_test_snd2snd = CPC_Generator(\n sound_shape=sound_shape,\n spike_shape=spike_shape,\n filepath_input=filepath_stim_test,\n filepath_output=filepath_stim_test,\n batch_size=batch_size,\n terms=terms,\n predict_terms=predict_terms)\n\n elif data_type == 'random':\n\n generator_train_spk2snd = Random_Generator(\n sound_shape=sound_shape,\n spike_shape=spike_shape,\n filepath_input=filepath_spikes_train,\n filepath_output=filepath_stim_train,\n batch_size=batch_size)\n generator_test_spk2snd = Random_Generator(\n sound_shape=sound_shape,\n spike_shape=spike_shape,\n filepath_input=filepath_spikes_test,\n filepath_output=filepath_stim_test,\n batch_size=batch_size)\n\n generator_train_snd2snd = Random_Generator(\n sound_shape=sound_shape,\n spike_shape=spike_shape,\n filepath_input=filepath_stim_train,\n filepath_output=filepath_stim_train,\n batch_size=batch_size)\n generator_test_snd2snd = Random_Generator(\n sound_shape=sound_shape,\n spike_shape=spike_shape,\n filepath_input=filepath_stim_test,\n filepath_output=filepath_stim_test,\n batch_size=batch_size)\n\n else:\n raise NotImplementedError\n\n return generator_train_spk2snd, generator_test_spk2snd, generator_train_snd2snd, generator_test_snd2snd\n\n\nif __name__ == '__main__':\n get_innvestigate_analyzers()","sub_path":"old_codes/convenience_tools.py","file_name":"convenience_tools.py","file_ext":"py","file_size_in_byte":10056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"480811045","text":"#! 
/usr/bin/env python2\n# -*- coding: utf-8 -*-\nfrom __future__ import (unicode_literals, absolute_import, division)\n\nimport base64\nfrom bs4 import BeautifulSoup\nimport pickle\nimport requests\nfrom types import MethodType\nimport datetime as dt\nimport HTMLParser\n\nfrom lib import constants as const\nfrom lib import utils\nfrom lib import kodi\nfrom lib import mediatypes\n\n\nparser = HTMLParser.HTMLParser()\n\n\nclass RequestsSession(object):\n def __init__(self):\n self.session = requests.Session()\n\n def load_cookies(self):\n try:\n with open(utils.os_join(const.userdatafolder, \"cookies\"), 'rb') as f:\n self.session.headers = pickle.load(f)\n except IOError:\n pass\n\n def save_cookies(self):\n with open(utils.os_join(const.userdatafolder, \"cookies\"), 'wb') as f:\n pickle.dump(self.session.headers, f)\n\n def soup(self, req):\n return BeautifulSoup(req.text)\n\n def get(self, url, **kwargs):\n kodi.log(\"LOADING: %s\" % url)\n req = self.session.get(url, timeout=30, **kwargs)\n req.soup = MethodType(self.soup, req)\n return req\n\n def post(self, url, hidden=False, **kwargs):\n kodi.log(\"LOADING: %s\" % url)\n if kwargs and not hidden:\n kodi.log(\"Payload: %s\" % kwargs)\n req = self.session.post(url, timeout=30, **kwargs)\n req.soup = MethodType(self.soup, req)\n return req\n\n def login(self, deviceid):\n kodi.log(\"logging in\")\n username = kodi.settings[\"username\"]\n passw = kodi.settings[\"password\"]\n if not username or not passw:\n raise Exception('Username or password not specified')\n payload = \"%sweb\" % deviceid\n loginpage = self.post('https://api-hbon.hbo.clearleap.com:443/cloffice/client/device/login',\n auth=(username, base64.b64encode(passw)), data=payload, hidden=True).soup()\n if loginpage.find(\"status\").text != \"Success\":\n kodi.log(loginpage.text, debug=True)\n raise Exception(\"Login attempt failed\")\n token = loginpage.find(\"token\").text\n self.session.headers.update({\n \"X-Clearleap-DeviceToken\": token,\n \"X-Clearleap-DeviceId\": deviceid\n })\n\n def setup(self):\n kodi.log(\"checking login status\")\n browsepage = self.get(\"https://api-hbon.hbo.clearleap.com/cloffice/client/web/browse/\")\n if not browsepage.soup().find(text=\"Watchlist\"):\n deviceid = browsepage.headers['x-clearleap-transactionid']\n self.login(deviceid)\n browsepage = self.get(\"https://api-hbon.hbo.clearleap.com/cloffice/client/web/browse/\")\n return browsepage.soup()\n\n def get_watchlist(self):\n browsepage = self.setup()\n watchlistentry = browsepage.find(text=\"Watchlist\").parent.parent\n watchlisturl = watchlistentry.link.text\n watchlistpage = self.get(watchlisturl).soup()\n available_movies = set()\n available_shows = set()\n for media in watchlistpage.find_all(\"item\"):\n mediatype = \"show\" if media.find(\"clearleap:series\") is not None else \"movie\"\n if mediatype == \"movie\":\n title = parser.unescape(media.title.text)\n urlid = parser.unescape(media.guid.text)\n metadata = get_movie_metadata(media)\n movie = mediatypes.ScrapedMovie(urlid, title, metadata)\n available_movies.add(movie)\n elif mediatype == \"show\":\n title = parser.unescape(media.find(\"clearleap:series\").text)\n seasonurlid = media.find(\"clearleap:parentguid\").text\n available_shows.add(mediatypes.ScrapedShow(seasonurlid=seasonurlid, title=title))\n if not (available_movies or available_shows):\n raise Exception(\"No media found in watchlist\")\n return available_movies, available_shows\n\n\ndef get_showdata_episodes(show):\n reqs = RequestsSession()\n episodes = set()\n if 
show.urlid is None:\n parentseasonpage = reqs.get(\"https://api-hbon.hbo.clearleap.com:443/cloffice/client/web/browse/%s\" % show.seasonurlid).soup()\n show.urlid = parentseasonpage.find('clearleap:parentguid').text\n showpage = reqs.get(\"https://api-hbon.hbo.clearleap.com/cloffice/client/web/browse/%s\" % show.urlid).soup()\n for season in showpage.find_all(\"item\"):\n seasonpage = reqs.get(season.link.text).soup()\n for epinfo in seasonpage.find_all(\"item\"):\n seasonnr = int(epinfo.find('clearleap:season').text)\n episodenr = int(epinfo.find('clearleap:episodeinseason').text)\n urlid = epinfo.guid.text\n metadata = get_episode_metadata(epinfo)\n episode = mediatypes.ScrapedEpisode(show=show, seasonnr=seasonnr, episodenr=episodenr, urlid=urlid, metadata=metadata)\n episodes.add(episode)\n if not episodes:\n raise Exception(\"No episodes found in show page\")\n show_metadata = get_show_metadata(showpage, seasonpage)\n return show_metadata, episodes\n\n\ndef get_movie_metadata(movinfo):\n poster, fanart = sorted(movinfo.find_all(\"media:thumbnail\", width=True, height=True),\n key=lambda item: int(item[\"width\"]) * int(item[\"height\"]))[-2:]\n metadata = {\n \"year\": int(movinfo.find(role=\"year\").text),\n \"plot\": parser.unescape(movinfo.find(\"description\").text),\n \"runtime\": dt.timedelta(seconds=int(movinfo.find('media:content')['duration'])),\n \"genre\": parser.unescape(movinfo.find(\"media:keywords\").text),\n \"fanart\": fanart[\"url\"],\n \"poster\": poster[\"url\"],\n }\n return metadata\n\n\ndef get_show_metadata(showinfo, seasoninfo):\n poster, fanart = sorted(showinfo.find_all(\"media:thumbnail\", width=True, height=True),\n key=lambda item: int(item[\"width\"]) * int(item[\"height\"]))[-2:]\n metadata = {\n \"plot\": parser.unescape(showinfo.find(\"description\").text),\n \"year\": int(seasoninfo.find(role=\"year\").text),\n \"fanart\": fanart[\"url\"],\n \"poster\": poster[\"url\"],\n \"genre\": parser.unescape(seasoninfo.find(\"media:keywords\").text),\n }\n return metadata\n\n\ndef get_episode_metadata(epinfo):\n thumb = sorted(epinfo.find_all(\"media:thumbnail\", width=True, height=True),\n key=lambda item: int(item[\"width\"]) * int(item[\"height\"]))[-1]\n metadata = {\n \"title\": parser.unescape(epinfo.find(\"title\").text),\n \"plot\": parser.unescape(epinfo.find(\"description\").text),\n \"runtime\": dt.timedelta(seconds=int(epinfo.find('media:content')['duration'])),\n \"thumb\": thumb[\"url\"],\n }\n return metadata\n","sub_path":"lib/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":6764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"496636793","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('oficina', '0003_remove_oficina_estado_atencion'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='oficina',\n name='nombre',\n field=models.CharField(help_text=b'Nombre', unique=True, max_length=120),\n ),\n ]\n","sub_path":"apps/oficina/migrations/0004_auto_20160310_2308.py","file_name":"0004_auto_20160310_2308.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"438861255","text":"# https://leetcode.com/problems/remove-duplicates-from-sorted-array/description/\r\n\r\nclass Solution(object):\r\n def removeDuplicates(self, nums):\r\n 
\"\"\"\r\n        :type nums: List[int]\r\n        :rtype: int\r\n        \"\"\"\r\n        r = len(nums)\r\n        i = 0\r\n        if r > 1:\r\n            for j in range(1, len(nums)):\r\n                if nums[j] != nums[i]:\r\n                    i += 1\r\n                    nums[i] = nums[j]\r\n            r = i + 1 # Returning length, not the last index\r\n        return r\r\n\r\ns = Solution()\r\nprint(s.removeDuplicates([1,1,2]))","sub_path":"Leetcode/sorted_array_duplicates.py","file_name":"sorted_array_duplicates.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"160150887","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nleasie = cv2.imread(\"Melanoma.png\")\ncolor_score = 0\nleasie = cv2.cvtColor(leasie, cv2.COLOR_BGR2RGB)\n\n# intervals\nlight_brown_higher_range = (109, 60, 46)\nlight_brown_lower_range = (92, 64, 51)\n\ndark_brown_higher_range = (92, 64, 51)\ndark_brown_lower_range = (43, 30, 24)\n\nwhite_higher_range = (255, 255, 255)\nwhite_lower_range = (217, 217, 217)\n\nred_higher_range = (255, 77, 77)\nred_lower_range = (154, 0, 0)\n\nblue_grey_higher_range = (144, 168, 180)\nblue_grey_lower_range = (69, 91, 102)\n\nblack_higher_range = (38, 38, 38)\nblack_lower_range = (0, 0, 0)\n\n# mask of colour\nmask_light_brown = cv2.inRange(leasie, light_brown_lower_range, light_brown_higher_range)\nmask_dark_brown = cv2.inRange(leasie, dark_brown_lower_range, dark_brown_higher_range)\nmask_white = cv2.inRange(leasie, white_lower_range, white_higher_range)\nmask_red = cv2.inRange(leasie, red_lower_range, red_higher_range)  # was (higher, higher), an empty range that masked nothing\nmask_blue_grey = cv2.inRange(leasie, blue_grey_lower_range, blue_grey_higher_range)\nmask_black = cv2.inRange(leasie, black_lower_range, black_higher_range)\nmask_negative_space = cv2.inRange(leasie, (0, 0, 0), (0, 0, 0))\nfinal_mask_black = mask_black - mask_negative_space\n\n# show images\n\nfig, ax = plt.subplots(3, 2)\nplt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.8)\nfig.suptitle('Masks of colours', fontsize=14)\n\nax[0, 0].imshow(mask_light_brown, cmap=\"gray\")\nax[0, 1].imshow(mask_dark_brown, cmap=\"gray\")\nax[1, 0].imshow(mask_white, cmap=\"gray\")\nax[1, 1].imshow(final_mask_black, cmap=\"gray\")\nax[2, 0].imshow(mask_red, cmap=\"gray\")\nax[2, 1].imshow(mask_blue_grey, cmap=\"gray\")\n\nax[0, 0].set_title(\"light brown\")\nax[0, 1].set_title(\"dark brown\")\nax[1, 0].set_title(\"white\")\nax[1, 1].set_title(\"black\")\nax[2, 0].set_title(\"red\")\nax[2, 1].set_title(\"blue grey\")\n\n\n# area colours\npix_melanoma = np.sum(leasie > 0)\npix_light_brown = np.sum(mask_light_brown == 255)\npix_dark_brown = np.sum(mask_dark_brown == 255)\npix_white = np.sum(mask_white == 255)\npix_red = np.sum(mask_red == 255)\npix_blue_gray = np.sum(mask_blue_grey == 255)\npix_black = np.sum(final_mask_black == 255)\n\n#counting system\nfor i in [pix_dark_brown, pix_light_brown, pix_white, pix_red, pix_blue_gray, pix_black]:\n    if (i/pix_melanoma) >= 0.05:\n        color_score += 1\n\nprint(\"Your 'C' score is: \", color_score)\n\nif color_score == 0:\n    #consequence\n    print()\n\nif color_score == 1:\n    #consequence \n    print()\n\nif color_score == 2:\n    #consequence \n    print()\n\nif color_score == 3:\n    #consequence \n    print()\n\nif color_score == 4:\n    #consequence \n    print()\n\nif color_score == 5:\n    #consequence \n    print()\n\nif color_score == 6:\n    #consequence \n    
print()\n","sub_path":"Colour-evaluation.py","file_name":"Colour-evaluation.py","file_ext":"py","file_size_in_byte":2755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"453759340","text":"import sys\nimport numpy as np\nimport pandas as pd\nimport py3Dmol\nfrom biopandas.pdb import PandasPdb\nfrom scipy.spatial import distance_matrix\n\n\ndef ExtractProbMap(resultsCOORDfile, coord_toexclude = 1):\n \"\"\" Extract probability map from coordination file\n obtained with LINK analysis (output of LINKanalysis.py )\n \n Parameters\n ----------\n resultsCOORDfile : str\n path of the probability map file of interest\n\n coord_toexclude : int\n coordinations equal or lower than COORD_THRESHOLD are excluded\n \n Returns\n -------\n ProbMap : list\n Probability map containing [0] coordination (1-letter codes for AA)\n [1] probability \n \n \"\"\"\n CoordFile = open(resultsCOORDfile, 'r')\n COORD_THRESHOLD = coord_toexclude\n ln = 0\n ProbMap = [[],[]]\n for line in CoordFile:\n fileline = line.split(';')[:-1]\n if(ln==0):\n for c in range(1,len(fileline)):\n ProbMap[0].append(fileline[c])\n else:\n for c in range(1,len(fileline)):\n if(ln==1):\n ProbMap[1].append(int(fileline[c]))\n else:\n ProbMap[1][c-1]+=int(fileline[c])\n ln+=1 \n toremove = []\n for i in range(0,len(ProbMap[0])): \n if(len(ProbMap[0][i])<=COORD_THRESHOLD):\n toremove.append(i)\n\n for i in range(0,len(toremove)):\n index = toremove[i]-i\n ProbMap[0].pop(index)\n ProbMap[1].pop(index)\n\n CoordFile.close()\n #PrbMap now contains coordinations observed with corresponding occurrency\n Tot = sum(ProbMap[1])\n for i in range(0, len(ProbMap[1])):\n ProbMap[1][i] /= Tot\n #PrbMap now contains coordinations observed with corresponding probability\n\n return ProbMap\n\ndef ProteinRead(pdb_file, Include_dAA = True, IncludeWATER = False):\n \"\"\" Read pdb file using biopandas library \n excluding non-standard amino acids\n \n Parameters\n ----------\n pdb_file : str\n name of pdb file to analyze\n\n Include_dAA : bool\n Flag for D-Amino acids:\n if True, considered with probabilities of corresponding L-ones\n if False, excluded (considered as non-standard) \n\n IncludeWATER : bool\n Flag for water molecules present in pdb structure \n probability map from LINK analysis contains waters (when resolved)\n if True water molecules considered and scored as done with other AA \n if False water molecules in the structure excluded\n Note: False by default due to low reliability of water location in structures\n used to build the probability map\n and to difficulty of placing water molecules (expecially buried ones) \n in new structures, expecially when also metal ions are present \n \n Returns\n -------\n ppdb_ATOM : pandas DataFrame\n biopandas dataset with amino acids (and water) of the input structure considered \n\n Chains : list\n chains in structure (used for scoring part)\n\n \"\"\"\n # structure from input file or fetched if not present\n if(pdb_file[-4:] == '.pdb' or pdb_file[-3:] == '.gz'):\n ppdb = PandasPdb().read_pdb(pdb_file)\n else:\n ppdb = PandasPdb().fetch_pdb(pdb_file)\n \n # lists for standard and d-AA used to save structure to dataset \n standardAA = ['ALA','ARG','ASN','ASP','CYS','GLN','GLU','GLY','HIS','ILE','LEU','LYS','MET','PHE','PRO','SER','THR','TRP','TYR','VAL']\n d_AA = ['DAL','DAR','DSG','DAS','DCY','DGN','DGL','GLY','DHI','DIL','DLE','DLY','MED','DPN','DPR','DSN','DTH','DTR','DTY','DVA']#scan takes into account only standard amino acids\n\n for aa in 
standardAA: #ATOM entries, excluding water molecules \n if(aa==standardAA[0]):\n ppdb_ATOM = ppdb.df['ATOM'][ppdb.df['ATOM']['residue_name'] == aa] \n else:\n ppdb_ATOM = pd.concat([ppdb_ATOM, ppdb.df['ATOM'][ppdb.df['ATOM']['residue_name'] == aa]], ignore_index=True) \n\n if(Include_dAA):\n for i in range(0,len(d_AA)): \n if(d_AA[i]!='GLY'):\n ppdb_d_AA = pd.concat([ppdb.df['ATOM'][ppdb.df['ATOM']['residue_name'] == d_AA[i]],ppdb.df['HETATM'][ppdb.df['HETATM']['residue_name'] == d_AA[i]]], ignore_index=True)\n pd.options.mode.chained_assignment = None \n ppdb_d_AA['residue_name'].iloc[:] = standardAA[i] #dAA considered as standard one for scan \n ppdb_ATOM = pd.concat([ppdb_ATOM, ppdb_d_AA], ignore_index=True) \n\n ppdb_PROTEIN = ppdb_ATOM #protein atoms saved here \n ppdb_WATER = pd.concat([ppdb.df['HETATM'][ppdb.df['HETATM']['residue_name'] == 'HOH'],ppdb.df['ATOM'][ppdb.df['ATOM']['residue_name'] == 'HOH'],ppdb.df['HETATM'][ppdb.df['HETATM']['residue_name'] == 'WAT'],ppdb.df['ATOM'][ppdb.df['ATOM']['residue_name'] == 'WAT']], ignore_index=True) #oxygen atoms of water molecules\n #can be both HETATM (standard pdb file) or ATOM (vmd output)\n if(len(ppdb_WATER)>0 and IncludeWATER):\n pd.options.mode.chained_assignment = None \n ppdb_WATER['residue_name'].iloc[:] = 'HOH'\n ppdb_WATER['chain_id'].iloc[:] = 'water'\n ppdb_ATOM = pd.concat([ppdb_ATOM, ppdb_WATER], ignore_index=True)\n\n Chains = []\n for i in range(0,len(ppdb_ATOM)):\n if(ppdb_ATOM['chain_id'].iloc[i] in Chains):\n continue\n else:\n Chains.append(ppdb_ATOM['chain_id'].iloc[i]) \n return ppdb_ATOM, Chains\n\ndef RefAtom(Residue):\n \"\"\" Assign reference atom(s) from which to search for neighbouring residues\n\n Parameters\n ----------\n Residue : str\n 3-letter code for AA \n \n Returns\n -------\n REF : list\n reference atom(s) for given residue\n \"\"\"\n\n RES = ['HOH','ALA','ARG','ASN','ASP','CYS','GLN','GLU','GLY','HIS','ILE','LEU','LYS','MET','PHE','PRO','SER','THR','TRP','TYR','VAL']\n REF = [['O'],['O'], ['NH1','NH2'], ['OD1'], ['OD1', 'OD2'], ['SG'], ['OE1'], ['OE1', 'OE2'], ['O'], ['ND1', 'NE2'], ['O'], ['O'], ['NZ'], ['SD'], ['O'], ['O'], ['OG'], ['OG1'], ['O'], ['OH'], ['O']]\n return REF[RES.index(Residue)][:]\n\ndef res_1Letter(RES):\n \"\"\" Convert 3letter code AA to 1letter code AA\n\n Parameters\n ----------\n RES : str\n 3-letter code AA \n \n Returns\n -------\n R_LetterCode : str\n 1letter code AA\n \"\"\"\n RES_LetterCode = ['HOH', 'ALA','ARG','ASN','ASP','CYS','GLN','GLU','GLY','HIS','ILE','LEU','LYS','MET','PHE','PRO','SER','THR','TRP','TYR','VAL']\n R_LetterCode = ['O', 'A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V']\t\n return R_LetterCode[RES_LetterCode.index(RES)]\n\ndef ScoreLoc(Loc, ProbMap):\n \"\"\" Score a location based on the probability map\n score computed as sum of probabilities in ProbMap\n of coordinations compatible with the observed one\n \n Parameters\n ----------\n Loc : str\n local coordination in the structure\n\n ProbMap : list\n Probability map containing [0] coordination (1-letter codes for AA)\n [1] probability \n\n Returns\n -------\n Score : float\n score [0,1] of the given Loc\n\n \"\"\"\n Score = 0.\n for i in range(0,len(ProbMap[0])):\n PotentialCoord = ProbMap[0][i]\n LocTest = Loc\n Flag = False\n for c in PotentialCoord:\n if c in LocTest:\n LocTest = LocTest.replace(c, '', 1)\n Flag = True\n else:\n Flag = False\n break\n if (Flag):\n Score += ProbMap[1][i]\n return Score\n\n # Chemical element specified in output 
file\n\n\ndef SitesPredict(Chain, pdb_file, ppdb_ATOM, ProbMap, ScoreThreshold = 0.75, SearchRadius = 5.5, ChemicalElement = 'H'):\n    \"\"\" Scan all residues in each chain\n        (scoring based on nearby residue in the same and other chains)\n        and site determination as a weighted average between the residues with higher score\n        where weights are given from scores\n        Each prediction is written in a temporary output file \n    \n    Parameters\n    ----------\n    Chain: str\n        code for chain to scan\n\n    pdb_file : str\n        name of pdb file to analyze\n\n    ProbMap : list\n        Probability map containing [0] coordination (1-letter codes for AA)\n        [1] probability \n    ScoreThreshold : float\n        prediction done considering residues within ScoreThreshold% \n        of the highest-scored one\n        Default (0.75) resulted to be the best compromise \n        between sites found and false positives for ZN testset \n    \n    SearchRadius : float\n        Radius used to perform search around each amino acid\n        For a given metal with d_{M-L} = D --> SearchRadius = 2*D + eps\n        where eps accounts for possible rearrangements/structure relaxation\n        Default (5.5) from average LINK distance of 2.2+-0.2 for ZN structures\n\n    ChemicalElement : str\n        Chemical element in output file\n        Default H, changing it does not affect prediction\n        only for visualization (proper vdw radius)\n\n    \"\"\"\n\n    ResStart = min(ppdb_ATOM['residue_number'][ppdb_ATOM['chain_id']==Chain]) #First residue number\n    ResStop = max(ppdb_ATOM['residue_number'][ppdb_ATOM['chain_id']==Chain]) #Last residue number\n    LOCAL_coord = []\n    Score = [[],[]]\n\n    console_output = sys.stdout \n    if(pdb_file[-4:] == '.pdb'):\n        OutFile = open(pdb_file[:-4].replace('../','')+'_PredictedSites.xyz', 'a') #append to the file created by CreateOutFile\n    elif(pdb_file[-3:] == '.gz'):\n        OutFile = open(pdb_file[:-7].replace('../','')+'_PredictedSites.xyz', 'a') #append to the file created by CreateOutFile\n    else:\n        OutFile = open(pdb_file.replace('../','')+'_PredictedSites.xyz', 'a') #append to the file created by CreateOutFile\n    OutFile.close()\n\n    #Scan all protein structure assigning scores to each residue\n    for i in range(ResStart, ResStop+1):\n        # Scan one residue per time \n        # Missing residues present in the pdb structure are skipped\n        try: \n            ppdb_RES = ppdb_ATOM[ppdb_ATOM['residue_number']==i] # Atoms of residue considered\n            ppdb_RES = ppdb_RES[ppdb_RES['chain_id']==Chain]\n            RES = ppdb_RES['residue_name'].iloc[0]\n            REF = RefAtom(RES) # reference atoms(s) for residue considered \n            reference_coord = [0.0, 0.0, 0.0]\n            add_SearchRadius = 0.0 # For residues with more reference points\n            # SearchRadius augmented, adding distance from midpoint\n            if(len(REF)==1): \n                reference_coord = [ppdb_RES['x_coord'][ppdb_RES['atom_name']==str(REF[0])].iloc[0], ppdb_RES['y_coord'][ppdb_RES['atom_name']==str(REF[0])].iloc[0], 
ppdb_RES['z_coord'][ppdb_RES['atom_name']==str(REF[0])].iloc[0]] \n else:\n # if more than one atom is used as reference, the mean position is calculated as reference point\n for r in range(0,len(REF)):\n reference_coord[0] += (ppdb_RES['x_coord'][ppdb_RES['atom_name']==str(REF[r])].iloc[0])\n reference_coord[1] += (ppdb_RES['y_coord'][ppdb_RES['atom_name']==str(REF[r])].iloc[0])\n reference_coord[2] += (ppdb_RES['z_coord'][ppdb_RES['atom_name']==str(REF[r])].iloc[0]) \n reference_coord[0] /= len(REF)\n reference_coord[1] /= len(REF)\n reference_coord[2] /= len(REF)\n dist = 0.0\n for r in range(0,len(REF)):\n dist += (reference_coord[0]-(ppdb_RES['x_coord'][ppdb_RES['atom_name']==str(REF[r])].iloc[0]))**2+(reference_coord[1]-(ppdb_RES['y_coord'][ppdb_RES['atom_name']==str(REF[r])].iloc[0]))**2+(reference_coord[2]-(ppdb_RES['z_coord'][ppdb_RES['atom_name']==str(REF[r])].iloc[0]))**2 \n dist = np.sqrt(dist)\n add_SearchRadius += dist\n add_SearchRadius /= len(REF)\n \n # Distance from reference point to any other atom of the structure\n distances = PandasPdb.distance_df(ppdb_ATOM, xyz=(reference_coord[0],reference_coord[1],reference_coord[2]))\n ppdb_LOCAL = ppdb_ATOM[distances <= (SearchRadius + add_SearchRadius)]\n ppdb_LOCAL = ppdb_LOCAL[ppdb_LOCAL['residue_number']!=i]\t\n \n # Save local environment for residue considered\n # Other residues in LOCAL if their reference point(s) distance from the reference_coord\n # within SearchRadius+add_SearchRadius threshold\n LOCAL = [[RES],[i]]\n for l in ppdb_LOCAL['atom_number']:\n l_index = int(ppdb_LOCAL[ppdb_LOCAL['atom_number']==l].index.to_numpy())\n if(ppdb_LOCAL['atom_name'].loc[l_index] in RefAtom(ppdb_LOCAL['residue_name'].loc[l_index])):\n LOCAL[0].append(ppdb_LOCAL['residue_name'].loc[l_index])\n LOCAL[1].append(ppdb_LOCAL['residue_number'].loc[l_index])\n\n # Conversion to 1-letter format, alphabetically sorted\n for l in range(0,len(LOCAL[0])):\n LOCAL[0][l] = res_1Letter(LOCAL[0][l])\n LOCAL_coord.append(''.join(sorted(LOCAL[0])))\n # Score vector with score for each residue considered\n # assigned from ProbMap\n Score[0].append(i)\n Score[1].append(ScoreLoc(LOCAL_coord[-1], ProbMap))\n except:\n continue\n sortedlist = sorted(zip(Score[1][:],Score[0][:]), reverse=True)\n \n coeffScore = 1. - ScoreThreshold \n highestscoredRES = [] # residue number, score, ref. 
point coord \n for i in range(0,len(Score[0])):\n if(sortedlist[[i][0]][0]>=coeffScore*sortedlist[[0][0]][0]):\n x, y, z = 0.0, 0.0, 0.0\n ppdb_RES = ppdb_ATOM[ppdb_ATOM['residue_number']==sortedlist[[i][0]][1]]\n ppdb_RES = ppdb_RES[ppdb_RES['chain_id']==Chain]\n for l in range(0,len(RefAtom(ppdb_RES['residue_name'].iloc[0]))):\n # last iloc[0] to consider only first in case of alt_loc\n x += float(ppdb_RES['x_coord'][ppdb_RES['atom_name']==RefAtom(ppdb_RES['residue_name'].iloc[0])[l]].iloc[0])\n y += float(ppdb_RES['y_coord'][ppdb_RES['atom_name']==RefAtom(ppdb_RES['residue_name'].iloc[0])[l]].iloc[0])\n z += float(ppdb_RES['z_coord'][ppdb_RES['atom_name']==RefAtom(ppdb_RES['residue_name'].iloc[0])[l]].iloc[0])\n x /= len(RefAtom(ppdb_RES['residue_name'].iloc[0]))\n y /= len(RefAtom(ppdb_RES['residue_name'].iloc[0]))\n z /= len(RefAtom(ppdb_RES['residue_name'].iloc[0]))\n highestscoredRES.append([sortedlist[[i][0]][1],sortedlist[[i][0]][0], x, y, z])\n highestscoredRES_df = pd.DataFrame([row[2:5] for row in highestscoredRES], columns=['x','y','z'], index=[row[0] for row in highestscoredRES])\n highestscoredRES_dmatrix = pd.DataFrame(distance_matrix(highestscoredRES_df.values,highestscoredRES_df.values), index=highestscoredRES_df.index, columns=highestscoredRES_df.index)\n\n # nearby (closer than 2*Searchradius) residues grouped in clusters\n # producing 1 predicted site for each group\n Clusters = []\n possibleCluster = []\n for rw in range(0,len(highestscoredRES_dmatrix.index.values)):\n possibleCluster = []\n for cl in range(0,len(highestscoredRES_dmatrix.index.values)):\n if(any(highestscoredRES_dmatrix.index.values[cl] in row for row in Clusters)):\n continue \n else:\n if(highestscoredRES_dmatrix.values[rw][cl]<2*SearchRadius): \n if(any(highestscoredRES_dmatrix.index.values[rw] in row for row in Clusters)):\n # residue added to group already existing\n Clusters[[Clusters.index(row) for row in Clusters if (highestscoredRES_dmatrix.index.values[rw]) in row][0]].append(highestscoredRES_dmatrix.index.values[cl])\n else:\n possibleCluster.append(highestscoredRES_dmatrix.index.values[cl])\n if(len(possibleCluster)>0):\n Clusters.append(possibleCluster)\n # highestscoredRES contains RES number, score, x_pos, y_pos, z_pos for each residue\n # in case of residues with more than one reference atom, midpoint is calculated\n for i in range(0,len(Clusters)):\n if(len(Clusters[i][:])==1):\n # append closest highest scored within threshold\n for j in range(0,len(Score[0])): \n if(sortedlist[[j][0]][1] != Clusters[i][0]):\n x, y, z = 0.0, 0.0, 0.0\n ppdb_RES = ppdb_ATOM[ppdb_ATOM['residue_number']==sortedlist[[j][0]][1]]\n ppdb_RES = ppdb_RES[ppdb_RES['chain_id']==Chain]\n for l in range(0,len(RefAtom(ppdb_RES['residue_name'].iloc[0]))):\n #last iloc[0] to consider only first in case of alt_loc\n x += float(ppdb_RES['x_coord'][ppdb_RES['atom_name']==RefAtom(ppdb_RES['residue_name'].iloc[0])[l]].iloc[0])\n y += float(ppdb_RES['y_coord'][ppdb_RES['atom_name']==RefAtom(ppdb_RES['residue_name'].iloc[0])[l]].iloc[0])\n z += float(ppdb_RES['z_coord'][ppdb_RES['atom_name']==RefAtom(ppdb_RES['residue_name'].iloc[0])[l]].iloc[0])\n x /= len(RefAtom(ppdb_RES['residue_name'].iloc[0]))\n y /= len(RefAtom(ppdb_RES['residue_name'].iloc[0]))\n z /= len(RefAtom(ppdb_RES['residue_name'].iloc[0]))\n x_c = ([row[2] for row in highestscoredRES])[([row[0] for row in highestscoredRES]).index(Clusters[i][0])]\n y_c = ([row[3] for row in highestscoredRES])[([row[0] for row in highestscoredRES]).index(Clusters[i][0])]\n 
z_c = ([row[4] for row in highestscoredRES])[([row[0] for row in highestscoredRES]).index(Clusters[i][0])]\n if(np.sqrt((x-x_c)**2+(y-y_c)**2+(z-z_c)**2)<2*(SearchRadius)):\n Clusters[i].append(sortedlist[[j][0]][1])\n if(sortedlist[[j][0]][0]>=coeffScore*([row[1] for row in highestscoredRES])[([row[0] for row in highestscoredRES]).index(Clusters[i][0])]):\n highestscoredRES.append([sortedlist[[j][0]][1],sortedlist[[j][0]][0], x, y, z])\n else:\n highestscoredRES.append([sortedlist[[j][0]][1],coeffScore*([row[1] for row in highestscoredRES])[([row[0] for row in highestscoredRES]).index(Clusters[i][0])], x, y, z])\n break \n else:\n continue\n # For each cluster xyz coord for site predicted as weighted average\n # between positions of RefAtom for each residue\n # weight depending on the score of the residue\n # Each predicted location is scored based on surrounding residues\n # within 0.6*SearchRadius \n # if the site is farther than SearchRadius from any protein atom the site is discarded\n # (possible for sites predicted in bulk water)\n for j in range(0,len(Clusters)):\n x_avg, y_avg, z_avg = 0.0, 0.0, 0.0\n sumWeights = 0.0\n for k in range(0,len(Clusters[j])):\n resindex = [row[0] for row in highestscoredRES].index(Clusters[j][k])\n x_avg += highestscoredRES[resindex][1]*highestscoredRES[resindex][2]\n y_avg += highestscoredRES[resindex][1]*highestscoredRES[resindex][3]\n z_avg += highestscoredRES[resindex][1]*highestscoredRES[resindex][4]\n sumWeights += highestscoredRES[resindex][1]\n x_avg /= sumWeights \n y_avg /= sumWeights \n z_avg /= sumWeights\n\t# site discarded if farther than SearchRadius from any protein atom the site \n distances = PandasPdb.distance_df(ppdb_ATOM, xyz=(x_avg,y_avg,z_avg))\n if(min(distances)>SearchRadius):\n break \t\n # during the loop coordinations temporary written in the output file\n # edited at the end of the loop according to xyz files formatting\n # and sorting predicted sites based on score\n if(pdb_file[-4:] == '.pdb'):\n sys.stdout = OutFile = open(pdb_file[:-4].replace('../','')+'_PredictedSites.xyz', 'a')\n elif(pdb_file[-3:] == '.gz'):\n sys.stdout = OutFile = open(pdb_file[:-7].replace('../','')+'_PredictedSites.xyz', 'a')\n else:\n sys.stdout = OutFile = open(pdb_file.replace('../','')+'_PredictedSites.xyz', 'a')\n\n sys.stdout.write('\\n'+ChemicalElement+'\\t'+str(x_avg)+'\\t'+str(y_avg)+'\\t'+str(z_avg))\n # Score the predicted location based on surrounding residues\n distances = PandasPdb.distance_df(ppdb_ATOM, xyz=(x_avg,y_avg,z_avg))\n ppdb_LOCAL = ppdb_ATOM[distances <= 0.6*SearchRadius]\n SITE_coord = ''\n for l in ppdb_LOCAL.index.tolist():\n if(ppdb_LOCAL['atom_name'].loc[l] in RefAtom(ppdb_LOCAL['residue_name'].loc[l])):\n SITE_coord += str(res_1Letter(ppdb_LOCAL['residue_name'].loc[l])) \n # site score added last column in the temporary output file\n sys.stdout.write('\\t'+str(ScoreLoc(''.join(sorted(SITE_coord)), ProbMap)))\n OutFile.close()\n sys.stdout = console_output\n\n\n\ndef SortPredictions(pdb_file, ScoreThreshold = 0.75):\n \"\"\" Sorts predictions in the temporary output file of SitesPredict function\n xyz file formatting\n sites sorted based on score (final score for each site as #comment in xyz file)\n \n Parameters\n ----------\n pdb_file : str\n name of pdb file to analyze\n \n ScoreThreshold : float\n final re-scoring of sites excludes sites with score lower than\n ScoreThreshold% of the highest-scored one\n Default (0.75) resulted to be the best compromise \n between sites found and false positives for ZN testset 
\n\n \"\"\"\n console_output = sys.stdout \n #read temporary OutFile \n if(pdb_file[-4:] == '.pdb'):\n OutFile = open(pdb_file[:-4].replace('../','')+'_PredictedSites.xyz') \n elif(pdb_file[-3:] == '.gz'):\n OutFile = open(pdb_file[:-7].replace('../','')+'_PredictedSites.xyz') \n else:\n OutFile = open(pdb_file.replace('../','')+'_PredictedSites.xyz')\n PredictedSites = pd.read_table(OutFile, names = ['Element', 'x_coord', 'y_coord', 'z_coord', 'Score'])\n OutFile.close()\n #Open again for writing predicted sites location + score\n #according to xyz files formatting\n #sites sorted according to score, in descending order\n #score of the site added as comment after the coordinates (EL x_site y_site z_site #site_score)\n PredictedSites = PredictedSites.sort_values(by=['Score'], ascending=False)\n\n PredictedSites = PredictedSites[PredictedSites['Score']>(1-ScoreThreshold)*max(PredictedSites['Score'])] #site with score lower than ScoreThreshold% of higest one excluded\n Num_PredSites = len(PredictedSites)\n if(pdb_file[-4:] == '.pdb'):\n OutFile = open(pdb_file[:-4].replace('../','')+'_PredictedSites.xyz', 'w') \n elif(pdb_file[-3:] == '.gz'):\n OutFile = open(pdb_file[:-7].replace('../','')+'_PredictedSites.xyz', 'w') \n else:\n OutFile = open(pdb_file.replace('../','')+'_PredictedSites.xyz', 'w')\n OutFile.write(str(Num_PredSites)+'\\n\\n')\n for i in PredictedSites.index:\n OutFile.write(str(PredictedSites.loc[i]['Element'])+'\\t'+str(PredictedSites.loc[i]['x_coord'])+'\\t'+str(PredictedSites.loc[i]['y_coord'])+'\\t'+str(PredictedSites.loc[i]['z_coord'])+'\\t#'+str(PredictedSites.loc[i]['Score'])+'\\n')\n OutFile.close()\n\n sys.stdout = console_output\n print('----------')\n print('SCAN COMPLETED')\n print('\\tPredicted sites can be found in:')\n if(pdb_file[-4:] == '.pdb'):\n print('\\t'+pdb_file[:-4].replace('../','')+'_PredictedSites.xyz')\n elif(pdb_file[-3:] == '.gz'):\n print('\\t'+pdb_file[:-7].replace('../','')+'_PredictedSites.xyz')\n else:\n print('\\t'+pdb_file.replace('../','')+'_PredictedSites.xyz')\n print('----------')\n\n\n\ndef CreateOutFile(pdb_file):\n \"\"\" Output file initialization\n ovewrite any pre-existing file\n \n Parameters\n ----------\n pdb_file : str\n name of pdb file to analyze\n \n \"\"\" \n if(pdb_file[-4:] == '.pdb'):\n OutFile = open(pdb_file[:-4].replace('../','')+'_PredictedSites.xyz', 'w') #overwrite file if already present\n elif(pdb_file[-3:] == '.gz'):\n OutFile = open(pdb_file[:-7].replace('../','')+'_PredictedSites.xyz', 'w') #overwrite file if already present\n else:\n OutFile = open(pdb_file.replace('../','')+'_PredictedSites.xyz', 'w') #overwrite file if already present\n\n\n\ndef show_map(pdb,show_sticks_all=False, show_sticks_metalbinding=True, show_probes=True, show_pdb_metals=True):\n \"\"\" Show a protein using py3Dmol and the predicted metal sites\n \n Parameters\n ----------\n pdb : str\n Name of the pdb file\n show_sticks_all : bool\n If True, show all the residues as sticks in the protein\n show_sticks_metalbinding : bool\n If True, show the residues that are metal binding as sticks in the protein\n show_probes : bool\n If True, show predicted sites as filled spheres\n coloured according to probability \n show_pdb_metals : bool\n If True, show metal ions in the structure (if present) as transparent spheres \n\n Returns\n -------\n view : py3Dmol.view\n The view of the protein and the probes\n \"\"\"\n view=py3Dmol.view(width=1000, height=800)\n\n view.addModel(open(pdb+'.pdb', 'r').read(),'pdb')\n if show_probes:\n 
view.addModel(open(pdb+'_PredictedSites.xyz', 'r').read(),'xyz')\n probes = open(pdb+'_PredictedSites.xyz', 'r').readlines()\n if(int(probes[0])!=0):\n probabilities = [p.replace('#','').split()[-1] for p in probes[2:]] # read p from comment in xyz file\n colors = {}\n # use different colors for the probabilities\n for i,x in enumerate(probabilities):\n colors[i] = '#%02x%02x%02x' % (0, 128, int(float(x)/float(probabilities[0])*255))\n else: #no predicted site\n colors = [] \n view.addLabel(\"No probe predicted\", {'position': {'x':0, 'y':0, 'z':0}, 'backgroundColor': '#0080FF', 'fontColor': 'white'});\n \n view.zoomTo()\n view.setBackgroundColor('white')\n view.setStyle({},{'cartoon': {'color':'gray'}})\n if show_sticks_all:\n view.setStyle({}, {'stick':{},'cartoon': {'color':'gray'}})\n if show_pdb_metals:\n view.getModel(0).setStyle({'resn':\"ZN\"},{'sphere': {'opacity':.75}})\n view.getModel(0).setStyle({'resn':\"CA\"},{'sphere': {'opacity':.75}})\n view.getModel(0).setStyle({'resn':\"CU\"},{'sphere': {'opacity':.75}})\n view.getModel(0).setStyle({'resn':\"HG\"},{'sphere': {'opacity':.75}})\n view.getModel(0).setStyle({'resn':\"MG\"},{'sphere': {'opacity':.75}})\n view.getModel(0).setStyle({'resn':\"FE\"},{'sphere': {'opacity':.75}})\n view.getModel(0).setStyle({'resn':\"MN\"},{'sphere': {'opacity':.75}})\n view.getModel(0).setStyle({'resn':\"NI\"},{'sphere': {'opacity':.75}})\n view.getModel(0).setStyle({'resn':\"MB\"},{'sphere': {'opacity':.75}})\n \n if show_probes:\n view.getModel(1).setStyle({},{'sphere': {'colorscheme':{'prop':'index', 'map':colors}}})\n \n # add hoverable labels for the residues and the predicted metals\n # two callbacks are needed, one for the residues and one for the metals\n # the metal one displays the probability\n view.getModel(0).setHoverable({},True,'''function(atom,viewer,event,container) {\n if(!atom.label) {\n atom.label = viewer.addLabel(atom.resn+atom.resi+\":\"+atom.atom,{position: atom, backgroundColor: 'mintcream', fontColor:'black'});\n }}''',\n '''function(atom,viewer) { \n if(atom.label) {\n viewer.removeLabel(atom.label);\n delete atom.label;\n }\n }''')\n view.getModel(1).setHoverable({},True,'''function(atom,viewer,event,container) {\n if(!atom.label) {\n atom.label = viewer.addLabel(atom.atom+\" [\"+atom.serial+\"]\",{position: atom, backgroundColor: 'mintcream', fontColor:'black'});\n }}''',\n '''function(atom,viewer) { \n if(atom.label) {\n viewer.removeLabel(atom.label);\n delete atom.label;\n }\n }''')\n if show_sticks_metalbinding:\n view.setStyle({'resn':\"HIS\"},{'stick': {}, 'cartoon': {'color':'gray'}})\n view.setStyle({'resn':\"ASP\"},{'stick': {}, 'cartoon': {'color':'gray'}})\n view.setStyle({'resn':\"GLU\"},{'stick': {}, 'cartoon': {'color':'gray'}})\n view.setStyle({'resn':\"CYS\"},{'stick': {}, 'cartoon': {'color':'gray'}})\n\n return view.show()\n","sub_path":"Metal1D/utils/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":29171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"42397770","text":"import discord\r\nfrom discord.ext import commands\r\nimport requests\r\nimport pathlib\r\nimport datetime\r\nimport time\r\nfrom config import *\r\nbot = commands.Bot(command_prefix='!')\r\n\r\n@bot.event\r\nasync def on_ready():\r\n print('Bot connected:')\r\n print('Name: {}'.format(bot.user))\r\n print('ID: {}'.format(bot.user.id))\r\n\r\n@bot.event\r\nasync def on_message(message):\r\n if message.author.bot == False:\r\n if message.content.startswith 
in ignored:\r\n            pass\r\n        else:\r\n            if not message.attachments:\r\n                pathlib.Path(\"./servers/\" + str(message.guild.id) + \"/\").mkdir(parents=True, exist_ok=True)\r\n                pathlib.Path(\"./servers/\" + str(message.guild.id) + \"/channels/\").mkdir(parents=True, exist_ok=True)\r\n                pathlib.Path(\"./servers/\" + str(message.guild.id) + \"/channels/\" + \"images/\").mkdir(parents=True, exist_ok=True)\r\n                chatlog = open(\"./servers/\"+ str(message.guild.id) + \"/channels/\" + str(message.channel.id) + \"-\" + str(message.channel.name) + \".txt\", \"a+\")\r\n                t = time.strptime(str(message.created_at), \"%Y-%m-%d %H:%M:%S.%f\")\r\n                msg12hr = time.strftime( \"%b-%d-%Y %I:%M %p\", t )\r\n                chatlog.write(str(message.author) + \" | Message: \" + str(message.content) + \" | ID: \" + str(message.id) + \" | \" + str(msg12hr) + \" UTC\" + \"\\n\")\r\n                chatlog.close()\r\n                info = open(\"./servers/\"+ str(message.guild.id) + \"/\" \"info.txt\", \"w+\")\r\n                info.write(\"Name: \" + str(message.guild.name) + \"\\nOwner: \" + str(message.guild.owner) + \"\\nMembers: \" + str(message.guild.member_count))\r\n                info.close()\r\n            else:\r\n                try:\r\n                    pathlib.Path(\"./servers/\" + str(message.guild.id) + \"/\").mkdir(parents=True, exist_ok=True)\r\n                    pathlib.Path(\"./servers/\" + str(message.guild.id) + \"/channels/\").mkdir(parents=True, exist_ok=True)\r\n                    chatlog = open(\"./servers/\" + str(message.guild.id) + \"/channels/\" + str(message.channel.id) + \"-\" + str(message.channel.name) + \".txt\", \"a+\")\r\n                    t = time.strptime(str(message.created_at), \"%Y-%m-%d %H:%M:%S.%f\")\r\n                    msg12hr = time.strftime( \"%b-%d-%Y %I:%M %p\", t )\r\n                    chatlog.write(str(message.author) + \" | Message: \" + str(message.content) + \" | Image: \" + str(message.attachments[0].id) + \" | \" + str(msg12hr) + \" UTC\" + \"\\n\")\r\n                    chatlog.close()\r\n                    await message.attachments[0].save(\"./servers/\" + str(message.guild.id) + \"/channels/images/\"+ str(message.attachments[0].id) + \".png\")\r\n                except IndexError:\r\n                    pass\r\n\r\n\r\nbot.run(token, reconnect=True)\r\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}{"seq_id":"285578927","text":"import sys\nimport os\nfrom GoParser import GoParser\nfrom GoHtmlCreator import GoHtmlCreator\n\n\n# python main.py file ./test_files/string.go ./string_result.html\n# python main.py folder ./test_files/testDirectory ./result_directory\n# python main.py folder ./test_files/bank-vaults-master ./result_directory\n# python main.py folder ./test_files/fyne-master ./result_directory\n# python main.py folder ./test_files/helmfile-master ./result_directory\n# python main.py folder ./test_files/hydra-master ./result_directory\n# python main.py folder /media/deesthortered/Data/2.Programming/MetaprogrammingLab1/root/test_files/gogs-master /media/deesthortered/Data/2.Programming/MetaprogrammingLab1/root/result_directory\n# python main.py folder D:/2.Programming/MetaprogrammingLab1/root/test_files/gogs-master D:/2.Programming/MetaprogrammingLab1/root/result_directory\n# python main.py folder D:/2.Programming/MetaprogrammingLab1/root/test_files/go-master ./result_directory\n\ndef proceed_directory(input_directory, destination_directory):\n    if os.path.exists(input_directory):\n        if os.path.isdir(input_directory):\n            if not os.listdir(input_directory):\n                print(\"The input directory is empty.\")\n            else:\n                if os.path.isdir(destination_directory):\n                    print(\"The destination directory already exists.\")\n                    return False\n                else:\n                    return True\n        else:\n            
print(\"There is a file at this path, not a directory.\")\n            return False\n    else:\n        print(\"The input directory does not exist.\")\n        return False\n\n\ndef proceed_file(input_file, destination_file):\n    if os.path.exists(input_file):\n        if os.path.isfile(input_file):\n            if os.path.isfile(destination_file):\n                print(\"The destination file already exists.\")\n                return False\n            else:\n                return True\n        else:\n            print(\"There is a directory at this path, not a file.\")\n            return False\n    else:\n        print(\"The input file does not exist.\")\n        return False\n\n\nif __name__ == \"__main__\":\n    parser = GoParser()\n    htmlCreator = GoHtmlCreator()\n\n    if len(sys.argv) == 1:\n        print(\"This is the Golang Documentator.\")\n        print(\"Please, type \\\"--help\\\" for the usage manual.\")\n    elif len(sys.argv) == 4:\n\n        sys.argv[2] = sys.argv[2].replace('\\\\', '/')\n        sys.argv[3] = sys.argv[3].replace('\\\\', '/')\n        if sys.argv[2][-1] == '/':\n            sys.argv[2] = sys.argv[2][:-1]\n        if sys.argv[3][-1] == '/':\n            sys.argv[3] = sys.argv[3][:-1]\n\n        if sys.argv[1] == \"file\":\n            if proceed_file(sys.argv[2], sys.argv[3]):\n                parser.start_file(sys.argv[2], sys.argv[3])\n                print(\"Start build HTML\")\n                htmlCreator.create_file(sys.argv[3])\n                print(\"Done!!!\")\n        elif sys.argv[1] == \"folder\":\n            if proceed_directory(sys.argv[2], sys.argv[3]):\n                path_list = parser.start_folder(sys.argv[2], sys.argv[3])\n                print(\"Start build HTML\")\n                htmlCreator.create_folder(path_list, sys.argv[3])\n                print(\"Done!!!\")\n        else:\n            print(\"Unknown 2nd parameter.\")\n            print(\"Please, type \\\"--help\\\" for the usage manual.\")\n    elif len(sys.argv) == 2:\n        if sys.argv[1] == \"--help\":\n            print(\"Please enter the following parameters to launch the program:\")\n            print(\"1) Documentation target: \\\"file\\\" or \\\"folder\\\"\")\n            print(\"   If you chose File:\")\n            print(\"   2) Path to the target file (*.go)\")\n            print(\"   3) Path to the result file (any)\")\n            print(\"      Warning! The file must not already exist\")\n            print(\"   If you chose Folder:\")\n            print(\"   2) Path to the target folder\")\n            print(\"   3) Path to the destination folder, which will be created\")\n            print(\"      Warning! The folder must not already exist\")\n            print(\"      Warning!!! The folder must contain only *.go files and \\\"readme.txt\\\"\")\n        else:\n            print(\"Unknown parameter. 
Please, type \\\"--help\\\" for the usage manual.\")\n    else:\n        print(\"Wrong number of parameters.\")\n","sub_path":"root/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}{"seq_id":"82051584","text":"#!/usr/bin/env python\n\nimport rospy\nfrom std_msgs.msg import String\n\ndef bert(msg):\n\tprint(msg.data)\n\n\nrospy.init_node('bertsub')\nrospy.Subscriber('bert', String, bert, queue_size=10)\nrospy.spin()\n\n\n","sub_path":"decawave_driver/ubo_pkg/scripts/Formation/bert-sub.py","file_name":"bert-sub.py","file_ext":"py","file_size_in_byte":199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}{"seq_id":"377155715","text":"#-*-coding:utf-8-*-\n'''\nCatalog of all possible error codes\n'''\nconfig_error={\n    'success':0,\n    'userinvaild':1,# invalid user\n    'pwderror':2, #wrong password\n    'moneyerror':3 ,#not enough coins\n    'notrookie': 4,  # rookie reward already claimed\n    'roominvalid': 5,  # invalid room\n    'roomfull': 6,  # room is full\n    'userrepeated': 7,  # duplicate player\n    'autherror': 8,  # real-name verification failed\n    'bossleft': 9,  # room owner is offline, please join again later\n    'userlogined': 10,  # user is already logged in\n    'roomstateerror': 11,  # wrong room state\n    'notroomowner': 12,  # not the room owner\n    'alreadyinroom': 13,  # already in the room\n    'notinroom': 14,  # currently not in a room\n    'readyed': 15,  # already marked ready\n    'luckytimeerror': 16,  # lucky-wheel time error\n    'welfareerror': 17,  # welfare claim conditions not met\n    'rankawarderror': 18,  # ranking reward claim conditions not met\n    'userforbid':19,# user is banned\n    'delegateinvaid':20,# invalid agent user\n    'idlistisnull':21,\n    'sharetimeerror': 22,  # share time error\n    'dlevelerror': 23,  # agent level error\n    'applepayerror': 24,  # Apple payment error\n    'delepayforothererror': 25,  # error while an agent recharges for a subordinate\n    'pageNonexistent':26, #the requested page does not exist\n    'loginExpires':27, #login timed out\n    'adminRights':28, #no permission\n    'noRecord':29,#no such record\n}","sub_path":"MJGameServer/configs/config_error.py","file_name":"config_error.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}{"seq_id":"158214277","text":"#\n# @lc app=leetcode.cn id=112 lang=python3\n#\n# [112] Path Sum\n#\n\n# @lc code=start\n# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, val=0, left=None, right=None):\n#         self.val = val\n#         self.left = left\n#         self.right = right\nclass Solution:\n    def hasPathSum(self, root: TreeNode, targetSum: int) -> bool:\n        res = []\n        path = []\n\n        def dfs(node, val, cur_path):\n            if not node:\n                return True\n            val += node.val\n            cur_path.append(node.val)\n            res1 = dfs(node.left, val, cur_path)\n            res2 = dfs(node.right, val, cur_path)\n            if res1 and res2 and val == targetSum:\n                res.append(cur_path[:])\n            cur_path.pop()\n\n        dfs(root, 0, path)\n\n        return True if res else False\n\n# @lc code=end\n","sub_path":"leetcode/112.路径总和.py","file_name":"112.路径总和.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}{"seq_id":"558714670","text":"# Released under The MIT License (MIT)\n# http://opensource.org/licenses/MIT\n# Copyright (c) 2013 SCoT Development Team\n\nimport unittest\nfrom importlib import import_module\n\nimport numpy as np\n\nimport scot\nfrom scot import plainica, datatools\nfrom scot.var import VAR\n\n\nclass TestICA(unittest.TestCase):\n    def setUp(self):\n        pass\n\n    def tearDown(self):\n        pass\n\n    def testModelIdentification(self):\n        \"\"\" generate independent signals, mix them, and see if ICA can reconstruct the mixing matrix\n        do this for every backend \"\"\"\n\n        # original model coefficients\n        b0 = np.zeros((3, 3))    # no connectivity\n        m0 = b0.shape[0]\n        l, t = 100, 
100\n\n        # generate VAR sources with non-gaussian innovation process, otherwise ICA won't work\n        noisefunc = lambda: np.random.normal(size=(1, m0)) ** 3\n\n        var = VAR(1)\n        var.coef = b0\n        sources = var.simulate([l, t], noisefunc)\n\n        # simulate volume conduction... 3 sources measured with 7 channels\n        mix = [[0.5, 1.0, 0.5, 0.2, 0.0, 0.0, 0.0],\n               [0.0, 0.2, 0.5, 1.0, 0.5, 0.2, 0.0],\n               [0.0, 0.0, 0.0, 0.2, 0.5, 1.0, 0.5]]\n        data = datatools.dot_special(sources, mix)\n\n        backend_modules = [import_module('scot.' + b) for b in scot.backends]\n\n        for bm in backend_modules:\n\n            result = plainica.plainica(data, backend=bm.backend)\n\n            i = result.mixing.dot(result.unmixing)\n            self.assertTrue(np.allclose(i, np.eye(i.shape[0]), rtol=1e-6, atol=1e-7))\n\n            permutations = [[0, 1, 2], [0, 2, 1], [1, 0, 2], [1, 2, 0], [2, 0, 1], [2, 1, 0]]\n\n            bestdiff = np.inf\n            bestmix = None\n\n            absmix = np.abs(result.mixing)\n            absmix /= np.max(absmix)\n\n            for p in permutations:\n                estmix = absmix[p, :]\n                diff = np.sum((np.abs(estmix) - np.abs(mix)) ** 2)\n\n                if diff < bestdiff:\n                    bestdiff = diff\n                    bestmix = estmix\n\n            self.assertTrue(np.allclose(bestmix, mix, rtol=1e-1, atol=1e-1))\n\n\ndef main():\n    unittest.main()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"tests/test_plainica.py","file_name":"test_plainica.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}{"seq_id":"27272183","text":"import re\nimport sys\nimport CommonFunctions\nfrom pulsar import provider\n\n# Addon Script information\n__baseUrl__ = provider.ADDON.getSetting(\"base_url\")\n__vo__ = provider.ADDON.getSetting(\"vo\")\n\n# ParseDOM init\ncommon = CommonFunctions\ncommon.plugin = str(sys.argv[0])\n\nACTION_SEARCH = \"recherche\"\nACTION_FILMS = \"films/\"\nACTION_SERIES = \"series/\"\n\n# Raw search - query is always a string\ndef search(query):\n    provider.log.info(\"QUERY : %s\" % query)\n    if(query['query']) : \n        query = query['query']\n    # Replace non-alphanum characters by -, then replace the custom \"5number\" tags by true folder\n    query = re.sub('[^0-9a-zA-Z]+', '-', query)\n    query = provider.quote_plus(query)\n    query = query.replace('11111',ACTION_SERIES).replace('22222',ACTION_FILMS)\n    provider.log.info(\"GET : %s/%s/%s.html\" % (__baseUrl__, ACTION_SEARCH, query))\n    resp = provider.GET(\"%s/%s/%s.html\" % (__baseUrl__, ACTION_SEARCH, query))\n\n    # Parse result\n    liens = common.parseDOM(resp.data, 'a', attrs = { \"class\": \"lien-rechercher\" }, ret = 'href')\n    #for torrent in re.findall(r\"%s\\/dl-torrent\\/.*\\.html\" % (__baseUrl__),data) :\n    return [{\"uri\": __baseUrl__ + \"/_torrents/\" + torrent.rpartition('/')[2].replace(\".html\",\".torrent\")} for torrent in liens]\n\n# Episode Payload Sample\n# {\n#    \"imdb_id\": \"tt0092400\",\n#    \"tvdb_id\": \"76385\",\n#    \"title\": \"married with children\",\n#    \"season\": 1,\n#    \"episode\": 1,\n#    \"titles\": null\n# }\ndef search_episode(episode):\n    provider.log.debug(\"Search episode : name %(title)s, season %(season)02d, episode %(episode)02d\" % episode)\n    return search({'query':\"11111%(title)s S%(season)02dE%(episode)02d\" % episode})\n\n# Movie Payload Sample\n# Note that \"titles\" keys are countries, not languages\n# The titles are also normalized (accents removed, lower case etc...)\n# {\n#    \"imdb_id\": \"tt1254207\",\n#    \"title\": \"big buck bunny\",\n#    \"year\": 2008,\n#    \"titles\": {\n#        \"es\": \"el gran conejo\",\n#        \"nl\": \"peach open movie project\",\n#        \"ru\": \"??????? 
??????\",\n#        \"us\": \"big buck bunny short 2008\"\n#    }\n# }\ndef search_movie(movie):\n    provider.log.info(movie['titles'])\n    if('fr' in movie['titles'] and __vo__ == 'false'):\n        title = movie['titles']['fr']\n    else :\n        title = movie['title']\n    provider.log.info(\"Search movie : title %s, year %s\" % (title, movie['year']))\n    return search({'query':\"22222%s %s\" % (title, movie['year'])})\n\n# Registers the module in Pulsar\nprovider.register(search, search_movie, search_episode)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}{"seq_id":"413266488","text":"from dataclasses import dataclass, field\nfrom typing import Type\n\nfrom cloudshell.cp.core.request_actions import models\nfrom cloudshell.cp.core.request_actions.base import BaseRequestActions\nfrom cloudshell.cp.core.request_actions.models import DeployApp\n\n\n@dataclass\nclass DeployVMRequestActions(BaseRequestActions):\n    deploy_app: models.DeployApp = None\n    connect_subnets: list = field(default_factory=list)\n\n    @classmethod\n    def register_deployment_path(cls, deployment_path_cls: Type[DeployApp]):\n        \"\"\"Register deployment path class.\"\"\"\n        cls.REGISTERED_DEPLOYMENT_PATH_MODELS[\n            deployment_path_cls.DEPLOYMENT_PATH\n        ] = deployment_path_cls\n\n    @classmethod\n    def from_request(cls, request, cs_api=None):\n        \"\"\"Create DeployVMRequestActions object from the string request.\n\n        :param str request:\n        :param cloudshell.api.cloudshell_api.CloudShellAPISession cs_api:\n        :rtype: DeployVMRequestActions\n        \"\"\"\n        actions = cls._parse_request_actions(request=request, cs_api=cs_api)\n        obj = cls()\n\n        for action in actions:\n            if isinstance(action, models.DeployApp):\n                obj.deploy_app = action\n            elif isinstance(action, models.ConnectSubnet):\n                obj.connect_subnets.append(action)\n\n        obj.connect_subnets.sort(key=lambda x: x.device_index)\n\n        return obj\n","sub_path":"cloudshell/cp/core/request_actions/deploy_vm.py","file_name":"deploy_vm.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}{"seq_id":"366117509","text":"import time\nimport numpy as np\nimport cv2\nimport IPython\n\nD2R = 3.141592/180.0\nR2D = 180.0/3.141592\n\n\n\"\"\"\nTODO: Add states and state functions to this class\n    to implement all of the required logic for the armlab\n\"\"\"\nclass StateMachine():\n    def __init__(self, rexarm, planner, kinect):\n        self.rexarm = rexarm\n        self.tp = planner\n        self.kinect = kinect\n        self.status_message = \"State: Idle\"\n        self.current_state = \"idle\"\n        self.next_state = \"idle\"\n        self.PointsLearned = []\n        \n        self.cc = 0\n        \n        \n\n    def set_next_state(self, state):\n        self.next_state = state\n\n\n    \"\"\" This function is run continuously in a thread\"\"\"\n\n    def run(self):\n        if(self.current_state == \"manual\"):\n            if (self.next_state == \"manual\"):\n                self.manual()\n            if(self.next_state == \"idle\"):\n                self.idle()\n            if(self.next_state == \"estop\"):\n                self.estop()\n\n        if(self.current_state == \"idle\"):\n            print(\"current state:\"+str(self.current_state))\n            print(\"next state : \"+str(self.next_state))\n            if(self.next_state == \"manual\"):\n                self.manual()\n            if(self.next_state == \"idle\"):\n                self.idle()\n            if(self.next_state == \"estop\"):\n                self.estop()\n            if(self.next_state == \"calibrate\"):\n                self.calibrate()\n            if(self.next_state == \"execute\"):\n                self.execute()\n            if(self.next_state == \"teach\"):\n                self.teach()\n            
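            # each check below maps a queued next_state to its handler while the machine is idle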
if(self.next_state==\"repeat\"):\n                self.repeat()\n            if(self.next_state==\"set_roi\"):\n                self.set_roi()\n            if(self.next_state==\"set_exclusion\"):\n                self.set_exclusion()\n            if(self.next_state==\"save_frames\"):\n                self.save_frames()\n            if(self.next_state==\"task3\"):\n                self.task3()\n            \n            \n            if(self.next_state==\"ClickandGrab\"):\n                self.ClickandGrab()\n\n        if(self.current_state == \"estop\"):\n            if (self.next_state == \"estop\"):\n                self.estop()\n            if (self.next_state == \"idle\"):\n                self.idle()\n\n        if(self.current_state == \"calibrate\"):\n            if(self.next_state == \"idle\"):\n                self.idle()\n\n        if(self.current_state == \"execute\"):\n            if (self.next_state == \"execute\"):\n                self.execute()\n            if (self.next_state == \"estop\"):\n                self.estop()\n            if (self.next_state == \"idle\"):\n                self.idle()\n\n        if(self.current_state==\"teach\"):\n            if (self.next_state==\"teach\"):\n                self.teach()\n            if (self.next_state==\"idle\"):\n                self.idle()\n\n        if (self.current_state==\"repeat\"):\n            if (self.next_state==\"repeat\"):\n                self.repeat()\n            if (self.next_state==\"idle\"):\n                self.idle()\n\n        if (self.current_state==\"set_roi\"):\n            if (self.next_state==\"set_roi\"):\n                self.set_roi()\n            if (self.next_state==\"idle\"):\n                self.idle()\n\n        if (self.current_state==\"set_exclusion\"):\n            if (self.next_state==\"set_exclusion\"):\n                self.set_exclusion()\n            if (self.next_state==\"idle\"):\n                self.idle()\n\n        if (self.current_state==\"save_frames\"):\n            if (self.next_state==\"save_frames\"):\n                self.save_frames()\n            if (self.next_state==\"idle\"):\n                self.idle()\n\n        if (self.current_state==\"task3\"):\n            if (self.next_state==\"task3\"):\n                self.task3()\n            if (self.next_state==\"idle\"):\n                self.idle()\n\n        \n        \n        if(self.current_state == \"ClickandGrab\"):\n            if (self.next_state == \"ClickandGrab\"):\n                self.ClickandGrab()\n            if (self.next_state == \"idle\"):\n                self.idle()\n\n\n\n    \"\"\"Functions run for each state\"\"\"\n\n\n    def manual(self):\n        self.status_message = \"State: Manual - Use sliders to control arm\"\n        self.current_state = \"manual\"\n        self.rexarm.send_commands()\n        self.rexarm.get_feedback()\n\n    def idle(self):\n        self.status_message = \"State: Idle - Waiting for input\"\n        self.current_state = \"idle\"\n        self.rexarm.get_feedback()\n\n\n\n\n\n    def estop(self):\n        self.status_message = \"EMERGENCY STOP - Check Rexarm and restart program\"\n        self.current_state = \"estop\"\n        self.rexarm.disable_torque()\n        self.rexarm.get_feedback()\n\n    def calibrate(self):\n        self.kinect.kinectCalibrated = False\n        self.current_state = \"calibrate\"\n        self.next_state = \"idle\"\n        self.tp.go(max_speed=2.0)\n        location_strings = [\"lower left corner of board\",\n                            \"upper left corner of board\",\n                            \"upper right corner of board\",\n                            \"lower right corner of board\",\n                            \"center of shoulder motor\"]\n        i = 0\n        for j in range(5):\n\n            self.status_message = \"Calibration - Click %s in RGB image\" % location_strings[j]\n            while (i <= j):\n\n                self.rexarm.get_feedback()\n\n                if(self.kinect.new_click == True):\n                    self.kinect.rgb_click_points[i] = self.kinect.last_click.copy()\n                    i = i + 1\n                    self.kinect.new_click = False\n\n        i = 0\n        for j in range(5):\n            self.status_message = \"Calibration - Click %s in depth image\" % location_strings[j]\n            while (i <= j):\n                self.rexarm.get_feedback()\n                if(self.kinect.new_click == True):\n                    self.kinect.depth_click_points[i] = self.kinect.last_click.copy()\n                    i = i + 1\n                    self.kinect.new_click = False\n\n\n        \"\"\"TODO Perform camera calibration here\"\"\"\n        image_points = np.array(self.kinect.rgb_click_points[:])\n        depth_points = 
np.array(self.kinect.depth_click_points[:])\n        self.kinect.getAffineTransform(depth_points, image_points)\n        self.kinect.kinectCalibrated = True\n\n    def set_roi(self):\n        self.current_state = \"set_roi\"\n        self.next_state = \"idle\"\n        self.tp.go(max_speed=2.0)\n        location_strings = [ \"upper left corner of board\",\n                            \"lower right corner of board\"]\n        i = 0\n        for j in range(2):\n\n            self.status_message = \"Setting ROI - Click %s in RGB image\" % location_strings[j]\n            while (i <= j):\n\n                self.rexarm.get_feedback()\n\n                if(self.kinect.new_click == True):\n                    self.kinect.rgb_click_points[i] = self.kinect.last_click.copy()\n                    i = i + 1\n                    self.kinect.new_click = False\n\n        image_points = np.array(self.kinect.rgb_click_points[:])\n        self.kinect.SETROI(image_points)\n\n    def set_exclusion(self):\n        self.current_state = \"set_exclusion\"\n        self.next_state = \"idle\"\n        self.tp.go(max_speed=2.0)\n        location_strings = [ \"upper left corner of board\",\n                            \"lower right corner of board\"]\n        i = 0\n        for j in range(2):\n\n            self.status_message = \"Setting exclusion - Click %s in RGB image\" % location_strings[j]\n            while (i <= j):\n\n                self.rexarm.get_feedback()\n\n                if(self.kinect.new_click == True):\n                    self.kinect.rgb_click_points[i] = self.kinect.last_click.copy()\n                    i = i + 1\n                    self.kinect.new_click = False\n\n        image_points = np.array(self.kinect.rgb_click_points[:])\n        self.kinect.SETExclusion(image_points)\n\n    def save_frames(self):\n        self.status_message = \"State: Saving frames.\"\n        self.current_state = \"save_frames\"\n        self.next_state = \"idle\"\n        self.kinect.SaveBGRandDepthFrames()\n\n    def execute(self):\n        self.status_message = \"State: execute.\"\n        self.current_state = \"execute\"\n        self.next_state = \"idle\"\n        way_points = np.array([\n            [0.0, 0.0, 0.0, 0.0, 0.0],\n            [1.0, 0.8, 1.0, 0.5, 1.0],\n            [-1.0, -0.8, -1.0, -0.5, -1.0],\n            [-1.0, 0.8, 1.0, 0.5, -1.0],\n            [1.0, -0.8, -1.0, -0.5, -1.0],\n            [0.0, 0.0, 0.0, 0.0, 0.0]\n            ])\n\n        for point in way_points:\n            self.rexarm.set_positions(point)\n            self.rexarm.pause(2.0)\n\n        \"\"\"\n        p=1\n        print(\"Way points: \" + str(way_points[p][:]))\n        self.rexarm.set_positions(way_points[p][:])\n        \"\"\"\n\n        print(\"Execute!\")\n\n    def teach(self):\n        self.status_message = \"State: teaching.\"\n        self.current_state = \"teach\"\n        self.next_state = \"idle\"\n        print('Point learned.', self.rexarm.get_positions())\n        temp = self.rexarm.get_positions()\n        self.PointsLearned.append(temp[:])\n\n\n    def repeat(self):\n        self.status_message = \"State: repeat\"\n        self.current_state = \"repeat\"\n        self.next_state = \"idle\"\n        nparray = np.array(self.PointsLearned)\n        \"\"\"\n        nparray = np.array([\n            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n            [1.0, 0.8, 1.0, 0.5, 1.0, 0.0],\n            [-1.0, -0.8, -1.0, -0.5, -1.0, 0.0],\n            [-1.0, 0.8, 1.0, 0.5, -1.0, 0.0],\n            [1.0, -0.8, -1.0, -0.5, -1.0, 0.0],\n            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\n            ])\n        \"\"\"\n        print(nparray)\n        self.tp.execute_plan(nparray, 2.5)\n        #np.savetxt(\"waypoints.txt\",nparray, delimiter=\",\" )\n        if (self.rexarm.gripper_state == True):\n            self.rexarm.open_gripper()\n            self.rexarm.pause(2.0)\n            self.rexarm.close_gripper()\n        else:\n            self.rexarm.close_gripper()\n            self.rexarm.pause(2.0)\n            self.rexarm.open_gripper()\n\n        \"\"\"\n        for point in nparray:\n            self.rexarm.set_positions(point)\n            self.rexarm.pause(2.0)\n        \"\"\"\n        \n    def task3(self):\n        self.status_message = \"State: task3\"\n        self.current_state = \"task3\"\n        self.next_state = \"idle\"\n        self.kinect.detectBlocksInDepthImage()\n        self.kinect.IdentifyColors()\n        #self.kinect.SpecifcColor2Draw = self.kinect.ColorLabel['red']\n        
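        # cycle the requested color label on each call; self.cc wraps back to 0 after 7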
self.kinect.SpecifcColor2Draw = self.cc\n self.cc = self.cc + 1\n selector = self.kinect.GetBlockIndexBasedOnColor(self.kinect.SpecifcColor2Draw)\n if selector==-1:\n print('Block color not present.')\n else:\n center, orientation = self.kinect.GetBlockInfo(selector) \n print('Center: '+str(center))\n print('Orientation: '+str(orientation))\n\n if self.cc > 7:\n self.cc = 0\n \n\n def ClickandGrab(self):\n self.current_state = \"ClickandGrab\"\n self.next_state = \"idle\"\n valid_pt = False\n self.status_message = \"State: Click the block center.\"\n while(self.kinect.new_click == False) or (valid_pt == False):\n if self.kinect.new_click == False:\n self.rexarm.get_feedback()\n else:\n PointWorld = self.kinect.PointWorld_last_click\n Orientation = self.kinect.BlockOrientation_last_click\n print(Orientation)\n PointWorld = np.asarray(PointWorld)\n PointWorld = PointWorld + np.array([[0.0], [0.0], [10.0]])\n print(PointWorld)\n InnerPointWorld = PointWorld + np.array([[0.0], [0.0], [50.0]])\n #print(InnerPointWorld)\n oren = np.array([0.0, np.pi, np.pi-Orientation])\n WaypointPose = np.hstack((np.transpose(InnerPointWorld)[0], oren))\n Pose = np.hstack((np.transpose(PointWorld)[0], oren))\n print(WaypointPose)\n WaypointAngles = self.rexarm.arm_IK(WaypointPose)\n Angles = self.rexarm.arm_IK(Pose)\n print(np.array(WaypointAngles))\n if (len(Angles[Angles==0])!=24) and (len(WaypointAngles[WaypointAngles==0])!=24):\n valid_pt = True\n else:\n self.status_message = \"State: Out of workspace! Click the block center again.\"\n self.kinect.new_click = False\n\n self.kinect.new_click = False\n self.rexarm.open_gripper()\n self.tp.execute_point(np.array(WaypointAngles[0]), 5.0)\n self.rexarm.pause(2.0)\n self.tp.execute_point(np.array(Angles[0]), 2.5)\n self.rexarm.pause(2.0)\n self.rexarm.close_gripper()\n self.rexarm.pause(1.5)\n self.tp.execute_point(np.array(WaypointAngles[0]), 2.5)\n self.rexarm.pause(2.0)\n #self.rexarm.set_positions(np.zeros(6))\n\n self.status_message = \"State: Click the drop place.\"\n valid_pt = False\n while(self.kinect.new_click == False) or (valid_pt == False):\n if self.kinect.new_click == False:\n self.rexarm.get_feedback()\n else:\n PointWorld = self.kinect.PointWorld_last_click\n self.kinect.new_click == False\n PointWorld = np.asarray(PointWorld)\n PointWorld = PointWorld + np.array([[0.0], [0.0], [50.0]])\n InnerPointWorld = PointWorld + np.array([[0.0], [0.0], [50.0]])\n WaypointPose = np.hstack((np.transpose(InnerPointWorld)[0], oren))\n Pose = np.hstack((np.transpose(PointWorld)[0], oren))\n WaypointAngles = self.rexarm.arm_IK(WaypointPose)\n Angles = self.rexarm.arm_IK(Pose)\n if (len(Angles[Angles==0])!=24) and (len(WaypointAngles[WaypointAngles==0])!=24):\n valid_pt = True\n else:\n self.status_message = \"State: Out of workspace! 
Click the drop place again.\"\n self.kinect.new_click = False\n \n self.kinect.new_click = False\n self.tp.execute_point(np.array(WaypointAngles[0]), 5.0)\n self.tp.execute_point(np.array(Angles[0]), 2.5)\n self.rexarm.open_gripper()\n self.rexarm.pause(1.5)\n self.tp.execute_point(np.array(WaypointAngles[0]), 2.5)\n self.rexarm.set_positions(np.zeros(6))\n\n #print('Point clicked.', self.rexarm.get_positions())\n #temp = self.rexarm.get_positions()\n #self.PointsLearned.append(temp[:])\n","sub_path":"armlab/state_machine.py","file_name":"state_machine.py","file_ext":"py","file_size_in_byte":14905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"524167050","text":"import base64\nimport json\n\nfrom cryptography.hazmat.primitives import hashes, serialization\nfrom cryptography.hazmat.primitives.asymmetric import padding\n\nsig = base64.b64decode(\"base64EncodedSignature==\")\n\npayload = '{\"id\":\"01/IL/ABCD1234ABCD1234ABCD1234ABCD1234#ABCD1234\",\"et\":1,\"ct\":1,\"c\":\"IL MOH\",\"cn\":null,\"fn\":null,\"g\":null,\"f\":null,\"gl\":null,\"fl\":null,\"idp\":null,\"idl\":null,\"b\":\"0001-01-01\",\"e\":\"0001-01-01\",\"a\":\"0001-01-01\",\"p\":[{\"idl\":\"0123456789\",\"e\":\"2021-01-01\"}]}'\n\nh = hashes.Hash(hashes.SHA256())\nh.update(payload.encode(\"utf-8\"))\ndigest = h.finalize()\n\nwith open(\"certs/RamzorQRPubKey.pem\", \"rb\") as f:\n k = serialization.load_pem_public_key(f.read())\n k.verify(\n sig,\n digest,\n padding.PKCS1v15(),\n hashes.SHA256(),\n )\n\n\ndata = json.loads(payload)\nprint(f\"Israeli ID Number {data['p'][0]['idl']}\")\nprint(f\"ID valid by {data['p'][0]['e']}\")\nprint(f\"Cert Unique ID {data['id']}\")\n","sub_path":"verify.py","file_name":"verify.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"637542354","text":"class Fraction:\r\n \"\"\" This class represents one single fraction that consists of\r\n numerator and denominator \"\"\"\r\n\r\n def __init__(self, numerator, denominator):\r\n \"\"\"\r\n Constructor. 
Checks that the numerator and denominator are of\r\n correct type and initializes them.\r\n\r\n :param numerator: fraction's numerator\r\n :param denominator: fraction's denominator\r\n \"\"\"\r\n\r\n if not isinstance(numerator, int) or not isinstance(denominator, int):\r\n raise TypeError\r\n elif denominator == 0:\r\n raise ValueError\r\n\r\n self.__numerator = numerator\r\n self.__denominator = denominator\r\n\r\n def return_string(self):\r\n \"\"\" Returns a string-presentation of the fraction in the format\r\n numerator/denominator \"\"\"\r\n\r\n if self.__numerator * self.__denominator < 0:\r\n sign = \"-\"\r\n else:\r\n sign = \"\"\r\n return \"{:s}{:d}/{:d}\".format(sign, abs(self.__numerator),\r\n abs(self.__denominator))\r\n\r\n def simplify(self):\r\n divisor = greatest_common_divisor(self.__numerator, self.__denominator)\r\n self.__numerator = self.__numerator//divisor\r\n self.__denominator = self.__denominator//divisor\r\n\r\n def complement(self):\r\n return Fraction(self.__numerator*-1, self.__denominator)\r\n\r\n def reciprocal(self):\r\n return Fraction(self.__denominator, self.__numerator)\r\n\r\n def multiply(self, fraction):\r\n return Fraction(self.__numerator*fraction.__numerator,\r\n self.__denominator*fraction.__denominator)\r\n\r\n def divide(self, fraction):\r\n reciprocal = fraction.reciprocal()\r\n return self.multiply(reciprocal)\r\n\r\n def add(self, fraction):\r\n new_Fraction = Fraction((self.__numerator*fraction.__denominator +\r\n fraction.__numerator*self.__denominator),\r\n self.__denominator*fraction.__denominator)\r\n return new_Fraction\r\n\r\n def deduct(self, fraction):\r\n new_Fraction = Fraction((self.__numerator*fraction.__denominator -\r\n fraction.__numerator*self.__denominator),\r\n self.__denominator*fraction.__denominator)\r\n return new_Fraction\r\n\r\n def __lt__(self, fraction):\r\n if self.__numerator/self.__denominator < fraction.__numerator/fraction.__denominator:\r\n return True\r\n else:\r\n return False\r\n\r\n def __gt__(self, fraction):\r\n if self.__numerator/self.__denominator > fraction.__numerator/fraction.__denominator:\r\n return True\r\n else:\r\n return False\r\n\r\n def __str__(self):\r\n if self.__numerator * self.__denominator < 0:\r\n sign = \"-\"\r\n else:\r\n sign = \"\"\r\n return \"{:s}{:d}/{:d}\".format(sign, abs(self.__numerator),\r\n abs(self.__denominator))\r\n\r\n\r\ndef greatest_common_divisor(a, b):\r\n \"\"\"\r\n Euclidean algorithm.\r\n \"\"\"\r\n\r\n while b != 0:\r\n a, b = b, a % b\r\n return a\r\n\r\n\r\ndef create_list():\r\n newlist = []\r\n print(\"Enter fractions in the format integer/integer.\")\r\n print(\"One fraction per line. 
Stop by entering an empty line.\")\r\n    while True:\r\n        input_split = input().split(\"/\")\r\n        if input_split[0] == \"\":\r\n            return newlist\r\n        newlist.append(Fraction(int(input_split[0]), int(input_split[1])))\r\n\r\n\r\ndef main():\r\n    mylist = create_list()\r\n    print(\"The given fractions in their simplified form:\")\r\n    for fraction in mylist:\r\n        print(fraction, \"=\", end=\" \")\r\n        fraction.simplify()\r\n        print(fraction)\r\n\r\n\r\nmain()\r\n\r\n","sub_path":"round11/Ex11.2.py","file_name":"Ex11.2.py","file_ext":"py","file_size_in_byte":3761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}{"seq_id":"112801719","text":"#!/usr/bin/env python\nfrom __future__ import print_function\n\n\nfrom sys import argv\nimport numpy as np\nimport math\nimport collections\n\n\na=np.fromfile(argv[1],int)\n\n\n\npts=np.fromfile(argv[2])\n# area_triangles=np.fromfile(argv[3])\npts=np.reshape(pts,(-1,3))\nsize_pts= pts.shape[0]\npts.astype(int)\n\nprint(pts)\nb=np.reshape(a, (-1, 3))\n\n\n# v_vec=np.fromfile(argv[4])\n# v_vec=np.reshape(v_vec,(-1,3))\n# size_v_vec= v_vec.shape[0]\n\nv_scalars=np.fromfile(argv[3])\n\n\n# filt=[~np.isinf(v_scalars)]\n\n# v_vec= v_vec[filt]\n# v_scalars=v_scalars[filt]\n\n# b=b[filt]\n# area_triangles=area_triangles[filt]\n\nc= b.shape[0]\narrayof3s = np.empty(c)\narrayof3s.fill(3)\n\ncells_info=np.column_stack((arrayof3s,b))\ncells_info=np.reshape((cells_info), (-1, 4))\ncells_info=cells_info.astype(int)\nsize_cells_info= cells_info.shape[0]\n\n\n\nwith open('target.vtk','w') as out:\n    line1 = \"# vtk DataFile Version 3.0  \"\n    line2 = \"vtk output \"\n    line3 = \"ASCII \"\n    line4 = \"DATASET POLYDATA\" \n    line5= \"POINTS {} float\".format(size_pts)\n    print(\"I'm going to write these to the file.\")\n    out.write('{}\\n{}\\n{}\\n{}\\n{}\\n'.format(line1,line2,line3,line4,line5))\n#    import pdb; pdb.set_trace()\n#this is already uploaded ????\n    np.savetxt(out, pts, delimiter=' ',fmt='%.7f')   # X is an array\n\n    line6= \"POLYGONS {} {}\".format(size_cells_info, size_cells_info*4)\n    out.write('{}\\n'.format(line6))\n\n    np.savetxt(out, cells_info, delimiter=' ',fmt='%d') \n\n    # line7=\"CELL_DATA {}\".format(size_cells_info)\n    # out.write('{}\\n'.format(line7))\n    # # line8=\"VECTORS fibre_orientations float\"\n    # # out.write('{}\\n{}\\n'.format(line7,line8))\n    # # np.savetxt(out, v_vec, delimiter=' ',fmt='%.7f') \n    \n    # # line9=\"SCALARS area float\"\n    # # line10=\"LOOKUP_TABLE default\"\n    # # out.write('{}\\n{}\\n'.format(line9,line10))\n    # # np.savetxt(out,area_triangles)\n    \n    # line8=\"SCALARS CVs float\"\n    # line9=\"LOOKUP_TABLE default\"\n    # out.write('{}\\n{}\\n'.format(line8,line9))\n    # np.savetxt(out,v_scalars)\n\n\n\n","sub_path":"write_mesh_python.py","file_name":"write_mesh_python.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}{"seq_id":"124755368","text":"# p.82 Edit distance\r\n\"\"\"\r\nGiven strings S and T and the three operations\r\n- change: pick one character of S and change it to any character\r\n- delete: pick one character of S and delete it\r\n- insert: insert any single character anywhere in S\r\nwhat is the minimum number of operations needed\r\nto make S and T the same string?\r\n\"\"\"\r\nS, T = \"logistic\", \"algorithm\"\r\ns, t = len(S), len(T)\r\n# dp[i][j]: edit distance between the first i characters of S and the first j characters of T\r\ndp = [[10**10]*(t+1) for i in range(s+1)]\r\ndp[0][0]=0\r\nfor i in range(s+1):\r\n\tfor j in range(t+1):\r\n\t\t# change\r\n\t\tif i>0 and j>0:\r\n\t\t\tif S[i-1]==T[j-1]: # no extra cost if the characters match\r\n\t\t\t\tdp[i][j] = min(dp[i][j], dp[i-1][j-1])\r\n\t\t\telse: # otherwise change the character, cost +1\r\n\t\t\t\tdp[i][j] = min(dp[i][j], dp[i-1][j-1]+1)\r\n\t\t# delete: remove the i-th character of S, so cost +1 on top of the state for i-1\r\n\t\tif i>0: dp[i][j] = min(dp[i][j], dp[i-1][j]+1)\r\n\t\t# insert: inserting into S is equivalent to deleting the j-th character of T\r\n\t\tif j>0: dp[i][j] = min(dp[i][j], dp[i][j-1]+1)\r\nprint(dp[s][t])","sub_path":"5章 動的計画法/edit_distance.py","file_name":"edit_distance.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}{"seq_id":"23387762","text":"\"\"\"\nprint pattern\ninput an integer & a boolean\nand print\n*\n**\n***\n****\nif true\n&\n****\n***\n**\n*\nif false\n\"\"\"\n\nnum = int(input(\"Enter a number\"))\nt = int(input(\"Enter 0 for False & 1 for True\"))\nboole = bool(t)  # Type casting into boolean\nif boole == True:\n    # for i in range(num+1):\n    #     print(\"*\"*i)\n    i = 1\n    while num > 0:\n        print(i*\"*\")\n        num = num - 1\n        i = i + 1\nelse:\n    while num > 0:\n        print(\"*\"*num)\n        num = num - 1\n","sub_path":"P2 Pattern.py","file_name":"P2 Pattern.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}{"seq_id":"200463188","text":"# 2021-03-25, lucas.mayer.almeida@ccc.ufcg.edu.br\n# Computes the roots of a quadratic equation\n\na = int(input())\nb = int(input())\nc = int(input())\n\ndelta = b **2 - 4 * a * c\n\nif delta < 0 :\n    print(\"no real roots\")\n\nelif delta == 0 :\n    raiz = (-b + delta**(1/2)) / (2 * a)\n    \n    print(f\"x = {raiz:.2f}\")\n\nelse:\n    raiz1 = (-b + delta**(1/2)) / (2 * a)\n    raiz2 = (-b - delta**(1/2)) / (2 * a)\n    \n    print(f\"x1 = {raiz1:.2f}\")\n    print(f\"x2 = {raiz2:.2f}\")\n","sub_path":"atividades/corrigindo_equacoes/questao.py","file_name":"questao.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}{"seq_id":"170093572","text":"\n\nimport findspark\nfindspark.init()\nimport pyspark\n\n\nfrom pyspark.sql import SparkSession\n\nspark = SparkSession.builder.master(\"local\").appName(\"CTR Models\").config(\n    \"spark.executor.memory\", \"8g\").config(\n    \"spark.driver.memory\", \"15g\").getOrCreate()\n\n\n# # gradient-boosted trees (GBT) & gridsearch \n\n# ## 1. GBT model\n\n# In[3]:\n\n\nfrom pyspark.sql.types import *\nfrom pyspark.ml.linalg import VectorUDT\n\nstruct_types = [StructField(\"click\", IntegerType(), True), \n                StructField(\"id\", StringType(), True),\n                StructField(\"selectedFeatures\", VectorUDT(), True)]\nschema = StructType(struct_types)\nschema\ninput_data = spark.read.json(\"sel_data10.json\", schema = schema).select(['id','selectedFeatures','click'])\ntrain_data, test_data = input_data.randomSplit([.8,.2],seed=1234)\n\n# In[5]:\n\n\n# define evaluator \nfrom pyspark.ml.evaluation import BinaryClassificationEvaluator\n\nevaluator = BinaryClassificationEvaluator(labelCol='click')\n\n\n# In[6]:\n\n\nfrom pyspark.ml.classification import GBTClassifier\nimport datetime\ngbt = GBTClassifier(labelCol=\"click\", featuresCol=\"selectedFeatures\")\n\nstart = datetime.datetime.now()\nGBT = gbt.fit(train_data)\npredictions = GBT.transform(test_data)\nend = datetime.datetime.now()\n\naccuracy = evaluator.evaluate(predictions)\n# GBT.featureImportances\n\n\n# In[7]:\n\n\naccuracy\n\n\n# In[ ]:\n\n\ntime = end - start\nround(time.total_seconds(),2)\n\n\n# ## 2. 
ParamGridBuilder and cv \n\n# In[12]:\n\n\nfrom pyspark.ml.tuning import ParamGridBuilder, CrossValidator\n\nparamGrid = (ParamGridBuilder()\n .addGrid(gbt.subsamplingRate, [0.6, 0.8, 1.0])\n .addGrid(gbt.stepSize, [0.05, 0.1, 0.15])\n .addGrid(gbt.maxIter, [20, 30, 40])\n .build())\n\ncv = CrossValidator(estimator=gbt, estimatorParamMaps=paramGrid, evaluator=evaluator, numFolds=3)\n\n# cvModel = cv.fit(train_data)\n\n# best_prediction = cvModel.transform(test_data)\n\n# best_accuracy = evaluator.evaluate(best_prediction)\n# best_accuracy\n\n\n# In[ ]:\n\n\ncvModel = cv.fit(train_data)\n\n\n# In[ ]:\n\n\n#best_prediction['prediction']\n\n\n# In[ ]:\n\n\nbestParams = cvModel.bestModel.extractParamMap()\nbestParams\n\n\n# In[ ]:\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import classification_report, confusion_matrix\ny_test = [int(i.click) for i in test_data.select(['click']).collect()]\ny_pre = [int(i.prediction) for i in best_prediction.select(['prediction']).collect()]\nprint(classification_report(y_test, y_pre))\nconf_mat = confusion_matrix(y_test, y_pre)\nwidth = np.shape(conf_mat)[1]\nheight = np.shape(conf_mat)[0]\n\nres = plt.imshow(np.array(conf_mat), cmap=plt.cm.summer, interpolation='nearest')\nfor i, row in enumerate(conf_mat):\n for j, c in enumerate(row):\n if c>0:\n plt.text(j-.2, i+.1, c, fontsize=16)\n \n#cb = fig.colorbar(res)\nplt.title('Confusion Matrix')\nplt.xticks([0.0, 1.0], ['non-click','click'])\nplt.yticks([0.0, 1.0], ['non-click','click'])\n\nplt.show()\n\n\n","sub_path":"spark_cv.py","file_name":"spark_cv.py","file_ext":"py","file_size_in_byte":2946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"265719679","text":"import os\nimport sys\nimport urllib.parse, urllib.request\nimport json\nfrom naverdic import NaverDic\nfrom bs4 import BeautifulSoup\n\nclass NaverTranslator:\n\n def __init__(self, id, secret):\n self.ID = id\n self.SECRET = secret\n\n self.ALPHABET = \"ALPHA\"\n self.HANGUL = \"HAN\"\n\n def analyze_msg(self, msg):\n if ('A' <= msg[0] <= 'z'):\n return self.ALPHABET\n else:\n return self.HANGUL\n\n def is_one_word(self, msg):\n return \" \" not in msg\n\n def translate(self, msg):\n msg_type = self.analyze_msg(msg)\n\n # if msg is just one word, use dictionary (not translation api)\n if self.is_one_word(msg):\n return self.find_in_dictionary(msg)\n\n enc_msg = urllib.parse.quote(msg)\n\n data = None\n if msg_type == self.ALPHABET:\n data = \"source=en&target=ko&text=\" + enc_msg\n elif msg_type == self.HANGUL:\n data = \"source=ko&target=en&text=\" + enc_msg\n\n url = \"https://openapi.naver.com/v1/language/translate\"\n request = urllib.request.Request(url)\n request.add_header(\"X-Naver-Client-Id\", self.ID)\n request.add_header(\"X-Naver-Client-Secret\", self.SECRET)\n response = urllib.request.urlopen(request, data=data.encode(\"utf-8\"))\n rescode = response.getcode()\n\n if(rescode==200):\n response_body = response.read().decode('utf-8')\n response_body = json.loads(response_body)\n # return response_body.decode('utf-8')\n return response_body[\"message\"][\"result\"][\"translatedText\"]\n else:\n return \"Error Code:\" + rescode\n\n def find_in_dictionary(self, word):\n return NaverDic.search(word)\n","sub_path":"naver_translator.py","file_name":"naver_translator.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"532383564","text":"from rest_framework 
import serializers\nfrom snippets.models import Category\n\nclass CategorySerializer(serializers.ModelSerializer):\n class Meta:\n model = Category\n fields = ('name', 'children')\n def create(self, validated_data):\n \"\"\"\n Create and return a new `Snippet` instance, given the validated data.\n \"\"\"\n print(validated_data)\n for i in validated_data:\n print (\"validated\")\n print (i)\n if i == \"children\":\n a = validated_data.get('children')\n print (a[0][\"children\"])\n return Category.objects.create(**validated_data)\n\n def get_fields(self):\n fields = super(CategorySerializer, self).get_fields()\n fields['children'] = CategorySerializer(many=True, required=False)\n return fields\n\n#CategorySerializer.base_fields['children'] = CategorySerializer()\n","sub_path":"test/snippets/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"637755522","text":"import json\n\nfrom functools import wraps\n\nfrom rest_models import IoTTelemetryModel\n\n\ndef handle_api_error(func):\n \"\"\"\n This define a decorator to format the HTTP response of the lambda:\n - a status code\n - the body of the response as a string\n \"\"\"\n\n @wraps(func)\n def wrapped_func(*args, **kwargs):\n try:\n return {\n 'statusCode': 200,\n 'headers': {\n 'Access-Control-Allow-Origin': '*',\n 'Access-Control-Allow-Credentials': True,\n },\n 'body': json.dumps(func(*args, **kwargs)),\n }\n except Exception as e:\n return {\n 'statusCode': 500,\n 'headers': {\n 'Access-Control-Allow-Origin': '*',\n 'Access-Control-Allow-Credentials': True,\n },\n 'body': str(e),\n }\n return wrapped_func\n\n\nclass IoTTelemetryAPI(object):\n\n model_cls = IoTTelemetryModel()\n\n def __init__(self):\n super(IoTTelemetryAPI, self).__init__()\n\n\n @handle_api_error\n def get(self, event, context):\n obj_id = event['pathParameters']['id']\n\n return self.model_cls.pull(obj_id)\n\n\n def get_api_methods(self):\n return self.get\n","sub_path":"apigateway/iot_telemetry_rest/rest_handler.py","file_name":"rest_handler.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"112443538","text":"#!/usr/bin/env python\n\"\"\"\nckwg +31\nCopyright 2015-2016 by Kitware, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice,\n this list of conditions and the following disclaimer.\n\n * Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n * Neither name of Kitware, Inc. nor the names of any contributors may be used\n to endorse or promote products derived from this software without specific\n prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\nARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n==============================================================================\n\nVITAL Python version of track features tool, primarily for example Python\ninterface usage.\n\n\"\"\"\n# -*- coding: utf-8 -*-\n\nimport logging\nimport os\nimport os.path\n\nfrom vital import (\n apm,\n ConfigBlock,\n TrackSet\n)\nfrom vital.algo import (\n ConvertImage,\n ImageIo,\n TrackFeatures\n)\n\n\nlogging.basicConfig()\n\n\ndef get_default_config():\n c = ConfigBlock(\"track_features\")\n c.set_value(\"image_list_file\", \"\",\n \"Path to an input file containing new-line separated paths \"\n \"to sequential image files\")\n c.set_value(\"mask_list_file\", \"\",\n \"Optional path to an input file containing new-line \"\n \"separated paths to mask images. This list should be \"\n \"parallel in association to files specified in \"\n \"``image_list_file``. Mask image must be the same size as \"\n \"the image they are associated with.\\n\"\n \"\\n\"\n \"Leave this blank if no image masking is desired.\")\n c.set_value(\"invert_masks\", \"false\",\n \"If true, all mask images will be inverted after loading. \"\n \"This is useful if mask images read in use positive \"\n \"values to indicated masked areas instead of non-masked \"\n \"areas.\")\n c.set_value(\"expect_multichannel_masks\", \"false\",\n \"A majority of the time, mask images are a single channel, \"\n \"however it is feasibly possible that certain \"\n \"implementations may use multi-channel masks. If this is \"\n \"true we will expect multiple-channel mask images, \"\n \"warning when a single-channel mask is provided. If this \"\n \"is false we error upon seeing a multi-channel mask \"\n \"image.\")\n c.set_value(\"output_tracks_file\", \"\",\n \"Path to a file to write output tracks to. If this \"\n \"file exists, it will be overwritten.\")\n # Required algorithm does not have an implemented interface yet\n # c.set_value(\"output_homography_file\", \"\",\n # \"Optional path to a file to write source-to-reference \"\n # \"homographies for each frame. Leave blank to disable this \"\n # \"output. 
The output_homography_generator algorithm type \"\n # \"only needs to be set if this is set.\")\n return c\n\n\nclass TrackFeaturesTool (object):\n\n @property\n def log(self):\n return logging.getLogger(\"TrackFeaturesTool\")\n\n def __init__(self):\n apm.register_plugins()\n\n # Algorithms\n self.algo_convert_img = ConvertImage(\"convert_image\")\n self.algo_image_io = ImageIo(\"image_reader\")\n self.algo_track_features = TrackFeatures(\"feature_tracker\")\n\n # Other tool variables\n self.image_list_filepath = None\n self.mask_list_filepath = None\n self.invert_masks = False\n self.expect_multichannel_masks = False\n self.output_tracks_filepath = None\n\n def get_configuration(self):\n \"\"\"\n :return: the current tool configuration\n :rtype: ConfigBlock\n \"\"\"\n c = ConfigBlock(\"track_features\")\n c.set_value(\"image_list_file\",\n self.image_list_filepath or \"\",\n \"Path to an input file containing new-line separated paths \"\n \"to sequential image files\")\n c.set_value(\"mask_list_file\",\n self.mask_list_filepath or \"\",\n \"Optional path to an input file containing new-line \"\n \"separated paths to mask images. This list should be \"\n \"parallel in association to files specified in \"\n \"``image_list_file``. Mask image must be the same size as \"\n \"the image they are associated with.\\n\"\n \"\\n\"\n \"Leave this blank if no image masking is desired.\")\n c.set_value(\"invert_masks\",\n str(self.invert_masks).lower(),\n \"If true, all mask images will be inverted after loading. \"\n \"This is useful if mask images read in use positive \"\n \"values to indicated masked areas instead of non-masked \"\n \"areas.\")\n c.set_value(\"expect_multichannel_masks\",\n str(self.expect_multichannel_masks).lower(),\n \"A majority of the time, mask images are a single channel, \"\n \"however it is feasibly possible that certain \"\n \"implementations may use multi-channel masks. If this is \"\n \"true we will expect multiple-channel mask images, \"\n \"warning when a single-channel mask is provided. If this \"\n \"is false we error upon seeing a multi-channel mask \"\n \"image.\")\n c.set_value(\"output_tracks_file\",\n self.output_tracks_filepath or \"\",\n \"Path to a file to write output tracks to. If this \"\n \"file exists, it will be overwritten.\")\n # Required algorithm does not have an implemented interface yet\n # c.set_value(\"output_homography_file\",\n # self.output_homography_filepath or \"\",\n # \"Optional path to a file to write source-to-reference \"\n # \"homographies for each frame. Leave blank to disable this \"\n # \"output. 
The output_homography_generator algorithm type \"\n        #             \"only needs to be set if this is set.\")\n\n        self.algo_convert_img.get_config(c)\n        self.algo_image_io.get_config(c)\n        self.algo_track_features.get_config(c)\n\n        return c\n\n    def set_configuration(self, config):\n        \"\"\"\n        Set the tool configuration\n        :param config: The configuration to set\n        :type config: ConfigBlock\n        \"\"\"\n        self.algo_convert_img.set_config(config)\n        self.algo_image_io.set_config(config)\n        self.algo_track_features.set_config(config)\n\n        abspath = lambda p: os.path.abspath(os.path.expanduser(p))\n\n        self.image_list_filepath = config.get_value('image_list_file',\n                                                    self.image_list_filepath)\n        if self.image_list_filepath:\n            self.image_list_filepath = abspath(self.image_list_filepath)\n        self.mask_list_filepath = config.get_value('mask_list_file',\n                                                   self.mask_list_filepath)\n        if self.mask_list_filepath:\n            self.mask_list_filepath = abspath(self.mask_list_filepath)\n        self.invert_masks = config.get_value_bool('invert_masks', self.invert_masks)\n        self.expect_multichannel_masks = \\\n            config.get_value_bool('expect_multichannel_masks',\n                                  self.expect_multichannel_masks)\n        self.output_tracks_filepath = \\\n            config.get_value('output_tracks_file', self.output_tracks_filepath)\n        if self.output_tracks_filepath:\n            self.output_tracks_filepath = abspath(self.output_tracks_filepath)\n\n    def validate_configuration(self):\n        \"\"\"\n        Check the current configuration for validity\n        :return: True if valid, false if not.\n        :rtype: bool\n        \"\"\"\n        if not self.image_list_filepath:\n            self.log.error(\"No image list file provided\")\n            return False\n        if not os.path.isfile(self.image_list_filepath):\n            self.log.error(\"Path given for image list does not refer to a file\")\n            return False\n        if not self.output_tracks_filepath:\n            self.log.error(\"No output tracks file provided\")\n            return False\n        if self.mask_list_filepath and not os.path.isfile(self.mask_list_filepath):\n            self.log.error(\"Specified a mask list filepath, but path did not \"\n                           \"refer to an existing file.\")\n            return False\n\n        return (\n            # Algorithms should check out valid\n            self.algo_convert_img.check_config(self.algo_convert_img.get_config())\n            and self.algo_image_io.check_config(self.algo_image_io.get_config())\n            and self.algo_track_features.check_config(self.algo_track_features.get_config())\n        )\n\n    def track_images(self):\n        # Create list of input filepaths\n        self.log.info(\"Reading input image list file...\")\n        with open(self.image_list_filepath, 'r') as image_list_file:\n            input_filepaths = \\\n                [line.strip() for line in image_list_file.readlines()]\n\n        # Create a list of mask images if we were given a list file\n        mask_filepaths = None\n        if self.mask_list_filepath:\n            self.log.info(\"Reading mask image list file...\")\n            with open(self.mask_list_filepath) as mask_list_file:\n                mask_filepaths = \\\n                    [line.strip() for line in mask_list_file.readlines()]\n\n            if len(input_filepaths) != len(mask_filepaths):\n                self.log.error(\"Input and Mask image lists were not congruent \"\n                               \"in size.\")\n                return False\n\n        # Check that the output tracks file is open-able and that the containing\n        # directory exists.\n        if not os.path.isdir(os.path.dirname(self.output_tracks_filepath)):\n            self.log.info(\"Creating containing directory for output tracks \"\n                          \"file: %s\",\n                          os.path.dirname(self.output_tracks_filepath))\n            os.makedirs(os.path.dirname(self.output_tracks_filepath))\n        self.log.info(\"Testing that output tracks file is open-able...\")\n        test_f = open(self.output_tracks_filepath, 'w')\n        test_f.close()\n\n        
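        # frame loop: convert each image (and optional mask) to the expected format and feed it to the feature tracker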
tracks = TrackSet()\n for frame_num in xrange(len(input_filepaths)):\n input_img = self.algo_convert_img.convert(\n self.algo_image_io.load(input_filepaths[frame_num])\n )\n mask_img = None\n if mask_filepaths:\n mask_img = self.algo_convert_img.convert(\n self.algo_image_io.load(mask_filepaths[frame_num])\n )\n self.log.info(\"Processing frame %d...\", frame_num)\n tracks = self.algo_track_features.track(tracks, frame_num,\n input_img, mask_img)\n\n self.log.info(\"Frame processing complete, writing out track set...\")\n tracks.write_tracks_file(self.output_tracks_filepath)\n\n\ndef cli_main():\n \"\"\"\n Returns:\n 0 - Success\n 1 - Configuration invalid\n 2 - Tracking failed\n \"\"\"\n import optparse\n\n logging.getLogger().setLevel(logging.INFO)\n log = logging.getLogger(\"cli_main\")\n\n parser = optparse.OptionParser()\n parser.add_option(\"-c\", \"--config\",\n help=\"Path to the configuration file to use.\")\n parser.add_option(\"-o\", \"--output-config\",\n help=\"Output a configuration file for the current \"\n \"configuration to the specified file path.\")\n opts, args = parser.parse_args()\n\n if opts.config:\n opts.config = os.path.abspath(os.path.expanduser(opts.config))\n if opts.output_config:\n opts.output_config = os.path.abspath(os.path.expanduser(opts.output_config))\n\n tft = TrackFeaturesTool()\n\n if opts.config:\n log.info(\"Setting configuration file: %s\", opts.config)\n tft.set_configuration(ConfigBlock.from_file(opts.config))\n\n if opts.output_config:\n log.info(\"Writing output configuration file: %s\", opts.output_config)\n tft.get_configuration().write(opts.output_config)\n if not tft.validate_configuration():\n log.warning(\"Current configuration insufficient for running the \"\n \"tool\")\n return 0\n\n # Error out if current configuration not valid\n if not tft.validate_configuration():\n log.error(\"Current configuration insufficient for running the tool\")\n return 1\n\n if tft.track_images():\n return 0\n else:\n return 2\n\n\nif __name__ == \"__main__\":\n exit(cli_main())\n","sub_path":"vital/bindings/python/vital/bin/maptk_track_features.py","file_name":"maptk_track_features.py","file_ext":"py","file_size_in_byte":13839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"153863704","text":"\"\"\"\nExample Usage\n\n```python\n@pytest.fixture(scope=\"session\")\ndef dynamodb_tables():\n return [\n {\n \"AttributeDefinitions\": [\n {\"AttributeName\": \"uri\", \"AttributeType\": \"S\"},\n {\"AttributeName\": \"timestamp\", \"AttributeType\": \"S\"},\n ],\n \"TableName\": \"my-dbd-table\",\n \"KeySchema\": [\n {\"AttributeName\": \"uri\", \"KeyType\": \"HASH\"},\n {\"AttributeName\": \"timestamp\", \"KeyType\": \"RANGE\"},\n ],\n }\n ]\n\n@pytest.fixture(scope=\"class\")\ndef dynamodb(localstack, dynamodb_tables):\n with boto3_fixtures.setup_dynamodb(dynamodb_tables) as tables:\n yield tables\n```\n\"\"\"\n\n\nimport backoff\nimport boto3\nfrom botocore.exceptions import ClientError\n\nimport boto3_fixtures.contrib.boto3\nfrom boto3_fixtures import utils\n\n\n@backoff.on_exception(backoff.expo, ClientError, max_time=30)\ndef create_table(config):\n config.update({\"BillingMode\": \"PAY_PER_REQUEST\"})\n return utils.call(boto3.client(\"dynamodb\").create_table, **config)\n\n\ndef create_tables(dynamodb_tables):\n client = boto3_fixtures.contrib.boto3.client(\"dynamodb\")\n for table in dynamodb_tables:\n assert create_table(table)\n for table in dynamodb_tables:\n name = table[\"TableName\"]\n 
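        # poll until the table is reported as existing (1 s delay, up to 30 attempts) before asserting on describe_table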
client.get_waiter(\"table_exists\").wait(\n            TableName=name, WaiterConfig={\"Delay\": 1, \"MaxAttempts\": 30}\n        )\n        assert utils.call(client.describe_table, TableName=name)\n    return [t[\"TableName\"] for t in dynamodb_tables]\n\n\n@backoff.on_exception(backoff.expo, ClientError, max_tries=3)\ndef destroy_table(config):\n    client = boto3_fixtures.contrib.boto3.client(\"dynamodb\")\n    return utils.call(client.delete_table, TableName=config[\"TableName\"])\n\n\ndef destroy_tables(dynamodb_tables):\n    boto3_fixtures.contrib.boto3.client(\"dynamodb\")\n    for table in dynamodb_tables:\n        destroy_table(table)\n\n\ndef setup(tables):\n    create_tables(tables)\n    return {\"tables\": tables}\n\n\ndef teardown(tables):\n    destroy_tables(tables)\n","sub_path":"boto3_fixtures/dynamodb.py","file_name":"dynamodb.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"635018828","text":"# -*- coding: utf-8 -*-\nimport h5py\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport pickle\nimport os\nimport torch.optim as optim\nfrom tqdm import tqdm, trange\n\nPROCESSED_SUMME = 'data/SumMe/eccv16_dataset_summe_google_pool5.h5'\nPROCESSED_TVSUM = 'data/TVSUM/eccv16_dataset_tvsum_google_pool5.h5'\n\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n\ndef save_pickle_file(filename, data):\n    print('Saving {} ...'.format(filename))\n    with open('{}.pickle'.format(filename), 'wb') as handle:\n        pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)\n    print('{} saved'.format(filename))\n\n\nclass CSNET(nn.Module):\n    def __init__(self, input_size, hidden_size=256, num_layers=2):\n        \"\"\"Scoring LSTM\"\"\"\n        super().__init__()\n\n        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, bidirectional=True)\n        self.out = nn.Sequential(\n            nn.Linear(hidden_size * 2, 1),  # bidirection => scalar\n            nn.Sigmoid())\n\n    def forward(self, features):\n        \"\"\"\n        Args:\n            features: [seq_len, 1, 100] (compressed pool5 features)\n        Return:\n            scores [seq_len, 1]\n        \"\"\"\n        self.lstm.flatten_parameters()\n\n        # [seq_len, 1, hidden_size * 2]\n        features, (h_n, c_n) = self.lstm(features)\n\n        # [seq_len, 1]\n        scores = self.out(features.squeeze(1))\n\n        return scores\n\n\nclass Runner(object):\n    def __init__(self, config=None, train_loader=None, test_loader=None):\n        \"\"\"Class that Builds, Trains and Evaluates SUM-GAN model\"\"\"\n        self.config = config\n        self.train_loader = train_loader\n        self.test_loader = test_loader\n\n    def build(self):\n\n        # Build Modules\n        self.csnet = CSNET(\n            self.config.input_size,\n            self.config.hidden_size\n        ).to(device)\n        self.model = nn.ModuleList([\n            self.csnet])\n\n        self.optimizer = optim.Adam(\n            self.csnet.parameters(),\n            lr=self.config.lr)\n\n        self.model.train()\n\n        print(self.model)\n\n    def difference(self, h_origin, h_fake):\n        return torch.abs(h_origin - h_fake)\n\n    def train(self):\n        step = 0\n        for epoch_i in trange(self.config.n_epochs, desc='Epoch', ncols=80):\n            s_e_loss_history = []\n            d_loss_history = []\n            c_loss_history = []\n            for batch_i, image_features in enumerate(tqdm(\n                    self.train_loader, desc='Batch', ncols=80, leave=False)):\n\n                if image_features.size(1) > 10000:\n                    continue\n                image_features = image_features.view(-1, self.config.input_size)\n\n                image_features_ = image_features.to(device)\n\n                T = image_features_.size(0)\n                m = 2\n                M = k = 4\n                cm_idx={}\n                for m in range(M):\n                    end = m + T - k\n                    idxs=[]\n                    for i in range(0, T):\n                        val = i * k + m\n                        if val >= end:\n                            idxs.append(end)\n
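                            # cm_idx[m] holds indices strided by k from offset m;
                            # past `end` the clamped endpoint is recorded once and
                            # the scan for this stream stops at the break below.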
break\n else:\n idxs.append(val)\n cm_idx[m]=idxs\n\n\n\n\n # ---- Train sLSTM, eLSTM ----#\n if self.config.verbose:\n tqdm.write('\\nTraining sLSTM and eLSTM...')\n\n # [seq_len, 1, hidden_size]\n # original_features = self.linear_compress(image_features_.detach()).unsqueeze(1)\n\n self.csnet(\n original_features)\n _, _, _, uniform_features = self.summarizer(\n original_features, uniform=True)\n\n self.s_e_optimizer.zero_grad()\n s_e_loss.backward() # retain_graph=True)\n # Gradient cliping\n torch.nn.utils.clip_grad_norm(self.model.parameters(), self.config.clip)\n self.s_e_optimizer.step()\n\n s_e_loss_history.append(s_e_loss.data)\n\n if self.config.verbose:\n tqdm.write('Plotting...')\n\n # self.writer.update_loss(uniform_prob.data, step, 'uniform_prob')\n\n step += 1\n\n s_e_loss = torch.stack(s_e_loss_history).mean()\n\n # Plot\n if self.config.verbose:\n tqdm.write('Plotting...')\n self.writer.update_loss(s_e_loss, epoch_i, 's_e_loss_epoch')\n\n # Save parameters at checkpoint\n ckpt_path = str(self.config.save_dir) + f'/epoch-{epoch_i}.pkl'\n tqdm.write(f'Save parameters at {ckpt_path}')\n if not os.path.exists(self.config.save_dir):\n os.makedirs(self.config.save_dir)\n torch.save(self.model.state_dict(), ckpt_path)\n self.evaluate(epoch_i)\n\n self.model.train()\n\n\nif __name__ == '__main__':\n pass\n","sub_path":"mcsf-late-fusion/layers/csnet.py","file_name":"csnet.py","file_ext":"py","file_size_in_byte":4830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"63377830","text":"from dataset.base_dataset import BaseDataset\nimport os.path as osp\nfrom dataset.data_transform import *\nfrom torch.utils import data\nimport matplotlib.pyplot as plt\nfrom utils.data_visualization import visualize_segmap\n\n\nclass CityscapesDataset(BaseDataset):\n \"\"\"\n The cityscapes dataset\n \"\"\"\n\n def __init__(self, root, list_path, max_iters=None, crop_size=(512, 1024),\n mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), ignore_label=255, set='val'):\n super().__init__(root, list_path, max_iters, crop_size, mean, std, set)\n self.ignore_label = ignore_label\n self.id_to_trainid = {\n 0: 255, 1: 255, 2: 255, 3: 255, 4: 255, 5: 255, 6: 255, 7: 0, 8: 1, 9: 255, 10: 255, 11: 2,\n 12: 3, 13: 4, 14: 255, 15: 255, 16: 255, 17: 5, 18: 255, 19: 6, 20: 7, 21: 8, 22: 9, 23: 10,\n 24: 11, 25: 12, 26: 13, 27: 14, 28: 15, 29: 255, 30: 255, 31: 16, 32: 17, 33: 18, -1: -1\n }\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, item):\n datafiles = self.data[item]\n example = {}\n example[\"image\"] = Image.open(datafiles[\"image\"]).convert('RGB')\n example[\"label\"] = Image.open(datafiles[\"label\"])\n result = self.augment(example)\n result[\"label\"] = self.convert_label(result[\"label\"])\n result[\"name\"] = datafiles[\"name\"]\n return result[\"image\"], result[\"label\"], result[\"name\"]\n\n def convert_label(self, label):\n train_label = self.ignore_label * np.ones(label.shape, dtype=np.float32)\n for k, v in self.id_to_trainid.items():\n train_label[label == k] = v\n return train_label\n\n def augment(self, example):\n composed_transform = Compose([\n Resize(self.crop_size[0], self.crop_size[1]),\n Normalize(mean=self.mean, std=self.std),\n ToTensor()\n ])\n return composed_transform(example)\n\n def load_image_and_label(self):\n file = []\n for id in self.img_ids:\n image = osp.join(self.root, \"leftImg8bit/%s/%s\" % (self.set, id))\n id_split = id.split('_')\n label = osp.join(self.root, \"gtFine/%s/%s\" % 
(self.set, id_split[0] + '_' + id_split[1]\n                                                + '_' + id_split[2] + '_' + 'gtFine_labelIds.png'))\n            file.append({\n                \"image\": image,\n                \"label\": label,\n                \"name\": id\n            })\n        return file\n\n\nif __name__ == '__main__':\n    cityscapes_dataset = CityscapesDataset('/media/sdb/duckie/dataset/Cityscapes',\n                                           list_path='./cityscapes_list/val.txt')\n    trainloader = data.DataLoader(cityscapes_dataset, batch_size=4, shuffle=True)\n\n    for i, batch in enumerate(trainloader):\n        imgs, labels, _ = batch\n        if i == 0:\n            img_0 = imgs[0].numpy()\n            label_0 = labels[0].numpy()\n            img_0 = img_0.transpose(1, 2, 0)\n            color_map = visualize_segmap(label_0, dataset=\"cityscapes\")\n\n            img_0 *= (0.229, 0.224, 0.225)\n            img_0 += (0.485, 0.456, 0.406)\n            img_0 *= 255.0\n            img_0 = img_0.astype(np.uint8)\n\n            plt.figure()\n            plt.title(\"data visualization\")\n            plt.subplot(211)\n            plt.imshow(img_0)\n            plt.subplot(212)\n            plt.imshow(color_map)\n            plt.show()\n            break\n","sub_path":"dataset/cityscapes.py","file_name":"cityscapes.py","file_ext":"py","file_size_in_byte":3452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"545063978","text":"'''\nCreated on 28.10.2017\n\n@author: Kevin Köck\n'''\n\n__updated__ = \"2018-05-29\"\n__version__ = \"1.2\"\n\nimport time\nimport gc\nfrom pysmartnode import config\nimport network\nimport sys\nimport uasyncio as asyncio\n\ngc.collect()\n\n\ndef connect():\n    ap_if = network.WLAN(network.AP_IF)\n    ap_if.active(False)\n    wifi = network.WLAN(network.STA_IF)\n    wifi.active(True)\n    wifi.connect(config.WIFI_SSID, config.WIFI_PASSPHRASE)  # Connect to an AP\n    time.sleep(0.1)\n    count = 0\n    while wifi.isconnected() is False:  # Check for successful connection\n        count += 1\n        if count % 10 == 0:\n            print(\"Connecting, {!r}\".format(count / 10))\n        time.sleep(0.1)\n        if count >= 50:\n            print(\"Error connecting to wifi, resetting device in 2s\")\n            import machine\n            time.sleep(2)\n            machine.reset()\n    loop = asyncio.get_event_loop()\n    loop.create_task(start_services(wifi))\n    gc.collect()\n    return wifi.isconnected()\n\n\nasync def start_services(wifi):\n    while wifi.isconnected() is False:  # Check for successful connection\n        await asyncio.sleep_ms(250)\n    if sys.platform == \"esp32_LoBo\":\n        from . import wifi_esp32_lobo\n    elif sys.platform == \"esp8266\":\n        from . 
import wifi_esp8266\n print(\"Connected, local ip {!r}\".format(wifi.ifconfig()[0]))\n","sub_path":"pysmartnode/networking/wifi.py","file_name":"wifi.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"499248154","text":"from flask import Flask\nfrom flask_pymongo import pymongo\nfrom app import app\nimport json\nimport ssl\n\nCONNECTION_STRING = \"mongodb+srv://nboursalian:nanlal@lengecluster.b86st.mongodb.net/workoutOptions?retryWrites=true&w=majority\"\nclient = pymongo.MongoClient(CONNECTION_STRING, ssl_cert_reqs=ssl.CERT_NONE)\ndb = client.workoutOptions\n\nemails = client.workoutOptions.emails\n\n\ndef insert_data(user):\n data = {'first_name': user.first_name, 'last_name': user.last_name, 'email': user.email, 'password': user.password}\n result = db.emails.insert(data)\n print(\"success\")\n\n\ndef insert_workout(email, the_dict):\n name = email + ' Collection'\n user_collection = db[name]\n result = user_collection.insert_one(the_dict)\n\n\ndef collec_exist(email):\n collist = db.list_collection_names()\n name = email + ' Collection'\n if name in collist:\n return True\n else:\n return False\n\n\ndef get_user(email):\n my_col = db[\"emails\"]\n # print(my_col.find_one()['email'])\n\n # user_data = json.load(my_col.find())\n # print(user_data['email'])\n\n for x in my_col.find():\n if email == x['email']:\n return x\n\n return None\n\n\ndef get_past_workouts(email, category):\n\n name = email + ' Collection'\n workout_collections = db[name]\n\n past_dict = []\n\n for x in workout_collections.find():\n if category == x['Workout_Type']:\n x.pop('_id')\n past_dict.append(x)\n if len(past_dict) == 0:\n return None\n\n return past_dict\n","sub_path":"backend/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"113174987","text":"import time\nfrom dijkstra_functions import *\nfrom finalmap import *\n\nuserdefined = False\nif userdefined:\n start_nodex = int(input(\"Please enter Start point X coordinate: \"))\n start_nodey = int(input(\"Please enter Start point Y coordinate: \"))\n\n goal_nodex = int(input(\"Please enter Goal point X coordinate: \"))\n goal_nodey = int(input(\"Please enter Goal point Y coordinate: \"))\n clearance = int(input(\"Please enter the Radius of the robot\"))\nelse:\n start_nodex = 5\n start_nodey = 15\n goal_nodex = 200\n goal_nodey = 190\n clearance = 15\n\nstart_pos = (start_nodex, start_nodey)\ngoal_pos = (goal_nodex, goal_nodey)\nplt.plot(start_nodex, start_nodey, \"Dr\")\nplt.plot(goal_nodex, goal_nodey, \"Dr\")\n\nstart_time = time.time()\n\nif __name__ == '__main__':\n final_obs, wall_x, wall_y = finalmap(clearance)\n if start_pos in (zip(wall_x, wall_y) or final_obs):\n print(\"Start Position in obstacle space\")\n\n elif goal_pos in (zip(wall_x, wall_y) or final_obs):\n print(\"goal Position in obstacle space\")\n\n else:\n path = dijkstra(start_pos, goal_pos, final_obs)\n if path is not None:\n scatterx = [x[0] for x in path]\n scattery = [x[1] for x in path]\n plt.plot(scatterx, scattery, color='r', linewidth=4)\n plt.savefig('path_rigid.png')\n plt.show()\n elapsed_time = time.time() - start_time\n print(\"Time Required to Solve \", round(elapsed_time, 2), \"seconds\")\n else:\n print(\"No path 
found\")\n","sub_path":"dijkstra_rigid.py","file_name":"dijkstra_rigid.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"523679459","text":"import socket\n\nTCP_IP = '218.150.181.230'\nTCP_PORT = 9001\nBUFFER_SIZE = 1024\n\ntcpsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ntcpsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\ntcpsock.bind((TCP_IP, TCP_PORT))\n\n\ntcpsock.listen(5)\n\n# 이미지 파일을 전송받음\nwhile True:\n print (\"Waiting for incoming connections...\")\n (conn, (ip,port)) = tcpsock.accept()\n print (' Got connection from ', (ip,port))\n\n fname = 'C:\\\\Users\\\\AICT\\\\Desktop\\\\image_save_test\\\\save\\\\recv_pic' + str(port) + '.jpg'\n data = conn.recv(1024)\n fsize = data.decode(\"utf-8\",\"ignore\")\n fsize = int(data)\n print('받아오는 파일크기 : ' + str(round(fsize/1024,2)) + 'KB\\n')\n\n total_buffer_size = 0\n with open(fname, 'wb') as f:\n print('receiving data...')\n while True:\n data = conn.recv(BUFFER_SIZE)\n total_buffer_size += BUFFER_SIZE\n f.write(data)\n if total_buffer_size >= fsize:\n #print('not in data')\n f.close()\n break\n print('file received')\n\n # 예측 결과 클라이언트로 전송(문자열)\n msg = \"송이버섯 : 균모는 육질이고 지름 5~15(30)cm이며 구형 또는 반구형에서 편평하게되며 중앙부가 둔하게 돌출된다.\"\n msg = msg.encode(\"utf-8\")\n conn.send(msg)\n print(\"sended message\")\n","sub_path":"Mushoroom Dictionary/image_save_test/receive_from_java.py","file_name":"receive_from_java.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"541047869","text":"#!/usr/bin/env python\n\nfrom distutils.util import strtobool\n\nfrom bottle import abort\nfrom bottle import install\nfrom bottle import post\nfrom bottle import request\nfrom bottle import route\nfrom bottle import run\nfrom bottle import static_file\nfrom bottle.ext import sqlalchemy\nfrom bottle import template\n\nfrom sqlalchemy import Boolean\nfrom sqlalchemy import Column\nfrom sqlalchemy import Integer\nfrom sqlalchemy import String\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.sql import func\n\nfrom wtforms import Form\nfrom wtforms import validators\nfrom wtforms.fields import StringField\nfrom wtforms.fields import TextAreaField\nfrom wtfnocaptcha.fields import NoCaptchaField\n\nimport settings\n\nfrom decorators.slack_request_processor import slack_verification_preprocessor\nfrom contact.views import contact\nfrom oauth2.views import callback\nfrom util import DictObject\n\nroute('/oauth2/callback/', 'GET', callback)\nroute('/contact/', ['GET', 'POST'], contact)\n\n# set up sqlalchemy\nAlchemyBase = declarative_base()\n\nengine = create_engine(settings.DATABASE_CONNECTION_STRING, echo=True)\ncreate_session = sessionmaker(bind=engine)\n\n# set up the sqlalchemy plugin\nsqlalchemy_plugin = sqlalchemy.Plugin(\n engine,\n AlchemyBase.metadata,\n keyword='db',\n create=True,\n commit=True,\n use_kwargs=False\n)\n\n# set up the bottle app\ninstall(sqlalchemy_plugin)\n\n\nclass Excuse(AlchemyBase):\n\n __tablename__ = \"excuses\"\n id = Column(Integer, primary_key=True)\n excuse = Column(String, nullable=False)\n published = Column(Boolean, nullable=False, default=False)\n username = Column(String, nullable=False, default=\"admin\")\n team_id = Column(String, nullable=True, default=None)\n\n def __init__(self, username, excuse):\n self.username = 
username\n        self.excuse = excuse\n\n    def __repr__(self):\n        return \"{id}: {excuse}\".format(id=self.id, excuse=self.excuse)\n\n    @classmethod\n    def get_random_excuse(cls, db):\n        return db.query(cls).filter(\n            cls.published==True\n        ).order_by(\n            func.random()\n        ).first()\n\n\n@post('/slacktion/')\n@slack_verification_preprocessor\ndef process_slack_command(db):\n    \"\"\"Parse /commands and route them to their appropriate processing methods\n    \"\"\"\n\n    # make our lives a little easier\n    slack_data = DictObject(request.forms)\n\n    # small chance text is empty (if a stupid dev is curling manually)\n    if 'text' in slack_data.attributes:\n        # match help text\n        if slack_data.text == 'help':\n            return {\n                \"text\": \"Request this help text with `/xqzes help`\\n\"\n                        \"Request an excuse (visible to everyone) with `/xqzes`\\n\"\n                        \"Submit a new excuse to a moderator with \"\n                        \"`/xqzes add `\\n\"\n                        \"e.g: `/xqzes add I was shopping!`\"\n            }\n        elif slack_data.text.startswith(\"add\"):\n            # here we want to add a new non-approved excuse\n            excuse_text = slack_data.text[len(\"add\"):].strip()\n            if len(excuse_text) > 140:\n                return {\n                    'text': \"We conform to twitter standards (for no particular \"\n                            \"reason), please keep your excuses shorter than \"\n                            \"140 characters\"\n                }\n            excuse = Excuse(slack_data.user_name, excuse_text)\n            excuse.team_id = slack_data.team_id\n            db.add(excuse)\n            return {\n                'text': \"Your excuse has been added to the moderation queue. This \"\n                        \"can take anywhere from a few minutes to a few years\"\n            }\n\n    try:\n        excuse_text = Excuse.get_random_excuse(db).excuse\n    except AttributeError:\n        abort(404, \"NO EXCUSE FOR YOU, but, maybe we need one :(\")\n    return {\n        \"response_type\": \"in_channel\",\n        \"text\": excuse_text,\n    }\n\n\n@route('/')\ndef hello(db):\n    \"\"\"Serve up a plaintext public excuse for the purposes of\n    \"\"\"\n    try:\n        excuse_text = Excuse.get_random_excuse(db).excuse\n    except AttributeError:\n        abort(404, \"NO EXCUSE FOR YOU, but, maybe we need one :(\")\n\n    return template(\n        'home',\n        excuse_text=excuse_text,\n        slack_client_id=settings.SLACK_OAUTH['client_id'],\n        slack_command_scope=settings.SLACK_OAUTH['command_scope'],\n        slack_installed=strtobool(request.GET.get('added_to_slack', 'false')),\n    )\n\n\n@route('/privacy/')\ndef privacy_policy(db):\n    return static_file('privacy_policy.txt', root=settings.TEMPLATE_PATH)\n\n\n@route('/slack_instructions/')\ndef slack_instructions(db):\n    return template(\n        'slack_instructions',\n        slack_client_id=settings.SLACK_OAUTH['client_id'],\n        slack_command_scope=settings.SLACK_OAUTH['command_scope'],\n        slack_installed=strtobool(request.GET.get('added_to_slack', 'false')),\n    )\n\n\n@route('/submit/')\n@post('/submit/')\ndef submit(db):\n\n    # TODO: break this out along with others in to an excuses package.\n    class SubmissionForm(Form):\n        attribution_name = StringField(\n            'Your Name (for future attribution purposes)',\n            [\n                validators.InputRequired(),\n                validators.Length(\n                    min=3,\n                    max=50,\n                    message=\"Srsly, give us a decent username \"\n                            \"(%(min)d - %(max)d chars),\"\n                            \" doesn't even have to be real.\"\n                )\n            ]\n        )\n        excuse = TextAreaField(\n            'What\\'s YOUR excuse !?!?',\n            [\n                validators.Length(\n                    min=5,\n                    max=140,\n                    message=\"Please provide %(min)d - %(max)d \"\n                            \"characters\"),\n            ]\n        )\n        nocaptcha = NoCaptchaField(\n            public_key=settings.RECAPTCHA_SITE_KEY,\n            private_key=settings.RECAPTCHA_SECRET_KEY,\n            secure=True,\n        )\n\n    form = SubmissionForm(request.POST, nocaptcha={'ip_address': '127.0.0.1'})\n\n    submitted = False\n    if request.method == 
'POST' and form.validate():\n        excuse_record = Excuse(form.attribution_name.data,\n                               form.excuse.data)\n        db.add(excuse_record)\n        submitted = True\n\n    return template('submit', form=form, submitted=submitted)\n\n\n\n@route('/acknowledgements/')\ndef acknowledgements(db):\n    return template('acknowledgements')\n\n\n@route('/static/')\ndef callback(path):\n    return static_file(path, root=settings.STATIC_PATH)\n\n\nif __name__ == '__main__':\n    run(host='127.0.0.1', port=8088, debug=False)\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":6813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"505329127","text":"knox = {\n    'kind': 'malinois',\n    'owner': 'sylvie',\n}\n\nodin = {\n    'kind': 'golden retriever',\n    'owner': 'erwan',\n}\n\noggy = {\n    'kind': 'husky',\n    'owner': 'alex',\n}\n\npets = [knox, odin, oggy]\n\n# for pet in pets:\n#     print(\"The owner is \" + pet['owner'].title() + \" and it's a \" +\n#           pet['kind'].title())\n\nfor pet in pets:\n    print(f\"The owner is {pet['owner']} and it's a {pet['kind']}\")","sub_path":"Part 1/chap 6/pets.py","file_name":"pets.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"524919467","text":"import board\nimport neopixel\nimport time\n\ndef glow_purple(px, t, max):\n    for value in range(max):\n        px[0] = (value, 0, value)\n        px.show()\n        time.sleep(t)\n\ndef dim_purple(px, t, max):\n    nums = range(max)\n    for num in nums:\n        value = max - num\n        px[0] = (value, 0, value)\n        px.show()\n        time.sleep(t)\n\ndef glow_blue(px, t, max):\n    nums = range(max)\n    for value in nums:\n        px[0] = (value // 2, 0, value)\n        px.show()\n        time.sleep(t)\n\ndef dim_blue(px, t, max):\n    nums = range(max)\n    for num in nums:\n        value = max - num\n        px[0] = (value // 2, 0, value)\n        px.show()\n        time.sleep(t)\n\ndef main():\n    # board.D2 is the output pin that is sewn into the hat.\n    # 1 represents the number of connected pixels\n    pixels = neopixel.NeoPixel(board.D2, 1)\n    frame_duration = 1/60\n    max_brightness = 70\n\n    while True:\n        glow_purple(pixels, frame_duration, max_brightness)\n        dim_purple(pixels, frame_duration, max_brightness)\n        # glow_blue(pixels, frame_duration, max_brightness)\n        # dim_blue(pixels, frame_duration, max_brightness)\n\nmain()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"43067182","text":"import pytest\nfrom os import path\n\nfrom art.test_handler.tools import polarion\nfrom art.unittest_lib import (\n    CoreSystemTest as TestCase,\n    testflow,\n    tier1,\n    tier2,\n)\n\nfrom snmp_traps import (\n    restore_selinux_context,\n    configs_dir,\n    copy_ovirt_notifier_config_file,\n    generate_events,\n    get_snmp_result,\n    finalize_class_helper,\n    flush_logs,\n    install_snmp_packages,\n    remove_snmp_packages,\n    setup_class_helper,\n    start_ovirt_notifier_service,\n    stop_ovirt_notifier_service,\n)\n\nfrom config import NOTIFIER_LOG, OVIRT_USER, OVIRT_GROUP, ENGINE\n\n\n@pytest.fixture(autouse=True, scope=\"module\")\ndef setup_module(request):\n    def finalize():\n        remove_snmp_packages()\n\n    request.addfinalizer(finalize)\n\n    install_snmp_packages()\n\n\n@tier1\nclass TestNotifierLogOwnership(TestCase):\n    \"\"\"\n    Class to test ovirt-notifier log ownership.\n    \"\"\"\n    @classmethod\n    @pytest.fixture(autouse=True, scope=\"class\")\n    def setup_class(cls, request):\n        if not 
ENGINE.host.fs.exists(NOTIFIER_LOG):\n pytest.skip(\"No log file exists.\")\n\n @polarion(\"RHEVM-21772\")\n def test_log_ownership(self):\n assert ENGINE.host.os.get_file_owner(\n NOTIFIER_LOG\n ) == [OVIRT_USER, OVIRT_GROUP], \"Wrong log file ownership.\"\n\n\n@tier2\nclass SNMPTestTemplate(TestCase):\n \"\"\"\n Template class for SNMP traps tests.\n \"\"\"\n\n @classmethod\n @pytest.fixture(scope=\"class\")\n def setup_class(cls, request):\n def finalize():\n testflow.teardown(\"Stopping oVirt notifier service.\")\n stop_ovirt_notifier_service()\n\n testflow.teardown(\"Flushing logs.\")\n flush_logs()\n\n testflow.teardown(\"Cleaning environment.\")\n finalize_class_helper()\n\n request.addfinalizer(finalize)\n\n testflow.setup(\"Generating environment.\")\n setup_class_helper()\n\n testflow.setup(\"Copying SNMP oVirt notifier config.\")\n copy_ovirt_notifier_config_file(cls.init_config_file_path())\n\n testflow.setup(\"Restore selinux context on log file.\")\n restore_selinux_context()\n\n testflow.setup(\"Starting ovirt notifier service.\")\n start_ovirt_notifier_service()\n\n @classmethod\n def class_name_to_snake_case(cls):\n def helper(letter):\n if letter.isupper():\n return \"_\" + letter.lower()\n else:\n return letter\n\n return \"\".join(map(helper, cls.__name__[4:])).lstrip(\"_\")\n\n @classmethod\n def init_config_file_name(cls):\n return \".\".join([cls.class_name_to_snake_case(), \"conf\"])\n\n @classmethod\n def init_config_file_path(cls):\n return path.join(configs_dir, cls.init_config_file_name())\n\n @polarion(\"RHEVM-16356\")\n def test_snmp_traps(self):\n testflow.step(\"Generating events on engine.\")\n generate_events()\n testflow.step(\"Checking if the number of events logged right.\")\n assert get_snmp_result()\n\n\nclass TestNoAuthNoPriv(SNMPTestTemplate):\n \"\"\"\n Test if events from engine traps without authentication.\n \"\"\"\n @classmethod\n @pytest.fixture(autouse=True, scope=\"class\")\n def setup_class(cls, request):\n super(TestNoAuthNoPriv, cls).setup_class(request)\n\n\nclass TestAuthNoPriv(SNMPTestTemplate):\n \"\"\"\n Test if events from engine traps with authentication.\n \"\"\"\n @classmethod\n @pytest.fixture(autouse=True, scope=\"class\")\n def setup_class(cls, request):\n super(TestAuthNoPriv, cls).setup_class(request)\n\n\nclass TestAuthPriv(SNMPTestTemplate):\n \"\"\"\n Test if events from engine traps with authentication and privacy.\n \"\"\"\n @classmethod\n @pytest.fixture(autouse=True, scope=\"class\")\n def setup_class(cls, request):\n super(TestAuthPriv, cls).setup_class(request)\n","sub_path":"art/tests/rhevmtests/integration/snmp_traps/test_snmp_traps.py","file_name":"test_snmp_traps.py","file_ext":"py","file_size_in_byte":3828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"139185277","text":"import numpy as np\nfrom scipy.stats import norm\nimport pandas as pd\nimport time\nimport multiprocessing as mp\n\n\nclass ImportanceSampling:\n def __init__(self, quantile, sample_sizes=10000, shifts=3, sim_sizes=1, pool_size=1):\n\n # Initialize arguments\n self.quantile = quantile\n self.sample_sizes = [sample_sizes] if isinstance(sample_sizes, int) else sample_sizes\n self.shifts = [shifts] if isinstance(shifts, (int, float)) else shifts\n self.sim_sizes = [sim_sizes] if isinstance(sim_sizes, int) else sim_sizes\n self.pool_size = pool_size\n\n # Calculate the true percentile\n self.truth = norm.ppf(self.quantile / 100.0)\n\n # Output columns\n self.columns = ['sample_size',\n 
'shift',\n 'sim_size',\n 'quantile',\n 'truth',\n 'mean',\n 'std_mean',\n 'std_true',\n 'min',\n 'max',\n 'time']\n\n @staticmethod\n def percentile(data, quantile, likelihood_ratio=None):\n \"\"\"\n Using the 'nearest' interpolation method, PERCENTILE will give the desired percentile\n from the DATA at the supplied QUANTILE.\n If LIKELIHOOD_RATIO is None, then the equally weighted/regular percentile is returned.\n If LIKELIHOOD_RATIO is not None, then the IS inspired likelihood ratio is used to derive\n a weighted percentile.\n Note that None and an array of ones will both give the regular percentile.\n\n :param data: numpy array\n :param quantile: quantile as percentage, e.g. 90 for the 90% percentile.\n :param likelihood_ratio: likelihood ratio evaluated at the data points\n :return: percentile at the desired quantile.\n \"\"\"\n\n if likelihood_ratio is None:\n return np.percentile(data, quantile, interpolation='nearest')\n else:\n sample_size = len(data)\n # likelihood ratio divided by N - 1\n lr = likelihood_ratio / (sample_size - 1)\n idx = data.argsort()\n\n if quantile > 50: # Right tail\n lr[np.argmax(data)] = 0\n lr_cumsum = np.flip(np.flip(lr[idx]).cumsum())\n tail_probability = 1.00 - quantile / 100.0\n else: # Left tail\n lr[np.argmin(data)] = 0\n lr_cumsum = lr[idx].cumsum()\n tail_probability = quantile / 100.0\n idx_nearest = np.argmin(abs(lr_cumsum - tail_probability))\n return data[idx[idx_nearest]]\n\n def generate_importance_sample_and_calculate_percentile(self, sample_size, shift, sim_number):\n \"\"\"\n Generate one importance sample and calculate the percentile.\n\n :param sample_size: size of the sample\n :param shift: mean shift used to simulate the importance sample\n :param sim_number: set the seed for random number generator\n :return: simulated percentile\n \"\"\"\n\n # Set the seed based on sim_number for parallel computing (reproducibility)\n np.random.seed(sim_number)\n\n # Generate the importance sample by adding (mean) shift (also called translation)\n shifted_r = np.random.normal(size=(sample_size,)) + shift\n\n # Calculate p, q and the likelihood ratio\n p = norm.pdf(shifted_r, loc=0, scale=1)\n q = norm.pdf(shifted_r, loc=shift, scale=1)\n likelihood_ratio = p / q\n\n # Calculate and return the percentile\n return self.percentile(shifted_r, self.quantile, likelihood_ratio)\n\n def process_sim_results(self, sim_percentiles, sample_size, shift, sim_size, sim_time):\n \"\"\"\n SIM_PERCENTILES contains estimated percentiles for SIM_SIZE generated IS samples. 
This function calculates some\n useful statistics to assess the performance of IS.\n :param sim_percentiles: a list of size SIM_SIZE of simulated IS percentiles\n :param sample_size: size of the IS samples underlying the estimated percentiles\n :param shift: (mean) shift used for generating IS samples\n :param sim_size: number of generated IS samples\n :param sim_time: time in seconds spend on generating the IS samples and calculating percentiles\n :return: a list with statistics\n \"\"\"\n return [sample_size,\n shift,\n sim_size,\n self.quantile,\n self.truth,\n np.mean(sim_percentiles),\n np.std(sim_percentiles, ddof=1),\n np.sqrt(np.sum((sim_percentiles - self.truth) ** 2) / (sim_size - 1)),\n np.min(sim_percentiles),\n np.max(sim_percentiles),\n sim_time]\n\n def run(self):\n \"\"\"\n Importance Sampling loop over 1) sample sizes,\n 2) (mean) shifts and\n 3) sim sizes\n :return: DataFrame with results\n \"\"\"\n results = []\n for sample_size in self.sample_sizes:\n for shift in self.shifts:\n for sim_size in self.sim_sizes:\n\n # Prepare arguments for starmap\n iterable = ((sample_size, shift, i) for i in range(sim_size))\n chunk_size = int(sim_size / self.pool_size)\n\n # Start the simulation\n start = time.time()\n with mp.Pool(self.pool_size) as pool:\n sim_percentiles = pool.starmap(self.generate_importance_sample_and_calculate_percentile,\n iterable=iterable,\n chunksize=chunk_size)\n sim_time = round(time.time() - start, 4)\n\n # Display some information\n print(f'Sample size: {sample_size}, Shift: {shift}, Sim size: {sim_size}, Sim time: {sim_time}')\n results.append(self.process_sim_results(np.array(sim_percentiles),\n sample_size,\n shift,\n sim_size,\n sim_time))\n\n # Collect the result in a DataFrame\n df = pd.DataFrame(results, columns=self.columns)\n df.to_excel('results.xlsx', index=False)\n return df\n","sub_path":"importance_sampling/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":6606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"382611094","text":"import time\nfrom http.server import HTTPServer\nfrom server import Server\nimport threading\n\nclass Servers(): \n HOST_NAME = ''\n PORT_NUMBER = 4201\n\n def __init__(self):\n print('<<<<<<<<<<<<<<< Create new servers instance >>>>>>>>>>>>>>>>>')\n self.thread = None\n\n self.httpd = HTTPServer((Servers.HOST_NAME, Servers.PORT_NUMBER), Server)\n\n def startServer(self): \n if self.thread:\n print('Error: need stop server, wite stop to conslole') \n return\n print(time.asctime(), 'Server Starts - %s:%s' % (self.HOST_NAME, self.PORT_NUMBER))\n self.thread = threading.Thread(None, self.httpd.serve_forever)\n self.thread.start()\n\n def stopServer(self):\n print(time.asctime(), 'Server Stops - %s:%s' % (self.HOST_NAME, self.PORT_NUMBER))\n self.httpd.RequestHandlerClass.pre_stop()\n self.httpd.shutdown()\n self.thread.join()\n self.httpd.RequestHandlerClass.after_stop()\n self.thread = None\n self.httpd = None\n\n def reload(self, httpd):\n if self.thread is not None and self.thread.isAlive:\n self.stopServer()\n \n self.startServer()\n\n pass\n\n\n","sub_path":"servers.py","file_name":"servers.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"605763043","text":"import numpy\nfrom display_particles import plot_particles\nfrom pysph.base.utils import get_particle_array_rigid_body\n\ndef make_sphere(dx):\n '''\n '''\n x,y,z = 
numpy.mgrid[-0.5:0.5+dx:dx, -0.5:0.5+dx:dx, 0:1:1j]\n\n x = x.ravel()\n y = y.ravel()\n z = z.ravel()\n\n indices = []\n\n for i in range(len(x)):\n if numpy.sqrt(x[i]**2 + y[i]**2) - 0.5 > 1e-9:\n indices.append(i)\n\n x = numpy.delete(x, indices)\n y = numpy.delete(y, indices)\n z = numpy.delete(z, indices)\n\n return x,y,z\n\nclass CollapsingCylinderGeometry():\n def __init__(self, nCylinder_layers = 6, hdx = 1):\n self.container_rho = 15.0 # arbitrarily chosen\n self.hdx = hdx\n self.nCylinder_layers = nCylinder_layers\n\n def sphere_mass(self,n_particles):\n '''Mass of Each Particle constituting the sphere\n '''\n rho = 2.7e-3 # kg/cm**3\n r = 0.5 # cm\n l = 9.9\n\n return 1.0/n_particles * numpy.pi * r**2 * rho # * l \n\n def create_particles(self):\n # create Container\n nx, ny = 500, 500\n dx = 1.0 / (nx -1)\n\n x, y, z = numpy.mgrid[-1:27:nx*1j, -1:27:ny*1j, 0:1:1j]\n\n interior = ((x > 0) & (x < 26)) & (y > 0)# & ((z > 0) & (z < 10))\n container = numpy.logical_not(interior)\n x = x[container].flat\n y = y[container].flat\n z = z[container].flat\n \n container_m = numpy.ones_like(x) * self.container_rho * dx * dx \n container_h = numpy.ones_like(x) * self.hdx * dx\n \n container = get_particle_array_rigid_body(name = 'container' , x = x, \n y = y , z = z , m = container_m, h = container_h )\n\n container.total_mass[0] = numpy.sum(container_m)\n \n # Create Cylinder Arrays\n \n r = 0.5\n nx , ny = 25, 25\n dx = 1.0 / (nx - 1)\n\n _x, _y, _z = make_sphere(dx)\n _id = numpy.ones_like(_x,dtype=int)\n n_sphere_particles = len(_x)\n \n disp = []\n for layer in range(self.nCylinder_layers):\n yc = layer + r\n if layer % 2 == 0:\n for i in range(6):\n xc = i + r\n disp.append((xc, yc, 0.0))\n else:\n for i in range(5):\n xc = i + 2*r\n disp.append((xc, yc, 0.0))\n\n x, y, z, body_id = [], [], [], []\n\n for i, d in enumerate(disp):\n x.append(_x + d[0])\n y.append(_y + d[1])\n z.append(_z + d[2])\n body_id.append(_id * i )\n\n x = numpy.concatenate(x)\n y = numpy.concatenate(y)\n z = numpy.concatenate(z)\n body_id = numpy.concatenate(body_id)\n m = numpy.ones_like(x) * self.sphere_mass(n_sphere_particles)\n h = numpy.ones_like(x) * self.hdx * 1.0 / (n_sphere_particles - 1)\n\n cylinder = get_particle_array_rigid_body(name='cylinder', x=x, y=y,\n z=z, m=m, h=h, body_id=body_id )\n\n return [cylinder, container]\n\nif __name__== '__main__':\n app = CollapsingCylinderGeometry()\n# app = CollapsingCylinderGeometry(nCylinder_layers = 8)\n plot_particles(app.create_particles())\n","sub_path":"collapsing_cylinders_2d.py","file_name":"collapsing_cylinders_2d.py","file_ext":"py","file_size_in_byte":3204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"361190678","text":"#!/usr/bin/env python\nfrom glob import glob\nimport re\nnames=locals()\nfor y in ('2017','2018'):\n for m in range(12):\n names['fp'+y+'-'+'%02d'%(m+1)]=open('mongodb.log.'+y+'-'+'%02d'%(m+1),'w')\nfor f in glob('../mongodb.txt*'):\n for line in open(f):\n for y in ('2017','2018'):\n for m in range(12):\n pattern='^'+y+'-'+'%02d'%(m+1)\n if re.search(pattern,line):\n names['fp'+y+'-'+'%02d'%(m+1)].write(line)\n\n","sub_path":"bak/logsplite.py","file_name":"logsplite.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"124617822","text":"from keras.models import Sequential\nfrom keras.layers import Conv2D, Dense, Flatten, MaxPooling2D, GlobalAveragePooling2D, 
BatchNormalization\nfrom sklearn.model_selection import train_test_split\nfrom image_processing import get_image_data\nfrom keras.utils import to_categorical\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimages, labels = get_image_data()\nx_train, x_test, y_train, y_test = train_test_split(images, labels)\ny_train = to_categorical(y_train)\ny_test = to_categorical(y_test)\nprint(y_train.shape)\nprint(y_test.shape)\nmodel = Sequential()\n\nmodel.add(Conv2D(120, kernel_size=(3, 3), activation=\"relu\", input_shape=(150, 150, 3)))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Conv2D(90, kernel_size=(3, 3), activation=\"relu\"))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Flatten())\nmodel.add(Dense(500, activation=\"relu\"))\nmodel.add(BatchNormalization())\nmodel.add(Dense(100, activation=\"relu\"))\nmodel.add(Dense(5, activation=\"softmax\"))\nprint(model.summary())\n\nmodel.compile(optimizer=\"Adam\", loss=\"categorical_crossentropy\", metrics=[\"accuracy\"])\ntrain_model = model.fit(x_train, y_train, batch_size=32, validation_data=(x_test, y_test), epochs=10)\n\nscore_history = train_model.history\nprint(score_history)\nacc_history = score_history[\"accuracy\"]\nval_acc_history = score_history[\"val_accuracy\"]\nloss_history = score_history[\"loss\"]\nval_loss_history = score_history[\"val_loss\"]\nx = range(len(acc_history))\n\nplt.plot(x, acc_history, label=\"train_accuracy\")\nplt.plot(x, val_acc_history, label=\"test_accuracy\")\nplt.legend(loc=\"best\")\nplt.title(\"Accuracy\")\nplt.xlabel(\"Epoch\")\nplt.ylabel(\"Accuracy\")\nplt.show()\n\nplt.plot(x, loss_history, label=\"train_loss\")\nplt.plot(x, val_loss_history, label=\"test_loss\")\nplt.legend(loc=\"best\")\nplt.title(\"Loss\")\nplt.xlabel(\"Epoch\")\nplt.ylabel(\"Loss\")\nplt.show()\n\nscore = model.evaluate(x_test, y_test)\nprint(\"Test loss: {}\".format(score[0]))\nprint(\"Score: {}\".format(score[1]))\nmodel.save(\"./Models/CnnModel3.h5\")\n","sub_path":"cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"550695860","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nclass Sigmoid(object):\n\n    def sigmoid(self, z):\n        return 1.0/(1.0 + np.exp(-z))\n\n    def plot(self, z):\n        sns.set()\n        plt.plot(z, self.sigmoid(z))\n        plt.xlabel('z')\n        plt.ylabel('$\\\\phi (z)$')\n        plt.axvline(0.0, color='k')\n        plt.axhline(y=0.5, ls='dotted', color='k')\n        plt.axhspan(0.0, 1.0, facecolor='1.0', alpha=1.0, ls='dotted')\n\n        plt.show()\n\n","sub_path":"ML/Sigmoid.py","file_name":"Sigmoid.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"44666880","text":"from django.urls import path\n\nfrom .portal_views import (\n\t# Portal 
views\n\tpatient_page,\n\n\tset_Q_Device_and_Q_Start,\n\tset_Q_Device_and_Q_End,\n\tset_to_assigned_unassigned,\n\tinvidual_quarantine,\n\n\tinvidual_files,\n\tdevice_json_display,\n\n\ttop_five_alerts_api,\n\tqrcode_page,\n\thomepage,\n\tcommunication,\n\tmessages,\n\tlogin_page,\n\tlogout_page,\n\tsettings_page,\n\tvitals_page,\n\tquanrentine_surveilance_page,\n\tGateway_Lat_Lng,\n\n\n\tDevice_Confirm,\n\tDevice_Confirm_Create,\n\tDevice_View,\n\tDevice_Create,\n\tDevice_Update,\n\tDevice_Delete,\n\tLastest_Device_Data,\n\n\tWearer_Confirm_Create,\n\tWearer_Confirm,\n\tWearer_View,\n\tWearer_Create,\n\tWearer_Update,\n\tWearer_Delete,\n\n\tGateway_Confirm_Create,\n\tGateway_Confirm,\n\tOnline_Gateways,\n\tOnline_Gateways_API,\n\tGateway_View,\n\tGateway_Create,\n\tGateway_Update,\n\tGateway_Delete,\n\n\tMessage_View,\n\tMessage_Create,\n\tMessage_Delete,\n\n\tSubscription_View,\n\tSubscription_Create,\n\tSubscription_Delete,\n\n\tAlert_View,\n\tAlert_Delete,\n\tLatest_Alerts_View,\n\tGet_Latest_Alerts,\n\n\tGet_All_Device_For_Portal,\n\tGet_All_Wearer_For_Portal,\n\tGet_All_Gateway_For_Portal,\n\tGet_All_Message_For_Portal,\n\tGet_All_Subscription_For_Portal,\n\tGet_All_Alert_For_Portal,\n\tQuanrantine_Surveillance_Data,\n)\n\napp_name = 'apiapp'\n\n\nurlpatterns = [\n\t# Portal views\n\tpath('patient_page/', patient_page, name='patient_page'),\n\n\tpath('set_Q_Device_and_Q_Start/', #//',\n\t\t set_Q_Device_and_Q_Start,\n\t\t name='set_Q_Device_and_Q_Start'),\n\tpath('set_Q_Device_and_Q_End/', #//',\n\t\t set_Q_Device_and_Q_End,\n\t\t name='set_Q_Device_and_Q_End'),\n\tpath('set_to_assigned_unassigned//', set_to_assigned_unassigned),\n\tpath('invidual_quarantine//', invidual_quarantine,\n\t\t name='invidual_quarantine'),\n\n\tpath('invidual_files/', invidual_files, name='invidual_files'),\n\tpath('device_json_display//', device_json_display,\n\t\t name='device_json_display'),\n\n\tpath('top_five_alerts_api/', top_five_alerts_api),\n\tpath('download-bg37/', qrcode_page, name='qrcode_page'),\n\tpath('', homepage, name='homepage'),\n\tpath('communication/', communication, name='communication'),\n\tpath('messages/', messages, name='messages'),\n\tpath('login/', login_page, name='login_page'),\n\tpath('logout/', logout_page, name='logout_page'),\n\tpath('settings/', settings_page, name='settings_page'),\n\tpath('vitals/', vitals_page, name='vitals_page'),\n\tpath('quanrentine_surveilance_page/', quanrentine_surveilance_page,\n\t\t name='quanrentine_surveilance_page'),\n\tpath('Gateway_Lat_Lng/', Gateway_Lat_Lng),\n\n\tpath('device/', Device_View, name='device'),\n\tpath('device-create/', Device_Create, name='Device_Create'),\n\tpath('device-update//', Device_Update, name='Device_Update'),\n\tpath('device-delete//', Device_Delete, name='Device_Delete'),\n\tpath('Lastest_Device_Data/', Lastest_Device_Data),\n\tpath('devices/', Get_All_Device_For_Portal),\n\tpath('device-confirm////', Device_Confirm, name='Device_Confirm'),\n\tpath('Device_Confirm_Create/////',\n\t\t Device_Confirm_Create, name='Device_Confirm_Create'),\n\n\tpath('wearer/', Wearer_View, name='wearer'),\n\tpath('wearer-create/', Wearer_Create, name='Wearer_Create'),\n\tpath('wearer-update//', Wearer_Update, name='Wearer_Update'),\n\tpath('wearer-delete//', Wearer_Delete, name='Wearer_Delete'),\n\tpath('wearers/', Get_All_Wearer_For_Portal),\n\tpath('Wearer_Confirm///', Wearer_Confirm,\n\t\t name='Wearer_Confirm'),\n\tpath('Wearer_Confirm_Create///',\n\t\t Wearer_Confirm_Create, 
name='Wearer_Confirm_Create'),\n\n\tpath('online-gateways/', Online_Gateways, name='Online_Gateways'),\n\tpath('online-gateways-api/', Online_Gateways_API),\n\tpath('gateway/', Gateway_View, name='gateway'),\n\tpath('gateway-create/', Gateway_Create, name='Gateway_Create'),\n\tpath('gateway-update//', Gateway_Update, name='Gateway_Update'),\n\tpath('gateway-delete//', Gateway_Delete, name='Gateway_Delete'),\n\tpath('gateways/', Get_All_Gateway_For_Portal),\n\tpath('Gateway_Confirm_Create/////////',\n\t\t Gateway_Confirm_Create, name='Gateway_Confirm_Create'),\n\tpath('Gateway_Confirm/////////',\n\t\t Gateway_Confirm, name='Gateway_Confirm'),\n\n\n\tpath('message/', Message_View, name='message'),\n\tpath('message-create/', Message_Create, name='Message_Create'),\n\tpath('message-delete//', Message_Delete, name='Message_Delete'),\n\tpath('get-messages/', Get_All_Message_For_Portal),\n\n\tpath('subscription/', Subscription_View, name='subscription'),\n\tpath('subscription-create/', Subscription_Create, name='Subscription_Create'),\n\tpath('subscription-delete//', Subscription_Delete,\n\t\t\t\t\t\t\t\t\t\t\t\tname='Subscription_Delete'),\n\tpath('subscriptions/', Get_All_Subscription_For_Portal),\n\n\tpath('alert/', Alert_View, name='alert'),\n\tpath('alert-delete//', Alert_Delete, name='Alert_Delete'),\n\tpath('alerts/', Get_All_Alert_For_Portal),\n\tpath('latest_alerts_view/', Latest_Alerts_View, name='Latest_Alerts_View'),\n\tpath('Get_Latest_Alerts/', Get_Latest_Alerts),\n\n\tpath('Quanrantine_Surveillance_Data/',\n\t\t Quanrantine_Surveillance_Data),\n]\n\n","sub_path":"apiapp/portal_urls.py","file_name":"portal_urls.py","file_ext":"py","file_size_in_byte":5611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"121814072","text":"import numpy as np\nimport random\nimport tensorflow as tf\nfrom shseo_load_data import DataGenerator\nfrom tensorflow.python.platform import flags\nfrom tensorflow.keras import layers\nfrom tensorflow.nn import softmax_cross_entropy_with_logits\nimport time\nimport copy\nfrom matplotlib import pyplot as plt\nfrom util import visualization, loss_function\n\nclass MANN(tf.keras.Model):\n\n def __init__(self, num_classes, samples_per_class):\n super(MANN, self).__init__()\n self.num_classes = num_classes\n self.samples_per_class = samples_per_class\n self.lstm_layer1 = tf.keras.layers.LSTM(units=128, return_sequences=True)\n self.lstm_layer2 = tf.keras.layers.LSTM(units=self.num_classes, return_sequences=True)\n\n def call(self, input_images, input_labels):\n \"\"\"\n MANN\n Args:\n input_images: [B, K+1, N, 784] flattened images\n labels: [B, K+1, N, N] ground truth labels\n Returns:\n [B, K+1, N, N] predictions\n \"\"\"\n \n B, K, N, I= input_images.shape\n \n # First K examples of data+labels \n data_train = tf.concat([input_images[:,0:-1,:,:], input_labels[:,0:-1,:,:]], axis=3)\n # Last 1 examples of data+zeros \n data_test = tf.concat([input_images[:,-1:,:,:], tf.zeros_like(input_labels)[:,-1:,:,:]], axis=3)\n \n input_data = tf.concat([data_train, data_test], axis=1) # [B, K+1, N, I+N]\n\n # reshape input data for matching lstm input shape\n reshaped_input_data = tf.reshape(input_data, [-1, K*N, I+N])\n \n # LSTM layers\n hidden_x = self.lstm_layer1(reshaped_input_data)\n out = self.lstm_layer2(hidden_x)\n \n # reshape output\n reshaped_out = tf.reshape(out, [-1, K, N, N])\n \n return reshaped_out\n \n def grad_function(self, images, labels):\n with tf.GradientTape() as tape:\n preds = self(images, 
labels)\n            ce_loss = loss_function(preds, labels)\n\n        grads = tape.gradient(ce_loss, self.trainable_variables)\n\n        \n        return grads, ce_loss\n    \n    def train(self, FLAGS, data_generator):\n        # Set GPU options\n        \n        optimizer = tf.keras.optimizers.Adam(FLAGS.learning_rate)\n        accuracy_metric = tf.keras.metrics.Accuracy()\n        \n        batch_type = \"train\"\n        batch_size = FLAGS.meta_batch_size\n        shuffle = FLAGS.shuffle\n        \n        # record history\n        meta_train_losses = []\n        meta_test_losses = []\n        meta_test_accuracy = []\n        steps = []\n        \n        for step in range(FLAGS.training_step):\n            # load data\n            \n            meta_train_images, meta_train_labels = data_generator.sample_batch(batch_type=batch_type, batch_size=batch_size, shuffle=shuffle)\n            \n            # train phase\n            grads, train_loss = self.grad_function(meta_train_images, meta_train_labels)\n            optimizer.apply_gradients(zip(grads, self.trainable_variables))\n            \n            if step % FLAGS.visualization_step == 0:\n                print()\n                print(\"*\" * 5 + \"Iter \" + str(step) + \"*\" * 5)\n                # meta test data sampling\n                meta_test_images, meta_test_labels = data_generator.sample_batch(batch_type='test', batch_size=100)\n                \n                # inference\n                preds = self.call(meta_test_images, meta_test_labels)\n                \n                # calculate train and test loss\n                test_loss = loss_function(preds, meta_test_labels)\n                print(\"Train Loss: {:.4f}\".format(train_loss.numpy()), \"Test Loss: {:.4f}\".format(test_loss.numpy()))\n                \n                # calculate accuracy\n                argmax_preds = tf.math.argmax(preds[:, -1, :, :], 2)\n                argmax_meta_test_labels = tf.math.argmax(meta_test_labels[:, -1, :, :], 2)\n                _ = accuracy_metric.update_state(y_true=argmax_meta_test_labels, y_pred=argmax_preds)\n                acc = accuracy_metric.result().numpy()\n                print(\"Test Accuracy: {:.4f}\".format(acc))\n                if step != 0:\n                    end_time = time.time()\n                    print(\"Elapsed Time: {:.4f}sec\".format(end_time-start_time))\n                \n                # record history\n                meta_train_losses.append(train_loss.numpy())\n                meta_test_losses.append(test_loss.numpy())\n                meta_test_accuracy.append(acc)\n                steps.append(step)\n                \n                # visualization\n                visualization(FLAGS, meta_train_losses, meta_test_losses, meta_test_accuracy, steps)\n                \n                # reset start time\n                start_time = time.time()","sub_path":"model/model_based/MANN/MANN.py","file_name":"MANN.py","file_ext":"py","file_size_in_byte":4830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"619716786","text":"from abc import ABCMeta, abstractmethod\nfrom confluent_kafka.avro import AvroProducer, AvroConsumer\nfrom typing import Any, Tuple, Optional\nfrom time import sleep\n\nfrom confluent_kafka.cimpl import TopicPartition, OFFSET_END, KafkaError\n\nfrom pyconnect.avroparser import to_value_schema, to_key_schema\nfrom pyconnect.config import SourceConfig\nfrom pyconnect.core import BaseConnector, Status, PyConnectException\n\n\nclass PyConnectSource(BaseConnector, metaclass=ABCMeta):\n\n    def __init__(self, config: SourceConfig) -> None:\n        super().__init__()\n        self.config = config\n        self._producer = self._make_producer()\n        self._offset_consumer = self._make_offset_consumer()\n        self._key_schema: str = None\n        self._value_schema: str = None\n        self._offset_schema: str = None\n\n    def _make_offset_consumer(self) -> AvroConsumer:\n        config = {\n            'bootstrap.servers': ','.join(self.config['bootstrap_servers']),\n            'schema.registry.url': self.config['schema_registry'],\n            'enable.auto.commit': False,\n            'offset.store.method': 'none',\n            'group.id': f'{self.config[\"offset_topic\"]}_fetcher',\n            'default.topic.config': {\n                'auto.offset.reset': 'latest'\n            },\n        }\n
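        # Pinning partition 0 of the offset topic at OFFSET_END starts this
        # consumer at the log tail; _get_committed_offset() later seeks back to
        # the high watermark minus one to re-read the last committed offset.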
offset_consumer = AvroConsumer(config)\n        offset_consumer.assign([TopicPartition(self.config['offset_topic'], 0, OFFSET_END)])\n\n        return offset_consumer\n\n    def _make_producer(self) -> AvroProducer:\n        config = {\n            'bootstrap.servers': ','.join(self.config['bootstrap_servers']),\n            'schema.registry.url': self.config['schema_registry']\n        }\n        return AvroProducer(config)\n\n    def _on_eof(self) -> None:\n        self._safe_call_and_set_status(self.on_eof)\n\n    def _seek(self, idx: Any) -> None:\n        self._safe_call_and_set_status(self.seek, idx)\n\n    def _produce(self, key, value):\n        self._create_schemas_if_necessary(key, value)\n\n        self._producer.produce(key=key, value=value,\n                               key_schema=self._key_schema,\n                               value_schema=self._value_schema,\n                               topic=self.config['topic'])\n\n    def _create_schemas_if_necessary(self, key, value):\n        if self._key_schema is None:\n            self._key_schema = to_key_schema(key)\n        if self._value_schema is None:\n            self._value_schema = to_value_schema(value)\n\n    def _get_committed_offset(self) -> Any:\n        partition = self._offset_consumer.assignment()[0]\n        _, high_offset = self._offset_consumer.get_watermark_offsets(partition)\n        partition.offset = high_offset - 1\n        self._offset_consumer.seek(partition)\n\n        offset_msg = self._offset_consumer.poll(timeout=30)\n        if offset_msg is None:\n            raise PyConnectException('Offset could not be fetched')\n        if offset_msg.error() is None:\n            return offset_msg.value()\n        if offset_msg.error().code() != KafkaError._PARTITION_EOF:\n            raise PyConnectException(f'Kafka library returned error: {offset_msg.error().name()}')\n        return None\n\n    def _before_run_loop(self) -> None:\n        super()._before_run_loop()\n        idx = self._get_committed_offset()\n        if idx is not None:\n            self._safe_call_and_set_status(self.seek, idx)\n\n    def _commit(self) -> None:\n        idx = self.get_index()\n        if self._offset_schema is None:\n            self._offset_schema = to_value_schema(idx)\n\n        self._producer.produce(topic=self.config['offset_topic'], key=None, value=idx,\n                               value_schema=self._offset_schema)\n        self._producer.flush()\n\n    def _run_once(self) -> None:\n        try:\n            key, value = self.read()\n            self._produce(key, value)\n        except StopIteration:\n            self._on_eof()\n        except Exception as e:\n            self._handle_general_exception(e)\n        if self._status == Status.CRASHED:\n            self._on_crash()\n\n    def close(self) -> None:\n        try:\n            self._commit()\n            self._offset_consumer.close()\n        except RuntimeError:\n            pass  # no problem, already closed\n\n    def on_eof(self) -> None:\n        sleep(0.1)\n\n    @abstractmethod\n    def get_index(self) -> Any:\n        raise NotImplementedError()\n\n    @abstractmethod\n    def read(self) -> Tuple[Any, Any]:\n        raise NotImplementedError()\n\n    @abstractmethod\n    def seek(self, index: Any) -> Optional[Status]:\n        raise NotImplementedError()\n","sub_path":"pyconnect/pyconnectsource.py","file_name":"pyconnectsource.py","file_ext":"py","file_size_in_byte":4557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"248967652","text":"\"\"\"XML-RPC access to Roundup\n\nhttp://www.roundup-tracker.org/docs/xmlrpc.html\n\"\"\"\n\nfrom base64 import b64encode\nfrom itertools import chain\nimport re\n\nfrom datetime import datetime\nfrom snakeoil.sequences import iflatten_instance\n\nfrom . 
import NullRequest, Request, RPCRequest, req_cmd, generator\nfrom ._xmlrpc import LxmlXmlrpc\nfrom ..cache import Cache, csv2tuple\nfrom ..exceptions import AuthError, RequestError, ParsingError\nfrom ..objects import decompress, Item, Attachment, Comment\n\n\ndef parsetime(time):\n return datetime.strptime(time, '')\n\n\nclass RoundupError(RequestError):\n\n def __init__(self, msg, code=None, text=None):\n msg = 'Roundup error: ' + msg\n super().__init__(msg, code, text)\n\n\nclass RoundupIssue(Item):\n\n attributes = {\n 'creator': 'Reporter',\n 'creation': 'Created',\n 'assignedto': 'Assignee',\n 'keyword': 'Keywords',\n 'priority': 'Priority',\n 'status': 'Status',\n 'title': 'Title',\n 'nosy': 'Nosy List',\n 'superseder': 'Duplicate of',\n 'actor': 'Modified by',\n 'activity': 'Modified',\n 'messages': 'Comments',\n 'files': 'Attachments',\n }\n\n type = 'issue'\n\n def __init__(self, service, comments=None, attachments=None, **kw):\n self.service = service\n for k, v in kw.items():\n if k in ('creation', 'activity'):\n setattr(self, k, parsetime(v))\n elif k in ('creator', 'actor'):\n try:\n username = self.service.cache['users'][int(v)-1]\n except IndexError:\n # cache needs update\n username = v\n setattr(self, k, username)\n elif k == 'status':\n try:\n status = self.service.cache['status'][int(v)-1]\n except IndexError:\n # cache needs update\n status = v\n setattr(self, k, status)\n elif k == 'priority' and v is not None:\n try:\n priority = self.service.cache['priority'][int(v)-1]\n except IndexError:\n # cache needs update\n priority = v\n setattr(self, k, priority)\n elif k == 'keyword' and v is not None:\n keywords = []\n for keyword in v:\n try:\n keywords.append(self.service.cache['keyword'][int(keyword)-1])\n except IndexError:\n # cache needs update\n keywords.append(keyword)\n setattr(self, k, keywords)\n else:\n setattr(self, k, v)\n\n self.attachments = attachments if attachments is not None else []\n self.comments = comments if comments is not None else []\n\n def __str__(self):\n lines = []\n print_fields = [\n ('title', 'Title'),\n ('assignedto', 'Assignee'),\n ('creation', 'Created'),\n ('creator', 'Reporter'),\n ('activity', 'Modified'),\n ('actor', 'Modified by'),\n ('id', 'ID'),\n ('status', 'Status'),\n ('priority', 'Priority'),\n ('superseder', 'Duplicate'),\n ('keyword', 'Keywords'),\n ('messages', 'Comments'),\n ('files', 'Attachments'),\n ]\n\n for field, title in print_fields:\n value = getattr(self, field)\n if value is None:\n continue\n\n if field in ('messages', 'files'):\n value = len(value)\n\n if isinstance(value, list):\n value = ', '.join(map(str, value))\n\n lines.append('{:<12}: {}'.format(title, value))\n\n return '\\n'.join(lines)\n\n\nclass RoundupComment(Comment):\n pass\n\n\nclass RoundupAttachment(Attachment):\n pass\n\n\nclass RoundupCache(Cache):\n\n def __init__(self, *args, **kw):\n # default to empty values\n defaults = {\n 'status': (),\n 'priority': (),\n 'keyword': (),\n 'users': (),\n }\n\n converters = {\n 'status': csv2tuple,\n 'priority': csv2tuple,\n 'keyword': csv2tuple,\n 'users': csv2tuple,\n }\n\n super().__init__(defaults=defaults, converters=converters, *args, **kw)\n\n\nclass Roundup(LxmlXmlrpc):\n \"\"\"Support Roundup's XML-RPC interface.\"\"\"\n\n _service = 'roundup'\n _cache_cls = RoundupCache\n\n item = RoundupIssue\n item_endpoint = '/issue'\n attachment = RoundupAttachment\n attachment_endpoint = '/file'\n\n def __init__(self, **kw):\n super().__init__(endpoint='/xmlrpc', **kw)\n\n @property\n def 
cache_updates(self):\n \"\"\"Pull latest data from service for cache update.\"\"\"\n config_updates = {}\n reqs = []\n\n # get possible status values\n reqs.append(RPCRequest(command='list', params=['status'], service=self))\n\n # get possible priority values\n reqs.append(RPCRequest(command='list', params=['priority'], service=self))\n\n # get possible keyword values\n reqs.append(RPCRequest(command='list', params=['keyword'], service=self))\n\n # get possible user values requires login, otherwise returns empty list\n self.skip_auth = False\n self.auth.read()\n reqs.append(RPCRequest(command='list', params=['user'], service=self))\n\n status, priority, keyword, users = self.send(reqs)\n\n # don't sort, ordering is important for the mapping to work properly\n config_updates['status'] = tuple(status)\n config_updates['priority'] = tuple(priority)\n config_updates['keyword'] = tuple(keyword)\n if users:\n config_updates['users'] = tuple(users)\n\n return config_updates\n\n def inject_auth(self, request, params):\n self.session.headers['Authorization'] = str(self.auth)\n self.authenticated = True\n return request, params\n\n def _get_auth_token(self, user, password, **kw):\n \"\"\"Get an authentication token from the service.\"\"\"\n # generate HTTP basic auth token\n if isinstance(user, str):\n user = user.encode('latin1')\n if isinstance(password, str):\n password = password.encode('latin1')\n authstr = 'Basic ' + (b64encode(b':'.join((user, password))).strip()).decode()\n return authstr\n\n # def create(self, title, **kw):\n # \"\"\"Create a new issue given a list of parameters\n #\n # :returns: ID of the newly created issue\n # :rtype: int\n # \"\"\"\n # params = ['issue']\n # params.append('title={}'.format(title))\n # for k, v in self.item.attributes.items():\n # if kw.get(k, None) is not None:\n # params.append(\"{}={}\".format(k, kw[k]))\n #\n # req = self.create_request(method='create', params=params)\n # data = self.send(req)\n # return data\n #\n # def modify(self, id, **kw):\n # params = ['issue' + str(id[0])]\n # for k, v in self.item.attributes.items():\n # if kw.get(k, None) is not None:\n # params.append(\"{}={}\".format(k, kw[k]))\n #\n # req = self.create_request(method='set', params=params)\n # data = self.send(req)\n # return data\n #\n # def search(self, ids=None, **kw):\n # params = ['issue', ids]\n # search_params = {}\n # if kw['terms']:\n # search_params['title'] = kw['terms']\n # for k, v in self.item.attributes.items():\n # if kw.get(k, None) is not None:\n # search_params[k] = kw[k]\n # params.append(search_params)\n #\n # req = self.create_request(method='filter', params=params)\n # data = self.send(req)\n # return data\n\n def parse_response(self, response):\n \"\"\"Send request object and perform checks on the response.\"\"\"\n try:\n data = super().parse_response(response)\n except RequestError as e:\n # XXX: Hacky method of splitting off exception class from error string,\n # should probably move to using a regex or similar.\n code, msg = re.match(r\"^:(.+)$\", e.msg).groups()\n raise RequestError(msg=msg, code=code, text=e.text)\n\n return data\n\n\n@req_cmd(Roundup, 'get')\nclass _GetRequest(Request):\n\n def __init__(self, ids, service, fields=None, get_comments=False,\n get_attachments=False, **kw):\n \"\"\"Construct a get request.\"\"\"\n if not ids:\n raise ValueError('No {} ID(s) specified'.format(service.item_name))\n\n reqs = []\n for i in ids:\n issue_reqs = []\n params = ['issue' + str(i)]\n if fields is not None:\n params.extend(fields)\n else:\n 
params.extend(service.item.attributes.keys())\n reqs.append(RPCRequest(service=service, command='display', params=params))\n\n super().__init__(service=service, reqs=reqs)\n self.ids = ids\n self.get_comments = get_comments\n self.get_attachments = get_attachments\n\n def handle_exception(self, e):\n if e.code == 'exceptions.IndexError':\n # issue doesn't exist\n raise RoundupError(msg=e.msg)\n elif e.code == 'exceptions.KeyError':\n # field doesn't exist\n raise RoundupError(msg=\"field doesn't exist: {}\".format(e.msg))\n raise\n\n def parse(self, data):\n issues = []\n files = {}\n messages = {}\n reqs = []\n issues = list(iflatten_instance(data, dict))\n\n file_reqs = []\n msg_reqs = []\n for issue in issues:\n file_ids = issue.get('files', [])\n issue_files = []\n if file_ids and self.get_attachments:\n issue_files.append(self.service.AttachmentsRequest(attachment_ids=file_ids))\n else:\n issue_files.append(NullRequest())\n\n msg_ids = issue.get('messages', [])\n issue_msgs = []\n if msg_ids and self.get_comments:\n issue_msgs.append(self.service.CommentsRequest(comment_ids=msg_ids))\n else:\n issue_msgs.append(NullRequest())\n\n file_reqs.append(issue_files)\n msg_reqs.append(issue_msgs)\n\n attachments = self.service.send(file_reqs)\n comments = self.service.send(msg_reqs)\n\n return (self.service.item(service=self.service, comments=next(comments),\n attachments=next(attachments), id=self.ids[i], **issue)\n for i, issue in enumerate(issues))\n\n\n@req_cmd(Roundup, 'attachments')\nclass _AttachmentsRequest(Request):\n def __init__(self, service, ids=None, attachment_ids=None, get_data=False, *args, **kw):\n \"\"\"Construct an attachments request.\"\"\"\n super().__init__(service)\n # TODO: add support for specifying issue IDs\n if attachment_ids is None:\n raise ValueError('No attachment ID(s) specified')\n\n reqs = []\n for i in attachment_ids:\n params = ['file' + str(i)]\n fields = ['name', 'type', 'creator', 'creation']\n if get_data:\n fields.append('content')\n params.extend(fields)\n reqs.append(RPCRequest(service=service, command='display', params=params))\n\n super().__init__(service=service, reqs=reqs)\n self.ids = ids\n self.attachment_ids = attachment_ids\n\n @generator\n def parse(self, data):\n if self.attachment_ids:\n ids = self.attachment_ids\n else:\n ids = self.ids\n\n return [RoundupAttachment(id=ids[i], filename=d['name'], data=d.get('content', None),\n creator=self.service.cache['users'][int(d['creator'])-1],\n created=parsetime(d['creation']), mimetype=d['type'])\n for i, d in enumerate(data)]\n\n\n@req_cmd(Roundup, 'comments')\nclass _CommentsRequest(Request):\n def __init__(self, service, ids=None, comment_ids=None, created=None, fields=None, *args, **kw):\n \"\"\"Construct a comments request.\"\"\"\n super().__init__(service)\n # TODO: add support for specifying issue IDs\n if comment_ids is None:\n raise ValueError('No comment ID(s) specified')\n\n reqs = []\n for i in comment_ids:\n params = ['msg' + str(i)]\n if fields is not None:\n params.extend(fields)\n reqs.append(RPCRequest(service=service, command='display', params=params))\n\n super().__init__(service=service, reqs=reqs)\n self.ids = ids\n self.comment_ids = comment_ids\n\n @generator\n def parse(self, data):\n if self.comment_ids:\n ids = self.comment_ids\n else:\n ids = self.ids\n\n return [RoundupComment(id=ids[i], count=i, text=d['content'], date=parsetime(d['date']),\n creator=self.service.cache['users'][int(d['author'])-1])\n for i, d in 
enumerate(data)]\n","sub_path":"src/bite/service/roundup.py","file_name":"roundup.py","file_ext":"py","file_size_in_byte":13239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"244502218","text":"class Teacher:\n @classmethod\n def choose_course(cls, db, name):\n school = ''\n for i in db['teacher']:\n if name == i[0]:\n school = i[1]\n\n for i in db['manage']['infos']['school']:\n if school in list(i.keys()):\n courser = input('输入选择课程')\n #清楚以前课的关联\n for j in i[school]:\n if i[school][j]['teacher'] == name:\n i[school][j]['teacher'] = ''\n #关联当前的课\n for j in i[school]:\n if i[school][j]['course'] == courser:\n i[school][j]['teacher'] = name\n\n @classmethod\n def print_stu(cls, db, name):\n for i in db['student']:\n print(i)\n\n @classmethod\n def print_course(cls, db, name):\n school = ''\n for i in db['teacher']:\n if name == i[0]:\n school = i[1]\n\n for i in db['manage']['infos']['school']:\n if school in list(i.keys()):\n print(list(list(i.values())))\n\n @classmethod\n def alter_score(cls, db):\n grade = input('输入修改课程')\n score = input('输入修改分数')\n for i in db['student']:\n if i['grade'] == grade:\n i['score'] = score\n","sub_path":"python/item3_a/interface/teacher.py","file_name":"teacher.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"523872834","text":"import os\nfrom flask_restplus import Api\nfrom flask import Blueprint\nimport importlib\n\nblueprint = Blueprint('api', __name__)\n\napi = Api(blueprint,\n title='FLASK RESTPLUS API BOILER-PLATE WITH JWT',\n version='1.0',\n description='a boilerplate for flask restplus web service'\n )\n\n\ndef add_namespace():\n routes_path = os.path.join(os.path.dirname(__file__), '.')\n files = ['app.routes.{}'.format(\n x[:-3]) for x in os.listdir(routes_path) if not x.startswith('__')]\n for file in files:\n module = importlib.import_module(file)\n if hasattr(module, 'prefix'):\n prefix = module.prefix\n else:\n prefix = module.__name__.split('.')[-1]\n api.add_namespace(module.route, path='{}'.format(prefix.strip('/')))\n\n\nadd_namespace()\n","sub_path":"app/routes/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"197484909","text":"\"\"\"\n每年,政府都会公布一万个最常见的婴儿名字和它们出现的频率,也就是同名婴儿的数量。有些名字有多种拼法,例如,John 和 Jon 本质上是相同的名字,但被当成了两个名字公布出来。给定两个列表,一个是名字及对应的频率,另一个是本质相同的名字对。设计一个算法打印出每个真实名字的实际频率。注意,如果 John 和 Jon 是相同的,并且 Jon 和 Johnny 相同,则 John 与 Johnny 也相同,即它们有传递和对称性。\n\n在结果列表中,选择 字典序最小 的名字作为真实名字。\n\n \n\n示例:\n\n输入:names = [\"John(15)\",\"Jon(12)\",\"Chris(13)\",\"Kris(4)\",\"Christopher(19)\"], synonyms = [\"(Jon,John)\",\"(John,Johnny)\",\"(Chris,Kris)\",\"(Chris,Christopher)\"]\n输出:[\"John(27)\",\"Chris(36)\"]\n \n\n提示:\n\nnames.length <= 100000\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/baby-names-lcci\n著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\n\"\"\"\nfrom typing import *\n\n\nclass Solution:\n def union(self, i: int, j: int, father: List[int]):\n p_i = self.find(i, father)\n p_j = self.find(j, father)\n father[p_i] = p_j\n\n def find(self, p, father: List[int]):\n r = p\n while father[p] != p:\n p = father[p]\n while father[r] != p:\n father[r], r = p, father[r]\n return p\n\n def trulyMostPopular(self, names: List[str], synonyms: List[str]) -> List[str]:\n names_dict, count = dict(), 0\n for each in names:\n name = each[:each.find('(')]\n names_dict[name] = 
count\n count += 1\n for each in synonyms:\n each = each.split(',')\n a, b = each[0][1:], each[1][:-1]\n for name in [a, b]:\n if name not in names_dict:\n names_dict[name] = count\n count += 1\n father = [i for i in range(count)]\n for each in synonyms:\n each = each.split(',')\n a, b = each[0][1:], each[1][:-1]\n self.union(names_dict[a], names_dict[b], father)\n\n no_name = dict()\n for (k, v) in names_dict.items():\n f = self.find(v, father)\n if f not in no_name:\n no_name[f] = [k]\n else:\n no_name[f].append(k)\n for each in no_name:\n no_name[each] = [min(no_name[each]), 0]\n\n for each in names:\n i = each.find('(')\n name, freq = each[:i], int(each[i + 1:-1])\n f = self.find(names_dict[name], father)\n no_name[f][1] += freq\n\n return ['{}({})'.format(each[0], each[1]) for each in no_name.values()]\n\n","sub_path":"面试17.07.婴儿名字.py","file_name":"面试17.07.婴儿名字.py","file_ext":"py","file_size_in_byte":2774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"243123415","text":"# -*- coding:utf-8 -*-\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution:\n # 返回合并后列表\n def Merge(self, pHead1, pHead2):\n dummy = ListNode(1)\n pHead = dummy\n while pHead1 and pHead2:\n if pHead1.val <= pHead2.val:\n dummy.next = pHead1\n pHead1 = pHead1.next\n else:\n dummy.next = pHead2\n pHead2 = pHead2.next\n dummy = dummy.next\n if pHead1:\n dummy.next = pHead1\n elif pHead2:\n dummy.next = pHead2\n return pHead.next\n\n\n# Merge k sorted linked lists and return it as one sorted list. Analyze and describe its complexity.\n#\n# Example:\n#\n# Input:\n# [\n# 1->4->5,\n# 1->3->4,\n# 2->6\n# ]\n# Output: 1->1->2->3->4->4->5->6\n\n\n\nfrom Queue import PriorityQueue\n\n\nclass Solution1(object):\n def mergeKLists(self, lists):\n dummy = ListNode(None)\n curr = dummy\n q = PriorityQueue()\n for node in lists:\n # sorted by the value of node,the priority is the node.val such as a min heap \n if node:\n q.put((node.val, node))\n while q.qsize() > 0:\n curr.next = q.get()[1]\n curr = curr.next\n # put the node.next back to the priority queue for sorting\n if curr.next:\n q.put((curr.next.val, curr.next))\n return dummy.next\n","sub_path":"Basic_Algorithm/Linklist/merge_linklist.py","file_name":"merge_linklist.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"313332464","text":"import tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers, Model\n\n# 6-layer ESPCN SISR model. 
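Note on the merge_linklist record above: it is Python 2 code ('from Queue import PriorityQueue'), and the (node.val, node) tuples it pushes break on Python 3 whenever two nodes carry equal values, because ListNode objects are not orderable. A minimal Python 3 sketch of the same k-way merge using heapq, with the list index as a tie-breaker (it assumes the ListNode class defined in that record):

import heapq

def merge_k_lists(lists):
    heap = []
    for i, node in enumerate(lists):
        if node:
            # (value, index, node): the index breaks value ties, so two
            # ListNode objects are never compared against each other
            heapq.heappush(heap, (node.val, i, node))
    dummy = curr = ListNode(0)
    while heap:
        _, i, node = heapq.heappop(heap)
        curr.next = node
        curr = node
        if node.next:
            heapq.heappush(heap, (node.next.val, i, node.next))
    return dummy.next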
\n# r = upscale factor, channels = number of color channels\ndef espcn_model(r, channels = 3):\n    # Arguments for Conv2D\n    conv_args = {\n        \"activation\": \"relu\",\n        \"padding\": \"same\",\n    }\n    # Input\n    inputs = keras.Input(shape=(None, None, channels))\n    # Feature Maps Extraction\n    conv1 = layers.Conv2D(64, 5, **conv_args)(inputs)\n    conv2 = layers.Conv2D(64, 3, **conv_args)(conv1)\n    conv3 = layers.Conv2D(32, 3, **conv_args)(conv2)\n    conv4 = layers.Conv2D(32, 3, **conv_args)(conv3)\n    conv5 = layers.Conv2D(32, 3, **conv_args)(conv4)\n    conv6 = layers.Conv2D(channels*(r*r), 3, **conv_args)(conv5)\n    # Efficient Sub-Pixel Convolutional Layer\n    outputs = tf.nn.depth_to_space(conv6, r)\n    return Model(inputs, outputs)","sub_path":"model/espcn.py","file_name":"espcn.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"369225589","text":"import mysql.connector\nimport json\n\ndbconparamsjson = None\n\ndef get_db_con_params():\n    global dbconparamsjson\n    jsondata = open(\"./common/DBConParams.json\").read()\n    dbconparamsjson = json.loads(jsondata)\n\ndef activecontacts():\n    connection, cursor = None, None\n    try:\n        contacts = []\n        connection = mysql.connector.connect(host=dbconparamsjson[\"host\"], user=dbconparamsjson[\"username\"],\n                                             password=dbconparamsjson[\"password\"], database=dbconparamsjson[\"db\"])\n        # Get all contacts\n        sql = \"SELECT * FROM Contacts WHERE DNDEmailBounce=0\"\n        cursor = connection.cursor()\n        cursor.execute(sql)\n        columns = [column[0] for column in cursor.description]\n        for row in cursor.fetchall():\n            contacts.append(dict(zip(columns, row)))\n        if len(contacts) > 0:\n            return {\"contacts\": contacts}\n        else:\n            return {\"contacts\": None}\n    except mysql.connector.Error as err:\n        return {\"contacts\": err}\n    finally:\n        # close the cursor before the connection that owns it\n        if cursor:\n            cursor.close()\n        if connection:\n            connection.close()\n\n\ndef lambda_handler(event, context):\n    get_db_con_params()\n    return activecontacts()\n","sub_path":"serverless/activecontacts.py","file_name":"activecontacts.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"323312164","text":"\"\"\"\nEssentially this program aims at the same investigation as ./last_modified.py,\nbut this one looks into a single file, while the other doesn't.\n\nNote:\n- Currently the GitHub API 3.0 doesn't serve any blame APIs, so this takes the tactic\n of using local commit information. (Actually, the module 'gitpython' is good at\n dealing with git blames!) 
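As a shape check for the sub-pixel layer in the espcn.py record above: tf.nn.depth_to_space rearranges a (N, H, W, channels*r*r) tensor into (N, H*r, W*r, channels), which is exactly the upscaling step the model ends with. A small sketch with made-up sizes:

import tensorflow as tf

r, channels = 3, 3
x = tf.zeros((1, 24, 24, channels * r * r))  # stand-in for the conv6 output
y = tf.nn.depth_to_space(x, r)
print(y.shape)  # (1, 72, 72, 3): each r*r channel group becomes an r-by-r pixel patch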
So you should clone a repository from GitHub (and so on) first.\n\n- You cannot use any information in settings.json because, for the reason I mentioned\n above, this program is somewhat isolated from the whole package.\n\"\"\"\n\nimport sys,os\nsys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))) )\n\nimport git\nimport re\nimport datetime\n\nimport info\n\npath = info.local_repoPath\ntarget_file = info.local_target_file\nrepo = git.Repo(path)\n\nfixMsgPattern = r\"[fF]ix\"\nfixre = re.compile(fixMsgPattern, re.ASCII)\n\npatch_pattern = r\"@@ -(\\d+),(\\d+) \\+(\\d+),(\\d+) @@\"\npatchre = re.compile(patch_pattern)\n\nfor i,commit in enumerate(repo.iter_commits(paths=target_file)):\n\tif not fixre.search(commit.summary):\n\t\tcontinue\n\t\n\t# get lines modified in each commit from patch.\n\t# ---@@ -1,441 +1,609 @@\n\n\t# and the goal is to make the list of (bug_commit_sha, bug_start_line, bug_end_line)\n\n\tbug_commit = commit.parents[0]\n\t\n\tfor diff in commit.diff(bug_commit.hexsha,paths=target_file,create_patch=True).iter_change_type('M'):\n\t\t# At this point, split the raw patch text into lines\n\t\tlines = str(diff.diff).split(\"\\\\n\")\n\n\t\tfor line in lines:\n\t\t\tif not patchre.search(line):\n\t\t\t\tcontinue\n\t\t\t\n\t\t\tmatches = patchre.findall(line)\n\t\t\t# bug_range = (startline, endline)\n\t\t\tbug_start_line = int(matches[0][1])\n\t\t\tbug_end_line = bug_start_line + int(matches[0][2]) - 1\n\t\t\t\n\t\t\t# get bug's blame\n\t\t\tbug_blame_times = []\n\n\t\t\tif bug_start_line == 414:\n\t\t\t\tprint(line)\n\t\t\tfor blame_commit,__ in repo.blame(bug_commit.hexsha, target_file, False, L='{0},{1}'.format(bug_start_line, bug_end_line)):\n\t\t\t\t# (caution) commit.committed_date is int\n\t\t\t\tbug_blame_times.append(datetime.datetime.fromtimestamp(blame_commit.committed_date))\n\n\n\t\t\tfix_commit_date = datetime.datetime.fromtimestamp(commit.committed_date)\n\t\t\ttime_deltas = [fix_commit_date - x for x in bug_blame_times]\n\n\t\t\tfor delta in time_deltas:\n\t\t\t\t# total_seconds() keeps whole days; .seconds alone would drop them\n\t\t\t\tprint(delta.total_seconds())\n","sub_path":"surveys/infile_last_modified.py","file_name":"infile_last_modified.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"510816282","text":"\nfrom helpers import load_json\n\n__all__ = [\n    'create_table_command'\n]\n\n\nclass CreateTable(object):\n    create_table_command = \"CREATE TABLE\"\n\n    def __init__(self, table_name, columns, first_column_primary_key=True):\n        self.table_name = table_name\n        self.columns = columns\n        self.col1_pkey = first_column_primary_key\n\n    def parse_to_create_table_command(self):\n        table_cols = \"\"\n\n        for i, _key in enumerate(self.columns):\n            if i == 0:\n                data_type = self.columns[_key]\n                table_cols += f\"{_key} {data_type}\"\n                if self.col1_pkey is True:\n                    table_cols += \" PRIMARY KEY\"\n            else:\n                data_type = self.columns[_key]\n                table_cols += f\", {_key} {data_type}\"\n        text = \" \".join([self.create_table_command, self.table_name, \"(\" + table_cols + \")\"])\n        return text\n\n\ndef create_table_command(json_file='table_columns.json', table_name='congress_bills'):\n    col_data = load_json(json_file)\n    ct = CreateTable(table_name, col_data)\n    return ct.parse_to_create_table_command()\n","sub_path":"create_table.py","file_name":"create_table.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"265723069","text":"import asyncio\n\nfrom envparse import env\n\nenv.read_envfile()\n\n# 
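To make the CreateTable record above concrete, this is the statement parse_to_create_table_command builds for a hypothetical columns mapping (the column names are invented for illustration; dict insertion order is preserved on Python 3.7+):

ct = CreateTable('congress_bills', {'bill_id': 'TEXT', 'title': 'TEXT', 'votes': 'INTEGER'})
print(ct.parse_to_create_table_command())
# CREATE TABLE congress_bills (bill_id TEXT PRIMARY KEY, title TEXT, votes INTEGER)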
Debug\nDEBUG = env('DEBUG', cast=bool, default=False)\n\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(process)d %(name)s %(message)s'\n },\n 'simple': {\n 'format': '%(levelname)s %(message)s'\n },\n },\n 'handlers': {\n 'console': {\n 'level': 'DEBUG' if DEBUG else 'WARNING',\n 'class': 'logging.StreamHandler',\n 'formatter': 'verbose'\n },\n 'null': {\n 'level': 'DEBUG',\n 'class': 'logging.NullHandler',\n },\n },\n 'loggers': {\n '': {\n 'handlers': ['console'],\n 'level': 'DEBUG',\n 'propagate': True,\n },\n 'aiohttp.access': {\n 'handlers': ['console'],\n 'level': 'DEBUG',\n 'propagate': False,\n },\n },\n}\n\n# Application\nAPP_HOST = env('APP_HOST', cast=str, default='0.0.0.0')\nAPP_PORT = env('APP_PORT', cast=int, default=5000)\n\n# Kafka\nKAFKA_BOOTSTRAP_SERVERS = env('KAFKA_BOOTSTRAP_SERVERS', cast=str, default='0.0.0.0:9092')\nKAFKA_TOPIC_DOWNLOAD = env('KAFKA_TOPIC_DOWNLOAD', cast=str, default='university-download')\nKAFKA_TOPIC_SUCCESS = env('KAFKA_TOPIC_SUCCESS', cast=str, default='university-success')\n\n# Webhook\nWEBHOOK_URL = env('WEBHOOK_URL', cast=str, default='http://0.0.0.0:8000/download/webhook')\n\nloop = asyncio.get_event_loop()\n","sub_path":"src/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"546301852","text":"import shlex\nimport subprocess\nfrom custodian import Custodian\nfrom custodian.vasp.jobs import VaspJob\nfrom custodian.vasp.handlers import VaspErrorHandler, AliasingErrorHandler, MeshSymmetryErrorHandler, \\\n UnconvergedErrorHandler, MaxForceErrorHandler, PotimErrorHandler, FrozenJobErrorHandler, NonConvergingErrorHandler, \\\n PositiveEnergyErrorHandler\nfrom custodian.vasp.validators import VasprunXMLValidator\nfrom fireworks import explicit_serialize, FireTaskBase, FWAction\nfrom matmethods.utils.utils import env_chk\n\n__author__ = 'Anubhav Jain '\n__credits__ = 'Shyue Ping Ong '\n\n\n@explicit_serialize\nclass RunVaspDirect(FireTaskBase):\n \"\"\"\n Run VASP directly (no custodian).\n\n Required params:\n vasp_cmd (str): the name of the full executable for running VASP. Supports env_chk.\n \"\"\"\n\n required_params = [\"vasp_cmd\"]\n\n def run_task(self, fw_spec):\n vasp_cmd = env_chk(self[\"vasp_cmd\"], fw_spec)\n\n print(\"Running VASP using exe: {}\".format(vasp_cmd))\n return_code = subprocess.call(vasp_cmd, shell=True)\n print(\"VASP finished running with returncode: {}\".format(return_code))\n\n\n@explicit_serialize\nclass RunVaspCustodianFromObjects(FireTaskBase):\n \"\"\"\n Run VASP using custodian in a generic manner using built-in custodian objects\n\n Required params:\n jobs: ([Job]) - a list of custodian jobs to run\n handlers: ([ErrorHandler]) - a list of error handlers\n\n Optional params:\n validators: ([Validator]) - a list of Validators\n custodian_params ({}) - dict of all other custodian parameters\n \"\"\"\n\n required_params = [\"jobs\", \"handlers\"]\n optional_params = [\"validators\", \"custodian_params\"]\n\n def run_task(self, fw_spec):\n c = Custodian(self[\"handlers\"], self[\"jobs\"], self.get(\"validators\"), **self.get(\"custodian_params\", {}))\n output = c.run()\n return FWAction(stored_data=output)\n\n\n@explicit_serialize\nclass RunVaspCustodian(FireTaskBase):\n \"\"\"\n Run VASP using custodian \"on rails\", i.e. 
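For reference, the custodian objects these FireTasks wrap can also be driven directly, using only the calls that appear in this record; a minimal sketch, where the 'vasp_std' command string is a placeholder for a real VASP executable:

from custodian import Custodian
from custodian.vasp.jobs import VaspJob
from custodian.vasp.handlers import VaspErrorHandler
from custodian.vasp.validators import VasprunXMLValidator

# one plain VASP run, retried on recognized errors, validated at the end
jobs = [VaspJob(['vasp_std'], default_vasp_input_set=None)]
handlers = [VaspErrorHandler()]
c = Custodian(handlers, jobs, validators=[VasprunXMLValidator()], max_errors=2)
c.run()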
in a simple way that supports most common options.\n\n Required params:\n vasp_cmd (str): the name of the full executable for running VASP. Supports env_chk.\n\n Optional params:\n job_type: (str) - choose from \"normal\" (default), \"double_relaxation_run\" (two consecutive jobs), and \"full_opt_run\"\n handlers: (int) - level of handlers to use,0-4. 0 means no handlers, 2 is the default, 4 is highest level\n scratch_dir: (str) - if specified, uses this directory as the root scratch dir. Supports env_chk.\n gzip_output: (bool) - gzip output (default=T)\n max_errors: (int) - maximum # of errors to fix before giving up (default=2)\n auto_npar: (bool) - use auto_npar (default=F). Recommended set to T for single-node jobs only. Supports env_chk.\n gamma_vasp_cmd: (str) - cmd for Gamma-optimized VASP compilation. Supports env_chk.\n\n \"\"\"\n\n required_params = [\"vasp_cmd\"]\n optional_params = [\"job_type\", \"scratch_dir\", \"gzip_output\", \"max_errors\", \"auto_npar\", \"gamma_vasp_cmd\"]\n\n def run_task(self, fw_spec):\n vasp_cmd = env_chk(self[\"vasp_cmd\"], fw_spec)\n if isinstance(vasp_cmd, basestring):\n vasp_cmd = shlex.split(vasp_cmd)\n\n # initialize variables\n job_type = self.get(\"job_type\", \"normal\")\n scratch_dir = env_chk(self.get(\"scratch_dir\"))\n gzip_output = self.get(\"gzip_output\", True)\n max_errors = self.get(\"max_errors\", 2)\n auto_npar = self.get(\"auto_npar\", True)\n gamma_vasp_cmd = self.get(\"gamma_vasp_cmd\")\n\n # construct jobs\n jobs = []\n if job_type == \"normal\":\n jobs = [VaspJob(vasp_cmd, default_vasp_input_set=None, auto_npar=auto_npar, gamma_vasp_cmd=gamma_vasp_cmd)]\n elif job_type == \"double_relaxation_run\":\n jobs = VaspJob.double_relaxation_run(vasp_cmd, auto_npar=auto_npar)\n elif job_type == \"full_opt_run\":\n jobs = VaspJob.full_opt_run(vasp_cmd, auto_npar=auto_npar, max_steps=4)\n else:\n raise ValueError(\"Unsupported job type: {}\".format(job_type))\n\n # construct handlers\n handlers = []\n if self['handlers'] > 0:\n handlers.extend([VaspErrorHandler(), MeshSymmetryErrorHandler(), UnconvergedErrorHandler(),\n NonConvergingErrorHandler(), PotimErrorHandler(), PositiveEnergyErrorHandler()])\n if self['handlers'] > 1:\n handlers.append(AliasingErrorHandler())\n if self['handlers'] > 2:\n handlers.append(FrozenJobErrorHandler())\n if self['handlers'] > 3:\n handlers.append(MaxForceErrorHandler())\n\n validators = [VasprunXMLValidator()]\n\n c = Custodian(handlers, jobs, validators=validators, max_errors=max_errors,\n scratch_dir=scratch_dir, gzipped_output=gzip_output)\n\n output = c.run()\n return FWAction(stored_data=output)","sub_path":"matmethods/vasp/firetasks/run_calc.py","file_name":"run_calc.py","file_ext":"py","file_size_in_byte":4973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"98321261","text":"# -*- coding: UTF-8 -*-\nimport slacker\nfrom msg import Msg\nimport codecs # File I/O with UTF-8\n\n# User-defined Arguments\ntoken = 'xoxp-2734280152-4137412349-4293823360-a7d3bf'\nslack = slacker.Slacker(token)\nchannel = 'general'\n# filename = '{}_backup_{:%Y%m%d}'.format(channel,date)\nfilename = 'backup.txt'\n# end of the arguments\n\noutput_file = codecs.open(filename, \"w\", \"utf-8-sig\")\ncnt = 0\n# Real 2ch 1001 rule\n_str1001 = '''\n                      γ\n                      (\n                      _ノ\n\n                   /\n                __\n             ,、'\"   .  `' 、\n             i`ー  _    ',\n.             
l| !|      i\"\"!|\n                 }: }i    |{  !j\n               〈| 'J |!   }j  :}\n            _ノ;し  i}  {J  |\n         ,、-,、'  ���      ハ- 、\n         ( .( '、_    _ ,ノ  ノ:i   )\n        ,、'\"\"`ー---‐'\"フ、_ - _,、' -'\"\n        (  _   ,、'\"    ̄\n         `ー--─'\"\n千本目の蝋燭が消えますた・・・\n新しい蝋燭を立ててくださいです・・・\n'''\n\n\ndef make_user_dict():\n l = slack.users.list()\n dic = {'USLACKBOT': 'slackbot'}\n for user in l.body['members']:\n dic[user['id']] = user['name']\n return dic\n\n\ndef out_1001(output_file):\n output_file.write('1001 :1001:Over 1000 Thread \\n')\n output_file.write(_str1001)\n\n\ndef MessageReading(message, MessageList, UserDic):\n if 'subtype' not in message:\n MessageList.append(Msg(message['user'], message['ts'], message['text']))\n elif message['subtype'] == 'bot_message':\n print('Bot message Exists.')\n UserDic[message['bot_id']] = message['user_name']\n MessageList.append(Msg(message['bot_id'], message['ts'], message['text'], message['subtype']))\n elif message['subtype'] == 'me_message':\n MessageList.append(Msg(message['user'], message['ts'], message['text'], message['subtype']))\n elif message['subtype'] == 'message_changed':\n MessageList.append(Msg(message['user'], message['ts'], message['text'], message['subtype'], edit_user=message['edited']['user'], edit_ts=message['edited']['ts']))\n else:\n pass\n\n# Getting Channel ID\nre = slack.channels.list()\n_id = 0\nfor chan in re.body['channels']:\n if (chan['name'] == channel):\n _id = chan['id']\n\nprint('Channel id = {}'.format(_id))\n\n# Getting History\nresponse = slack.channels.history(_id, count=1000)\nif response.body['messages']:\n print('Message Exists. {0} message(s) found.'.format(len(response.body['messages'])))\nelse:\n print('No message exists.')\n\nts = 0\ndic = make_user_dict()\nl = []\n\n# Reading File History\nwhile(response.body['has_more'] is True):\n last_user = ''\n for msg in response.body['messages']:\n if 'subtype' in msg:\n l.append(Msg(msg.get('user'), msg['ts'], msg['text'], msg['subtype']))\n else:\n l.append(Msg(msg['user'], msg['ts'], msg['text']))\n ts = msg['ts']\n\n # Output\n # print ('Len of l', len(l))\n l.reverse()\n for msg in l:\n if msg.user not in dic:\n dic[msg.user] = msg.user\n cnt = cnt + 1\n msg.writeToFile(output_file, dic, cnt)\n out_1001(output_file)\n l = []\n response = slack.channels.history(_id, latest=ts, count=1000)\n # print('Latest Timestamp:', ts)\nelse:\n for msg in response.body['messages']:\n MessageReading(msg, l, dic)\n if l:\n l.reverse()\n for msg in l:\n if msg.user not in dic:\n dic[msg.user] = msg.user\n cnt = cnt + 1\n msg.writeToFile(output_file, dic, cnt)\n # msg.print_(dic, cnt)\n\n if len(l) == 1000:\n out_1001(output_file)\n\n l = []\n\noutput_file.close()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"591169767","text":"import json\nimport boto3\n\n#Set source and destination bucket names and key \nsource = \"source-bucket-name\"\ndestination =\"destination-bucket-name\"\nkey = \"key/pattern of file to copy from souce to destination\"\n\ndef handler(event, context):\n \n #assume role of destination account to copy files, \n #this way the owner of the file will be destination role \n sts_client = boto3.client('sts')\n assumedRoleObject = sts_client.assume_role(\n RoleArn=\"arn of role used to copy files in destination account\",\n RoleSessionName=\"AssumeRoleSession1\")\n credentials = 
assumedRoleObject['Credentials']\n print(credentials)\n \n #instantiate S3 using the destination accont role\n s3 = boto3.client(\n 's3',\n aws_access_key_id = credentials['AccessKeyId'],\n aws_secret_access_key = credentials['SecretAccessKey'],\n aws_session_token = credentials['SessionToken'],\n\n )\n copy_data = {\n 'Bucket': source,\n 'Key': key}\n s3.copy_object(Bucket=destination, Key=key, CopySource=copy_data)\n \n ","sub_path":"s3-copy/s3_copy_push_method/s3_copy.py","file_name":"s3_copy.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"607280051","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport datetime\nimport os\nimport seaborn as sns\n\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import r2_score\nfrom sklearn.ensemble import RandomForestRegressor as rfr\n\nimport scipy.stats as stats\n\n\nif '폰트 설정':\n from matplotlib import font_manager, rc\n font_path = \"/Users/kang_chanwoong/Desktop/빅 콘테스트/untitled folder/SB 어그로 L.ttf\"\n font = font_manager.FontProperties(fname=font_path)\n\nos.system('clear')\n\ndata = pd.read_csv('/Users/kang_chanwoong/Desktop/빅 콘테스트/untitled folder/training.csv')\ndata = data[:2891]\n\nfor ch in ['유역평균강수','강우A','강우B','강우C','강우D','수위E','수위D']:\n for i in range(1,7):\n column = str(i) + ch\n x,y = data[[column]],data['유입량']\n train_X, test_X, train_Y, test_Y = train_test_split(x, y, test_size=0.3)\n\n reg = LinearRegression()\n reg.fit(train_X, train_Y)\n pred_Y = reg.predict(test_X)\n\n title = str(i) + ch + '- 유입량 Linear regression Graph'\n plt.scatter(test_X,pred_Y, s = 3, c = 'green', alpha = 0.9)\n plt.scatter(test_X,test_Y, s = 10, c = 'red', alpha = 0.2)\n \n plt.xlabel(str(i) + ch, fontproperties=font)\n plt.ylabel('유입량', fontproperties=font)\n plt.title(title, fontproperties=font)\n\n script_dir = os.path.dirname(__file__)\n date_string = 'Results/'\n results_dir = os.path.join(script_dir, date_string)\n file_name = 'regress' + title + '.png'\n\n if not os.path.isdir(results_dir): \n os.makedirs(results_dir)\n\n plt.savefig(results_dir + file_name, facecolor='#eeeeee')\n plt.close()","sub_path":"data_regress 2.py","file_name":"data_regress 2.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"197300754","text":"\"\"\"Prende i dati di ieri (past.csv), di oggi (present.csv) e dei vaccini (vaccini.csv) per calcolare\nun po' di situazioni. Poi salva tutto nelle rispettive stories e html.\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom datetime import date\nimport os\nimport shutil\n\nimport grafichini as graph\nimport dusi\n\n#\n##\n### PREPARO GLI INPUT\n##\n#\n\ndusi.download()\n\npast = pd.read_csv('past.csv')\npresent = pd.read_csv('present.csv')\nvaccini = pd.read_csv('vaccini.csv')\n\nlombardia_past = past[ past['denominazione_regione'] == 'Lombardia' ]\nlombardia_present = present[ present['denominazione_regione'] == 'Lombardia' ]\nvaccini_lombardia = vaccini[ vaccini['area'] == 'LOM' ]\n\n#controllo che siano giusti, altrimenti non se ne fa niente\nprint()\nprint(lombardia_present.tail(1))\nprint(vaccini_lombardia.tail(1))\nif input(\"Dataset impostati giusti? (s/n) \") == \"n\":\n\texit(\"File smarmellati. 
Ciao!\")\n\n#faccio il backup\nshutil.rmtree('backup')\nos.system('cp -r pantarei backup')\nshutil.copy('past.csv', 'backup')\n\n#\n##\n### ELABORO\n##\n#\n\n#tamponi\ntot_tamponi_present = lombardia_present['tamponi'].values[0]\ntot_tamponi_past = lombardia_past['tamponi'].values[0]\ntamponi_oggi = tot_tamponi_present - tot_tamponi_past\n\n#nuovi positivi\nnuovi_positivi = lombardia_present['nuovi_positivi'].values[0]\n\n#rapporto\npercentuale = np.around(nuovi_positivi / tamponi_oggi * 100, 2)\n\n#in ospedale adesso\nospedalizzati = lombardia_present['totale_ospedalizzati'].values[0]\n\n#in T.I. adesso\nterapie_attuali = lombardia_present['terapia_intensiva'].values[0]\n\n#deceduti oggi\ntot_deceduti_present = lombardia_present['deceduti'].values[0]\ntot_deceduti_past = lombardia_past['deceduti'].values[0]\ndeceduti_oggi = tot_deceduti_present - tot_deceduti_past\n\n#vaccinati\nprimadose_tot = vaccini_lombardia['prima_dose'].sum()\nsecondadose_tot = vaccini_lombardia['seconda_dose'].sum()\nprimadose_perc = np.around(primadose_tot / 10060965 * 100, 2)\nsecondadose_perc = np.around(secondadose_tot / 10060965 * 100, 2)\n\n#\n##\n### ESPORTO\n##\n#\n\n#story dei rapporti\nf = open('pantarei/perc_story.txt', 'a')\nf.write( \"\\n\" + str(percentuale) )\nf.close()\n\n#story degli ospedalizzati\nf = open('pantarei/ospedalizzati_story.txt', 'a')\nf.write( \"\\n\" + str(ospedalizzati) )\nf.close()\n\n#story delle terapie\nf = open('pantarei/terapie_story.txt', 'a')\nf.write( \"\\n\" + str(terapie_attuali) )\nf.close()\n\n#story dei deceduti\nf = open('pantarei/deceduti_story.txt', 'a')\nf.write( \"\\n\" + str(deceduti_oggi) )\nf.close()\n\n#story dei vaccini\nf = open('pantarei/primadose_story.txt', 'a')\nf.write( \"\\n\" + str(primadose_tot) )\nf.close()\nf = open('pantarei/secondadose_story.txt', 'a')\nf.write( \"\\n\" + str(secondadose_tot) )\nf.close()\n\n\"\"\" \"\"\"\n\n#grafico rapporti\ngraph.curve(path = \"pantarei/perc_story.txt\", filename = \"rapporto\", color = \"#f33a30\", ylabel = \"rapporto = pos/tam\")\n\n#grafico ospedalizzati\ngraph.curve(\"pantarei/ospedalizzati_story.txt\", \"ospedalizzati\", \"#f99726\", \"ospedalizzati\")\n\n#grafico terapie\ngraph.curve(\"pantarei/terapie_story.txt\", \"terapie_attuali\", \"#44a546\", \"t.i. occupate\")\n\n#grafico deceduti\ngraph.histo(\"pantarei/deceduti_story.txt\", \"deceduti_giornalieri\", \"#1c8af2\", \"deceduti\")\n\n#grafico vaccini\ngraph.vax(filename = \"vaccini\", color = \"#9023a8\")\n\n\"\"\" \"\"\"\n\n#html del rapporto\nf = open('pantarei/perc.txt', 'w')\nf.write( str(percentuale) + \"%\" )\nf.close()\n\n#html degli ospedalizzati\nf = open('pantarei/ospedalizzati.txt', 'w')\nf.write( str(ospedalizzati) )\nf.close()\n\n#html delle terapie attuali\nf = open('pantarei/terapie_attuali.txt', 'w')\nf.write( str(terapie_attuali) )\nf.close()\n\n#html dei deceduti oggi\nf = open('pantarei/deceduti_oggi.txt', 'w')\nf.write( str(deceduti_oggi) )\nf.close()\n\n#html delle percentuali di vaccinati\nf = open('pantarei/primadose_perc.txt', 'w')\nf.write( str(primadose_perc) + \"%\")\nf.close()\nf = open('pantarei/secondadose_perc.txt', 'w')\nf.write( str(secondadose_perc) + \"%\")\nf.close()\n\n\"\"\" \"\"\"\n\ngiorno = date.today().strftime(\"%d/%m/%Y\")\nf = open('pantarei/data.txt', 'w')\nf.write( str(giorno) )\nf.close()\n\n#\n##\n### CHIUDO\n##\n#\n\nos.remove(\"past.csv\")\nos.rename(r'present.csv', r'past.csv')\n\nprint(\"Fatto. 
Dati aggiornati al \" + str(giorno) + \".\")","sub_path":"backend/macinino.py","file_name":"macinino.py","file_ext":"py","file_size_in_byte":4148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"69610032","text":"import numpy as np\nimport cv2\nimport socket\nimport time\nfrom getKeys import key_check\nimport requests\nimport urllib3\nimport json\nimport os\nfrom models import pinet\n\nWIDTH = 320\nHEIGHT = 120\nLR = 1e-3\nEPOCHS = 10\nMODEL_NAME = 'trained_models/SDCModel-{}-{}-{}-epochs-300K-data.model'.format(LR, 'pinetv2',EPOCHS)\nt_time = 0.09\n\nclass StreamingServer(object):\n\tdef __init__(self):\n\t\t# Control keys value\n\t\tself.restUrl = 'http://192.168.1.106:8080/control'\n\t\tself.server_socket = socket.socket()\n\t\tself.server_socket.bind(('192.168.1.102', 8000))\n\t\tself.server_socket.listen(1)\n\t\tself.conn, self.client_address = self.server_socket.accept()\n\t\tself.connection = self.conn.makefile('rb')\n\t \n\t\tself.streamingAndCollectData()\n\n\tdef streamingAndCollectData(self):\n\t\tmodel = pinet (WIDTH, HEIGHT, LR)\n\t\tmodel.load(MODEL_NAME)\n\n\t\tprint('Start Testing self driving car.')\n\t\te1 = cv2.getTickCount()\n\n\t\tfor i in list(range(4))[::-1]:\n\t\t\tprint(i+1)\n\t\t\ttime.sleep(1)\n\n\t\tlast_time = time.time()\n\t\tprint('STARTING!!!')\n\n\t\ttry:\n\t\t\tprint(\"Connection from: \", self.client_address)\n\t\t\tprint(\"Streaming...\")\n\t\t\tprint(\"Press 'Q' to exit\")\n\n\t\t\tstream_bytes = b''\n\t\t\tframe = 1\n\t\t\twhile True:\n\t\t\t\tstream_bytes += self.connection.read(1024)\n\t\t\t\tfirst = stream_bytes.find(b'\\xff\\xd8')\n\t\t\t\tlast = stream_bytes.find(b'\\xff\\xd9')\n\t\t\t\tself.conn.sendall(b'WA')\n\t\t\t\tif first != -1 and last != -1:\n\t\t\t\t\tjpg = stream_bytes[first:last + 2]\n\t\t\t\t\tstream_bytes = stream_bytes[last + 2:]\n\t\t\t\t\timage = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.IMREAD_UNCHANGED)\n\n\t\t\t\t\t# select lower half of the image\n\t\t\t\t\troi = image[120:240, :]\n\n\t\t\t\t\tlast_time = time.time()\n\t\t\t\t\t# run a color convert:\n\t\t\t\t\tscreen = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)\n\t\t\t\t\tscreen = cv2.resize(screen, (320,120))\n\t\t\t\t\tcv2.imshow('image', screen)\n\n\t\t\t\t\tprediction = model.predict([screen.reshape(320,120,1)])[0]\n\t\t\t\t\tprint(prediction)\n\n\t\t\t\t\tturn_thresh = .75\n\t\t\t\t\tfwd_thresh = 0.70\n\n\t\t\t\t\tif prediction[1] > fwd_thresh:\n\t\t\t\t\t\tprint('Forward')\n\t\t\t\t\telif prediction[0] > turn_thresh:\n\t\t\t\t\t\tprint('left')\n\t\t\t\t\telif prediction[2] > turn_thresh:\n\t\t\t\t\t\tprint('right')\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint('Forward')\n\n\t\t\te2 = cv2.getTickCount()\n\t\t\t# calculate streaming duration\n\t\t\ttime0 = (e2 - e1) / cv2.getTickFrequency()\n\t\t\tprint('Streaming duration:', time0)\n\t\t\t\n\t\tfinally:\n\t\t\tself.connection.close()\n\t\t\tself.server_socket.close()\n\n\t\t\n\nif __name__ == '__main__':\n\tStreamingServer()","sub_path":"Computer/tfMethod/testsdc.py","file_name":"testsdc.py","file_ext":"py","file_size_in_byte":2406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"52852900","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Sep 18 20:15:04 2017\r\n\r\n@author: Oliver\r\n\"\"\"\r\nx = 16\r\nlow = 1.0\r\nhigh = x\r\nepsilon = 0.01\r\nanswer= (high + low)/2.0\r\nnumGuesses = 0\r\n \r\nwhile abs(answer **2 - x) > epsilon:\r\n numGuesses +=1\r\n \r\n if answer **2 < x:\r\n low = answer\r\n 
else:\r\n high = answer\r\n answer = (high + low)/2.0\r\nprint (\"square root of 16 is \" + str(answer))\r\nprint (\"it took this many guesses \" + str(numGuesses))\r\n","sub_path":"anothersquare root.py","file_name":"anothersquare root.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"111306882","text":"import urllib.request\nimport pprint\nimport time\nimport pickle\nHASH_URL = 'Https://www.blockchain.com/btc/block-height/'\ntry:\n BTC_BLOCK_HASHES = pickle.load(open('btc-block-hashes.vnm','rb'))\nexcept Exception as e:\n print('Block Hash File Needed')\n BTC_BLOCK_HASHES = dict()\n pickle.dump(BTC_BLOCK_HASHES,open('btc-block-hashes.vnm','wb'))\n\ndef Clone_System(target):\n try:\n headers = {}\n headers['User-Agent'] = \"Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17\"\n url = str(target)\n req = urllib.request.Request(url, headers = headers)\n x = urllib.request.urlopen(req)\n l = str(x.read())\n return l\n except Exception as e:\n print('Error Returning Url [{}]'.format(e))\n\ndef _grabFirst(string):\n position = 0\n for i in string:\n if str(i) == '>' and string[position+1] == '0':\n #print('i: [{}], Position: [{}]'.format(str(i),position))\n return position +1\n else:\n #print('i: [{}], Position: [{}]'.format(str(i),position))\n position += 1\ndef _grabSecond(string):\n allowed = ['a','b','c','d','e','f','0','1','2','3','4','5','6','7','8','9']\n position = 0\n for i in string:\n if i == '<' and string[position-1] in allowed:\n #print('i: [{}], Position: [{}]'.format(str(i),position))\n return position\n else:\n #print('i: [{}], Position: [{}]'.format(str(i),position))\n position += 1\n\ndef _main(block):\n global Block\n BTC_BLOCK_HASHES = pickle.load(open('btc-block-hashes.vnm','rb'))\n Hash = Clone_System(HASH_URL+str(block))\n XHash = Hash[6800:7059]\n #print('Sorting [{}]'.format(XHash))\n starting = _grabFirst(XHash)\n ending = _grabSecond(XHash)\n AHash = XHash[int(starting):int(ending)]\n print('Selected AHash: [{}]'.format(AHash))\n if len(AHash) == 64:\n if str(block) not in BTC_BLOCK_HASHES:\n print('Adding Data [{}] For Block [{}]'.format(XHash,str(block)))\n print('Real Hash: [{}]'.format(AHash))\n BTC_BLOCK_HASHES[str(block)] = AHash\n pickle.dump(BTC_BLOCK_HASHES,open('btc-block-hashes.vnm','wb'))\n elif str(block) in BTC_BLOCK_HASHES:\n if BTC_BLOCK_HASHES[str(block)] != AHash:\n print('We Have A Error, Different Hashes For Block: [{}].'.format(str(block)))\n print('Real Hash: [{}]'.format(AHash))\n BTC_BLOCK_HASHES[str(block)] = AHash\n pickle.dump(BTC_BLOCK_HASHES,open('btc-block-hashes.vnm','wb'))\n else:\n print('Hash Already Logged For Block [{}].'.format(str(block)))\n else:\n print('Length Is Off...[{}].'.format(len(AHash)))\n paused = input('>>: ')\n \n_Started = False\nwhile True:\n global Block\n if _Started == False:\n BTC_BLOCK_HASHES = pickle.load(open('btc-block-hashes.vnm','rb'))\n print('Which Block Are We Starting At?')\n Block = int(input('>>: '))\n _Started = True\n try:\n _main(Block)\n Block += 1\n except Exception as e:\n print('We Had A Error: [{}]'.format(e))\n try:\n print('Resetting Connection')\n mimic = Block - 5\n Block = mimic\n _main(Block)\n except Exception as e:\n print('Failed To Reset Connection. 
Error: [{}].'.format(e))\n exit()\n \n \n \n\n","sub_path":"Memory/Information/bitcoinBlockHashes.py","file_name":"bitcoinBlockHashes.py","file_ext":"py","file_size_in_byte":2995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"2824585","text":"import os\nimport re\nimport tensorflow as tf\nimport numpy as np\nimport pickle\n\nos.chdir(os.path.dirname(__file__))\n\ndef load_data(language):\n\n if language == 'en':\n small_vocab = open('data/small_vocab_en')\n if language == 'fr':\n small_vocab = open('data/small_vocab_fr')\n\n small_vocab_revised = [sequence.replace('\\n', ' ') for sequence in small_vocab]\n sequence_list = [re.sub(\"[^\\w]\", \" \", sequence).split() for sequence in small_vocab_revised]\n\n return sequence_list\n\ndef create_dictionary(sequence_list, output=None):\n\n text = []\n for sequence in sequence_list:\n [text.append(word) for word in sequence]\n text = list(set(text))\n\n word_dict = {}\n for i, word in enumerate(text):\n word_dict[word] = i\n\n word_dict[''] = len(word_dict)\n if sequence_list:\n word_dict[''] = len(word_dict)\n word_dict[''] = len(word_dict)\n\n with open('%s.dump'%(output), 'wb') as f:\n pickle.dump(word_dict, f)\n\ndef change_string2int(sequence_list, vocab_dict):\n text_list = []\n\n for sequence in sequence_list:\n text_int = []\n for word in sequence:\n if word in vocab_dict:\n text_int.append(vocab_dict[word])\n\n text_list.append(text_int)\n\n return text_list\n\nclass preprocess():\n\n def load_preprocess(self):\n\n en_dict_path = './en.dump'\n fr_dict_path = './fr.dump'\n en_exist = os.path.exists(en_dict_path)\n fr_exist = os.path.exists(fr_dict_path)\n\n en_text = load_data(language='en')\n if en_exist == False:\n create_dictionary(en_text, output='en')\n\n en_dict = pickle.load(open('en.dump', 'rb'))\n en_int_text = change_string2int(en_text, en_dict)\n\n fr_text = load_data(language='fr')\n if fr_exist == False:\n create_dictionary(fr_text, output='fr')\n\n fr_dict = pickle.load(open('fr.dump', 'rb'))\n fr_int_text = change_string2int(fr_text, fr_dict)\n\n return en_int_text, en_dict, fr_int_text, fr_dict\n\n#encoder_input = ['hello','how','are','you','','',']\n#decoder_input = ['','i','am','fine','','','']\n#tgt_label = ['i','am','fine','','','']\n#inference phase, the output of each time step will be the input for the next time step\n#preprocess the tgt label data for inference phase \n#saying like this is the start of the translation\n\nsr_int_text, sr_dict, tgt_int_text, tgt_dict = preprocess().load_preprocess()\n\nsr_seq_leg = [len(seq) for seq in sr_int_text]\nmax_sr_seq_leg = max(sr_seq_leg)\nsr_inputs = [seq + [sr_dict['']]*(max_sr_seq_leg-len(seq)) for seq in sr_int_text]\n\ntgt_seq_leg = [len([sr_dict['']] + seq) for seq in tgt_int_text]\nmax_tgt_seq_leg = int(max(tgt_seq_leg)-1)\ntgt_inputs = [[sr_dict['']] + seq + [sr_dict['']] + [sr_dict['']]*(max_tgt_seq_leg-len(seq)) for seq in tgt_int_text]\ntgt_label = [seq + [sr_dict['']] + [sr_dict['']]*(max_tgt_seq_leg -len(seq)) for seq in tgt_int_text]\ntgt_seq_leg = [len(seq) for seq in tgt_label]\n","sub_path":"Basic_RNN/BK/load_preprocess_revised.py","file_name":"load_preprocess_revised.py","file_ext":"py","file_size_in_byte":3087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"313739065","text":"#! 
/usr/bin/env python\n\nimport glob\nimport sys\nimport os, errno\nimport argparse\nimport fnmatch\nimport random\n\nfrom Bio import SeqIO\nfrom Bio.SeqFeature import FeatureLocation\n\n\n\n\nparser = argparse.ArgumentParser(description='Extraction of random sequences')\nparser.add_argument(\"-p\",\"--path\", help=\"searching path of sequences files\",\n type=str)\nparser.add_argument(\"-i\",\"--infile\", help=\"input file list of sequence files, or sequence file\",\n type=str)\nparser.add_argument(\"--pathfind\", help=\"[mode] search all sequences files in given path (-p)\",\n action=\"store_true\")\nparser.add_argument(\"--multiple\", help=\"[mode] extraction from multiple sequences files in given path (-p) otherwise parse a given list (-i)\",\n action=\"store_true\")\nparser.add_argument(\"--single\", help=\"[mode] extraction from a single given fasta file (-i)\",\n action=\"store_true\")\nparser.add_argument(\"--protein\", help=\"[mode] extract the aminoacid sequence\",\n action=\"store_true\")\nparser.add_argument(\"-o\",\"--outfile\", help=\"output file list of concatenated sequences\",\n type=str)\nparser.add_argument(\"-c\",\"--count\", help=\"number of sequences targeted\",\n type=int)\nparser.add_argument(\"-e\",\"--extension\", help=\"extension of files to search (i.e fa,fna,fasta or custom)\",\n type=str)\nif len(sys.argv)==1:\n parser.print_help(sys.stderr)\n sys.exit(1)\n\nargs = parser.parse_args()\n\n\nclass ProgressBar:\n\t'''\n\tProgress bar\n\t'''\n\tdef __init__ (self, valmax, maxbar, title):\n\t\tif valmax == 0: valmax = 1\n\t\tif maxbar > 200: maxbar = 200\n\t\tself.valmax = valmax\n\t\tself.maxbar = maxbar\n\t\tself.title = title\n\tdef update(self, val):\n\t\timport sys\n\t\tperc = round((float(val) / float(self.valmax)) * 100)\n\t\tscale = 100.0 / float(self.maxbar)\n\t\tbar = int(perc / scale)\n\t\tout = '\\r %20s [%s%s] %3d %% ' % (self.title, '.' * bar, ' ' * (self.maxbar - bar), perc)\n\t\tsys.stdout.write(out)\n\t\tsys.stdout.flush()\n\n\nfile_extension=args.extension\nnumber_of_seqs=args.count\n\nif args.multiple:\n if args.pathfind:\n 'Search all fasta files files'\n path = args.path\n in_files = []\n for r, d, f in os.walk(path):\n for file in f:\n if str(\".\"+file_extension) in file:\n in_files.append(os.path.join(r, file))\n else:\n 'We parse a list of files'\n input_list = args.infile\n with open(input_list) as f:\n in_files = f.readlines()\n\nif args.single:\n in_files = args.infile\n\n#cat *.fa | grep '>' | sort | uniq | perl -pe 's/>//' > list_taxa\n\nfname=args.outfile\n\n'read sequences and stock the id/seqs in dictionnary'\nfor file in in_files:\n seqs={}\n dicinfo={}\n cur_genome = SeqIO.parse(file, \"fasta\")\n for record in cur_genome:\n seqID=record.id.split(\";\")[0]\n sequence=record.seq\n seqs.setdefault(seqID, []).append(sequence)\n dicinfo.setdefault(seqID, []).append(record.description.replace(\";\",\"\"))\n\n genename=os.path.basename(file).replace(str(\".\"+file_extension),\"\")\n\n if len(seqs) >= number_of_seqs:\n random_keys=random.sample(list(seqs),k=number_of_seqs)\n else:\n print (\"WARN: the number of targeted sequences is greater than the number of sequences found into files. 
By default, all sequences are kept.\")\n random_keys=random.sample(list(seqs),k=len(seqs))\n\n for taxa in random_keys:\n if args.protein:\n taxa_sequence=str(seqs[taxa][0].translate()).replace(\"-\",\"n\").replace(\"*\",\"\").upper()\n else:\n taxa_sequence=str(seqs[taxa][0]).replace(\"-\",\"n\").upper()\n infos = dicinfo[taxa][0].split(\" \")[1:len(dicinfo[taxa][0].split(\" \"))]\n taxinfo = dict(item.split(\"=\") for item in infos)\n header=\">\"+str(taxinfo['gene'])+\"_\"+str(taxinfo['taxid'])\n\n if os.path.isfile(fname):\n with open(fname, 'a+') as file:\n old_headers = []\n end_file=file.tell()\n file.seek(0)\n for line in file:\n if line.startswith(\">\"):\n old_headers.append(line.replace(\">\",\"\").split(\";\")[0])\n if not taxa in old_headers:\n file.seek(end_file)\n file.write(header+'\\n')\n file.write(str(taxa_sequence)+'\\n')\n else:\n pass\n else :\n with open(fname, 'w') as out:\n out.write(header+'\\n')\n out.write(str(taxa_sequence)+'\\n')\n","sub_path":"src/RandomSeq.py","file_name":"RandomSeq.py","file_ext":"py","file_size_in_byte":4607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"290262440","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('record/', views.record_api),\n path('calculate', views.calculate),\n path('stats', views.stats),\n path('diff', views.diff),\n path('plot', views.plot)\n]\n","sub_path":"mweb/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"421269664","text":"import sys\nimport os\nimport json\nimport make_arch as march \n\nNUM_QUE = 20\n\nFANOUT = 10\n\ndef main():\n\tglobal NUM_QUE\n\tglobal FANOUT\n\tcore_aff = []\n\tfor i in range(0, NUM_QUE):\n\t\taff = march.make_Simp_core_aff(i, [i])\n\t\tcore_aff.append(aff)\n\tcore_list = range(0, NUM_QUE)\n\n\tmachines = []\n\tsched = march.make_service_sched(\"LinuxNetStack\", [NUM_QUE, core_list], core_aff)\n\n\t# need 2x machines + 1 to hold both ngx, memcached pairs and a load balancer\n\tfor i in range(0, FANOUT*2 + 1):\n\t\tmach_name = \"machine_\" + str(i)\n\t\tmach = march.make_machine(mid = i, name = mach_name, cores = 40, netSched = sched)\n\t\tmachines.append(mach)\n\n\twith open(\"./json/machines.json\", \"w+\") as f:\n\t\tjson.dump(machines, f, indent=2)\n\nif __name__ == \"__main__\":\n\tmain()\n\n","sub_path":"architecture/make_arch/examples/fannout_2tier/machines.py","file_name":"machines.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"471520864","text":"from auger_ml.optimizers.space.classifier import classifier_space\nfrom auger_ml.optimizers.space.regressor import regressor_space\nimport copy\n\n\nclass SearchSpaceConversions(object):\n @classmethod\n def convert(cls, search_space_ui, classification):\n space_filter = classifier_space if classification else regressor_space\n new_space = {}\n for k, v in search_space_ui.items():\n space_params = space_filter.get(k, None)\n if space_params is not None:\n new_space[k] = cls._convert_parameters(v)\n\n for key, space_param in space_params.items():\n if new_space[k].get(str(key)) is None:\n new_space[k][str(key)] = space_param\n\n return new_space\n\n @staticmethod \n def fill_default_algo_params(trial):\n space_filter = trial.get('search_space', {}) #classifier_space if 
trial.get('classification', True) else regressor_space\n if len(space_filter) == 0:\n return trial\n\n space_params = space_filter.get(trial.get('algorithm_name'))\n algo_params = trial.get('algorithm_params')\n if space_params is None or algo_params is None:\n return trial\n\n for key, space_param in space_params.items():\n if algo_params.get(str(key)) is None:\n #print(space_param)\n if space_param.get(\"bounds\"):\n algo_params[str(key)] = space_param[\"bounds\"][0]\n elif space_param.get(\"values\"):\n algo_params[str(key)] = space_param[\"values\"][0]\n\n return trial\n\n # {'sklearn.ensemble.RandomForestClassifier': {\n # 'n_estimators': [100],\n # 'criterion': ['gini', 'entropy'],\n # 'max_features': np.arange(0.05, 1.01, 0.05).to_list(),\n # 'min_samples_split': range(2, 21),\n # 'min_samples_leaf': range(1, 21),\n # 'bootstrap': [True, False]\n # }}\n #\n # to\n #\n # {'sklearn.ensemble.RandomForestClassifier': {\n # 'n_estimators': { 'bounds': (100, 100), 'log': False, 'type': 'int', 'tunable': True },\n # 'max_features': { 'bounds': (0.05, 1.), 'log': False, 'type': 'float', 'tunable': True },\n # 'min_samples_split': { 'bounds': (2, 21), 'log': False, 'type': 'int', 'tunable': True },\n # 'min_samples_leaf': { 'bounds': (1, 21), 'log': False, 'type': 'int', 'tunable': True }\n # }}\n\n # {\n # 'n_estimators': [100],\n # 'criterion': ['gini', 'entropy'],\n # 'max_features': np.arange(0.05, 1.01, 0.05).to_list(),\n # 'min_samples_split': range(2, 21),\n # 'min_samples_leaf': range(1, 21),\n # 'bootstrap': [True, False]\n # }\n #\n # to\n #\n # {\n # 'n_estimators': { 'bounds': (100, 100), 'log': False, 'type': 'int', 'tunable': True },\n # 'max_features': { 'bounds': (0.05, 1.), 'log': False, 'type': 'float', 'tunable': True },\n # 'min_samples_split': { 'bounds': (2, 21), 'log': False, 'type': 'int', 'tunable': True },\n # 'min_samples_leaf': { 'bounds': (1, 21), 'log': False, 'type': 'int', 'tunable': True }\n # }\n @classmethod\n def _convert_parameters(cls, parameters):\n def convert_parameters(parameters):\n for k, v in parameters.items():\n vv = cls._convert_value(v)\n if vv is not None:\n yield k, vv\n return {k: v for k, v in convert_parameters(parameters)}\n\n # [100] to { 'bounds': (100, 100), 'log': False, 'type': 'int', 'tunable': True }\n # ['gini', 'entropy'] to None\n # np.arange(0.05, 1.01, 0.05).to_list() to { 'bounds': (0.05, 1.), 'log': False, 'type': 'float', 'tunable': True }\n # range(2, 21) to { 'bounds': (2, 21), 'log': False, 'type': 'int', 'tunable': True }\n # range(1, 21) to { 'bounds': (1, 21), 'log': False, 'type': 'int', 'tunable': True }\n # [True, False] to None\n @classmethod\n def _convert_value(cls, parameter):\n res = None\n type_ = type(parameter[0])\n #print(type_)\n if type_ is str or type_ is bool:\n res = {\n 'values': parameter,\n 'log': False,\n 'type': \"categorical\",\n 'tunable': False\n }\n else:\n if type_ is int:\n type_ = 'int'\n else:\n type_ = 'float'\n res = {\n 'bounds': (min(parameter), max(parameter)),\n 'log': False,\n 'type': type_,\n 'tunable': min(parameter)< max(parameter)\n }\n \n return res\n\n @classmethod\n def _cat_param(cls, values, tunable=True):\n return {\"values\": values,\n \"type\": \"categorical\",\n \"log\": False,\n \"tunable\": tunable}\n\n @classmethod\n def expand_dnn_search_space(cls, space):\n # Perform any dynamic changes to the algorithms search space before starting optimizers\n\n def _expand_dnn(subspace, suffix='Classifier'):\n # Add parameters for each layer\n if \"num_layers\" in subspace:\n 
num_layers_p = subspace.pop(\"num_layers\")\n num_layers_min = num_layers_p['bounds'][0]\n num_layers_max = num_layers_p['bounds'][1]\n\n if num_layers_max < 5:\n ans = {}\n for i in range(num_layers_min, num_layers_max + 1):\n addon = {\"hidden_layer_sizes_{}\".format(j): subspace[\"hidden_layer_sizes\"] for j in range(1, i + 1)}\n addon2 = {\"hidden_layer_act_{}\".format(j): subspace[\"activation\"] for j in range(1, i + 1)}\n addon.update(addon2)\n ans.update({\"auger_ml.algorithms.dnn.DNN{}Layer{}\".format(i, suffix): addon})\n #print(\"ans = {}\".format(ans))\n return ans\n\n return {}\n\n def _expand_mlp(subspace, suffix='Classifier'):\n # Add parameters for each layer\n if \"num_layers\" in subspace:\n num_layers_p = subspace.pop(\"num_layers\")\n num_layers_min = num_layers_p['bounds'][0]\n num_layers_max = num_layers_p['bounds'][1]\n\n if num_layers_max < 5:\n ans = {}\n for i in range(num_layers_min, num_layers_max + 1):\n addon = {\"hidden_layer_sizes_{}\".format(j): subspace[\"hidden_layer_sizes\"] for j in range(1, i + 1)}\n addon.update({\"activation\": subspace[\"activation\"]})\n ans.update({\"auger_ml.algorithms.mlp.MLP{}{}Layer\".format(suffix, i): addon})\n #print(\"ans = {}\".format(ans))\n return ans\n\n return {}\n\n # Short names\n n = {\n \"DNNC\": \"auger_ml.algorithms.dnn.DeepNeuralNetworkClassifier\",\n \"DNNR\": \"auger_ml.algorithms.dnn.DeepNeuralNetworkRegressor\",\n \"MLPC\": \"sklearn.neural_network.MLPClassifier\",\n \"MLPR\": \"sklearn.neural_network.MLPRegressor\"\n }\n\n space_c = copy.deepcopy(space)\n\n if n['DNNC'] in space_c:\n ss = space_c[n['DNNC']]\n space_c.pop(n['DNNC'])\n space_c.update(_expand_dnn(ss, 'Classifier'))\n if n['DNNR'] in space_c:\n ss = space_c[n['DNNR']]\n space_c.pop(n['DNNR'])\n space_c.update(_expand_dnn(ss, 'Regressor'))\n if n['MLPC'] in space:\n ss = space_c[n['MLPC']]\n space_c.pop(n['MLPC'])\n space_c.update(_expand_mlp(ss, 'Classifier'))\n if n['MLPR'] in space_c:\n ss = space_c[n['MLPR']]\n space_c.pop(n['MLPR'])\n space_c.update(_expand_mlp(ss, 'Regressor'))\n\n return space_c\n\n @staticmethod \n def check_time_series(search_space, options):\n if options.get(\"timeSeriesFeatures\", []): # return False if list is empty\n return search_space\n else:\n search_space = copy.deepcopy(search_space)\n # print('TEMPORARY')\n # return search_space\n algo_name = \"auger_ml.algorithms.ts_dnn.DeepTimeSeriesRegressor\"\n if algo_name in search_space:\n del search_space[algo_name]\n return search_space\n","sub_path":"auger_ml/search_space_conversions.py","file_name":"search_space_conversions.py","file_ext":"py","file_size_in_byte":8365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"155025683","text":"import re\r\n\r\nprint(\"Our Magical Calculator\")\r\n# \\n is an escape character sequence in Python\r\nprint(\"Type 'quit' to exit\\n\")\r\n\r\n# Holds the result of the previously calculated equation:\r\nprevious = 0\r\nrun = True\r\n\r\n\r\ndef performMath():\r\n global run\r\n global previous\r\n\r\n equation = \"\"\r\n\r\n if previous == 0:\r\n equation = input(\"Enter equation:\")\r\n else:\r\n equation = input(str(previous))\r\n\r\n if equation == 'quit':\r\n print(\"Goodbye Human!\")\r\n run = False\r\n else:\r\n equation = re.sub('[a-zA-Z,.:()\" \"]', '', equation)\r\n\r\n # Previous is a variable we are using to store the calculated result\r\n # eval() is a built-in function that calculates values based on a string\r\n if previous == 0:\r\n previous = 
eval(equation)\r\n else:\r\n previous = eval(str(previous) + equation)\r\n\r\n\r\n# This is the while loop that will allow us to continuously perform calculations:\r\nwhile run:\r\n performMath()","sub_path":"python/01PythonStackskillsCourse/01ProgrammingBasics/05 Calculator Project/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"639140012","text":"# -*- coding:utf-8 -*-\n\"\"\"\ncreated by server on 14-7-16下午5:48.\n\"\"\"\nfrom shared.db_opear.configs_data.common_item import CommonItem\nfrom shared.db_opear.configs_data import data_helper\nfrom shared.db_opear.configs_data.data_helper import parse\n\n\nclass StoneConfig(object):\n \"\"\"\n \"\"\"\n\n def __init__(self):\n self._stones = {}\n self._weight = []\n\n def parser(self, config_value):\n\n weights = 0\n for row in config_value:\n data_helper.convert_keystr2num(row.get('mainAttr'))\n data_helper.convert_keystr2num(row.get('minorAttr'))\n row[\"consume\"] = parse(row.get(\"consume\"))\n item = CommonItem(row)\n if item.weight:\n self._weight.append([item.id, weights+item.weight])\n self._stones[item.id] = item\n weights += item.weight\n\n return {'stones': self._stones, 'weight': self._weight}\n","sub_path":"shared/db_opear/configs_data/stone_config.py","file_name":"stone_config.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"214854805","text":"#!/usr/bin/env python3\nimport argparse\nfrom pathlib import Path\nimport cv2\nfrom anonymization import anonymize\nfrom util import blacken_img\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(\n description=\"Anonymizing delivery notes made easy!\"\n )\n parser.add_argument(\"--input\", required=True)\n parser.add_argument(\"--output\", default=None)\n\n return parser.parse_args()\n\n\ndef main():\n args = parse_arguments()\n input_path = Path(args.input)\n img = cv2.imread(str(input_path))\n rects = anonymize(img)\n blackened_img = blacken_img(img, rects)\n output_path = args.output\n cv2.imwrite(output_path, blackened_img)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"533566863","text":"from models import SSD300\n\n\nbox_configs = [\n {'layer_width': 38, 'layer_height': 38, 'num_prior': 3, 'min_size': 30.0,\n 'max_size': None, 'aspect_ratios': [1.0, 2.0, 1/2.0]},\n {'layer_width': 19, 'layer_height': 19, 'num_prior': 6, 'min_size': 60.0,\n 'max_size': 114.0, 'aspect_ratios': [1.0, 1.0, 2.0, 1/2.0, 3.0, 1/3.0]},\n {'layer_width': 10, 'layer_height': 10, 'num_prior': 6, 'min_size': 114.0,\n 'max_size': 168.0, 'aspect_ratios': [1.0, 1.0, 2.0, 1/2.0, 3.0, 1/3.0]},\n {'layer_width': 5, 'layer_height': 5, 'num_prior': 6, 'min_size': 168.0,\n 'max_size': 222.0, 'aspect_ratios': [1.0, 1.0, 2.0, 1/2.0, 3.0, 1/3.0]},\n {'layer_width': 3, 'layer_height': 3, 'num_prior': 6, 'min_size': 222.0,\n 'max_size': 276.0, 'aspect_ratios': [1.0, 1.0, 2.0, 1/2.0, 3.0, 1/3.0]},\n {'layer_width': 1, 'layer_height': 1, 'num_prior': 6, 'min_size': 276.0,\n 'max_size': 330.0, 'aspect_ratios': [1.0, 1.0, 2.0, 1/2.0, 3.0, 1/3.0]},\n ]\n\ndef get_prior_parameters(model):\n box_configurations = []\n for layer in model.layers:\n layer_type = layer.__class__.__name__\n if layer_type == 
'PriorBox':\n            layer_data = {}\n            layer_data['layer_width'] = layer.input_shape[1]\n            layer_data['layer_height'] = layer.input_shape[2]\n            layer_data['min_size'] = layer.min_size\n            layer_data['max_size'] = layer.max_size\n            layer_data['aspect_ratios'] = layer.aspect_ratios\n            box_configurations.append(layer_data)\n    return box_configurations\n\nimage_shape = (300, 300, 3)\nmodel = SSD300(image_shape)\nbox_configurations = get_prior_parameters(model)\n\n","sub_path":"src/old_code/get_prior_parameters_2.py","file_name":"get_prior_parameters_2.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"469851909","text":"#!/usr/bin/env python\n\n'''\nNuclear Electronic Orbital Hartree-Fock (NEO-HF)\n'''\n\nimport numpy\nfrom pyscf import gto\nfrom pyscf import scf\nfrom pyscf import lib\nfrom pyscf.lib import logger\nfrom pyscf.scf import rhf\nfrom pyscf.scf.hf import SCF\n\ndef init_guess_mixed(mol, mixing_parameter = numpy.pi/4):\n    ''' Generate density matrix with broken spatial and spin symmetry by mixing\n    HOMO and LUMO orbitals following ansatz in Szabo and Ostlund, Sec 3.8.7.\n    \n    psi_1a = numpy.cos(q)*psi_homo + numpy.sin(q)*psi_lumo\n    psi_1b = numpy.cos(q)*psi_homo - numpy.sin(q)*psi_lumo\n    \n    psi_2a = -numpy.sin(q)*psi_homo + numpy.cos(q)*psi_lumo\n    psi_2b = numpy.sin(q)*psi_homo + numpy.cos(q)*psi_lumo\n\n    Returns: \n        Density matrices, a list of 2D ndarrays for alpha and beta spins\n    '''\n    # opt: q, mixing parameter 0 < q < 2 pi\n    \n    #based on init_guess_by_1e\n    h1e = scf.hf.get_hcore(mol)\n    s1e = scf.hf.get_ovlp(mol)\n    mo_energy, mo_coeff = rhf.eig(h1e, s1e)\n    mf = scf.HF(mol)\n    mo_occ = mf.get_occ(mo_energy=mo_energy, mo_coeff=mo_coeff)\n\n    homo_idx=0\n    lumo_idx=1\n\n    for i in range(len(mo_occ)-1):\n        if mo_occ[i]>0 and mo_occ[i+1]<0:\n            homo_idx=i\n            lumo_idx=i+1\n\n    psi_homo=mo_coeff[:, homo_idx]\n    psi_lumo=mo_coeff[:, lumo_idx]\n    \n    Ca=numpy.zeros_like(mo_coeff)\n    Cb=numpy.zeros_like(mo_coeff)\n\n\n    #mix homo and lumo of alpha and beta coefficients\n    q=mixing_parameter\n\n    for k in range(mo_coeff.shape[0]):\n        if k == homo_idx:\n            Ca[:,k] = numpy.cos(q)*psi_homo + numpy.sin(q)*psi_lumo\n            Cb[:,k] = numpy.cos(q)*psi_homo - numpy.sin(q)*psi_lumo\n            continue\n        if k==lumo_idx:\n            Ca[:,k] = -numpy.sin(q)*psi_homo + numpy.cos(q)*psi_lumo\n            Cb[:,k] = numpy.sin(q)*psi_homo + numpy.cos(q)*psi_lumo\n            continue\n        Ca[:,k]=mo_coeff[:,k]\n        Cb[:,k]=mo_coeff[:,k]\n\n    dm =scf.UHF(mol).make_rdm1( (Ca,Cb), (mo_occ,mo_occ) )\n    return dm \n\n\n\nclass HF(SCF):\n    '''Hartree Fock for NEO\n    \n    Example:\n    \n    >>> from pyscf import neo\n    >>> mol = neo.Mole()\n    >>> mol.build(atom = 'H 0 0 0; F 0 0 0.917', basis = 'ccpvdz')\n    >>> mf = neo.HF(mol)\n    >>> mf.scf()\n    \n    '''\n\n    def __init__(self, mol, restrict = True):\n        SCF.__init__(self, mol)\n\n        self.mol = mol\n        self.dm_elec = None\n        self.dm_nuc = [None]*self.mol.nuc_num\n        self.verbose = 4\n\n        # set up the Hamiltonian for electrons\n        self.restrict = restrict\n        if restrict == True:\n            self.mf_elec = scf.RHF(self.mol.elec)\n            self.dm0_elec = self.mf_elec.init_guess_by_atom()\n        else:\n            self.mf_elec = scf.UHF(self.mol.elec)\n            #self.dm0_elec = init_guess_mixed(self.mol.elec)\n\n            self.dm0_elec = self.mf_elec.init_guess_by_atom()\n        self.mf_elec.get_hcore = self.get_hcore_elec\n\n    def get_hcore_nuc(self, mole):\n        'get the core Hamiltonian for quantum nucleus.'\n\n        i = mole.atom_index\n        mass = 1836.15267343 * self.mol.mass[i] # the mass of quantum nucleus in a.u.\n\n        h = 
mole.intor_symmetric('int1e_kin')/mass\n h -= mole.intor_symmetric('int1e_nuc')*self.mol._atm[i,0] # times nuclear charge\n\n # Coulomb interactions between quantum nucleus and electrons\n if isinstance(self.dm_elec, numpy.ndarray):\n if self.restrict == False: # unrestricted case\n h -= scf.jk.get_jk((mole, mole, self.mol.elec, self.mol.elec), self.dm_elec[0], scripts='ijkl,lk->ij', aosym ='s4') * self.mol._atm[i,0]\n h -= scf.jk.get_jk((mole, mole, self.mol.elec, self.mol.elec), self.dm_elec[1], scripts='ijkl,lk->ij', aosym ='s4') * self.mol._atm[i,0]\n else:\n h -= scf.jk.get_jk((mole, mole, self.mol.elec, self.mol.elec), self.dm_elec, scripts='ijkl,lk->ij', aosym ='s4') * self.mol._atm[i,0]\n\n # Coulomb interactions between quantum nuclei\n for j in range(len(self.dm_nuc)):\n k = self.mol.nuc[j].atom_index\n if k != i and isinstance(self.dm_nuc[j], numpy.ndarray):\n h += scf.jk.get_jk((mole, mole, self.mol.nuc[j], self.mol.nuc[j]), self.dm_nuc[j], scripts='ijkl,lk->ij') * self.mol._atm[i, 0] * self.mol._atm[k, 0] # times nuclear charge\n\n return h\n\n def get_occ_nuc(self, nuc_energy=None, nuc_coeff=None):\n 'label the occupation for quantum nucleus'\n\n e_idx = numpy.argsort(nuc_energy)\n e_sort = nuc_energy[e_idx]\n nuc_occ = numpy.zeros(nuc_energy.size)\n #nocc = self.mol.nuc_num\n nocc = 1\n nuc_occ[e_idx[:nocc]] = 1\n\n return nuc_occ\n\n def get_init_guess_nuc(self, mole, key=None):\n '''Generate initial guess density matrix for quantum nuclei from core hamiltonian\n\n Returns:\n Density matrix, 2D ndarray\n '''\n h1n = self.get_hcore_nuc(mole)\n s1n = mole.intor_symmetric('int1e_ovlp')\n nuc_energy, nuc_coeff = scf.hf.eig(h1n, s1n)\n nuc_occ = self.get_occ_nuc(nuc_energy, nuc_coeff)\n\n return scf.hf.make_rdm1(nuc_coeff, nuc_occ)\n \n def get_hcore_elec(self, mole=None):\n 'Get the core Hamiltonian for electrons in NEO'\n if mole == None:\n mole = self.mol.elec # the Mole object for electrons in NEO\n\n j = 0\n # Coulomb interactions between electrons and all quantum nuclei\n for i in range(len(self.dm_nuc)):\n if isinstance(self.dm_nuc[i], numpy.ndarray):\n j -= scf.jk.get_jk((mole, mole, self.mol.nuc[i], self.mol.nuc[i]), self.dm_nuc[i], scripts='ijkl,lk->ij', aosym='s4') * self.mol._atm[self.mol.nuc[i].atom_index, 0]\n\n return scf.hf.get_hcore(mole) + j\n\n def get_veff_nuc_bare(self, mol, dm, dm_last=None, vhf_last=None, hermi=1, vhfopt=None):\n 'NOTE: Only for single quantum proton system.'\n return numpy.zeros((mol.nao_nr(), mol.nao_nr()))\n\n def get_veff_nuc(self, mol, dm, dm_last=None, vhf_last=None, hermi=1, vhfopt=None):\n 'get the HF effective potential for quantum nuclei in NEO'\n\n Z2 = self.mol._atm[mol.atom_index, 0]**2\n\n if dm_last is None:\n vj, vk = scf.jk.get_jk(mol, (dm, dm), ('ijkl,ji->kl','ijkl,jk->il'), aosym='s8')\n return Z2*(vj - vk)\n else:\n ddm = numpy.asarray(dm) - numpy.asarray(dm_last)\n vj, vk = scf.jk.get_jk(mol, (ddm, ddm), ('ijkl,ji->kl','ijkl,jk->il'), aosym='s8')\n return Z2*(vj - vk) + numpy.asarray(vhf_last)\n\n def elec_nuc_coulomb(self, dm_elec, dm_nuc):\n 'the energy of Coulomb interactions between electrons and quantum nuclei'\n mol = self.mol\n jcross = 0\n for i in range(len(dm_nuc)):\n jcross -= scf.jk.get_jk((mol.elec, mol.elec, mol.nuc[i], mol.nuc[i]), dm_nuc[i], scripts='ijkl,lk->ij', aosym = 's4') * mol._atm[mol.nuc[i].atom_index, 0]\n if self.restrict == False:\n E = numpy.einsum('ij,ji', jcross, dm_elec[0] + dm_elec[1])\n else:\n E = numpy.einsum('ij,ji', jcross, dm_elec)\n logger.debug(self, 'Energy of e-n Coulomb 
interactions: %s', E)\n return E\n\n def nuc_nuc_coulomb(self, dm_nuc):\n 'the energy of Coulomb interactions between quantum nuclei'\n mol = self.mol\n E = 0\n for i in range(len(dm_nuc)):\n for j in range(len(dm_nuc)):\n if j != i:\n jcross = scf.jk.get_jk((mol.nuc[i], mol.nuc[i], mol.nuc[j], mol.nuc[j]), dm_nuc[j], scripts='ijkl,lk->ij', aosym='s4') * mol._atm[mol.nuc[i].atom_index, 0] * mol._atm[mol.nuc[j].atom_index, 0]\n E += numpy.einsum('ij,ji', jcross, dm_nuc[i])\n\n logger.debug(self, 'Energy of n-n Comlomb interactions: %s', E*.5) # double counted\n return E*.5 \n\n def energy_tot(self, mf_elec, mf_nuc):\n 'Total energy of NEO'\n mol = self.mol\n E_tot = 0\n\n self.dm_elec = mf_elec.make_rdm1()\n for i in range(len(mf_nuc)):\n self.dm_nuc[i] = mf_nuc[i].make_rdm1()\n\n h1e = mf_elec.get_hcore(mf_elec.mol)\n if self.restrict == False:\n e1 = numpy.einsum('ij,ji', h1e, self.dm_elec[0] + self.dm_elec[1])\n else:\n e1 = numpy.einsum('ij,ji', h1e, self.dm_elec)\n logger.debug(self, 'Energy of e1: %s', e1)\n\n vhf = mf_elec.get_veff(mf_elec.mol, self.dm_elec)\n if self.restrict == False:\n e_coul = (numpy.einsum('ij,ji', vhf[0], self.dm_elec[0]) +\n numpy.einsum('ij,ji', vhf[1], self.dm_elec[1])) * .5 \n else:\n e_coul = numpy.einsum('ij,ji', vhf, self.dm_elec)\n logger.debug(self, 'Energy of e-e Coulomb interactions: %s', e_coul)\n\n E_tot += mf_elec.energy_elec(dm = self.dm_elec, h1e = h1e, vhf = vhf)[0] \n\n for i in range(len(mf_nuc)):\n index = mf_nuc[i].mol.atom_index\n h1n = mf_nuc[i].get_hcore(mf_nuc[i].mol)\n n1 = numpy.einsum('ij,ji', h1n, self.dm_nuc[i])\n logger.debug(self, 'Energy of %s: %s', self.mol.atom_symbol(index), n1)\n E_tot += n1\n\n E_tot = E_tot - self.elec_nuc_coulomb(self.dm_elec, self.dm_nuc) - self.nuc_nuc_coulomb(self.dm_nuc) + mf_elec.energy_nuc() # substract repeatedly counted terms\n\n return E_tot\n\n\n def scf(self, conv_tol = 1e-7, max_cycle = 60, dm0_elec = None, dm0_nuc = None):\n 'self-consistent field driver for NEO'\n\n self.dm_elec = self.mf_elec.init_guess_by_atom()\n\n # set up the Hamiltonian for each quantum nucleus\n self.mf_nuc = [None] * self.mol.nuc_num\n for i in range(len(self.mol.nuc)):\n self.mf_nuc[i] = scf.RHF(self.mol.nuc[i])\n self.mf_nuc[i].get_init_guess = self.get_init_guess_nuc\n self.mf_nuc[i].get_hcore = self.get_hcore_nuc\n self.mf_nuc[i].get_veff = self.get_veff_nuc_bare\n self.mf_nuc[i].get_occ = self.get_occ_nuc\n self.dm_nuc[i] = self.get_init_guess_nuc(self.mol.nuc[i])\n\n self.mf_elec.scf(self.dm0_elec)\n self.dm_elec = self.mf_elec.make_rdm1()\n\n for i in range(len(self.mf_nuc)):\n self.mf_nuc[i].kernel(dump_chk=False)\n self.dm_nuc[i] = self.mf_nuc[i].make_rdm1()\n\n # update density matrix for electrons and quantum nuclei\n #self.dm_elec = self.mf_elec.make_rdm1()\n #for i in range(len(self.mf_nuc)):\n # self.dm_nuc[i] = self.mf_nuc[i].make_rdm1()\n\n E_tot = self.energy_tot(self.mf_elec, self.mf_nuc)\n logger.info(self, 'Initial total Energy of NEO: %.15g\\n' %(E_tot))\n\n scf_conv = False\n cycle = 0\n\n while not scf_conv:\n cycle += 1\n if cycle > max_cycle:\n raise RuntimeError('SCF is not convergent within %i cycles' %(max_cycle))\n\n E_last = E_tot\n self.mf_elec.scf(self.dm0_elec)\n self.dm_elec = self.mf_elec.make_rdm1()\n for i in range(len(self.mf_nuc)):\n self.mf_nuc[i].kernel(dump_chk=False)\n self.dm_nuc[i] = self.mf_nuc[i].make_rdm1()\n\n # update density matrix for electrons and quantum nuclei\n #self.dm_elec = self.mf_elec.make_rdm1()\n #for i in range(len(self.mf_nuc)):\n # self.dm_nuc[i] = 
self.mf_nuc[i].make_rdm1()\n\n E_tot = self.energy_tot(self.mf_elec, self.mf_nuc)\n logger.info(self, 'Cycle %i Total Energy of NEO: %s\\n' %(cycle, E_tot))\n if abs(E_tot - E_last) < conv_tol:\n scf_conv = True\n logger.debug(self, 'The eigenvalues of the electrons:\\n%s', self.mf_elec.mo_energy)\n\n kinetic_energy = 0\n for i in range(len(self.mf_nuc)):\n logger.debug(self, 'The eigenvalues of the quantum nucleus:\\n%s', self.mf_nuc[i].mo_energy)\n logger.debug(self, 'The coefficents of the quantum nucleus:\\n%s', self.mf_nuc[i].mo_coeff)\n k = numpy.einsum('ij,ji', self.mol.nuc[i].intor_symmetric('int1e_kin')/(1836.15267343 * self.mol.mass[self.mol.nuc[i].atom_index]), self.dm_nuc[i])\n kinetic_energy += k\n x = numpy.einsum('xij,ji->x', self.mol.nuc[i].intor_symmetric('int1e_r', comp=3), self.dm_nuc[i])\n logger.debug(self, 'Expectational position %s' %(x))\n\n logger.debug(self, 'after substracting kinetic energy: %.15g', E_tot - k) \n logger.note(self, 'converged NEO energy = %.15g', E_tot)\n return E_tot\n","sub_path":"pyscf/neo/hf.py","file_name":"hf.py","file_ext":"py","file_size_in_byte":12588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"36645869","text":"\n# coding: utf-8\n\n# In[1]:\n\n\n# Dependencies\nimport tweepy\nimport json\nimport numpy as np\nfrom config import (consumer_key, \n consumer_secret, \n access_token, \n access_token_secret)\n\n\n# In[2]:\n\n\n# Import and Initialize Sentiment Analyzer\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\nanalyzer = SentimentIntensityAnalyzer()\n\n\n# In[3]:\n\n\n# Setup Tweepy API Authentication\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\napi = tweepy.API(auth, parser=tweepy.parsers.JSONParser())\n\n\n# In[27]:\n\n\n# Target Search Term\ntarget_term = \"@UN_News_Centre\"\n\n\n# In[28]:\n\n\n# Lists to hold sentiments\ncompound_list = []\npositive_list = []\nnegative_list = []\nneutral_list = []\n\n\n# In[29]:\n\n\n# Grab 25 tweets\npublic_tweets = api.search(target_term, count=25, result_type=\"recent\")\n\n\n# In[30]:\n\n\n# Loop through all tweets\nfor tweet in public_tweets[\"statuses\"]:\n # Run Vader Analysis on each tweet\n results = analyzer.polarity_scores(tweet[\"text\"])\n compound = results[\"compound\"]\n positive = results[\"pos\"]\n negative = results[\"neg\"]\n neutral = results[\"neu\"]\n\n # Add each value to the appropriate array\n compound_list.append(compound)\n positive_list.append(positive)\n negative_list.append(negative)\n neutral_list.append(neutral)\n\n\n# In[31]:\n\n\n# Store the Average Sentiments\nsentiments = {\n \"Compound\": np.mean(compound_list),\n \"Positive\": np.mean(positive_list),\n \"Negative\": np.mean(negative_list),\n \"Neutral\": np.mean(neutral_list)\n}\n\n\n# In[32]:\n\n\n# Print the Sentiments\nprint(sentiments)\n\n","sub_path":"01 Course Activities/0925 - Bot and Module/02-Stu_Recap_Tweet_Analysis/Unsolved/BreakingNews.py","file_name":"BreakingNews.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"514817966","text":"# -*- coding: utf-8 -*-\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport codecs\nfrom decimal import Decimal\nimport matplotlib\nimport pandas as pd\nfrom WellDataPlot.ReadData import readDataFromXlsx\nfrom numpy import trapz\n\ndef save_figure(folder_name, figure_name):\n plt.savefig(folder_name + \"/\" + 
figure_name, transparent=False, dpi=150, bbox_inches=\"tight\")\n\n\n# 閉塞と非閉塞の間の境界線を計算する関数\ndef get_boundary_line(normal_mus, normal_cohs, hang_up_mus, hange_up_cohs):\n mus_used = []\n boundary_line_mus = []\n boundary_line_cohs = []\n\n hang_up_mus, hange_up_cohs, normal_mus, normal_cohs = normal_mus, normal_cohs, hang_up_mus, hange_up_cohs\n for normal_mu in normal_mus:\n if normal_mu in mus_used or not (normal_mu == normal_mu): continue\n if normal_mu >4: continue\n mus_used.append(normal_mu)\n\n # 获取mu位置并计算这些位置中mu对应的coh最大值\n mu_pos = [idx for idx, val in enumerate(normal_mus) if val == normal_mu]\n chos = [normal_cohs[idx] for idx in mu_pos]\n if len(chos) == 0:\n print(chos)\n max_normal_coh = min(chos)\n # 获取mu位置并计算这些位置中mu对应的coh最小值\n if not normal_mu in hang_up_mus:\n min_hang_up_coh = max_normal_coh\n else:\n pos = [idx for idx, val in enumerate(hang_up_mus) if val == normal_mu]\n chos = [hange_up_cohs[idx] for idx in pos]\n min_hang_up_coh = max(chos)\n # 计算平均coh\n average_coh = (max_normal_coh + min_hang_up_coh) / 2\n # 加入list\n boundary_line_mus.append(normal_mu)\n boundary_line_cohs.append(average_coh)\n \n boundary_line_mus, boundary_line_cohs = zip(*sorted(zip(boundary_line_mus, boundary_line_cohs)))\n \n area = trapz(boundary_line_cohs,boundary_line_mus,0.01)\n print(area)\n print(boundary_line_mus)\n print(boundary_line_cohs)\n return boundary_line_mus, boundary_line_cohs , area\n\n\ndef get_min_coh(boundary_line_mus, boundary_line_cohs):\n chos = []\n for idx, mu in enumerate(boundary_line_mus):\n if mu >= 4:\n chos.append(boundary_line_cohs[idx])\n if len(chos) == 0: return 0\n return sum(chos) / len(chos)\n\ndef plot_exit_hang_up_figure(root_dir, filename, sheet_name, xrange=[-1,15e4],yrange=[0,4],title=None,titlesize=30,hangeup_text_position=(4e4, 2.25)):\n ax = plt.gca()\n \n d1, d2, d3 = readDataFromXlsx(filename, sheet_name, root_dir)\n d1 = np.array(d1)\n d2 = np.array(d2)\n d3 = np.array(d3)\n \n\n # 绘制散点\n plt.scatter(d1[:, 1], d1[:, 0], c=\"b\", s=120 , label=\"None Hang Up\")\n plt.scatter(d2[:, 1], d2[:, 0], marker=\"s\", c=\"r\", s=120 , label=\"Hang Up\" )\n if len(d3) > 0:\n plt.scatter(d3[:, 1], d3[:, 0], marker=\"^\", c=\"g\", s=120, label=\"Hang Up(Exit)\")\n\n # 计算鼻塞和非闭塞的边界线,并绘制曲线\n if len(d3) > 0:\n boundary_line_mus, boundary_line_cohs ,area = get_boundary_line(d1[:, 0], d1[:, 1], np.append(d2[:, 0], d3[:, 0]),\n np.append(d2[:, 1], d3[:, 1]))\n else: \n boundary_line_mus, boundary_line_cohs ,area = get_boundary_line(d1[:, 0], d1[:, 1], d2[:, 0],d2[:, 1])\n plt.plot(boundary_line_cohs, boundary_line_mus)\n #plt.text(1e4,0.4,str(area),size=12)\n # *** 图表的格式调整 ***\n # 指定x轴为科学计数法\n plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0), useMathText=True)\n # 指定图表的x轴y轴坐标的取值范围\n plt.axis(xrange+yrange)\n # x坐标轴名字\n plt.xlabel(r\"Cohesion $\\mathrm{N/m^2}$\", size=20)\n # y坐标轴名字\n plt.ylabel(r\"Friction Coefficient\", size=20)\n # 调整xy坐标轴标签的文字大小\n plt.xticks(fontsize=20)\n plt.yticks(fontsize=20)\n # 调整x轴启用科学计数法后,10^n文字过小问题\n plt.gca().xaxis.get_offset_text().set_fontsize(15)\n # 设置图表title\n # if title is not None: plt.title(title, size=titlesize)\n # 绘制图表格子\n plt.grid()\n # 绘制图表中右边的Hang_Up Region文字\n #plt.text(hangeup_text_position[0], hangeup_text_position[1], r'Hang-Up Region', size=20)\n #mark_text = matplotlib.lines.Line2D([], [],color='white', marker='.', label=title)\n #plt.legend(handles=[mark_text],handlelength=0,fontsize=15)\n return boundary_line_mus, boundary_line_cohs\n\ndef plot_hang_up_figure(root_dir, filename, sheet_name, min_coh=None 
,xrange=[0,15e4],yrange=[0,10],title=None,titlesize=30,hangeup_text_position=(8e4,6),anotate_position=(5e4,4)):\n boundary_line_mus, boundary_line_cohs = plot_exit_hang_up_figure(root_dir, filename, sheet_name,xrange=xrange,yrange=yrange,title=title,titlesize=titlesize,hangeup_text_position=hangeup_text_position)\n\n # 计算界限粘着力并绘制直线和Minium Cohesion的标注\n if min_coh == None:\n min_coh = get_min_coh(boundary_line_mus, boundary_line_cohs)\n elif min_coh == \"off\":\n pass\n if not (min_coh == \"off\"):\n plt.axvline(min_coh, ls='-', color='g', linewidth=3)\n \n anotate_text = '$Minium\\ Cohesion$\\n$' + \"{:.2e}\".format(Decimal(min_coh)).replace(\"e+\", r\"\\times10^{\") + '}$'\n plt.annotate(anotate_text, xy=(min_coh, 0), xytext=anotate_position,\n arrowprops=dict(facecolor='black', shrink=0.01, width=1), fontsize=12)\n\n return\n df = read_mincho_map(root_dir + \"/\" + \"min_coh_map.csv\")\n\n if df.H0[df.H0 == H].size < 1:\n df2 = pd.DataFrame({\"H0\": [0.8], \"min_coh\": [666]})\n df.append(df2)\n print(df)\n return df\n\n\n\n\ndef plot_experiment_resault(filename, title):\n # 从csv文件中读取数据\n f = codecs.open(filename, encoding='utf-8')\n circle_well_D, circle_well_hangup_num, circle_well_normal_num, rect_well_W, rect_well_hangup_num, rect_well_nonrmal_num = np.genfromtxt(\n f, delimiter=\",\", usecols=(0, 1, 2, 3, 4, 5), unpack=True, skip_header=2)\n f.close()\n\n point_size = [100, 150, 300]\n plt.scatter(circle_well_D, circle_well_hangup_num, s=point_size, alpha=0.5, c=\"r\")\n plt.scatter(circle_well_D, rect_well_hangup_num, alpha=0.5, marker=\"s\", s=point_size)\n\n fp = matplotlib.font_manager.FontProperties(\"Yu Mincho\", size=20)\n plt.axis([78, 103, -0.5, 10.5])\n plt.tight_layout()\n plt.xlabel(r\"内径 $mm$\", fontproperties=fp)\n plt.ylabel(r\"閉塞回数\", fontproperties=fp)\n plt.xticks(fontsize=20)\n plt.yticks(fontsize=20)\n plt.title(title, size=20)\n plt.grid()\n\n\ndef read_mincho_map(fname):\n try:\n df: pd.DataFrame = pd.read_csv(fname)\n except FileNotFoundError as FN:\n print(\"No csv File Found, Create A New DataFrame\")\n df = pd.DataFrame({\"H0\": [0.1, 0.2], \"min_coh\": [200, 300]})\n return df","sub_path":"研究计算/WellDataPlot/plotdata.py","file_name":"plotdata.py","file_ext":"py","file_size_in_byte":6803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"320643992","text":"# Copyright (c) Microsoft. All rights reserved.\n\n# Licensed under the MIT license. 
See LICENSE.md file in the project root\n# for full license information.\n# ==============================================================================\n\n# `pip install easydict` if you don't have it\nfrom easydict import EasyDict as edict\n\n__C = edict()\n__C.DATA = edict()\ncfg = __C\n\n# data set config\n__C.DATA.DATASET = \"NucleiVGG16\"\n__C.DATA.MAP_FILE_PATH = \"/scratch/s124262\"\n__C.DATA.CLASS_MAP_FILE = \"class_map.txt\"\n__C.DATA.TRAIN_MAP_FILE = \"train_256_20XX/trainval_nuclei.txt\"\n__C.DATA.TRAIN_ROI_FILE = \"train_256_20XX/trainval_nuclei_roi.txt\"\n__C.DATA.VAL_MAP_FILE = \"test_256_20XX/valval_nuclei.txt\"\n__C.DATA.VAL_ROI_FILE = \"test_256_20XX/valval_nuclei_roi.txt\"\n__C.DATA.TEST_MAP_FILE = \"test_256_20XX/testval_nuclei.txt\"\n__C.DATA.TEST_ROI_FILE = \"test_256_20XX/testval_nuclei_roi.txt\"\n__C.DATA.NUM_TRAIN_IMAGES = 347400\n__C.DATA.NUM_TEST_IMAGES = 30200\n__C.DATA.NUM_VAL_IMAGES = 20100\n__C.DATA.EPOCH_SIZE = 2000\n__C.DATA.VAL_SIZE = 1000\n# __C.DATA.MAP_FILE_PATH = \"../SampledTest\"\n# __C.DATA.CLASS_MAP_FILE = \"class_map.txt\"\n# __C.DATA.TRAIN_MAP_FILE = \"trainval_nuclei.txt\"\n# __C.DATA.TRAIN_ROI_FILE = \"trainval_nuclei_roi.txt\"\n# __C.DATA.VAL_MAP_FILE = \"valval_nuclei.txt\"\n# __C.DATA.VAL_ROI_FILE = \"valval_nuclei_roi.txt\"\n# __C.DATA.TEST_MAP_FILE = \"testval_nuclei.txt\"\n# __C.DATA.TEST_ROI_FILE = \"testval_nuclei_roi.txt\"\n# __C.DATA.NUM_TRAIN_IMAGES = 320\n# __C.DATA.NUM_TEST_IMAGES = 96\n# __C.DATA.NUM_VAL_IMAGES = 64\n# __C.DATA.EPOCH_SIZE = 320\n# __C.DATA.VAL_SIZE = 64\n__C.DATA.PROPOSAL_LAYER_SCALES = [2, 3, 4]\n\n# overwriting proposal parameters for Fast R-CNN\n# minimum relative width/height of an ROI\n__C.roi_min_side_rel = 0.01\n# maximum relative width/height of an ROI\n__C.roi_max_side_rel = 0.04\n# minimum relative area of an ROI\n__C.roi_min_area_rel = 2 * __C.roi_min_side_rel * __C.roi_min_side_rel\n# maximum relative area of an ROI\n__C.roi_max_area_rel = 0.33 * __C.roi_max_side_rel * __C.roi_max_side_rel\n# maximum aspect ratio of an ROI vertically and horizontally\n__C.roi_max_aspect_ratio = 1.0\n\n# For this data set use the following lr factor for Fast R-CNN:\n# __C.CNTK.LR_FACTOR = 10.0","sub_path":"Nuclei-CNTK/utils/configs/Nuclei_configVGG16.py","file_name":"Nuclei_configVGG16.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"232856533","text":"\n#array might be rotated\n#contains duplicates\n\n#so we want to figure out whether or not the target is in the left or right half,\n#but this is dependant on whether or not the minimum value is on the left or the right\n\n#what is really cool here is if we can separate out the worst case and the best case here\n\ndef search(self, nums: List[int], target: int) -> bool:\n if nums == []:\n return False\n\n # worst case\n left = 0\n right = len(nums) - 1\n if nums[left] == nums[right]:\n # loop\n i = left\n while i <= right:\n if nums[i] == target:\n return True\n i += 1\n return False\n\n # find the minimum value with binary search\n\n left = 0\n right = len(nums) - 1\n middle = (left + right) // 2\n\n while left <= right:\n if left == right and right == middle:\n break\n middle = (left + right) // 2\n if nums[right] >= nums[middle]:\n right = middle\n else:\n left = middle + 1\n\n minimum = middle\n print(minimum)\n left = 0\n right = len(nums) - 1\n\n # standard binary search but shifted\n while left <= right:\n middle = (left + right) // 2\n if nums[(middle + minimum) 
% len(nums)] == target:\n return True\n elif nums[(middle + minimum) % len(nums)] < target:\n left = middle + 1\n else:\n right = middle - 1\n return False","sub_path":"codebreakers/binarysearch_rotated_array2.py","file_name":"binarysearch_rotated_array2.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"562428106","text":"import csv\nimport matplotlib.pyplot as plt\n\ntempo = []\nv1 = []\nv2 = []\n\narq = \"00A\"\n\nwith open(\"/home/hitalo/Projects/estagio/bancoDeDados-MIT/\"+arq+\".csv\") as csvFile:\n reader = csv.DictReader(csvFile)\n for row in reader:\n tempo.append(float(row[\"tempo\"]))\n #v1.append(float(row[\"v1\"]))\n v2.append(float(row[\"val\"]))\n\nplt.subplot(2,1,1)\nplt.plot(tempo, v1)\nplt.xlabel('tempo')\nplt.ylabel('v1') \n\nplt.subplot(2,1,2)\nplt.plot(tempo, v2)\nplt.xlabel('tempo')\nplt.ylabel('val')\n\nplt.show()\n\n","sub_path":"bancoDeDados-MIT/plotDb.py","file_name":"plotDb.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"336729993","text":"import numpy as np\na = np.loadtxt('four.embeddings')\nb=np.zeros(shape=(5313,101))\nfor i in range(5313):\n\tfor j in range(5313):\n\t\tif a[j][0]==i:\n\t\t\tb[i]=a[j]\n\t\t\tbreak\nd = b.astype(np.float32)\nnp.savetxt('facebook-sorted.txt',d)\nc=np.shape(b)\nprint(b)\nprint(c)\nprint(d)\n","sub_path":"SNNA/example_preprocessing/sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"325431798","text":"from Jumpscale import j\n\nfrom .NACL import NACL\nimport nacl.secret\nimport nacl.utils\nimport base64\nimport hashlib\nfrom nacl.public import PrivateKey, SealedBox\nimport fakeredis\n\nJSBASE = j.baseclasses.object\n\n\nclass NACLFactory(j.baseclasses.object):\n __jslocation__ = \"j.data.nacl\"\n\n def _init(self, **kwargs):\n self._default = None\n\n # check there is core redis\n if isinstance(j.core.db, fakeredis.FakeStrictRedis):\n j.clients.redis.core_get()\n\n def configure(self, name=\"default\", privkey_words=None, sshagent_use=None, generate=False, interactive=True):\n \"\"\"\n secret is used to encrypt/decrypt the private key when stored on local filesystem\n privkey_words is used to put the private key back\n\n will ask for the details of the configuration\n :param: sshagent_use is True, will derive the secret from the private key of the ssh-agent if only 1 ssh key loaded\n secret needs to be None at that point\n :param: generate if True and interactive is False then will autogenerate a key\n\n :return: None\n\n \"\"\"\n n = self.get(name=name, load=False)\n n.configure(privkey_words=privkey_words, sshagent_use=sshagent_use, generate=generate, interactive=interactive)\n return n\n\n def get(self, name=\"default\", load=True):\n \"\"\"\n \"\"\"\n n = NACL(name=name)\n if load:\n n.load()\n return n\n\n @property\n def default(self):\n if self._default is None:\n self._default = self.get()\n return self._default\n\n def test(self):\n \"\"\"\n kosmos 'j.data.nacl.test()'\n \"\"\"\n cl = self.default # get's the default location & generate's keys\n\n data = b\"something\"\n r = cl.sign(data)\n\n assert cl.verify(data, r)\n assert cl.verify(b\"a\", r) == False\n\n pubsignkey32 = cl.verify_key.encode()\n\n assert cl.verify(data, r, pubsignkey32)\n\n a = cl.encryptSymmetric(\"something\")\n b = 
cl.decryptSymmetric(a)\n\n assert b == b\"something\"\n\n a = cl.encryptSymmetric(\"something\")\n b = cl.decryptSymmetric(a)\n assert b == b\"something\"\n\n a = cl.encryptSymmetric(\"something\")\n b = cl.decryptSymmetric(a)\n assert b == b\"something\"\n\n a = cl.encryptSymmetric(b\"something\")\n b = cl.decryptSymmetric(a)\n assert b == b\"something\"\n\n # now with hex\n a = cl.encryptSymmetric(b\"something\", hex=True)\n b = cl.decryptSymmetric(a, hex=True)\n assert b == b\"something\"\n\n a = cl.encrypt(b\"something\")\n b = cl.decrypt(a)\n\n assert b == b\"something\"\n\n a = cl.encrypt(\"something\") # non binary start\n b = cl.decrypt(a)\n\n # now with hex\n a = cl.encrypt(\"something\", hex=True) # non binary start\n b = cl.decrypt(a, hex=True)\n assert b == b\"something\"\n\n # test asymetric encryptoin between 2 users\n bob_sk = nacl.public.PrivateKey.generate()\n\n # alice send a message to bob, encrypt the message with the public of bob\n message = b\"hello world\"\n encrypted = cl.encrypt(message, public_key=bob_sk.public_key)\n # bob decrypt the message with its private key\n decrypted = cl.decrypt(encrypted, private_key=bob_sk)\n assert message == decrypted\n # ensure no one else can read it\n foo_sk = nacl.public.PrivateKey.generate()\n try:\n cl.decrypt(encrypted, foo_sk)\n raise j.exceptions.Base(\"should have given error\")\n except:\n pass\n\n # LETS NOW TEST THAT WE CAN START FROM WORDS\n\n words = j.data.nacl.default.words\n j.sal.fs.copyDirTree(\"/sandbox/cfg/keys/default\", \"/sandbox/cfg/keys/default_backup\") # make backup\n j.sal.fs.remove(\"/sandbox/cfg/keys/default\")\n try:\n self.default.reset()\n try:\n self.default.load()\n raise j.exceptions.Base(\"should have given error\")\n except:\n pass\n\n self.default._keys_generate(words=words)\n self.default.load()\n\n b = cl.decrypt(a, hex=True)\n assert b == b\"something\"\n\n finally:\n j.sal.fs.copyDirTree(\"/sandbox/cfg/keys/default_backup\", \"/sandbox/cfg/keys/default\")\n j.sal.fs.remove(\"/sandbox/cfg/keys/default_backup\")\n\n self._log_info(\"TEST OK\")\n print(\"TEST OK\")\n\n def test_perf(self):\n \"\"\"\n kosmos 'j.data.nacl.test_perf()'\n \"\"\"\n\n cl = self.default # get's the default location & generate's keys\n data = b\"something\"\n\n nr = 10000\n j.tools.timer.start(\"signing\")\n for i in range(nr):\n p = str(i).encode()\n r = cl.sign(data + p)\n j.tools.timer.stop(i)\n\n nr = 10000\n j.tools.timer.start(\"encode and verify\")\n for i in range(nr):\n p = str(i).encode()\n r = cl.sign(data + p)\n assert cl.verify(data + p, r)\n j.tools.timer.stop(i)\n\n nr = 10000\n data2 = data * 20\n j.tools.timer.start(\"encryption/decryption assymetric\")\n for i in range(nr):\n a = cl.encrypt(data2)\n b = cl.decrypt(a)\n assert data2 == b\n j.tools.timer.stop(i)\n","sub_path":"JumpscaleCore/data/nacl/NACLFactory.py","file_name":"NACLFactory.py","file_ext":"py","file_size_in_byte":5376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"279467664","text":"from functools import partial, wraps\n\n\nclass Assert:\n\n def __init__(self):\n self.messages_error = []\n self.messages_warning = []\n\n def __call__(self, expression, error_message):\n if not expression:\n self.messages_error.append(error_message)\n\n def warn(self, expression, warning_message):\n if not expression:\n self.messages_warning.append(warning_message)\n\n def __len__(self):\n return len(self.messages_error)\n\n def __iter__(self):\n for msg in self.messages_error:\n yield msg\n\n 
def __repr__(self):\n        return 'Assert object with {} error messages'.format(len(self))\n\n    def __str__(self):\n        if not self.messages_error:\n            str_ = 'No errors found'\n        elif len(self.messages_error) == 1:\n            str_ = '1 error found: {}'.format(self.messages_error[0])\n        else:\n            str_ = ('{} errors found: \\n * {}'\n                    .format(len(self.messages_error),\n                            '\\n * '.join(self.messages_error)))\n\n        if len(self.messages_warning) == 1:\n            str_ += '\\n\\n 1 warning: {}'.format(self.messages_warning[0])\n        elif len(self.messages_warning) > 1:\n            str_ += ('\\n\\n {} warnings: \\n * {}'\n                     .format(len(self.messages_warning),\n                             '\\n * '.join(self.messages_warning)))\n\n        return str_\n\n\ndef validator(fn):\n\n    # TODO: verify fn signature\n\n    @wraps(fn)\n    def wrapped(**kwargs):\n        if 'assert_' in kwargs:\n            raise TypeError('Do not include the assert_ parameter in '\n                            'validator functions')\n\n        if 'data' in kwargs:\n            raise TypeError('Do not include the data parameter in '\n                            'validator functions')\n\n        return partial(fn, **kwargs)\n\n    return wrapped\n\n\n@validator\ndef validate_schema(assert_, data, schema, error_on_extra_cols=False):\n    \"\"\"Check if a data frame complies with a schema\n    \"\"\"\n    cols = set(data.columns)\n    expected = set(schema)\n    missing = expected - cols\n    unexpected = cols - expected\n\n    msg = 'validate_schema: missing columns {missing}.'.format(missing=missing)\n    assert_(not missing, msg)\n\n    msg = ('validate_schema: unexpected columns {unexpected}'\n           .format(unexpected=unexpected))\n    caller = assert_ if error_on_extra_cols else assert_.warn\n    caller(not unexpected, msg)\n\n    # validate column types (as many as you can)\n    dtypes = data.dtypes.astype(str).to_dict()\n\n    for name, dtype in dtypes.items():\n        expected = schema.get(name)\n\n        if expected is not None:\n            msg = ('validate_schema: wrong dtype for column \"{name}\". '\n                   'Expected: \"{expected}\". Got: \"{dtype}\"'\n                   .format(name=name, expected=expected, dtype=dtype))\n            assert_(dtype == expected, msg)\n\n    return assert_\n\n\n@validator\ndef validate_values(assert_, data, values):\n    data_cols = data.columns\n\n    for column, (kind, params) in values.items():\n        if column not in data_cols:\n            assert_.warn(False,\n                         ('validate_values: declared spec for column \"{}\" but'\n                          ' it does not appear in the data').format(column))\n        elif kind == 'unique':\n            expected = set(params)\n            unique = set(data[column].unique())\n            msg = ('validate_values: expected unique values of \"{}\" to be a'\n                   ' subset of {}, got: {}'\n                   .format(column, expected, unique))\n            assert_(expected >= unique, msg)\n        elif kind == 'range':\n            if len(params) != 2:\n                raise ValueError('If kind is range, you must provide two '\n                                 'values, got {}'.format(params))\n            min_expected, max_expected = params\n            min_ = data[column].min()\n            max_ = data[column].max()\n            msg = ('validate_values: expected range of \"{}\" to be ({}, {}), '\n                   'got ({}, {})'\n                   .format(column, min_expected, max_expected, min_, max_))\n            assert_(min_expected <= min_ and max_ <= max_expected, msg)\n        else:\n            raise ValueError('Got invalid kind, must be \"unique\" or \"range\"')\n\n\ndef data_frame_validator(df, validators):\n    \"\"\"\n\n    Examples\n    --------\n    >>> from ploomber.validators import data_frame_validator\n    >>> from ploomber.validators import validate_schema, validate_values\n    >>> import pandas as pd\n    >>> import numpy as np\n    >>> df = pd.DataFrame({'x': np.random.rand(3), 'y': np.random.rand(3),\n    ...                    'z': [0, 1, 2], 'i': ['a', 'b', 'c']})\n    >>> data_frame_validator(df,\n    ...                      [validate_schema(schema={'x': 'int', 'z': 'int'}),\n    ...                       
validate_values(values={'z': ('range', (0, 1)),\n ... 'i': ('unique', {'a'}),\n ... 'j': ('unique', {'b'})}\n ... )])\n \"\"\"\n assert_ = Assert()\n\n for validator in validators:\n validator(assert_=assert_, data=df)\n\n if len(assert_):\n raise AssertionError(str(assert_))\n","sub_path":"src/ploomber/validators/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":5261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"155891787","text":"import torch\nimport torch.nn as nn\n\nclass UNet(nn.Module):\n\n def __init__(self):\n super(UNet, self).__init__()\n self.conv11 = nn.Conv2d(in_channels=1, \\\n out_channels=64, \\\n kernel_size=3, \\\n stride=1, padding=1, bias=True)\n self.conv12 = nn.Conv2d(64, 64, 3, 1, padding=1, bias=True)\n self.bn1 = nn.BatchNorm2d(64)\n self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, return_indices=False, ceil_mode=False)\n\n self.conv21 = nn.Conv2d(64, 128, 3, 1, padding=1, bias=True)\n self.conv22 = nn.Conv2d(128, 128, 3, 1, padding=1, bias=True)\n self.bn2 = nn.BatchNorm2d(128)\n self.maxpool2 = nn.MaxPool2d(2)\n\n self.conv31 = nn.Conv2d(128, 256, 3, 1, padding=1, bias=True)\n self.conv32 = nn.Conv2d(256, 256, 3, 1, padding=1, bias=True)\n self.bn3 = nn.BatchNorm2d(256)\n self.maxpool3 = nn.MaxPool2d(2)\n\n self.conv41 = nn.Conv2d(256, 512, 3, 1, padding=1, bias=True)\n self.conv42 = nn.Conv2d(512, 512, 3, 1, padding=1, bias=True)\n self.bn4 = nn.BatchNorm2d(512)\n self.maxpool4 = nn.MaxPool2d(2)\n\n self.conv51 = nn.Conv2d(512, 1024, 3, 1, padding=1, bias=True)\n self.conv52 = nn.Conv2d(1024, 1024, 3, 1, padding=1, bias=True)\n self.bn5 = nn.BatchNorm2d(1024)\n self.transConv5 = nn.ConvTranspose2d(in_channels=1024, \\\n out_channels=512, \\\n kernel_size=2, \\\n stride=2, \\\n bias=True)\n\n self.conv61 = nn.Conv2d(1024, 512, 3, 1, padding=1, bias=True)\n self.conv62 = nn.Conv2d(512, 512, 3, 1, padding=1, bias=True)\n self.bn6 = nn.BatchNorm2d(512)\n self.transConv6 = nn.ConvTranspose2d(512, 256, 2, 2, bias=True)\n\n self.conv71 = nn.Conv2d(512, 256, 3, 1, padding=1, bias=True)\n self.conv72 = nn.Conv2d(256, 256, 3, 1, padding=1, bias=True)\n self.bn7 = nn.BatchNorm2d(256)\n self.transConv7 = nn.ConvTranspose2d(256, 128, 2, 2, bias=True)\n\n self.conv81 = nn.Conv2d(256, 128, 3, 1, padding=1, bias=True)\n self.conv82 = nn.Conv2d(128, 128, 3, 1, padding=1, bias=True)\n self.bn8 = nn.BatchNorm2d(128)\n self.transConv8 = nn.ConvTranspose2d(128, 64, 2, 2, bias=True)\n\n self.conv91 = nn.Conv2d(128, 64, 3, 1, padding=1, bias=True)\n self.conv92 = nn.Conv2d(64, 64, 3, 1, padding=1, bias=True)\n self.bn9 = nn.BatchNorm2d(64)\n self.conv93 = nn.Conv2d(64, 4, 1, 1)\n self.bn10 = nn.BatchNorm2d(4)\n\n self.relu = nn.ReLU()\n self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)\n\n def forward(self, x):\n print('x', x.size())\n block11 = self.relu(self.bn1(self.conv12(self.relu(self.bn1(self.conv11(x))))))\n print('block11', block11.size())\n block12 = self.maxpool1(block11)\n print('block12', block12.size())\n\n block21 = self.relu(self.bn2(self.conv22(self.relu(self.bn2(self.conv21(block12))))))\n print('block21', block21.size())\n block22 = self.maxpool2(block21)\n print('block22', block22.size())\n\n block31 = self.relu(self.bn3(self.conv32(self.relu(self.bn3(self.conv31(block22))))))\n print('block31', block31.size())\n block32 = self.maxpool3(block31)\n print('block32', block32.size())\n\n block41 = 
self.relu(self.bn4(self.conv42(self.relu(self.bn4(self.conv41(block32))))))\n print('block41', block41.size())\n block42 = self.maxpool4(block41)\n print('block42', block42.size())\n\n################################3\n print(block42.size())\n block51 = self.relu(self.bn5(self.conv52(self.relu(self.bn5(self.conv51(block42))))))\n print('block51', block51.size())\n block52 = self.transConv5(block51)\n print('block52', block52.size())\n\n print('cat', block41.size(), block52.size())\n # combined6 = torch.cat((self.center_crop(block41,block52),block52), dim=1)\n combined6 = torch.cat((block41,block52), dim=1)\n print('combined6',combined6.size())\n block61 = self.relu(self.bn6(self.conv62(self.relu(self.bn6(self.conv61(combined6))))))\n print('block61',block61.size())\n block62 = self.transConv6(block61)\n print('block62', block62.size())\n\n print('cat', block31.size(), block62.size())\n combined7 = torch.cat((block31, block62), dim=1)\n print('combined7',combined7.size())\n block71 = self.relu(self.bn7(self.conv72(self.relu(self.bn7(self.conv71(combined7))))))\n print('block71',block71.size())\n block72 = self.transConv7(block71)\n print('block72',block72.size())\n\n print('cat', block21.size(), block72.size())\n combined8 = torch.cat((block21, block72), dim=1)\n print('combined8',combined8.size())\n block81 = self.relu(self.bn8(self.conv82(self.relu(self.bn8(self.conv81(combined8))))))\n print('block81',block81.size())\n block82 = self.transConv8(block81)\n print('block82',block82.size())\n\n combined9 = torch.cat((block11, block82), dim=1)\n print('combined9',combined9.size())\n block91 = self.relu(self.bn9(self.conv92(self.relu(self.bn9(self.conv91(combined9))))))\n print('block91',block91.size())\n block92 = self.conv93(block91)\n print('block92',block92.size())\n\n # return torch.sigmoid(block92)\n return block92\n\n def center_crop(self, small, large):\n x1, y1 = large.size()[2], large.size()[3]\n x2, y2 = small.size()[2], small.size()[3]\n return large[:,:,int(x1/2 - x2/2) : int(x1/2 + x2/2), int(y1/2 - y2/2) : int(y1/2 + y2/2)]\n\n\n","sub_path":"unet.py","file_name":"unet.py","file_ext":"py","file_size_in_byte":5876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"500354032","text":"# https://leetcode.com/problems/gas-station/description/\r\n\r\n\"\"\"\r\n[4]\r\n[5]\r\n[10, 8, 3, 1, 5, 3, 13]\r\n[1, 3, 9, 8, 1, 13, 2]\r\n[5]\r\n[4]\r\n\"\"\"\r\n\r\nclass Solution(object):\r\n \"\"\"\r\n Notes: Solution is guaranteed to be unique, so no more than 1 gas station will let you complete a full circuit (if possible).\r\n There is (no more than) one starting index s.t. Diff[i] >= 0 for all i.\r\n Cannot just find greatest diff. 
E.g.: G[10, 0, 5] and C[1, 11, 2] -> need to start at (5, 2), not (10, 1)\r\n Cannot just find biggest negative diff and save for last, nor can you find the biggest continuous negative diff.\r\n E.g.: D[9, 5, -6, -7, 4, -10, 11] -> have to start at 11\r\n \r\n Clarifications not explicitly given: len(gas) == len(cost), don't need to check.\r\n If there is one station, you still need to travel around back to it.\r\n \"\"\"\r\n def canCompleteCircuit(self, gas, cost): # Do not know which gas station we'll be starting at\r\n \"\"\"\r\n :type gas: List[int]\r\n :type cost: List[int]\r\n :rtype: int\r\n \"\"\"\r\n # return self.solution1(gas, cost) # 1505 ms -> 2.61% \r\n # return self.solution2(gas, cost) # 35 ms -> 80.22%\r\n return self.solution3(gas, cost) # 32 ms -> 93.48% \r\n \r\n def solution1(self, gas, cost): # Naive O(n^2) time\r\n if not gas:\r\n return None\r\n n = len(gas)\r\n diff = [gas[i] - cost[i] for i in xrange(n)] # gas you get at station i, minus gas it costs to get to station i+1\r\n diff.extend(diff) # for ease of circular calculations; technically just needed diff[:-1], but probably slower\r\n # diff.extend(diff[i] for i in xrange(n-1))\r\n for i in xrange(n):\r\n idx = self.check(diff, i, n+i)\r\n if idx != -1:\r\n break\r\n return idx\r\n \r\n def check(self, diff, start, end):\r\n curr_gas = 0\r\n for i in xrange(start, end):\r\n curr_gas += diff[i]\r\n if curr_gas < 0:\r\n return -1\r\n return start\r\n \r\n def solution2(self, gas, cost):\r\n \"\"\"\r\n Idea: The unique starting indexing is after the lowest point in the graph; min_val depends on start, but min_idx is constant, \r\n Improvements: could make clearer by initializing min_idx to 0, and then setting to i+1 instead\r\n Proof of correctness:\r\n (1) Start at A; let B be the first station that can't be reached from A. For every C en route to B, the gas coming in was >= 0. \r\n Starting at C would leave you at 0 coming in, so no in-between city C will help. Thus, we jump to B rather than A+1. \r\n *Alt proof here: https://discuss.leetcode.com/topic/8860/fully-commented-o-n-c-solution-enabled-by-a-single-observation-of-mine\r\n ^Assume some in-between city C could reach B. Then A could reach C, and C can reach B, so A can reach B -> contradiction.\r\n (2) WLOG, start at 0. Let i be the lowest cumulative total; then we have to prove that i+1 is the starting point. \r\n Since the min occurs with the sum diff[0] + ... + diff[i], we have diff[i+1] >= 0 (otherwise, i would not be min_idx)\r\n Similarly, diff[i+1] + diff[i+2] >= 0. And diff[i+1] + ... + diff[n-1] >= 0 -> diff[j] >= 0 for all i in [i+1, n-1], so \r\n i+1 is a valid starting point for the second half. Let diff[i+1 -> n-1] = D. Given that diff[0 -> n-1] >= 0, we have:\r\n D + diff[0->i] >= 0. Since D is enough to compensate for diff[0->i] (the most negative cumulative sum starting from 0), \r\n D is enough to compensate for any diff[0->j], j <= i. 
In particular, D + diff[0] >= 0, D + diff[0] + diff[1] >= 0, etc., and\r\n so from i+1, we have curr_gas >= 0 for every step, and thus i+1 is always a valid starting point (the only one, in this case.)\r\n * See also: https://discuss.leetcode.com/topic/39755/proof-of-if-total-gas-is-greater-than-total-cost-there-is-a-solution-c\r\n \"\"\"\r\n if not gas:\r\n return None\r\n min_idx = -1 ### changed to -1; doesn't matter if overwritten, but if not (curr_gas never went <= 0), will return 0 \r\n min_val = 0 # ^If curr gas never goes below 0 then all diffs are positive, which means n solutions; but by uniqueness, n = 1\r\n curr_gas = 0 # ^So this is purely to address the case of n = 1, with positive cost (e.g. [5] [4])\r\n for i in xrange(len(gas)):\r\n curr_gas += gas[i] - cost[i]\r\n if curr_gas < min_val: ### changed to include equals, because if we get multiple flat, we want to get to last one\r\n min_val = curr_gas # ^changed back to exclude, because flat just means no gain, but also no loss -> no problem\r\n min_idx = i \r\n return min_idx+1 if curr_gas >= 0 else -1 # after min_idx, it must be flat or increasing, otherwise minimum would be elsewhere\r\n\r\n def solution3(self, gas, cost): # Apparently sum is very fast, because despite 3 traversals as opposed to 1, it's faster than Sol2\r\n if len(gas) == 0 or len(cost) == 0 or sum(gas) < sum(cost):\r\n return -1 \r\n position = 0\r\n balance = 0\r\n for i in xrange(len(gas)):\r\n balance += gas[i] - cost[i]\r\n if balance < 0:\r\n balance = 0\r\n position = i + 1\r\n return position\r\n \r\n # From: https://discuss.leetcode.com/topic/5088/my-ac-is-o-1-space-o-n-running-time-solution-does-anybody-have-posted-this-solution\r\n def solution4(self, gas, cost):\r\n start, end = len(gas)-1, 0 # we're arbitrarily starting on the \"last\" gas station\r\n curr_gas = gas[start] - cost[start]\r\n while start > end:\r\n if curr_gas >= 0:\r\n curr_gas += gas[end] - cost[end] # we can keep driving\r\n end += 1\r\n else:\r\n start -= 1 # we can't keep going with the gas have, so we have to have started earlier to build up stock\r\n curr_gas += gas[start] - cost[start]\r\n return start if curr_gas >= 0 else -1\r\n \r\n \"\"\"For reference: old work that was eventually discarded for Solution2, but which would eventually have reached solution4\"\"\"\r\n # start = n # diff has been doubled \r\n # curr_gas = 0\r\n # for i in xrange(start, len(diff)): \r\n # curr_gas += diff[i]\r\n # if curr_gas < 0:\r\n # offset = i - start # [start, i-1] is good \r\n # while curr_gas < 0: # starting from diff[i] is no longer possible, so we need to build up stock by starting earlier \r\n \r\n ","sub_path":"leetcode/python/GasStation.py","file_name":"GasStation.py","file_ext":"py","file_size_in_byte":6674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"458752133","text":"## Format a given string of N characters into groups of K characters (where the\n## first group can be smaller) separated by '-'.\n\ndef solution(S, K):\n S = S.replace('-', \"\")\n N = len(S)\n if K > N:\n raise ValueError('K cannot be larger than N.')\n first_group_size = N%K\n first_part = S[0:first_group_size]\n rest_part = S[first_group_size:]\n str = \"\"\n n_str = \"\"\n while(len(n_str) != (N-first_group_size)):\n str += rest_part[0:K] + '-'\n rest_part = rest_part[K:]\n n_str = str\n n_str = n_str.replace('-', \"\")\n N = len(str)\n str = str[0:N-1]\n if first_group_size != 0:\n print(first_part + '-' + str)\n else:\n 
print(str.upper())\n\nsolution('2-4A0r7-4k-tehwyw-46h-267hdgshs-362g-shagsat4', 10)\n","sub_path":"Medium/license_key_grouping.py","file_name":"license_key_grouping.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"272534548","text":"# -*- coding: utf-8 -*-\n\nimport re\nimport string\nimport itertools as it\nfrom factcreator import factsLinksProcessing\n\ndef extract(text, fact):\n for sub in fact.substrs:\n if text.find(sub) != -1:\n return True\n\ndef processText(rawText):\n text = re.sub('<[^>]*>', '', rawText)\n text = text.lower()\n# table = string.maketrans(string.punctuation, ' '*len(string.punctuation))\n table = dict((ord(char), u' ') for char in string.punctuation)#it.chain(string.punctuation, string.whitespace))\n return text.translate(table)\n\ndef extractAll(rawText, factStorage, backFacts):\n text = processText(rawText)\n\n approvedFacts = set()\n for fact in factStorage:\n if extract(text, fact):\n approvedFacts.add(fact.name)\n\n approvedFacts = factsLinksProcessing(approvedFacts, backFacts)\n\n return approvedFacts\n\ndef extractSalary(salaryText):\n sr = re.findall(u'(\\d+)\\s*\\-\\s*(\\d+)\\s*(грн|\\$|USD)', salaryText, re.U)\n if len(sr) != 0:\n return sr[0]\n sr = re.findall(u'от\\s*([\\d\\s]+)\\s*до\\s*([\\d\\s]+)\\s*(грн|\\$|USD)', salaryText, re.U)\n if len(sr) != 0:\n return sr[0]\n sr = re.findall(u'([\\d\\s]+)\\s*(грн|\\$|USD)', salaryText, re.U)\n if len(sr) != 0:\n return sr[0]\n return None\n","sub_path":"src/jobinator/facts/simpleextractor.py","file_name":"simpleextractor.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"275350699","text":"from config import mail\nimport pytest\nimport os\n\nfrom masonite.app import App\nfrom masonite.exceptions import DriverNotFound\nfrom masonite.managers.MailManager import MailManager\nfrom masonite.drivers.MailSmtpDriver import MailSmtpDriver as MailDriver\nfrom masonite.drivers.MailMailgunDriver import MailMailgunDriver as Mailgun\n\nclass User:\n pass\n\nif os.getenv('MAILGUN_SECRET'):\n def test_mailgun_driver():\n app = App()\n\n app.bind('Test', object)\n app.bind('MailConfig', mail)\n app.bind('MailSmtpDriver', MailDriver)\n app.bind('MailMailgunDriver', Mailgun)\n user = User\n user.email = 'test@email.com'\n\n assert MailManager(app).driver('mailgun').to(user).to_address == 'test@email.com'\n\n\n def test_mail_renders_template():\n app = App()\n\n app.bind('MailConfig', mail)\n app.bind('MailSmtpDriver', MailDriver)\n app.bind('MailMailgunDriver', Mailgun)\n\n assert 'MasoniteTesting' in MailManager(app).driver('mailgun').to(\n 'idmann509@gmail.com').template('mail/welcome', {'to': 'MasoniteTesting'}).message_body\n","sub_path":"tests/test_mailgun_driver.py","file_name":"test_mailgun_driver.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"59429086","text":"n = int(input())\nnums = list(map(int,input().split()))\n\ndp = [1] * (n)\n\nfor i in range(1,n):\n temp = 0\n\n for j in range(i-1,-1,-1):\n if nums[j] > nums[i]:\n temp = max(temp,dp[j])\n temp += 1\n dp[i] = temp \n\nprint(max(dp))\n\n ","sub_path":"3.beakjoon/DP/BOJ_11722_가장 긴 감소하는 부분 수열.py","file_name":"BOJ_11722_가장 긴 감소하는 부분 
수열.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"5782862","text":"'''\r\nHow many circular primes are there below one million?\r\n'''\r\n\r\nfrom sieve import gen_sieve_eratosthenes, is_prime\r\nfrom number import circulate\r\nfrom projecteuler import boolean_product_of\r\nimport time\r\n\r\n\r\n\r\ndef is_circular_prime(p, sieve):\r\n # assuming p is prime\r\n for n in circulate(p)[1:]:\r\n if not is_prime(n, sieve):\r\n return False\r\n \r\n return True\r\n\r\ndef solve(N):\r\n result = 0 #@UnusedVariable\r\n \r\n start = time.clock()\r\n sieve = {}\r\n for p in gen_sieve_eratosthenes(N + 1):\r\n sieve[p] = True\r\n \r\n print('Sieve prepared')\r\n circular_primes = []\r\n for p in sorted(sieve.keys()):\r\n if is_circular_prime(p, sieve):\r\n circular_primes.append(p)\r\n \r\n print(circular_primes)\r\n result = len(circular_primes) #@UnusedVariable\r\n print(\"The number of circular primes below %(N)d is %(result)d\" % vars())\r\n print(time.clock() - start)\r\nif __name__ == \"__main__\":\r\n solve(10**6)\r\n\r\n\r\n\r\n\r\n#def find_first(l, matching_func):\r\n# for e in l:\r\n# if(matching_func(e)):\r\n# return e\r\n# return None\r\n \r\n\r\n#def is_prime(n, sieve_map):\r\n# try:\r\n# sieve_map[n]\r\n# return True\r\n# except KeyError, e:\r\n# return False\r\n","sub_path":"src/projecteuler/problem35.py","file_name":"problem35.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"251304998","text":"from point import Point\n\n\nclass Cluster:\n\tdef __init__(self):\n\t\tself.points = []\n\t\tself.cur_x = 0\n\t\tself.cur_y = 0\n\t\tself.last_x = 0\n\t\tself.last_y = 0\n\n\t@staticmethod\n\tdef init_center(amount_of_classes, cluster_list, point_list):\n\t\tsize = len(point_list)\n\t\tstep = size // amount_of_classes\n\t\tsteper = 0\n\n\t\tfor cluster in cluster_list:\n\t\t\tcluster.cur_x = point_list[steper].x\n\t\t\tcluster.cur_y = point_list[steper].y\n\t\t\tsteper += step\n\n\tdef set_center(self):\n\t\tsum_x = 0\n\t\tsum_y = 0\n\t\tsize = len(self.points)\n\n\t\tfor point in self.points:\n\t\t\tsum_x += point.x\n\t\t\tsum_y += point.y\n\n\t\tself.last_x = self.cur_x\n\t\tself.last_y = self.cur_y\n\n\t\tself.cur_x = sum_x / size\n\t\tself.cur_y = sum_y / size\n\n\tdef clear_points(self):\n\t\tself.points.clear()\n\n\tdef add_point(self, point):\n\t\tself.points.append(point)\n\n\t@staticmethod\n\tdef bind(amount_of_classes, cluster_list, point_list):\n\t\tfor cluster in cluster_list:\n\t\t\tcluster.clear_points()\n\n\t\tsize = len(point_list)\n\n\t\tfor point in point_list:\n\t\t\tpart1 = (cluster_list[0].cur_x - point.x) ** 2\n\t\t\tpart2 = (cluster_list[0].cur_y - point.y) ** 2\n\t\t\tminimal = (part1 + part2) ** (1 / 2)\n\t\t\tcluster = cluster_list[0]\n\n\t\t\tfor j in range(1, amount_of_classes):\n\t\t\t\tpart1_tmp = (cluster_list[j].cur_x - point.x) ** 2\n\t\t\t\tpart2_tmp = (cluster_list[j].cur_y - point.y) ** 2\n\t\t\t\ttmp = (part1_tmp + part2_tmp) ** (1 / 2)\n\n\t\t\t\tif minimal > tmp:\n\t\t\t\t\tminimal = tmp\n\t\t\t\t\tcluster = cluster_list[j]\n\n\t\t\tcluster.add_point(point)\n\n\t\treturn cluster_list\n\n\t@classmethod\n\tdef start(cls, amount_of_classes, cluster_list, point_list):\n\t\tcls.init_center(amount_of_classes, cluster_list, point_list)\n\n\t\twhile True:\n\t\t\tcheck = 0\n\t\t\tcls.bind(amount_of_classes, cluster_list, point_list)\n\n\t\t\tfor cluster in 
cluster_list:\n\t\t\t\tcluster.set_center()\n\n\t\t\tfor cluster in cluster_list:\n\t\t\t\tif (cluster.cur_x == cluster.last_x) and (cluster.cur_y == cluster.last_y):\n\t\t\t\t\tcheck += 1\n\n\t\t\tif check == amount_of_classes:\n\t\t\t\treturn\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"laba2/cluster.py","file_name":"cluster.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"501534696","text":"import os\nimport hashlib\nimport config\nimport pickle\nimport types\nimport logging\n\nfrom domain.configuration_manager import ConfigurationManager\n\nclass FileBasedConfigurationManager(ConfigurationManager):\n REQUIRED_FIELDS = {\n 'name' : types.StringType ,\n 'output_bit_depth': types.StringType ,\n 'output_sample_frequency' : types.IntType ,\n 'on_modulation_frequency': types.IntType ,\n 'off_modulation_frequency': types.IntType ,\n 'input_bit_depth': types.StringType ,\n 'input_sample_frequency': types.IntType ,\n 'sublayer_height_mm': types.FloatType,\n 'laser_thickness_mm' : types.FloatType,\n 'drips_per_mm':types.FloatType,\n 'max_deflection' :types.FloatType,\n 'calibration_data': types.DictType,\n 'calibration_scale': types.FloatType,\n 'draw_speed' : types.FloatType,\n 'max_lead_distance_mm' : types.FloatType,\n 'use_serial_zaxis' : types.BooleanType,\n 'serial_port': types.StringType,\n 'serial_on': types.StringType,\n 'serial_off' : types.StringType,\n }\n \n CONFIGURATION_EXTENSION = '.cfg'\n\n def __init__(self):\n pass\n\n def list(self):\n printers = []\n if os.path.exists(config.PEACHY_PATH):\n for file_name in os.listdir(config.PEACHY_PATH):\n if file_name.endswith(self.CONFIGURATION_EXTENSION):\n configuration = self._load_configuration(os.path.join(self._path(), file_name))\n if configuration:\n printers.append(configuration['name'])\n return printers\n\n def load(self, printer_name):\n logging.info('Loading configutation for \"%s\"' % printer_name)\n filename = self._get_file_name(printer_name)\n if not os.path.exists(filename):\n raise Exception(\"Printer file not found\")\n configuration = self._load_configuration(filename)\n if configuration:\n return configuration\n else:\n raise Exception(\"Printer file corrupt or damaged\")\n\n def _load_configuration(self, filename):\n with open(filename, 'r') as file_handle:\n configuration = pickle.load(file_handle)\n if self._valid(configuration):\n return configuration\n else:\n return None\n\n def save(self, configuration):\n if self._valid(configuration):\n filename = self._get_file_name(configuration['name'])\n with open(filename,'w') as file_handle:\n pickle.dump(configuration, file_handle)\n else:\n logging.error(\"Saving, Configuration Specified is invalid\\n%s\" % configuration)\n raise Exception(\"Configuration Specified is invalid\\n%s\" % configuration )\n\n def new(self, printer_name):\n new_printer = self.DEFAULTS.copy()\n new_printer['name'] = printer_name\n return new_printer\n\n def _valid(self, configuration):\n valid = True\n for (key, value) in self.REQUIRED_FIELDS.items():\n if not (configuration.has_key(key) and type(configuration[key]) == value):\n logging.warn('%s missing or invalid\\n%s' % (key, configuration))\n valid = False\n return valid\n\n def _path(self):\n if not os.path.exists(config.PEACHY_PATH):\n os.makedirs(config.PEACHY_PATH)\n return config.PEACHY_PATH\n\n def _get_file_name(self, name):\n filename = hashlib.md5(name).hexdigest() + self.CONFIGURATION_EXTENSION\n return 
os.path.join(self._path(), filename)\n","sub_path":"src/infrastructure/configuration.py","file_name":"configuration.py","file_ext":"py","file_size_in_byte":3587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"482715740","text":"import asyncio\nimport aiopg\nimport json\n\nfrom . import nwdb\nfrom . import core\n\n@core.handler\ndef set_object(client, key, value):\n \"\"\"\n Save an arbitrary object for a member under a key.\n \"\"\"\n client.require_auth()\n value = json.dumps(value)\n with (yield from nwdb.connection()) as conn:\n cursor = yield from conn.cursor()\n yield from cursor.execute(\"\"\"\n update object set value = %s\n where key = %s and member_id = %s and clan_id is null and alliance_id is null\n returning id\n \"\"\", [value, key, client.session[\"member_id\"]])\n rs = yield from cursor.fetchone()\n if rs is None:\n yield from cursor.execute(\"\"\"\n insert into object(member_id, key, value)\n select %s, %s, %s\n \"\"\", [client.session[\"member_id\"], key, value])\n\n\n@core.function\ndef get_object(client, key):\n \"\"\"\n Retrieves an arbitrary object previously stored by the member under a key.\n \"\"\"\n client.require_auth()\n with (yield from nwdb.connection()) as conn:\n cursor = yield from conn.cursor()\n yield from cursor.execute(\"\"\"\n select value from object\n where member_id = %s and key = %s and clan_id is null and alliance_id is null\n \"\"\", [client.session[\"member_id\"], key])\n rs = yield from cursor.fetchone()\n if rs is not None:\n rs = json.loads(rs[0])\n return rs\n\n\n","sub_path":"src/netwrok/objects.py","file_name":"objects.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"49194308","text":"import sys\n\n__author__ = 'Ian'\n\n\n# Found main body of source code @ http://stackoverflow.com/questions/3160699/python-progress-bar\n\ndef bar(progress, message):\n barLength = 10 # Modify this to change the length of the progress bar\n status = message\n done = False\n if isinstance(progress, int):\n progress = float(progress)\n if not isinstance(progress, float):\n progress = 0\n status = \"error: progress var must be float\\r\\n\"\n if progress < 0:\n progress = 0\n status = \"Halt...\\r\\n\"\n if progress >= 1:\n progress = 1\n status = \"Done...\\r\\n\"\n done = True\n block = int(round(barLength * progress))\n text = \"\\rPercent: [{0}] {1}% {2}\".format(\"#\" * block + \"-\" * (barLength - block), progress * 100, status)\n sys.stdout.write(text)\n sys.stdout.flush()\n return done\n","sub_path":"OrbitalMechanics/OrbitalMechanics/waitbar.py","file_name":"waitbar.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"365289798","text":"import time\nimport numpy as np\nfrom GA import GA\nfrom pprint import pprint\n\nstart = time.time()\nnp.random.seed(time.gmtime())\ntamPopulacaoMain = 200\nnumCromossomosMain = 3\nnumGeracoesMain = 1000\nintervaloMain = [[-10,10],[-10,10],[0,100]]\nresolucaoMain = 32\ntaxaDeMutacaoMain = 0.05\n\n\nGA = GA(tamPopulacaoMain, numGeracoesMain, numCromossomosMain, intervaloMain, resolucaoMain, taxaDeMutacaoMain)\n\nprint (\"-------------------------------------------------------------\")\n\nGA.testes()\n\n\nprint(\"Tempo total: \",end='')\nprint(time.time() - 
start)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"585244319","text":"\"\"\"'this program is extension of Problem 5 and in this program it\r\nmaintains the List of CompanyShares in a Linked List So new CompanyShares\r\ncan be added or removed easily.\r\n\r\n@author Amit Kumar\r\n@version 1.0\r\n@since 14/01/2019\r\n\"\"\"\r\n# importing important modules\r\nimport json\r\nfrom DataStructureProgram.LinkedList import *\r\n\r\nl1 = SingleLinkedList() # creating object of LinkedList class to access all the method present in that class\r\ncompany_info = json.load(open(\"Problem6_company.json\", \"r\")) # opening and reading the 'company' JSON file and\r\n# converting it to the python Dictionary storing in a variable\r\n\r\n\r\n# 'add_companies' function adds each companies detail to the LinkedList node\r\ndef add_companies():\r\n for val in company_info: # traversing the dictionary\r\n l1.insert_at_end(company_info[val]) # inserting each company details to the LinkedList\r\n print(\"\\nCompanies detail successfully added to the LinkedList\\n\")\r\n\r\n\r\n# 'show_symbols' function displays company name and their symbol for the user reference\r\ndef show_symbols():\r\n print(\"\\n Company Name and their Symbol:\")\r\n print(\"--------------------------------\")\r\n for val in company_info: # traversing through the dictionary\r\n print(company_info[val][\"company_name\"], \"----> \", company_info[val][\"company_symbol\"])\r\n print()\r\n\r\n\r\n# this function delete the node of the LinkedList containing particular company details after getting company symbol\r\n# input from the user\r\ndef del_companies():\r\n show_symbols() # calling show_symbols function to display symbols\r\n try:\r\n symbol = input(\"Enter the Symbol of companies: \")\r\n for val in company_info: # traversing through the Dictionary to delete the user-input company details\r\n if symbol == company_info[val][\"company_symbol\"]:\r\n l1.delete_node(company_info[val]) # deleting the particular company detail from the LinkedList\r\n print(\"\\nCompany details successfully deleted from the LinkedList\\n\")\r\n\r\n except Exception as e: # handling the exception\r\n print(e)\r\n\r\n\r\n# 'CompanyShare' as Main class\r\nclass CompanyShare:\r\n while True:\r\n # displaying main menu\r\n print(\"\\n-----------Company Share Details-----------\")\r\n print(\"enter 1.To add company to LinkedList\")\r\n print(\"enter 2.To delete company from LinkedList\")\r\n print(\"enter 3.To display companies of LinkedList\")\r\n print(\"enter 4.For Exit\")\r\n\r\n global choice\r\n try:\r\n choice = int(input(\"\\nEnter your choice: \"))\r\n except Exception as e:\r\n print(e, \"\\n!!! Invalid Input !!!\\n\")\r\n try:\r\n if choice == 1:\r\n add_companies() # calling method to add company details\r\n elif choice == 2:\r\n del_companies() # calling method to delete particular company detail\r\n elif choice == 3:\r\n l1.display_list() # calling method to display LinkedList content\r\n elif choice == 4:\r\n print(\"\\nClosing.....\")\r\n print(\"Closed Successfully\")\r\n exit(0) # terminating the program\r\n else:\r\n print(\"Invalid choice !!! Please Try again\")\r\n except Exception as e: # handling the exception\r\n print(e, \"\\n!!! 
Invalid Input !!!\\n\")\r\n\r\n\r\n# the program will run only from this python file, not from the imported file(s)\r\nif __name__ == '__main__':\r\n    s1 = CompanyShare()  # creating object of 'CompanyShare'\r\n","sub_path":"ObjectOrientedPrograms/Problem6_CompanyShareLinkedList.py","file_name":"Problem6_CompanyShareLinkedList.py","file_ext":"py","file_size_in_byte":3536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"383729787","text":"from __future__ import annotations\nfrom color_settings import BLUE\nfrom event.event_gov import set_gov\nimport random\nfrom event.player import *\nfrom event.event_setting import *\nfrom game.game import Game\n\ndict_temp = {}\n# open the text file\nfile = open('dict.txt','r')\n# iterate over each line of the text file; strip removes the specified characters (whitespace or newlines by default) or character sequence from both ends of the string\nfor line in file.readlines():\n    line = line.strip()\n    k = line.split(' ')[0]\n    v = line.split(' ')[1]\n    dict_temp[k] = v\n# close the file, as before\nfile.close()\n\nclass Events:\n    def __init__(self) :\n        self.players=Players(dict_temp['gov'],dict_temp['wfh'])\n        self.start_image = START_IMAGE\n        self.start_round = START_ROUND\n        self.using_player = 0\n        self.player=0\n        self.event = None\n        self.event_list = [0,1,2,3,4,5]\n        random.shuffle(self.event_list)\n        self.num = 0\n        self.buttons = [Buttons('mute',MUTE_IMAGE_BLACK,965,60),Buttons('sound',SOUND_IMAGE_BLACK,965,60),Buttons('play',PLAY_IMAGE_BLACK,920,60)\n        ,Buttons('pause',PAUSE_IMAGE_BLACK,920,60),Buttons('last_page',LAST_PAGE_IMAGE_BLACK,40,520)]\n        self.buttons_white = [Buttons('mute',MUTE_IMAGE,965,60),Buttons('sound',SOUND_IMAGE,965,60),Buttons('play',PLAY_IMAGE,920,60)\n        ,Buttons('pause',PAUSE_IMAGE,920,60)]\n        self.mute = False\n        self.pause = False\n        self.using_event=None\n        #self.num=random.randint(1, 5)\n        self.chosen = []\n        self.next=0\n        self.win = pygame.display.set_mode((WIN_WIDTH, WIN_HEIGHT))\n        self.notify = None\n        self.last_page = False\n        self.read = False\n        pass\n    def run(self):\n        clock = pygame.time.Clock()\n        self.win.blit(BACKGROUND_IMAGE_CHOOSE_PLAYER,(0,0))\n        run = True\n        while run:\n            clock.tick(FPS)\n            self.win.blit(BACKGROUND_IMAGE_CHOOSE_PLAYER,(0,0))\n            if self.last_page: #previous page\n                return True\n            if self.using_player == 0:\n                run = self.choose_player()\n                self.read = False\n                self.using_event = None\n                self.num = 0\n                self.next = 0\n            else:\n                while not self.read:\n                    quit = self.message_page_show()\n                    if quit: #press X in message_page\n                        return False\n                run2 = True\n                game = Game(self.player)\n                while run2 :\n                    run2 = self.event_happen()\n                    if self.next == 1:\n                        print(self.chosen)\n                        self.impact_model(game)\n                        self.using_event=None\n                        game.mute(self.mute)\n                        quit=game.run()\n                        if quit:\n                            return False\n                        elif game.all_pass:\n                            quit = self.all_pass(game)\n                            if quit:\n                                return False\n                            else:\n                                run2 = False\n                                run = True\n                        elif game.keep_going:\n                            \n                            quit,exit = self.keep_going(game)\n                            if quit:\n                                return False\n                            else:\n                                run2 = True\n                            if exit:\n                                self.using_player = 0\n                                run2 = False\n                                run = True\n                        elif game.fail:\n                            quit = self.game_fail(game)\n                            if quit:\n                                return False\n                            else:\n                                run2 = False\n                                run = True\n            else:\n                run = False\n            for event in pygame.event.get():\n                # quit\n                if event.type == pygame.QUIT:\n                    return False\n            pygame.display.update()\n    def choose_player(self):\n        run = True\n        self.draw_choose_player()\n        self.draw_player_frame()\n        for event in pygame.event.get():\n            if event.type == pygame.MOUSEBUTTONDOWN:\n                x, y = pygame.mouse.get_pos()\n                self.get_click_choose_player(x,y)\n                self.button_clicked(x,y)\n            if event.type == pygame.QUIT:\n                run = False\n        return run\n    def 
get_click_choose_player(self,x:int,y:int):\n \n if self.start_image_rect.collidepoint(x,y) and self.player != 0:\n self.using_player = self.player\n if (self.players.player_btn[0].icon_image_rect.collidepoint(x,y) or self.players.player_btn[0].word_image_rect.collidepoint(x,y)) and self.players.player_btn[0].unlock:\n self.players.player_btn[0].selected = True\n self.players.player_btn[1].selected = False\n self.player = 1\n elif (self.players.player_btn[1].icon_image_rect.collidepoint(x,y) or self.players.player_btn[1].word_image_rect.collidepoint(x,y)) and self.players.player_btn[1].unlock:\n self.players.player_btn[1].selected = True\n self.players.player_btn[0].selected = False\n self.player = 2\n else: \n self.players.player_btn[0].selected = False\n self.players.player_btn[1].selected = False\n self.player = 0\n def button_clicked(self,x:int,y:int):\n button_name = ''\n for btn in self.buttons:\n if btn.image_rect.collidepoint(x,y):\n button_name = btn.name\n if button_name == 'sound':\n self.mute = not self.mute\n if button_name =='pause':\n self.pause = not self.pause\n if button_name == 'last_page':\n self.last_page = True\n def draw_choose_player(self):\n self.win.blit(BACKGROUND_IMAGE_CHOOSE_PLAYER,(0,0))\n #icon of player\n self.win.blit(self.players.player_btn[0].icon_image, self.players.player_btn[0].icon_image_rect) #show gov\n self.win.blit(self.players.player_btn[0].word_image, self.players.player_btn[0].word_image_rect) #show gov\n self.win.blit(self.players.player_btn[1].icon_image, self.players.player_btn[1].icon_image_rect) #show gov\n self.win.blit(self.players.player_btn[1].word_image, self.players.player_btn[1].word_image_rect) #show gov\n #start button\n self.start_image_rect = self.start_image.get_rect() \n self.start_image_rect.center = (512,500)\n self.win.blit(self.start_image,self.start_image_rect)\n #back money\n bank_image = BACK_MENU\n self.win.blit(bank_image,(20,25))\n font = pygame.font.Font(FONT, 22)\n text = font.render('$ ' + str(dict_temp['money']), True, BROWNGRAY)\n self.win.blit(text, (130,75))\n \n self.draw_button_black()\n def message_page_show(self):\n self.win.blit(BACKGROUND_IMAGE_MESSAGE,(0,0))\n self.draw_button_black()\n read_button = Buttons('read',READ_BUTTON,555,490)\n for event in pygame.event.get():\n # quit\n if event.type == pygame.QUIT:\n return True\n if event.type == pygame.MOUSEBUTTONDOWN:\n x, y = pygame.mouse.get_pos()\n self.button_clicked(x,y)\n if read_button.image_rect.collidepoint(x,y):\n self.read = True\n pygame.display.update()\n def draw_player_frame(self):\n for btn in self.players.player_btn:\n if btn.selected :\n pygame.draw.rect(self.win, BLACK, btn.frame, 10)\n def event_happen(self):\n run2 = True\n if self.using_event == None:\n self.set_using_event()\n self.events_draw()\n for event in pygame.event.get():\n if event.type == pygame.MOUSEBUTTONDOWN:\n x, y = pygame.mouse.get_pos()\n self.make_decision(x,y)\n self.button_clicked(x,y)\n if event.type == pygame.QUIT:\n run2 = False\n return run2\n def events_draw(self):\n self.win.blit(BACKGROUND_IMAGE_EVENT,(0,0)) #色碼,X點、Y點、寬、高\n self.win.blit(self.using_event.question.image, self.using_event.question.image_rect) #show choose player\n self.win.blit(self.using_event.select1.image, self.using_event.select1.image_rect) #show gov\n self.win.blit(self.using_event.select2.image, self.using_event.select2.image_rect) #show wfh\n self.win.blit(self.using_event.select3.image, self.using_event.select3.image_rect) #show wfh\n self.start_round_rect = 
self.start_round.get_rect()\n self.start_round_rect.center = (365,520)\n self.win.blit(self.start_round,self.start_round_rect)\n self.draw_event_frame()\n level_image_rect = LEVEL1.get_rect()\n level_image_rect.center= (105,50)\n if self.num == 1:\n level_image = LEVEL1\n elif self.num == 2:\n level_image = LEVEL2\n elif self.num == 3:\n level_image = LEVEL3\n elif self.num == 4:\n level_image = LEVEL4\n elif self.num == 5:\n level_image = LEVEL5\n self.win.blit(level_image,level_image_rect)\n self.draw_button_white()\n pygame.display.update()\n pass\n def draw_event_frame(self):\n \n if self.using_event.select1.selected:\n pygame.draw.rect(self.win, BLACK, self.using_event.select1.frame, 10)\n if self.using_event.select2.selected:\n pygame.draw.rect(self.win, BLACK, self.using_event.select2.frame, 10)\n if self.using_event.select3.selected:\n pygame.draw.rect(self.win, BLACK, self.using_event.select3.frame, 10)\n def set_using_event(self):\n \n self.using_event=get_using_event(self.using_player,self.event_list[self.num])\n self.chosen = []\n self.num += 1\n def make_decision(self,x:int,y:int):\n if self.start_round_rect.collidepoint(x,y) and self.chosen != []:\n self.next=1\n self.using_event.select1.selected = False\n self.using_event.select2.selected = False\n self.using_event.select3.selected = False\n return\n if self.using_event.select1.image_rect.collidepoint(x,y):\n self.using_event.select1.selected = True\n self.using_event.select2.selected = False\n self.using_event.select3.selected = False\n self.notify = self.using_event.select1.notify\n self.chosen = self.using_event.select1.impact\n elif self.using_event.select2.image_rect.collidepoint(x,y):\n self.using_event.select1.selected = False\n self.using_event.select2.selected = True\n self.using_event.select3.selected = False\n self.notify = self.using_event.select2.notify\n self.chosen = self.using_event.select2.impact\n elif self.using_event.select3.image_rect.collidepoint(x,y):\n self.using_event.select1.selected = False\n self.using_event.select2.selected = False\n self.using_event.select3.selected = True\n self.chosen = self.using_event.select3.impact\n self.notify = self.using_event.select3.notify\n else:\n self.using_event.select1.selected = False\n self.using_event.select2.selected = False\n self.using_event.select3.selected = False\n self.chosen = []\n def draw_button_black(self):\n self.win.blit(self.buttons[4].image,self.buttons[4].image_rect)\n if self.mute:\n self.win.blit(self.buttons[0].image,self.buttons[0].image_rect)\n else:\n self.win.blit(self.buttons[1].image,self.buttons[1].image_rect)\n if self.pause:\n self.win.blit(self.buttons[2].image,self.buttons[2].image_rect)\n else:\n self.win.blit(self.buttons[3].image,self.buttons[3].image_rect)\n def draw_button_white(self):\n if self.mute:\n self.win.blit(self.buttons_white[0].image,self.buttons_white[0].image_rect)\n else:\n self.win.blit(self.buttons_white[1].image,self.buttons_white[1].image_rect)\n if self.pause:\n self.win.blit(self.buttons_white[2].image,self.buttons_white[2].image_rect)\n else:\n self.win.blit(self.buttons_white[3].image,self.buttons_white[3].image_rect)\n def impact_model(self,game:Game):\n money_get = random.randint(self.chosen[1],self.chosen[0])\n blood_get = random.randint(self.chosen[3],self.chosen[2])\n tower_upgrade = random.randint(self.chosen[5],self.chosen[4])+5\n game.game_model.money += money_get\n game.game_model.max_hp += blood_get\n game.game_model.hp += blood_get\n game.game_model.notify = self.notify\n 
game.game_model.tower_money += tower_upgrade\n def keep_going(self,game:Game):\n self.next = 0\n self.chosen = []\n percentage = [0,10,20,40,60]\n while True:\n #print('keep going',stage,money)\n self.win.blit(WIN_STAGE_BG,(0,0))\n text = '*' + str(game.game_model.tower_money) #塔防幣\n show_text(self.win,text,30,550,470)\n text = '#' + str(game.game_model.money) #金錢\n show_text(self.win,text,30,735,470)\n text = str(int(game.game_model.money * percentage[game.game_model.stage] / 100))\n show_text(self.win,text,40,500,335) #中間遊戲幣\n text = str( percentage[game.game_model.stage])+'%'\n show_text(self.win,text,40,248,85)#左上目前%數\n text = str( percentage[game.game_model.stage+1])+'%'\n show_text(self.win,text,40,574,390,BLUE)#下一關%數\n draw_hp(self.win, game.game_model.hp,game.game_model.max_hp)\n pygame.display.update()\n for event in pygame.event.get():\n if event.type == pygame.MOUSEBUTTONDOWN:\n x, y = pygame.mouse.get_pos()\n if 86 59 :\n #print(temp)\n if href == temp :\n continue\n #print(href)\n getinfo = get_single_item_data(href)\n list.append(getinfo)\n temp = str(href)\n # print(temp)\n count += 1\n #get_single_item_data(href)\n #if count == 3:\n #break\n\n\n #在這裡獲得所有比賽的東西\n #print(list)\n return list\n\ndef get_single_item_data(item_url):\n source_code = requests.get(item_url)\n plain_text = source_code.text\n soup = BeautifulSoup(plain_text, \"html.parser\")\n counter = 1\n counter2 = 100\n counter3 = 100\n list = []\n\n for item_name in soup.findAll('span', {'class': 'team-name'}):\n team=item_name.text.split()[0]\n counter += 1\n list.append(team_convert(team))\n if counter == 3:\n break\n counter = 100\n for item_name in soup.findAll('td'):\n #先找客的全部:勝負/得分/背得/全壘打/ERA/IP/H/SV 主\n\n counter += 1\n counter2 += 1\n counter3 += 1\n if(counter == 2) or (counter == 5) or (counter == 10) :\n list.append(item_name.text)\n if(counter2 == 30):\n list.append(item_name.text)\n if (counter3 == 2) or (counter3 == 4) or (counter3 == 7) or (counter3 == 12):\n list.append(item_name.text)\n\n if item_name.text == \"OPS\":\n counter = -11\n elif (item_name.text == \"All Games\") and (counter == -10):\n counter = 1\n elif item_name.text == \"Team Fielding\":\n counter2 = 0\n elif item_name.text == \"Pct.\":\n counter3 = 0\n return list\n\n\n\n # [ 'CHW',\n# 'WSN','KCR','FLA', 'ANA','TBD','SDP', 這個是csv的 輸入是網頁的]\ndef team_convert(text):\n font = text\n if text == \"WSH\":\n font = \"WSN\"\n elif text == \"SF\":\n font = \"SFG\"\n elif text == \"SD\":\n font = \"SDP\"\n elif text == \"KC\":\n font = \"KCR\"\n elif text == \"TB\":\n font = \"TBD\"\n elif text == \"CWS\":\n font = \"CHW\"\n elif text == \"LAA\":\n font = \"ANA\"\n elif text == \"MIA\":\n font = \"FLA\"\n return font\n\ndef team_convert2(text):\n font = text\n if text == \"COL\":\n font = \"Colorado\"\n elif text == \"PIT\":\n font = \"Pittsburgh\"\n elif text == \"WSH\":\n font = \"Washington\"\n elif text == \"NYM\":\n font = \"NY Mets\"\n elif text == \"PHI\":\n font = \"Philadelphia\"\n elif text == \"ATL\":\n font = \"Atlanta\"\n elif text == \"CIN\":\n font = \"Cincinnati\"\n elif text == \"MIL\":\n font = \"Milwaukee\"\n elif text == \"STL\":\n font = \"St. 
Louis\"\n elif text == \"CHC\":\n font = \"Chicago Cubs\"\n elif text == \"SF\":\n font = \"San Francisco\"\n elif text == \"ARI\":\n font = \"Arizona\"\n elif text == \"LAD\":\n font = \"LA Dodgers\"\n elif text == \"SD\":\n font = \"San Diego\"\n elif text == \"BAL\":\n font = \"Baltimore\"\n elif text == \"DET\":\n font = \"Detroit\"\n elif text == \"KC\":\n font = \"Kansas City\"\n elif text == \"TOR\":\n font = \"Toronto\"\n elif text == \"CLE\":\n font = \"Cleveland\"\n elif text == \"MIN\":\n font = \"Minnesota\"\n elif text == \"TEX\":\n font = \"Texas\"\n elif text == \"TB\":\n font = \"Tampa Bay\"\n elif text == \"CWS\":\n font = \"Chicago Sox\"\n elif text == \"OAK\":\n font = \"Oakland\"\n elif text == \"BOS\":\n font = \"Boston\"\n elif text == \"LAA\":\n font = \"LA Angels\"\n elif text == \"HOU\":\n font = \"Houston\"\n elif text == \"SEA\":\n font = \"Seattle\"\n elif text == \"MIA\":\n font = \"Miami\"\n elif text == \"NYY\":\n font = \"NY Yankees\"\n return font\n\n\n\n\n\nbet = trade_spider()\n\n\n\n\n\n#輸入 OK'gap預測之值 不用輸入'\n# 0.'主隊得分平均減客隊得分平均' 第6點\n# 1'主隊被得平均減客隊被得平均' 第6點\n# 2.'主客勝率相減' OK\n#3.主客全壘打平均相減 找全壘打 OK\n# 6.主客累計得分與被得分相減-再主客相減(2.3個相減) 這個要除以場數OK 找得分OK與被得分OK累計 但其實這邊拿到的就是平均\n# 13現在投手ERA相減 找投手ERA\n# 14現在投手HA相減-網頁有IP 找投手HA 找IP\n# 17現在累計SV 找SV 但是..這邊只能找到投手的 但其他之前的資料 貌似是找隊伍的...\n#還有gamestarted可以抓0.0\n\n\n# 4.M主客守備率相減 這2個可以在MLB官網找\n# 5.M主客SHO平均相減\n\n#http://mlb.mlb.com/stats/sortable.jsp#st_pitching=&elem=%5Bobject+Object%5D&tab_level=child&click_text=Sortable+Team+pitching&game_type='R'&season=2018&season_type=ANY&league_code='MLB'§ionType=st&statType=pitching&page=1&ts=1526789633919&sortColumn=era&sortOrder='asc'&extended=0\n\n\n\nurl = \"http://mlb.mlb.com/stats/sortable.jsp#st_pitching=&elem=%5Bobject+Object%5D&tab_level=child&click_text=Sortable+Team+pitching&game_type='R'&season=2018&season_type=ANY&league_code='MLB'§ionType=st&statType=pitching&page=1&ts=1526789633919&sortColumn=era&sortOrder='asc'&extended=0\"\nsource_code = requests.get(url)\nplain_text = source_code.text\nsoup = BeautifulSoup(plain_text, \"html.parser\")\n\n\"\"\"\nimport dryscrape\nsession = dryscrape.Session()\nsession.visit(url)\nresponse = session.body()\nsoup = BeautifulSoup(response)\nsoup.find(id=\"td\")\n\"\"\"\n\nurl = 'http://www.espn.com/mlb/stats/team/_/stat/fielding/year/2018/seasontype/2'\nsource_code = requests.get(url)\nplain_text =source_code.text\nsoup = BeautifulSoup(plain_text, \"html.parser\")\nteam_list = []\nstat_list = []\ncount = 100\nfor link in soup.findAll('td'):\n count += 1\n if (count >=0) and (count<=6):\n stat_list.append(link.text)\n #print(link.text)\n elif count == 7:\n count = -1\n team_list.append(stat_list)\n stat_list = []\n if(link.text == \"TC\"):\n count = -2\n\nimport pandas as pd\nlast = pd.read_csv(\"Teams.csv\")\ndf = pd.DataFrame(last)\ndf = df[df['yearID'] == 2017]\n#print(df[df['franchID'] == \"WSN\"]['H'].values[0])\n\n\n#勝負/得分/背得/全壘打/ERA/IP/H/SV\nfor game in bet:\n if len(game) <= 17:\n continue\n vis = game[0]\n home = game[1]\n VFP = 0\n HFP = 0\n cvis = team_convert(vis)\n chome = team_convert(home)\n dfv = df[df['franchID'] == cvis]\n dfh = df[df['franchID'] == chome]\n TlR = dfh['R'].values[0] - dfv['R'].values[0]\n TlRA = dfh['RA'].values[0] - dfv['RA'].values[0]\n TlGAP = TlR - TlRA\n TlFP = dfh['FP'].values[0] - dfv['FP'].values[0]\n TlSV = dfh['SV'].values[0] - dfv['SV'].values[0]\n\n for team_stat in team_list:\n if team_convert2(vis) == team_stat[0]:\n VFP = (float(team_stat[4]) + float(team_stat[5])) / (float(team_stat[4]) + 
float(team_stat[5]) + float(team_stat[2]))\n\n if team_convert2(home) == team_stat[0]:\n HFP = (float(team_stat[4]) + float(team_stat[5])) / (\n float(team_stat[4]) + float(team_stat[5]) + float(team_stat[2]))\n TRnow = float(game[3]) - float(game[11])\n TRAnow = float(game[4]) - float(game[12])\n W = float(game[2].split(\"-\")[0])\n L = float(game[2].split(\"-\")[1])\n TWratenow = W / (W + L)\n W = float(game[10].split(\"-\")[0])\n L = float(game[10].split(\"-\")[1])\n TWratenow -= W / (W + L)\n THRnow = float(game[5]) - float(game[13])\n TFPnow = HFP - VFP\n TAgapnow = TRnow - TRAnow\n TpitcherERA = float(game[6]) -float(game[14])\n TpitcherHA = (float(game[8])/float(game[7]))-(float(game[16])/float(game[15]))\n TSVnow =float(game[9]) - float(game[17])\n\n\n\n\n df2 = data.copy()\n df2.drop(data.index, inplace=True)\n df2 = df2.drop(['totalscore'], axis=1)\n # df2.loc[0] = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n df2.loc[0] = [TRnow,TRAnow,TWratenow,THRnow,TFPnow,TAgapnow,TlR,TlRA,\n TlGAP,TlFP,TpitcherERA,TpitcherHA]\n test = df2[attributes]\n x_train = pd.DataFrame(x_train)\n x_train.append(test)\n x_train = preprocessing.scale(x_train)\n L = len(x_train)\n df2.loc[0] = x_train[L - 1]\n test = df2[attributes]\n x_train = pd.DataFrame(x_train)\n x_train.drop([L - 1])\n\n lr.fit(x_train, y_train)\n rrm.fit(x_train, y_train)\n\n print(team_convert2(vis) + \" VS \" + team_convert2(home))\n predictions = lr.predict(test)\n print(\"ML1\")\n print(predictions)\n predictions_rrm = rrm.predict(test)\n print(\"ML2\")\n print(predictions_rrm)\n print(\"------------------------\")\n\nED = time.time()\nprint(ED-ST)\n\n\n# SV目前是投手的 但其他資料是隊伍的\n# 沒有去年投手資料 -- 若真沒辦法 就用平均/自己輸入 但目前是打算用運彩官網的\n# 沒有目前SHO\n# 沒有防治投手局數過少\n# 考慮納入比賽近況 但對戰的隊伍 投手 決定因素很大 或許考慮打擊近況 但也還是關係到投手\n# 確定資料數值是否內容有問題 是否不恰當 是否不一致(該除 還是不該除) 例如SV\n# 設定其他的 例如 大小分 研究如何決定勝率問題\n# labels 屬性問題\n# 模型使用 別的模型 分群大小\n# 是否加入\"得知今年總結果\" 但在輸入就無法 是否有意義\n# 思考更多database 以及 odd bet 的資訊\n# 考慮是否把feature拆開來 不用相減的\n#目前主要問題 是在於 投手上\n\n\n\n#java的下注模擬器 done\n#再加 期望值計算機 計算每場勝率與賠率 例如3場排一起 就是 串3關 / 串2關 / 串2+3 排投資報酬率以及比例 done\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"untitled/bucky-total-nor.py","file_name":"bucky-total-nor.py","file_ext":"py","file_size_in_byte":11397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"96199916","text":"#!/usr/bin/env python\n\"\"\"\n@file runner.py\n@author Lena Kalleske\n@author Daniel Krajzewicz\n@author Michael Behrisch\n@author Jakob Erdmann\n@date 2009-03-26\n@version $Id: runner.py 20433 2016-04-13 08:00:14Z behrisch $\n\nTutorial for traffic light control via the TraCI interface.\n\nSUMO, Simulation of Urban MObility; see http://sumo.dlr.de/\nCopyright (C) 2009-2016 DLR/TS, Germany\n\nThis file is part of SUMO.\nSUMO is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation; either version 3 of the License, or\n(at your option) any later version.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport optparse\nimport subprocess\nimport random\nimport time\nimport numpy as np\nfrom qlearning import DeepQ\nfrom qlearning import ExperienceReplay\n\n# we need to import python modules from the $SUMO_HOME/tools directory\ntry:\n sys.path.append(os.path.join(os.path.dirname(\n __file__), '..', '..', '..', '..', \"tools\")) # tutorial in tests\n sys.path.append(os.path.join(os.environ.get(\"SUMO_HOME\", os.path.join(\n 
os.path.dirname(__file__), \"..\", \"..\", \"..\")), \"tools\")) # tutorial in docs\n from sumolib import checkBinary\nexcept ImportError:\n sys.exit(\n \"please declare environment variable 'SUMO_HOME' as the root directory of your sumo installation (it should contain folders 'bin', 'tools' and 'docs')\")\n\nimport traci\n# the port used for communicating with your sumo instance\nPORT = 8872\n\nstartTime = {}\n\ndef generate_routefile():\n random.seed(42) # make tests reproducible\n N = 360000 # number of time steps\n p = \"0.009\" #probability of generating car\n # demand per second from different directions\n pWE = 1. / 10\n pEW = 1. / 10\n pNS = 1. / 20\n pSN = 1. / 20\n with open(\"data/cross.rou.xml\", \"w\") as routes:\n print(\"\"\"\n \n \"\"\", file=routes)\n for i in range(8):\n for j in range(8):\n if i == j:\n continue\n print(' ' % (i * 8 + j, N, p, i, j), file=routes)\n print(\"\", file=routes)\n\n# The program looks like this\n# \n# the locations of the tls are NESW\n# \n# \n# \n# \n# \n\ndef actionState(l):\n actions = []\n if traci.trafficlights.getPhase(l) % 2 == 0:\n # we are not already switching\n actions.append(0)\n actions.append(1)\n return actions\n\ndef setState(light, action):\n #0 is default state, 1 is other state\n s = traci.trafficlights.getPhase(light)\n if action == 0:\n if s == 0:\n traci.trafficlights.setPhase(light, 0)\n else:\n traci.trafficlights.setPhase(light, 3)\n return 1\n if action == 1:\n if s == 2:\n traci.trafficlights.setPhase(light, 2)\n else:\n traci.trafficlights.setPhase(light, 1)\n return 1\n return 0\n\ne = 1\n\ndef getReward(changed):\n lanes = traci.lane.getIDList()\n s = 0.0\n for l in lanes:\n q, w, speed = 0, 0, 0\n for c in traci.lane.getLastStepVehicleIDs(l):\n if traci.vehicle.getWaitingTime(c) > 0:\n q += 1\n speed += traci.vehicle.getSpeed(c)\n speed /= max(1, len(traci.lane.getLastStepVehicleIDs(l))) * 10\n w = traci.lane.getWaitingTime(l)\n #s -= 1 * (q) ** 1.5 + 2 * (w) ** 1.5 - speed ** 1.5\n s += speed\n #s /= max(len(traci.vehicle.getIDList()), 1)\n #for l in lanes: \n #s -= traci.lane.getCO2Emission(l) / 10000.0\n #s -= changed\n return s\n\ndef getCO2():\n s = 0\n for l in traci.lane.getIDList():\n s += traci.lane.getCO2Emission(l)\n return s\n\ndef getWaitingTime():\n s = 0\n for l in traci.lane.getIDList():\n for c in traci.lane.getLastStepVehicleIDs(l):\n if traci.vehicle.getWaitingTime(c) > 0:\n s += 1\n return s\n\n\ndef step(lights, loops, history, cars_detected, actions):\n light_state = [0] * len(lights)\n cars_total = [0] * len(loops)\n changed = 0\n for l in lights:\n changed += setState(l, actions[int(l)])\n traci.simulationStep()\n for l in lights:\n light_state[int(l)] = traci.trafficlights.getPhase(l)\n history[int(l)].append(traci.trafficlights.getPhase(l))\n history[int(l)].pop(0)\n for i in loops:\n cars_detected[int(i)].append(traci.inductionloop.getLastStepVehicleNumber(i))\n cars_detected[int(i)].pop(0)\n cars_total[int(i)] = sum(cars_detected[int(i)])\n ss = np.append(cars_total, light_state)\n ss = np.append(ss, sum(history, []))\n cars = traci.vehicle.getIDList()\n r = getReward(changed)\n #for c in cars:\n #travel_time = traci.simulation.getCurrentTime() / 1000 - startTime[c]\n #r -= travel_time\n return ss, r, cars_detected, history\n\n\ndef run():\n \"\"\"execute the TraCI control loop\"\"\"\n # first, generate the route file for this simulation\n #generate_routefile()\n traci.init(PORT)\n loops = traci.inductionloop.getIDList()\n lights = traci.trafficlights.getIDList()\n history_len = 15\n 
input_size = len(loops) + len(lights) + history_len * len(lights)\n num_actions = 2\n maxSteps = 500\n total_steps = 0\n totalCO2, totalWaitingTime = [0] * 1000, [0] * 1000\n for iteration in xrange(0, 1000):\n R, t = 0, 0\n cars_detected = [[0 for i in range(20)] for j in range(len(loops))]\n history = [[0 for i in range(history_len)] for j in range(len(lights))]\n s = [0] * (input_size)\n actions = [0] * len(lights)\n for l in lights:\n traci.trafficlights.setPhase(l, 0)\n while t < maxSteps:\n for l in lights: \n a = traci.trafficlights.getPhase(l) / 2\n if t % 15 == 0:\n a = 1 - a\n if len(actionState(l)) == 0:\n a = -1\n actions[int(l)] = a\n ss, r, cars_detected, history = step(lights, loops, history, cars_detected, actions)\n R += r\n totalWaitingTime[iteration] += getWaitingTime()\n totalCO2[iteration] += getCO2()\n t += 1\n total_steps += 1\n s = ss\n print('Iteration %i completed with Average CO2: %d and Average waiting time %d reward %i' % (iteration, sum(totalCO2) / total_steps, sum(totalWaitingTime) / total_steps, R))\n for car in traci.vehicle.getIDList():\n traci.vehicle.remove(car)\n traci.close()\n sys.stdout.flush()\n\n\ndef get_options():\n optParser = optparse.OptionParser()\n optParser.add_option(\"--nogui\", action=\"store_true\",\n default=False, help=\"run the commandline version of sumo\")\n options, args = optParser.parse_args()\n return options\n\n\n# this is the main entry point of this script\nif __name__ == \"__main__\":\n options = get_options()\n\n # this script has been called from the command line. It will start sumo as a\n # server, then connect and run\n if options.nogui:\n sumoBinary = checkBinary('sumo')\n else:\n sumoBinary = checkBinary('sumo')\n\n # this is the normal way of using traci. sumo is started as a\n # subprocess and then the python script connects and runs\n sumoProcess = subprocess.Popen([sumoBinary, \"-c\", \"data/cross.sumocfg\", \"--tripinfo-output\",\n \"tripinfo.xml\", \"--remote-port\", str(PORT)], stdout=sys.stdout, stderr=sys.stderr)\n run()\n sumoProcess.wait()\n","sub_path":"shortcycle.py","file_name":"shortcycle.py","file_ext":"py","file_size_in_byte":8119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"547475744","text":"import math,os\r\nimport numpy as np\r\nimport muluzai as mu\r\n\r\ndef zhenggui(path,guanjianzi):#先正规化之后再打标签\r\n\r\n for wenjianming in os.listdir(path):\r\n\r\n path_1 = os.path.join(path, wenjianming, guanjianzi)\r\n\r\n path_new = os.path.join(path, wenjianming, guanjianzi + '_zhengguihua')\r\n\r\n mu.mkdir(path_new)\r\n\r\n for wenjian in os.listdir(path_1):\r\n\r\n path_2 = os.path.join(path_1,wenjian)\r\n\r\n tezheng_1 = np.loadtxt(path_2, delimiter=',')\r\n\r\n tezheng_1 = np.transpose(tezheng_1)#为了方便计算,先转置\r\n\r\n # print(tezheng_1)\r\n # print(tezheng_1.shape)\r\n #\r\n # print(tezheng_1[0])\r\n # print(tezheng_1[1])\r\n\r\n zhenggui_list_1 = []\r\n\r\n for d in tezheng_1:\r\n\r\n heji=0\r\n\r\n for u in d:\r\n\r\n heji += math.pow(u,2)\r\n\r\n scale = math.sqrt(heji)\r\n\r\n zhenggui_list = []\r\n\r\n for u in d:\r\n\r\n u_1 = u/scale\r\n zhenggui_list.append(u_1)\r\n\r\n zhenggui_list_1.append(zhenggui_list)\r\n\r\n zhenggui_list_2 = np.array(zhenggui_list_1)\r\n\r\n zhenggui_list_2 = np.transpose(zhenggui_list_2)\r\n\r\n # print(zhenggui_list_2)\r\n\r\n np.savetxt(path_new + \"/\" + wenjian, zhenggui_list_2,delimiter=',')\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n# zhenggui(path = 
r'C:\\Users\\a7825\\Desktop\\新建文件夹 (4)',guanjianzi ='log' )","sub_path":"dahebing/zhengguihua.py","file_name":"zhengguihua.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"165811572","text":"import graphbrain.constants as const\nfrom graphbrain.agents.agent import Agent\n\n\nclass Taxonomy(Agent):\n def __init__(self, hg):\n super().__init__(hg)\n\n def name(self):\n return 'taxonomy'\n\n def languages(self):\n return set()\n\n def input_edge(self, edge):\n if not edge.is_atom():\n et = edge.type()\n if et[0] == 'c':\n ct = edge[0].connector_type()\n parent = None\n if ct[0] == 'b':\n mcs = edge.main_concepts()\n if len(mcs) == 1:\n parent = mcs[0]\n elif ct[0] == 'm' and len(edge) == 2:\n parent = edge[1]\n if parent:\n ont_edge = (const.type_of_pred, edge, parent)\n self.add(ont_edge, primary=False)\n","sub_path":"graphbrain/agents/taxonomy.py","file_name":"taxonomy.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"623527564","text":"from project47.data import *\nfrom project47.routing import *\nfrom project47.simulation import *\nfrom project47.multiday_simulation import *\nfrom project47.customer import Customer\nfrom functools import reduce\n\nimport logging\n\nlogging.basicConfig(level=\"DEBUG\") # Set the global logging level\n\n\ndef test_sample_generator():\n cd = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"..\", \"data\")\n sample_data = os.path.join(cd, \"Toll_CHC_November_Sample_Data.csv\")\n CHC_data = os.path.join(cd, \"christchurch_street.csv\")\n sample_df, CHC_df, _, CHC_sub, CHC_sub_dict = read_data(\n sample_data,\n CHC_data,\n lat_min=-43.6147000,\n lat_max=-43.4375000,\n lon_min=172.4768000,\n lon_max=172.7816000,\n )\n\n def sample_generator(rg: np.random.Generator):\n lat, lon = get_sample(\n 10, rg, cd, sample_df, CHC_df, CHC_sub, CHC_sub_dict, save=False\n )\n time_windows = np.zeros((len(lat), 2))\n for i in range(len(lat)):\n if rg.random() > 0.5:\n time_windows[i, 0] = 0\n time_windows[i, 1] = 14400\n else:\n time_windows[i, 0] = 14400\n time_windows[i, 1] = 28800\n\n customers = [Customer(lat[i], lon[i], 0.8, 0.8, rg=rg) for i in range(len(lat))]\n\n return customers, time_windows\n\n return sample_generator\n\n\ndef dist_and_time(customers):\n return osrm_get_dist(\n \"\",\n \"\",\n [customer.lat for customer in customers],\n [customer.lon for customer in customers],\n host=\"localhost:5000\",\n save=False,\n )\n\n\ndef route_optimizer(\n depots,\n dm,\n tm,\n time_windows,\n day,\n arrival_days,\n futile_count,\n alternate_locations,\n fss=routing_enums_pb2.FirstSolutionStrategy.AUTOMATIC,\n lsm=routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH,\n tlim=1,\n):\n locs = dm.shape[0]\n r = ORToolsRouting(locs, 5)\n dim, ind = r.add_dimension(dm, 0, 50000, True, \"distance\")\n r.routing.SetArcCostEvaluatorOfAllVehicles(ind)\n dim, ind = r.add_time_windows(tm, time_windows, 28800, 28800, False, \"time\")\n for alternates in alternate_locations:\n r.add_option(alternates, 50000)\n\n r.search_parameters.first_solution_strategy = fss\n r.search_parameters.local_search_metaheuristic = lsm\n r.search_parameters.use_cp_sat = False\n\n s = r.solve(tlim=tlim, log=True)\n\n unscheduled = []\n scheduled = reduce(lambda x, y: x + y, s.routes)\n for i in range(locs):\n if i not in scheduled:\n unscheduled.append(i)\n return s, unscheduled\n\n\ndef 
simulator(\n routes, dm, tm, delivery_time_windows, customers, rg: np.random.Generator\n):\n return sim(routes, wait_policy(dm, tm, delivery_time_windows, customers, rg))\n\n\ndef test_multiday():\n \"\"\"This is the main example of all the functionality.\n\n The idea is that when we create a new experiment to run, we'd copy the structure of this function and replace\n parts so that it implements the new policies\n \"\"\"\n sample_generator = test_sample_generator()\n\n data = multiday(\n np.array([[-43.5111688], [172.7319266]]),\n sample_generator,\n dist_and_time,\n route_optimizer,\n simulator,\n 1,\n 0,\n 28800,\n seed=123456789,\n replications=2,\n plot=False,\n collection_points=True,\n )\n\n assert len(data) == 2\n\n\ndef test_reproducible():\n \"\"\"Run two identical simulations with same random seed. Check they return the same results.\"\"\"\n sample_generator = test_sample_generator()\n\n data1 = multiday(\n np.array([[-43.5111688], [172.7319266]]),\n sample_generator,\n dist_and_time,\n route_optimizer,\n simulator,\n 2,\n 0,\n 28800,\n seed=123456789,\n )\n\n data2 = multiday(\n np.array([[-43.5111688], [172.7319266]]),\n sample_generator,\n dist_and_time,\n route_optimizer,\n simulator,\n 2,\n 0,\n 28800,\n seed=123456789,\n )\n\n assert data1 == data2\n\n\ndef test_alternate_locations():\n\n cd = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"..\", \"data\")\n sample_data = os.path.join(cd, \"Toll_CHC_November_Sample_Data.csv\")\n CHC_data = os.path.join(cd, \"christchurch_street.csv\")\n sample_df, CHC_df, _, CHC_sub, CHC_sub_dict = read_data(sample_data, CHC_data)\n\n def sample_generator(rg: np.random.Generator):\n lat, lon = get_sample(\n 20, rg, cd, sample_df, CHC_df, CHC_sub, CHC_sub_dict, save=False\n )\n time_windows = np.zeros((len(lat), 2))\n for i in range(len(lat)):\n time_windows[i, 0] = 0\n time_windows[i, 1] = 28800\n customers = [Customer(lat[i], lon[i], 0.9, 0.9, rg=rg) for i in range(len(lat))]\n for i in range(10):\n customers[i].add_alternate(customers[i + 10])\n\n return customers, time_windows\n\n data = multiday(\n np.array([[-43.5111688], [172.7319266]]),\n sample_generator,\n dist_and_time,\n route_optimizer,\n simulator,\n 1,\n 0,\n 28800,\n replications=5,\n )\n\n\n\"\"\"\ndef test_sim_performance():\n sample_generator = test_sample_generator()\n\n data = multiday(\n np.array([[-43.5111688], [172.7319266]]),\n sample_generator,\n dist_and_time,\n route_optimizer,\n simulator,\n 2,\n 0,\n 28800,\n replications=1000,\n )\n\n assert len(data) == 2000\"\"\"\n\n\ndef test_plot():\n sample_generator = test_sample_generator()\n\n multiday(\n np.array([[-43.5111688], [172.7319266]]),\n sample_generator,\n dist_and_time,\n route_optimizer,\n simulator,\n 1,\n 0,\n 28800,\n seed=293462,\n replications=1,\n plot=True,\n )\n plt.ion()\n plt.show()\n plt.draw()\n plt.pause(1)\n plt.close()\n\n\nif __name__ == \"__main__\":\n test_multiday()\n # test_alternate_locations()\n","sub_path":"tests/test_multiday.py","file_name":"test_multiday.py","file_ext":"py","file_size_in_byte":6051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"293796442","text":"import datetime\nimport random\n\nfrom django.db import models\n\n# Create your models here.\nfrom faker import Faker\n\nfrom group.models import Group\n\n\nclass Student(models.Model):\n first_name = models.CharField(max_length=40, null=False)\n last_name = models.CharField(max_length=20, null=False)\n email = models.EmailField(max_length=50, 
null=True, db_index=True)\n birthday = models.DateField(default=datetime.date.today)\n phone_number = models.CharField(max_length=25, default=380000000000, unique=True)\n group = models.ForeignKey(to=Group, null=True, on_delete=models.SET_NULL, related_name='students')\n\n def __str__(self):\n return f' {self.first_name},' \\\n f' {self.last_name},' \\\n f' {self.birthday},' \\\n f' {self.email},' \\\n f' {self.phone_number}'\n\n @classmethod\n def generate_student(cls, groups=None):\n faker = Faker()\n if groups is None:\n groups = list(Group.objects.all())\n\n student = cls(\n first_name=faker.first_name(),\n last_name=faker.last_name(),\n email=faker.email(),\n phone_number=faker.phone_number(),\n group=random.choice(groups)\n )\n\n student.save()\n","sub_path":"src/student/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"139090424","text":"from unittest import TestCase\nimport testUtility\nimport sys\nfrom io import StringIO\nimport Dominion\n\n\nclass TestActionCard(TestCase):\n def dataSetUp(self):\n # Data setup\n self.player_names = [\"*Annie\", \"Ben\", \"*Carla\"]\n self.nV = testUtility.GetNumVictory(self.player_names)\n self.nC = testUtility.GetNumCurses(self.player_names)\n self.box = testUtility.GetBoxes(self.nV)\n self.supply_order = testUtility.GetSupplyOrder()\n #Setup supply with 5 cards\n self.supply = testUtility.GetSupply(self.box, self.nV, self.nC, len(self.player_names), 5)\n self.trash = []\n #set player\n self.player = Dominion.Player(self.player_names[1]) #Ben\n\n def test_init(self):\n #init data\n self.dataSetUp()\n cardName = \"Smithy\"\n cost = 4\n actions = 0\n cards = 3\n buys = 0\n coins = 0\n\n #initiate a Smithy action card object\n card = Dominion.Action_card(cardName,cost,actions,cards,buys,coins,)\n\n #verify that card class variables have the expected values\n self.assertEqual(cardName, card.name)\n self.assertEqual(cost, card.cost)\n self.assertEqual(actions, card.actions)\n self.assertEqual(cards, card.cards)\n self.assertEqual(buys, card.buys)\n self.assertEqual(coins, card.coins)\n self.assertEqual(\"action\", card.category)\n\n def test_use(self):\n #init data\n self.dataSetUp()\n cardName = \"Smithy\"\n cost = 4\n actions = 0\n cards = 3\n buys = 0\n coins = 0\n\n #initiate a Smithy action card object\n card = Dominion.Action_card(cardName,cost,actions,cards,buys,coins,)\n\n #add card to hand\n self.player.hand.append(card)\n\n #Assert player has 6 cards in hand and has played no cards\n self.assertEqual(0, len(self.player.played))\n self.assertEqual(6, len(self.player.hand))\n\n #use the card\n card.use(self.player, self.trash)\n\n #Assert changes made\n self.assertEqual(1, len(self.player.played))\n self.assertEqual(5, len(self.player.hand))\n self.assertEqual(\"Smithy\", self.player.played[0].name)\n\n\n def test_augment(self):\n # init data\n self.dataSetUp()\n cardName = \"Smithy\"\n cost = 4\n actions = 0\n cards = 3\n buys = 0\n coins = 0\n\n #do a trun to init some vars\n sys.stdin = StringIO('\\n') #force hitting enter\n self.player.turn([], self.supply, self.trash)\n\n # initiate a Smithy action card object\n card = Dominion.Action_card(cardName, cost, actions, cards, buys, coins, )\n\n # add card to hand\n self.player.hand.append(card)\n\n # Assert player has 6 cards in hand and has played no cards\n self.assertEqual(0, len(self.player.played))\n self.assertEqual(6, len(self.player.hand))\n\n # use the 
card\n card.use(self.player, self.trash)\n card.augment(self.player)\n\n # Assert changes made\n self.assertEqual(1, len(self.player.played)) # played 1 card\n self.assertEqual(5 + cards, len(self.player.hand)) # drew 3 cards\n self.assertEqual(1 + actions, self.player.actions) # added 0 actions\n self.assertEqual(1 + buys, self.player.buys) # added 0 buys\n\nclass TestPlayer(TestCase):\n def dataSetUp(self):\n # Data setup\n self.player_names = [\"*Annie\", \"Ben\", \"*Carla\"]\n self.nV = testUtility.GetNumVictory(self.player_names)\n self.nC = testUtility.GetNumCurses(self.player_names)\n self.box = testUtility.GetBoxes(self.nV)\n self.supply_order = testUtility.GetSupplyOrder()\n #Setup supply with 5 cards\n self.supply = testUtility.GetSupply(self.box, self.nV, self.nC, len(self.player_names), 5)\n self.trash = []\n # set player\n self.player = Dominion.Player(self.player_names[1]) # Ben\n\n def test_actionBalance(self):\n # init data\n self.dataSetUp()\n\n # test before adding action cards\n self.assertEqual(0, self.player.action_balance()) # should be zero action cards\n\n # add 4 action cards\n self.player.deck += [Dominion.Smithy()] * 2 + [Dominion.Militia()] * 2\n\n # test new balance\n self.assertEqual(-20.0, self.player.action_balance()) # should now be -20.0\n\n # add 6 more action cards\n self.player.deck += [Dominion.Witch()] * 3 + [Dominion.Mine()] * 3\n\n # test new balance\n self.assertEqual(-35.0, self.player.action_balance()) # should now be -35.0\n\n def test_calcPoints(self):\n # init data\n self.dataSetUp()\n\n # test before adding action cards\n self.assertEqual(3, self.player.calcpoints()) # should be 3 from 3 estates\n\n # add 2 provinces (6 points each)\n self.player.deck += [Dominion.Province()] * 2\n\n # test calcPoints\n self.assertEqual(15, self.player.calcpoints())\n\n # add 2 Duchies (3 points each)\n self.player.deck += [Dominion.Duchy()] * 2\n\n # test calcPoints\n self.assertEqual(21, self.player.calcpoints())\n\n # add 6 gardens (2 points each)\n self.player.deck += [Dominion.Gardens()] * 6\n\n # test calcPoints\n self.assertEqual(33, self.player.calcpoints())\n\n def test_draw(self):\n # init data\n self.dataSetUp()\n\n # test before drawing cards\n self.assertEqual(5, len(self.player.hand)) # should be 5 original cards in hand\n self.assertEqual(5, len(self.player.deck)) # should be 5 original cards in deck\n self.assertEqual(0, len(self.player.discard)) # should be 0 original cards in discard pile\n\n # draw a card\n self.player.draw()\n\n # test\n self.assertEqual(6, len(self.player.hand)) # should be 6 cards in hand\n self.assertEqual(4, len(self.player.deck))\n self.assertEqual(0, len(self.player.discard)) # should be 0 original cards in discard pile\n\n # draw a card into the discard pile\n self.player.draw(self.player.discard)\n\n # test\n self.assertEqual(6, len(self.player.hand)) # should be 6 cards in hand\n self.assertEqual(3, len(self.player.deck))\n self.assertEqual(1, len(self.player.discard)) # should be 1 card in discard pile\n\n def test_cardSummary(self):\n # init data\n self.dataSetUp()\n\n # get summary after setup\n summary = self.player.cardsummary()\n\n # test\n self.assertEqual(3, summary['VICTORY POINTS']) # Test for 3 victory points\n self.assertEqual(7, summary['Copper']) # Test for 7 copper cards\n self.assertEqual(3, summary['Estate']) # Test for 3 Estate cards\n\n # add some cards to the deck\n self.player.deck += [Dominion.Smithy()] * 2\n self.player.deck += [Dominion.Duchy()] * 5\n self.player.deck += 
[Dominion.Bureaucrat()] * 9\n\n        # get new summary\n        summary = self.player.cardsummary()\n\n        # test for the added card plus the old ones\n        self.assertEqual(7, summary['Copper'])  # Test for 7 copper cards\n        self.assertEqual(3, summary['Estate'])  # Test for 3 Estate cards\n        self.assertEqual(2, summary['Smithy'])\n        self.assertEqual(5, summary['Duchy'])\n        self.assertEqual(9, summary['Bureaucrat'])\n\nclass GameOver(TestCase):\n    def dataSetUp(self):\n        # Data setup\n        self.player_names = [\"*Annie\", \"Ben\", \"*Carla\"]\n        self.nV = testUtility.GetNumVictory(self.player_names)\n        self.nC = testUtility.GetNumCurses(self.player_names)\n        self.box = testUtility.GetBoxes(self.nV)\n        self.supply_order = testUtility.GetSupplyOrder()\n\n        # Setup supply with 5 cards\n        self.supply = testUtility.GetSupply(self.box, self.nV, self.nC, len(self.player_names), 5)\n        self.trash = []\n        # set player\n        self.player = Dominion.Player(self.player_names[1])  # Ben\n\n    def test_GameOver(self):\n        # init data\n        self.dataSetUp()\n\n        # test to make sure game is not over\n        self.assertEqual(False, Dominion.gameover(self.supply))\n\n        # remove all the provinces from the supply\n        del self.supply[\"Province\"]\n\n        # test to make sure game is over\n        self.assertEqual(True, Dominion.gameover(self.supply))\n\n        # make a new supply\n        self.supply = testUtility.GetSupply(self.box, self.nV, self.nC, len(self.player_names), 5)\n\n        # test to make sure game is not over\n        self.assertEqual(False, Dominion.gameover(self.supply))\n\n        # remove 3 supply cards\n        removed = 0\n        for stack in self.supply:\n            if removed == 3:\n                break\n            if stack != 'Province':\n                self.supply[stack] = []\n                removed += 1\n\n        # test to make sure game is over\n        self.assertEqual(True, Dominion.gameover(self.supply))\n","sub_path":"projects/yakimova/dominion/unittest_Dominion.py","file_name":"unittest_Dominion.py","file_ext":"py","file_size_in_byte":8939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"556411463","text":"from api import server_api\nfrom api import client_api\n\nresponse = False\n\nwhile response == False:\n    response = input(\"Join or Host Room J/H: \")\nif response == 'h':\n    print(\"Hosting Server at 127.0.0.1 port 42069\")\n    server, addr = server_api.server('0.0.0.0', 42069)\n    print(\"Client Found at: \", addr)\n    while True:\n        msg = input(\">> \")\n        server_api.send(server, msg)\n        print(\"Waiting For Client Response\")\n        print(\"Client[{}] << {}\".format(addr, server_api.recieve(server).decode()))\nelse:\n    ip = False\n    port = False\n    ip=input(\"IP: \")\n    port = int(input(\"Port: \"))\n    host = client_api.connect(ip, port)\n    while True:\n        print (\"Waiting for Server\")\n        print(\"Server << \" + client_api.recieve(host).decode())\n        msg = input(\">> \")\n        client_api.send(msg, host)","sub_path":"Examples/Chatroom/chatroom_main.py","file_name":"chatroom_main.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"578456841","text":"\"\"\"\nTwo numbers in an array form an inversion pair if the number appearing earlier is greater than the one appearing later. Given an input array, count the total number of inversion pairs in it.\nExample 1:\n    Input: [7,5,6,4]\n    Output: 5\n\"\"\"\n\nfrom typing import List\n\n\nclass Solution:\n    def reversePairs(self, nums: List[int]) -> int:\n        pass\n\n    @classmethod\n    def solve_1(cls, nums: List[int]) -> int:\n        \"\"\"\n        Brute force\n        Time complexity: O(n^2)\n        Space complexity: O(1)\n        \"\"\"\n\n        size = len(nums)\n        res = 0\n\n        for i in range(size - 1):\n            for j in range(i + 1, size):\n                if nums[i] > nums[j]:\n                    res += 1\n        return res\n\n    @classmethod\n    def solve_2(cls, 
nums: List[int]) -> int:\n res = cls.merge_sort(nums, [0] * len(nums), 0, len(nums) - 1)\n print(nums)\n return res\n\n @classmethod\n def merge_sort(cls, nums: List[int], tmp_nums: List[int], left: int, right: int) -> int:\n if left >= right:\n return 0\n\n mid = (left + right) // 2\n cnt = cls.merge_sort(nums, tmp_nums, left, mid) + cls.merge_sort(nums, tmp_nums, mid + 1, right)\n\n i = left\n j = mid + 1\n pos = left\n\n while i <= mid and j <= right:\n if nums[i] <= nums[j]:\n print(nums[i], nums[j], mid)\n tmp_nums[pos] = nums[i]\n i += 1\n cnt += (j - mid - 1)\n else:\n tmp_nums[pos] = nums[j]\n j += 1\n pos += 1\n\n for k in range(i, mid + 1):\n tmp_nums[pos] = nums[k]\n cnt += (j - mid - 1)\n pos += 1\n\n for k in range(j, right + 1):\n tmp_nums[pos] = nums[k]\n pos += 1\n\n nums[left:right + 1] = tmp_nums[left:right + 1]\n return cnt\n\n\nif __name__ == '__main__':\n print(Solution().solve_1([7, 5, 1, 6, 3, 4]))\n print(Solution().solve_2([7, 5, 1, 6, 3, 4]))\n","sub_path":"剑指offer/面试题51.数组中的逆序对.py","file_name":"面试题51.数组中的逆序对.py","file_ext":"py","file_size_in_byte":2016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"387627727","text":"from app import app, create_connection\r\nfrom flask import request, jsonify\r\nfrom flask_mail import Message\r\nimport os\r\n@app.route(\"/upload_machine\", methods=[\"POST\"])\r\ndef upload_machine():\r\n machine_name = request.form[\"machine_name\"]\r\n description = request.form[\"description\"]\r\n\r\n photo = request.files[\"photo\"]\r\n if photo and photo.filename != \"\":\r\n photo_path = os.path.join(app.config[\"UPLOAD_FOLDER\"], photo.filename)\r\n photo.save(photo_path)\r\n else:\r\n photo_path = None\r\n\r\n conn = create_connection()\r\n cursor = conn.cursor()\r\n query = (\r\n \"INSERT INTO Machines (machine_name, description, photo_url)\"\r\n \"VALUES (%s, %s, %s)\"\r\n )\r\n cursor.execute(query, (machine_name, description, photo_path))\r\n\r\n conn.commit()\r\n cursor.close()\r\n conn.close()\r\n\r\n return \"Machine uploaded successfully!\"\r\n\r\n\r\n@app.route(\"/view_machines\", methods=[\"GET\"])\r\ndef view_machines():\r\n conn = create_connection()\r\n cursor = conn.cursor()\r\n\r\n query = \"\"\"\r\n SELECT m.machine_id, m.machine_name, m.description, m.photo_url, \r\n ms.status, \r\n sl.service_date AS last_service_date, sl.next_service_date\r\n FROM Machines AS m\r\n LEFT JOIN (\r\n SELECT machine_id, status\r\n FROM machine_status\r\n WHERE (machine_id, updated_at) IN (\r\n SELECT machine_id, MAX(updated_at)\r\n FROM machine_status\r\n GROUP BY machine_id\r\n )\r\n ) AS ms ON m.machine_id = ms.machine_id\r\n LEFT JOIN (\r\n SELECT machine_id, MAX(service_date) AS service_date, next_service_date\r\n FROM service_logs\r\n GROUP BY machine_id\r\n ) AS sl ON m.machine_id = sl.machine_id\r\n \"\"\"\r\n cursor.execute(query)\r\n machines = cursor.fetchall()\r\n\r\n result = []\r\n for machine in machines:\r\n result.append(\r\n {\r\n \"machine_id\": machine[0],\r\n \"machine_name\": machine[1],\r\n \"description\": machine[2],\r\n \"photo_url\": machine[3],\r\n \"status\": machine[4],\r\n \"last_service_date\": machine[5],\r\n \"next_service_date\": machine[6]\r\n }\r\n )\r\n\r\n cursor.close()\r\n conn.close()\r\n\r\n return jsonify(result)\r\n\r\n\r\n@app.route(\"/get_machine_list\", methods=[\"GET\"])\r\ndef get_machine_list():\r\n conn = create_connection()\r\n cursor = conn.cursor()\r\n\r\n query = \"SELECT machine_id, machine_name FROM Machines\"\r\n 
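# Lightweight listing endpoint: returns only (machine_id, machine_name) pairs, e.g. for populating a client-side dropdown; view_machines above returns the full per-machine detail.\r\n    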
cursor.execute(query)\r\n machines = cursor.fetchall()\r\n\r\n result = [{\"machine_id\": machine[0], \"machine_name\": machine[1]} for machine in machines]\r\n\r\n cursor.close()\r\n conn.close()\r\n\r\n return jsonify(result)\r\n\r\n@app.route(\"/service_machine\", methods=[\"POST\"])\r\ndef service_machine():\r\n machine_id = request.form.get(\"machine_id\")\r\n service_date = request.form.get(\"service_date\")\r\n next_service_date = request.form.get(\"next_service_date\")\r\n notes = request.form.get(\"notes\")\r\n\r\n conn = create_connection()\r\n cursor = conn.cursor()\r\n\r\n query = (\r\n \"INSERT INTO ServiceLogs (machine_id, service_date, service_notes) \"\r\n \"VALUES (%s, %s, %s)\"\r\n )\r\n cursor.execute(query, (machine_id, service_date, notes))\r\n conn.commit()\r\n\r\n query = \"UPDATE Machines SET service_by_date = %s WHERE machine_id = %s\"\r\n cursor.execute(query, (next_service_date, machine_id))\r\n conn.commit()\r\n\r\n cursor.close()\r\n conn.close()\r\n\r\n return \"Service details logged successfully!\"\r\n","sub_path":"server_functions/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":3588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"558313481","text":"\"\"\"\nModule for defining Transaction related models\n\"\"\"\n\nfrom datetime import date\n\nfrom app.main import db, bcrypt\nfrom sqlalchemy import UniqueConstraint\nimport marshmallow as ma\n\n\n####################################### TRANSACTION ENTITY #######################################\n\nclass Transaction(db.Model):\n \"\"\"Transaction Entity Model, defining transaction database table\"\"\"\n\n __tablename__ = 'transaction'\n\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n name = db.Column(db.String(250), nullable=False)\n\n date = db.Column(db.Date, nullable=False)\n tx_type = db.Column(db.String(10), nullable=False)\n\n currency = db.Column(db.String(10), nullable=False)\n amount = db.Column(db.Float(precision=2), nullable=False)\n\n user_id = db.Column(db.Integer, db.ForeignKey('user.id', onupdate='CASCADE', ondelete='CASCADE'), nullable=False)\n user = db.relationship(\n 'User',\n backref=db.backref('transactions', lazy=True)\n )\n\n account_id = db.Column(db.Integer, db.ForeignKey('account.id', onupdate='CASCADE', ondelete='RESTRICT'),\n nullable=False)\n account = db.relationship(\n 'Account',\n backref=db.backref('transactions', lazy=True)\n )\n\n category_id = db.Column(db.Integer, db.ForeignKey('category.id', onupdate='CASCADE', ondelete='RESTRICT'),\n nullable=False)\n category = db.relationship(\n 'Category',\n backref=db.backref('transactions', lazy=True)\n )\n\n group_id = db.Column(db.Integer, db.ForeignKey('group.id', onupdate='CASCADE', ondelete='RESTRICT'), nullable=False)\n group = db.relationship(\n 'Group',\n backref=db.backref('transactions', lazy=True)\n )\n\n contract_id = db.Column(db.Integer, db.ForeignKey('contract.id', onupdate='CASCADE', ondelete='SET NULL'),\n nullable=True)\n contract = db.relationship(\n 'Contract',\n backref=db.backref('transactions', lazy=True)\n )\n\n note = db.Column(db.Text, nullable=False)\n\n def __repr__(self):\n return \"\".format(\n self.id, self.date, self.tx_type, self.amount, self.account.name,\n self.category.name, self.group.name, self.user.username)\n\n\n######################################### TRANSACTION SCHEMA #########################################\n\nclass TransactionSchema(ma.Schema):\n \"\"\"Transaction schema for serialization 
and deserialization\"\"\"\n\n class Meta:\n ordered = True\n\n id = ma.fields.Integer(dump_only=True)\n name = ma.fields.String(\n required=True,\n validate=ma.validate.Regexp(\n '[a-zA-Z0-9_.&%$-]{3,}',\n error='invalid contract name (only letters, 0-9 and _ . & % $ - are allowed, min 3 symbols)'\n )\n )\n\n date = ma.fields.Date(required=True)\n tx_type = ma.fields.String(\n required=True,\n validate=ma.validate.OneOf(\n choices=['Expense', 'Income'],\n error='invalid transaction type (only \"Income\" or \"Expense\")'\n ),\n )\n\n currency = ma.fields.String(\n required=True,\n validate=ma.validate.OneOf(\n choices=['EUR', 'USD'],\n error='invalid currency'\n ),\n )\n\n amount = ma.fields.Float(required=True)\n\n user_id = ma.fields.Integer(dump_only=True)\n account_id = ma.fields.Integer(required=True)\n category_id = ma.fields.Integer(required=True)\n group_id = ma.fields.Integer(required=True)\n contract_id = ma.fields.Integer(dump_only=True)\n\n note = ma.fields.String(missing='')\n","sub_path":"app/main/model/transaction_model.py","file_name":"transaction_model.py","file_ext":"py","file_size_in_byte":3730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"626897273","text":"import pygame\nfrom .TextDrawer import TextDrawer\nfrom .Button import Button\nfrom .Sound import Sound\n\nclass GameMenu:\n\n def __init__(self, screen, textdrawer, players):\n self.screen = screen\n self.textDrawer = textdrawer\n self.item_count = 0\n self.players = players\n self.sound = Sound('assets/menu/menu.ogg')\n self.menutext_pos = []\n\n menu_items = [\n 'Start',\n 'Instellingen',\n 'Spelregels',\n 'Highscores',\n 'Exit',\n ]\n\n def get_current_menu_item(self):\n return self.menu_items[self.item_count]\n\n def next_item(self):\n if self.item_count < len(self.menu_items) - 1:\n self.item_count += 1\n\n def previous_item(self):\n if 0 < self.item_count <= len(self.menu_items):\n self.item_count -= 1\n\n def draw(self):\n if self.players.allPlayersAreChosen:\n self.menu_items[0] = 'Verdergaan'\n # if not 'Huidig spel stoppen' in self.menu_items:\n # self.menu_items.insert(3, 'Huidig spel stoppen')\n else:\n self.menu_items[0] = 'Start'\n # if 'Huidig spel stoppen' in self.menu_items:\n # self.menu_items.remove('Huidig spel stoppen')\n\n main_menu_offset_x = self.textDrawer.base_offset_x - 200\n main_menu_offset_y = 250\n\n cursor_x_offset = main_menu_offset_x - 30\n base_cursor_y = main_menu_offset_y - 4\n\n self.textDrawer.draw_header('Hoofdmenu', 'header', 'white')\n\n for item in self.menu_items:\n self.textDrawer.draw_line(\n item, 'default', 'white', main_menu_offset_x, main_menu_offset_y)\n main_menu_offset_y += 60\n\n self.textDrawer.draw_line(\n '>', 'default', 'white', cursor_x_offset, base_cursor_y + self.item_count * 60)\n\n def reset(self):\n self.item_count = 0\n\n def drawMenuText(self):\n s = pygame.Surface((90, 40))\n s.set_alpha(100) # alpha level\n s.fill((0, 0, 0)) # this fills the entire surface\n self.screen.blit(s, (10, 10)) # (0,0) are the top-left coordinates\n self.menutext_pos = self.textDrawer.draw_menu('Menu (Esc)', 'small', 'white', 20, 24)\n\n\nclass EndOfTurn:\n\n def __init__(self, screen, todraw, drawer):\n self.screen=screen\n self.drawer = drawer\n self.todraw=todraw\n self.border = pygame.Rect(self.screen.get_rect().size[0] // 4, self.screen.get_rect().size[1] // 4,\n self.screen.get_rect().size[0] // 2, self.screen.get_rect().size[1] // 2)\n self.filling = pygame.Rect(self.border.x + 4, self.border.y + 4, 
self.border.width - 8, self.border.height - 8)\n\n self.button = Button(self.screen, (self.filling.width // 5, self.filling.height // 10), (\n self.filling.x + self.filling.width - (int(self.filling.width // 5 * 1.25)),\n self.filling.y + self.filling.height - (int(self.filling.height // 10 * 1.25))), \"Ga door\", (100, 100, 100),\n self.filling.height // 20)\n\n def draw(self):\n pygame.draw.rect(self.screen, (255, 0, 0), self.border, 4)\n pygame.draw.rect(self.screen, (0, 255, 0), self.filling)\n it = 1\n for line in self.todraw:\n self.drawer.display_text(line, \"bubblegum\", self.filling.x + self.border.width // 30,\n self.filling.y + self.border.height // 10 * it, self.border.height // 20, (20, 20, 20))\n it += 1\n self.button.repaint()\n\n def buttonOn(self, mouse):\n return self.button.collision.collidepoint(pygame.mouse.get_pos())\n\n\nclass EndScreen:\n\n def __init__(self, screen, drawer, playername=None, playerpoints=None, winneravatar=None):\n self.text=[\n \"Gefeliciteerd!\",\n \"Speler {} heeft het spel gewonnen.\\nHij/zij had {} punten.\".format(playername, playerpoints),\n \"Nog een keer spelen?\"\n ]\n self.winneravatar=winneravatar\n self.screen=screen\n self.drawer = drawer\n width = screen.get_rect().size[0]\n height = screen.get_rect().size[1]\n self.border = pygame.Rect(width//10,height//10, width-(width//10)*2, height-(height//10)*2)\n self.filling = pygame.Rect(self.border.x + 4, self.border.y + 4, self.border.width-8, self.border.height-8)\n\n self.yes = Button(screen, (self.filling.height//8*3, self.filling.height//8), (self.filling.x + self.filling.height // 10*0.25,self.filling.y + self.filling.height // 10*8.50), \"Ja, nog een potje\", (255,255,0), self.filling.height // 30)\n\n self.no = Button(screen, (self.filling.height // 8 * 3, self.filling.height // 8), (\n self.filling.x +self.filling.width - self.filling.height // 8 * 3 - self.filling.height // 10 * 0.25, self.filling.y + self.filling.height // 10 * 8.50), \"Nee, sluit af\",\n (255, 255, 0), self.filling.height // 30)\n\n def draw(self):\n pygame.draw.rect(self.screen, (255,0,0), self.border, 4)\n pygame.draw.rect(self.screen, (0, 255, 0), self.filling)\n\n self.drawer.display_text(self.text[0], 'bubblegum', self.filling.x + self.filling.height // 10*0.25, self.filling.y + self.filling.height // 10*0.25,\n self.filling.height // 10, (255, 255, 255))\n\n self.drawer.display_text(self.text[1], 'bubblegum', self.filling.x + self.filling.height // 10*0.25, self.filling.y + self.filling.height // 10*1.50,\n self.filling.height // 20, (255, 255, 255))\n\n self.screen.blit(pygame.transform.scale(self.winneravatar, (self.filling.width//3, self.filling.height//3)), (self.filling.x + self.filling.height // 10*0.25,self.filling.y + self.filling.height // 10*3.50))\n\n self.drawer.display_text(self.text[2], 'bubblegum', self.filling.x + self.filling.height // 10*0.25, self.filling.y + self.filling.height // 10*7.50,\n self.filling.height // 20, (255, 255, 255))\n\n self.yes.repaint()\n self.no.repaint()\n","sub_path":"base/Classes/GameMenu.py","file_name":"GameMenu.py","file_ext":"py","file_size_in_byte":6052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"283431815","text":"class Account:\n # Create a constructor\n # below we initialize the object\n def __init__(self, accno,amount,name,type):\n self.ac = accno\n self.am = amount\n self.nam = name\n self.typ = type\n\n # deposit should parse a parameter to allow hold deposited cash\n def deposit(self, 
depamount):\n        if depamount < 50:\n            print(\"Below Deposit Threshold!\")\n        else:\n            self.am = self.am + depamount\n            print(\"Deposit of\", depamount, \"Successful\")\n\n\n    def withdraw(self, withdrawn):\n        if withdrawn > self.am:\n            print(\"Insufficient funds!!!\")\n        else:\n            self.am = self.am - withdrawn\n            print(\"Withdrawal of\", withdrawn, \"Successful\")\n\n\n    def checkdetails(self):\n        answer = input(\"Would you like to check your account details? Y or N\")\n        if answer == 'Y':\n            print(\"-----Account Details.-----\")\n            print(self.ac)\n            print(self.typ)\n            print(self.nam)\n            print(self.am)\n        else:\n            print(\"Goodbye!\")\n\n\n    def changetype(self):\n        answer = input(\"Would you like to change your account type? Y or N\")\n        answer2 = input(\"Which letter would you like to choose? P, B or S\")\n        account_types = (\"Personal, Business, Savings\")\n        if answer == 'Y':\n            print(\"Change your account to\", account_types)\n\n        else:\n            print(\"Goodbye!\")\n\n\n    def checkbalance(self):\n        print(\"Current Balance\", self.am)\n\n\n# create the account object\nob = Account(10003, 50000, \"Joe\", \"Personal\")\nob.deposit(40000)\nob.deposit(20000)\nob.deposit(50000)\nob.deposit(40)\nob.withdraw(150000)\nob.withdraw(15000)\nob.deposit(100000)\nob.withdraw(50000)\nob.checkbalance()\nob.checkdetails()\nob.changetype()\n\n\n","sub_path":"PycharmProjects/Vic/lesson 4.py","file_name":"lesson 4.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"269536119","text":"\n\"\"\"Tests for custom user ops.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport pylab as pp\nimport numpy as np\nimport os.path\n\nimport tensorflow as tf\n\n\nclass PermuteItTest(tf.test.TestCase):\n\n  def testBasic(self):\n    library_filename = os.path.join('/Users/uvapostdoc/libraries/tensorflow/bazel-bin/tensorflow/user_ops',\n                                    'permute_it.so')\n\n    #with self.assertRaises(tf.errors.InvalidArgumentError):\n    permute_it = tf.load_op_library(library_filename)\n\n    sess = tf.InteractiveSession()\n\n    #with tf.Session(''):\n    W = tf.Variable(tf.random_normal([3,3]))\n    #b = tf.Variable(tf.random_normal([10]))\n    #a = tf.Variable(tf.ones( [5], dtype='int32'))\n    sess.run(tf.global_variables_initializer())\n\n    ids = tf.placeholder(dtype='int32',shape=[3,3])\n\n\n    tf.initialize_all_variables()\n    Wp0 = permute_it.permute_it( W, ids, axis = 0 )\n    Wp1 = permute_it.permute_it( W, ids, axis = 1 )\n    print( \"W = \" + str(W.eval()))\n    #print \"ids =\",ids.eval( feed_dict={ids:np.array([0,2])})\n    #print( \"W[ids,:] = \" + str(Wp.eval( feed_dict={ids:np.array([0,2])})) )\n    print( \"Wp1 = \" + str(Wp1.eval( feed_dict={ids:np.array([[2,1,0],[2,1,0],[2,1,0]])} )))\n    print( \"Wp0 = \" + str(Wp0.eval( feed_dict={ids:np.array([[2,2,2],[1,1,1],[0,0,0]])} )))\n\nif __name__ == '__main__':\n  tf.test.main()\n#\n# import tensorflow as tf\n# from tensorflow.python.ops.gen_user_ops import *\n#\n# sess = tf.InteractiveSession()\n#\n#\n# W = tf.Variable(tf.random_normal([3,5]))\n# b = tf.Variable(tf.random_normal([10]))\n# a = tf.Variable(tf.ones( [5], dtype='int32'))\n#\n# sess.run(tf.initialize_all_variables())\n#\n# #cross_entropy = -tf.reduce_sum(y_*tf.log(y)) + l1*tf.reduce_sum( tf.abs(W[x1+1,:]-W[x1,:]) )\n# #cross_entropy = -tf.reduce_sum(y_*tf.log(y)) + l1a*tf.reduce_sum( tf.abs( tf.matmul(R,W) ) ) + l1b*tf.reduce_sum( tf.abs( tf.matmul(R2,W) ) )\n# #ids = np.array([0,2],dtype='int32')\n# ids = 
tf.placeholder(dtype='int32',shape=[2])\n#\n# Wp = slice_it( W, ids, axis = 1 )\n# print \"W = \", W.eval()\n# #print \"ids =\",ids.eval( feed_dict={ids:np.array([0,2])})\n# print \"W[ids,:] = \", Wp.eval( feed_dict={ids:np.array([0,2])})\n","sub_path":"tensorflow/user_ops/permute_it_test.py","file_name":"permute_it_test.py","file_ext":"py","file_size_in_byte":2223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"133313178","text":"import vtk\r\nimport numpy as np\r\nimport colorsys\r\n\r\nimport Utilities\r\nfrom Utilities import new_vtk\r\nfrom vtk.util import numpy_support as VN\r\n\r\n \r\nclass PlotClass(object):\r\n \"\"\" Simple interface to VTK's underlying ploting \"\"\"\r\n \r\n def __init__(self):\r\n\r\n # Add FEM Actor to renderer window\r\n self.ren = vtk.vtkRenderer()\r\n self.ren.SetBackground(0.3, 0.3, 0.3)\r\n \r\n self.renWin = vtk.vtkRenderWindow()\r\n self.renWin.AddRenderer(self.ren)\r\n self.iren = vtk.vtkRenderWindowInteractor()\r\n self.iren.SetRenderWindow(self.renWin)\r\n \r\n # Allow user to interact\r\n istyle = vtk.vtkInteractorStyleTrackballCamera()\r\n self.iren.SetInteractorStyle(istyle)\r\n\r\n\r\n def AddMesh(self, meshin, color=[1, 1, 1], style='', scalars=[], name='',\r\n rng=[], stitle='', showedges=True, psize=5, opacity=1,\r\n linethick=[]):\r\n \"\"\" Adds an actor to the renderwindow \"\"\"\r\n \r\n # Create mapper\r\n mapper = vtk.vtkDataSetMapper()\r\n \r\n # Add scalars if they exist\r\n isscalars = False\r\n nscalars = len(scalars)\r\n if nscalars == meshin.GetNumberOfPoints():\r\n mesh = Utilities.CopyGrid(meshin)\r\n Utilities.AddPointScalars(mesh, scalars, name)\r\n isscalars = True\r\n mapper.SetScalarModeToUsePointData()\r\n \r\n\r\n elif nscalars == meshin.GetNumberOfCells():\r\n mesh = Utilities.CopyGrid(meshin)\r\n Utilities.AddCellScalars(mesh, scalars, name)\r\n isscalars = True\r\n mapper.SetScalarModeToUseCellData()\r\n \r\n else:\r\n mesh = meshin\r\n \r\n # Set scalar range\r\n if isscalars:\r\n if not rng:\r\n rng = [np.min(scalars), np.max(scalars)]\r\n mapper.SetScalarRange(rng[0], rng[1])\r\n \r\n # Set Scalar\r\n Utilities.SetVTKInput(mapper, mesh)\r\n \r\n # Create Actor\r\n actor = vtk.vtkActor()\r\n actor.SetMapper(mapper)\r\n \r\n if style == 'wireframe':\r\n actor.GetProperty().SetRepresentationToWireframe()\r\n elif style == 'points':\r\n actor.GetProperty().SetRepresentationToPoints()\r\n actor.GetProperty().SetPointSize(psize)\r\n else:\r\n actor.GetProperty().SetRepresentationToSurface()\r\n \r\n if showedges:\r\n actor.GetProperty().EdgeVisibilityOn()\r\n actor.GetProperty().SetColor(color)\r\n actor.GetProperty().SetOpacity(opacity)\r\n actor.GetProperty().LightingOff()\r\n \r\n if style == 'wireframe' and linethick:\r\n actor.GetProperty().SetLineWidth(linethick) \r\n\r\n \r\n # Add to renderer\r\n self.ren.AddActor(actor)\r\n \r\n # Add scalar bar\r\n if stitle:\r\n scalarBar = vtk.vtkScalarBarActor()\r\n scalarBar.SetLookupTable(mapper.GetLookupTable())\r\n scalarBar.SetTitle(stitle)\r\n scalarBar.SetNumberOfLabels(5) \r\n self.ren.AddActor(scalarBar)\r\n\r\n\r\n def AddLines(self, lines, color=[1, 1, 1], width=5):\r\n \"\"\" Adds an actor to the renderwindow \"\"\"\r\n \r\n # Create mapper and add lines\r\n mapper = vtk.vtkDataSetMapper()\r\n Utilities.SetVTKInput(mapper, lines)\r\n \r\n # Create Actor\r\n actor = vtk.vtkActor()\r\n actor.SetMapper(mapper)\r\n actor.GetProperty().SetLineWidth(width); \r\n actor.GetProperty().EdgeVisibilityOn()\r\n 
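# The mapper/actor pair renders the supplied line cells; lighting is disabled below so the color stays flat.\r\n        # Usage sketch (hypothetical data): p = PlotClass(); p.AddLines(edge_polydata, [0, 1, 1], width=5); p.Plot()\r\n        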
actor.GetProperty().SetColor(color)\r\n actor.GetProperty().LightingOff()\r\n \r\n # Add to renderer\r\n self.ren.AddActor(actor)\r\n \r\n\r\n def AddPoints(self, points, color=[1, 1, 1], psize=5):\r\n \r\n # Convert to points actor if points is a numpy array\r\n if type(points) == np.ndarray:\r\n npoints = points.shape[0]\r\n \r\n # Make VTK cells array\r\n cells = np.hstack((np.ones((npoints, 1)), \r\n np.arange(npoints).reshape(-1, 1)))\r\n cells = np.ascontiguousarray(cells, dtype=np.int64)\r\n vtkcells = vtk.vtkCellArray()\r\n vtkcells.SetCells(npoints, VN.numpy_to_vtkIdTypeArray(cells, deep=True))\r\n \r\n # Convert points to vtk object\r\n vtkPoints = Utilities.MakevtkPoints(points)\r\n \r\n # Create polydata\r\n pdata = vtk.vtkPolyData()\r\n pdata.SetPoints(vtkPoints)\r\n pdata.SetVerts(vtkcells)\r\n \r\n # Create mapper and add lines\r\n mapper = vtk.vtkDataSetMapper()\r\n Utilities.SetVTKInput(mapper, pdata)\r\n \r\n # Create Actor\r\n actor = vtk.vtkActor()\r\n actor.SetMapper(mapper)\r\n actor.GetProperty().SetPointSize(psize); \r\n actor.GetProperty().SetColor(color)\r\n actor.GetProperty().LightingOff()\r\n \r\n self.ren.AddActor(actor)\r\n \r\n \r\n def GetCameraPosition(self):\r\n \"\"\" Returns camera position of active render window \"\"\"\r\n camera = self.ren.GetActiveCamera()\r\n pos = camera.GetPosition()\r\n fpt = camera.GetFocalPoint()\r\n vup = camera.GetViewUp()\r\n return [pos, fpt, vup]\r\n \r\n\r\n def SetCameraPosition(self, cameraloc):\r\n \"\"\" Set camera position of active render window \"\"\"\r\n camera = self.ren.GetActiveCamera()\r\n camera.SetPosition(cameraloc[0])\r\n camera.SetFocalPoint(cameraloc[1]) \r\n camera.SetViewUp(cameraloc[2]) \r\n \r\n\r\n def SetBackground(self, bcolor):\r\n \"\"\" Sets background color \"\"\"\r\n self.ren.SetBackground(bcolor)\r\n \r\n \r\n def AddLegend(self, entries, bcolor=[0.5, 0.5, 0.5], border=False):\r\n \"\"\"\r\n Adds a legend to render window. 
Entries must be a list containing\r\n one string and color entry for each item\r\n \"\"\"\r\n \r\n legend = vtk.vtkLegendBoxActor()\r\n legend.SetNumberOfEntries(len(entries))\r\n \r\n c = 0\r\n nulldata = vtk.vtkPolyData()\r\n for entry in entries:\r\n legend.SetEntry(c, nulldata, entry[0], entry[1])\r\n c += 1\r\n \r\n legend.UseBackgroundOn()\r\n legend.SetBackgroundColor(bcolor)\r\n if border:\r\n legend.BorderOn()\r\n else:\r\n legend.BorderOff()\r\n \r\n # Add to renderer\r\n self.ren.AddActor(legend)\r\n \r\n \r\n def Plot(self, title=''):\r\n \"\"\" Renders \"\"\"\r\n if title:\r\n self.renWin.SetWindowName(title)\r\n \r\n # Render\r\n self.iren.Initialize()\r\n self.renWin.Render()\r\n self.iren.Start()\r\n \r\n \r\n def AddActor(self, actor):\r\n \"\"\" Adds actor to render window \"\"\"\r\n self.ren.AddActor(actor)\r\n \r\n \r\n def AddAxes(self):\r\n \"\"\" Add axes widget \"\"\"\r\n axes = vtk.vtkAxesActor()\r\n widget = vtk.vtkOrientationMarkerWidget()\r\n widget.SetOrientationMarker(axes)\r\n widget.SetInteractor(self.iren)\r\n widget.SetViewport(0.0, 0.0, 0.4, 0.4)\r\n widget.SetEnabled(1)\r\n widget.InteractiveOn()\r\n \r\n\r\n\r\ndef CreateArrowsActor(pdata):\r\n \"\"\" Creates an actor composed of arrows \"\"\"\r\n \r\n # Create arrow object\r\n arrow = vtk.vtkArrowSource()\r\n arrow.Update()\r\n glyph3D = vtk.vtkGlyph3D()\r\n if new_vtk:\r\n glyph3D.SetSourceData(arrow.GetOutput())\r\n glyph3D.SetInputData(pdata)\r\n else:\r\n glyph3D.SetSource(arrow.GetOutput())\r\n glyph3D.SetInput(pdata)\r\n glyph3D.SetVectorModeToUseVector()\r\n glyph3D.Update()\r\n \r\n # Create mapper \r\n mapper = vtk.vtkDataSetMapper()\r\n mapper.SetInputConnection(glyph3D.GetOutputPort())\r\n \r\n # Create actor\r\n actor = vtk.vtkActor()\r\n actor.SetMapper(mapper)\r\n actor.GetProperty().LightingOff()\r\n\r\n return actor\r\n \r\n \r\ndef PlotCurvature(mesh, curvtype):\r\n \"\"\"\r\n Plots curvature\r\n Availble options for curvtype:\r\n 'Mean'\r\n 'Gaussian'\r\n 'Maximum ' \r\n \r\n \"\"\"\r\n \r\n # Get curvature values and plot\r\n c = Utilities.GetCurvature(mesh, curvtype)[0]\r\n pobj = PlotClass()\r\n pobj.AddMesh(mesh, scalars=c)\r\n pobj.Plot(); del pobj\r\n\r\n \r\ndef PlotGrids(grids, wFEM=False):\r\n \"\"\"\r\n Creates a plot of several grids as wireframes. 
When wFEM is true, the first\r\n    grid is a white solid\r\n    \"\"\"\r\n    \r\n    # Make grid colors\r\n    N = len(grids)\r\n    HSV_tuples = [(x*1.0/N, 0.5, 0.5) for x in range(N)]\r\n    colors = [colorsys.hsv_to_rgb(*x) for x in HSV_tuples]\r\n    \r\n    pobj = PlotClass()\r\n    for i in range(len(grids)):\r\n        if not i and wFEM: # Special plotting for first grid\r\n            pobj.AddMesh(grids[i])\r\n        else:\r\n            pobj.AddMesh(grids[i], color=colors[i], style='wireframe')\r\n        \r\n    # Render plot and delete when finished\r\n    pobj.SetBackground([0.8, 0.8, 0.8])\r\n    pobj.Plot(); del pobj\r\n\r\n\r\ndef PlotPoly(mesh, representation='surface', color=[1, 1, 1]):\r\n    \"\"\" Plots vtk unstructured grid or poly object \"\"\"\r\n    pobj = PlotClass()\r\n    pobj.AddMesh(mesh, color, style=representation)\r\n    pobj.Plot()\r\n    del pobj\r\n\r\n\r\ndef Plot(mesh, representation='surface', color=[1, 1, 1]):\r\n    \"\"\" calls PlotPoly \"\"\"\r\n    PlotPoly(mesh, representation, color)\r\n    \r\n    \r\ndef PlotEdges(mesh, angle, width=10):\r\n    \"\"\" Plots edges of a mesh \"\"\"\r\n    \r\n    # Extract edge points from a mesh\r\n    edges = Utilities.GetEdgePoints(mesh, angle, False)\r\n    \r\n    # Render\r\n    pobj = PlotClass()\r\n    pobj.AddLines(edges, [0, 1, 1], width)\r\n    pobj.AddMesh(mesh)\r\n    pobj.Plot(); del pobj\r\n    \r\n    \r\ndef PlotBoundaries(mesh):\r\n    \"\"\" Plots boundaries of a mesh \"\"\"\r\n    featureEdges = vtk.vtkFeatureEdges()\r\n    Utilities.SetVTKInput(featureEdges, mesh)\r\n    \r\n    featureEdges.FeatureEdgesOff()\r\n    featureEdges.BoundaryEdgesOn()\r\n    featureEdges.NonManifoldEdgesOff()\r\n    featureEdges.ManifoldEdgesOff()\r\n    \r\n    edgeMapper = vtk.vtkPolyDataMapper()\r\n    edgeMapper.SetInputConnection(featureEdges.GetOutputPort())\r\n    \r\n    edgeActor = vtk.vtkActor()\r\n    edgeActor.GetProperty().SetLineWidth(5)\r\n    edgeActor.SetMapper(edgeMapper)\r\n\r\n    mapper = vtk.vtkDataSetMapper()\r\n    Utilities.SetVTKInput(mapper, mesh)\r\n\r\n    # Actor\r\n    actor = vtk.vtkActor()\r\n    actor.SetMapper(mapper)\r\n    actor.GetProperty().LightingOff() \r\n    \r\n    # Render\r\n    pobj = PlotClass()\r\n    pobj.AddActor(actor)\r\n    pobj.Plot(); del pobj\r\n    \r\n    ","sub_path":"ANSYScdb/Plotting.py","file_name":"Plotting.py","file_ext":"py","file_size_in_byte":10713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"218288307","text":"\"\"\"Everything related to training ultralisks goes here\"\"\"\nfrom sc2.constants import ULTRALISK, ZERGGROUNDARMORSLEVEL3\n\n\nclass TrainUltralisk:\n    \"\"\"Ok for now\"\"\"\n\n    def __init__(self, main):\n        self.controller = main\n\n    async def should_handle(self):\n        \"\"\"Good for now but it might need to be changed vs particular\n        enemy units compositions\"\"\"\n        local_controller = self.controller\n        if local_controller.time >= 1050 and not local_controller.already_pending_upgrade(ZERGGROUNDARMORSLEVEL3):\n            return False\n        return local_controller.can_train(ULTRALISK, local_controller.caverns.ready)\n\n    async def handle(self):\n        \"\"\"Execute the action of training ultralisks\"\"\"\n        local_controller = self.controller\n        local_controller.add_action(local_controller.larvae.random.train(ULTRALISK))\n        return True\n","sub_path":"actions/train/ultralisk.py","file_name":"ultralisk.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"112803686","text":"import os\nimport random\n\nfrom jungle_law.field.Barrier import Barrier\nfrom jungle_law.field.Cell import Cell\n\n\nclass Field:\n    \"\"\"\n    Class that 
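represents the rectangular game field and 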
contains all objects.\n \"\"\"\n def __init__(self, w, h, barrier_count):\n \"\"\"\n Creates field with input width, height and barrier count.\n :param w: Input width.\n :param h: Input height.\n :param barrier_count: Number of barriers.\n \"\"\"\n self.__width = w\n self.__height = h\n self.__cells = [[Cell(i, j) for j in range(h)] for i in range(w)]\n for i in range(barrier_count):\n cell = self.get_free_cell()\n self.add(Barrier(cell.x, cell.y))\n\n def add(self, object):\n \"\"\"\n Add an object to field.\n :param object: Input object.\n \"\"\"\n if self.__cells[object.x][object.y].BlockedBy is not None:\n raise Exception('This Cell is already blocked!')\n self.get_cell(object.x, object.y).BlockedBy = object\n\n def get_near_cells(self, object):\n \"\"\"\n Get cells next to an object.\n :param object: Input object.\n :return: Returns list of near cells.\n \"\"\"\n cells = []\n for step in [(1, 0), (-1, 0), (0, 1), (0, -1)]:\n if 0 <= object.x + step[0] < self.__width and 0 <= object.y + step[1] < self.__height:\n cells.append(self.get_cell(object.x + step[0],object.y + step[1]))\n return cells\n\n def get_cell(self, x, y):\n \"\"\"\n Return cell with input coordinates.\n :param x: X-coordinate.\n :param y: Y-coordinate.\n :return: Returns cell with input coordinates.\n \"\"\"\n return self.__cells[x][y]\n\n def get_free_cell(self):\n \"\"\"\n Get cell that not blocked.\n :return: Returns free cell.\n \"\"\"\n if not self._free_cell_exists():\n raise Exception('There is no free cell!')\n while 1:\n x, y = random.randint(0, self.__width - 1), random.randint(0, self.__height - 1)\n if self.__cells[x][y].BlockedBy is None:\n return self.__cells[x][y]\n\n def _free_cell_exists(self):\n \"\"\"\n Check is there free cell on field.\n :return: True - free cell exists, False - otherwise.\n \"\"\"\n for i in range(self.__width):\n for j in range(self.__height):\n if self.__cells[i][j].BlockedBy is None:\n return True\n return False\n\n def paint(self):\n \"\"\"\n Print all objects on field.\n \"\"\"\n os.system('cls')\n for j in range(self.__height):\n for i in range(self.__width):\n self.__cells[i][j].paint()\n print()\n","sub_path":"jungle_law/field/Field.py","file_name":"Field.py","file_ext":"py","file_size_in_byte":2725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"232107024","text":"from pandac.PandaModules import Point3, VBase3\n\nfrom toontown.suit.DistributedCashbotBoss import DistributedCashbotBoss\nfrom toontown.toonbase import ToontownGlobals\nfrom toontown.toonbase import TTLocalizer\nfrom toontown.chat import ChatGlobals\nfrom toontown.distributed.DelayDelete import DelayDelete\n\nfrom direct.interval.IntervalGlobal import *\n\n\nclass DistributedBrutalCashbotBoss(DistributedCashbotBoss):\n notify = directNotify.newCategory('DistributedBrutalCashbotBoss')\n\n ANIM_PLAYRATE = 1.5\n\n def __init__(self, cr):\n DistributedCashbotBoss.__init__(self, cr)\n\n self.bossMaxDamage = ToontownGlobals.BrutalCashbotBossMaxDamage\n\n def announceGenerate(self):\n DistributedCashbotBoss.announceGenerate(self)\n\n self.setName(TTLocalizer.BrutalCashbotBossName)\n base.localAvatar.setCanUseUnites(False)\n\n def disable(self):\n DistributedCashbotBoss.disable(self)\n\n base.localAvatar.setCanUseUnites(True)\n\n def makeBossFleeMovie(self):\n hadEnough = TTLocalizer.BrutalCashbotBossHadEnough\n outtaHere = TTLocalizer.BrutalCashbotBossOuttaHere\n loco = loader.loadModel('phase_10/models/cogHQ/CashBotLocomotive')\n car1 = 
loader.loadModel('phase_10/models/cogHQ/CashBotBoxCar')\n car2 = loader.loadModel('phase_10/models/cogHQ/CashBotTankCar')\n trainPassingSfx = base.loadSfx('phase_10/audio/sfx/CBHQ_TRAIN_pass.ogg')\n boomSfx = loader.loadSfx('phase_3.5/audio/sfx/ENC_cogfall_apart.ogg')\n rollThroughDoor = self.rollBossToPoint(fromPos=Point3(120, -280, 0), fromHpr=None, toPos=Point3(120, -250, 0), toHpr=None, reverse=0)\n rollTrack = Sequence(Func(self.getGeomNode().setH, 180), rollThroughDoor[0], Func(self.getGeomNode().setH, 0))\n g = 80.0 / 300.0\n trainTrack = Track(\n (0 * g, loco.posInterval(0.5, Point3(0, -242, 0), startPos=Point3(150, -242, 0))),\n (1 * g, car2.posInterval(0.5, Point3(0, -242, 0), startPos=Point3(150, -242, 0))),\n (2 * g, car1.posInterval(0.5, Point3(0, -242, 0), startPos=Point3(150, -242, 0))),\n (3 * g, car2.posInterval(0.5, Point3(0, -242, 0), startPos=Point3(150, -242, 0))),\n (4 * g, car1.posInterval(0.5, Point3(0, -242, 0), startPos=Point3(150, -242, 0))),\n (5 * g, car2.posInterval(0.5, Point3(0, -242, 0), startPos=Point3(150, -242, 0))),\n (6 * g, car1.posInterval(0.5, Point3(0, -242, 0), startPos=Point3(150, -242, 0))),\n (7 * g, car2.posInterval(0.5, Point3(0, -242, 0), startPos=Point3(150, -242, 0))),\n (8 * g, car1.posInterval(0.5, Point3(0, -242, 0), startPos=Point3(150, -242, 0))),\n (9 * g, car2.posInterval(0.5, Point3(0, -242, 0), startPos=Point3(150, -242, 0))),\n (10 * g, car1.posInterval(0.5, Point3(0, -242, 0), startPos=Point3(150, -242, 0))),\n (11 * g, car2.posInterval(0.5, Point3(0, -242, 0), startPos=Point3(150, -242, 0))),\n (12 * g, car1.posInterval(0.5, Point3(0, -242, 0), startPos=Point3(150, -242, 0))),\n (13 * g, car2.posInterval(0.5, Point3(0, -242, 0), startPos=Point3(150, -242, 0))),\n (14 * g, car1.posInterval(0.5, Point3(0, -242, 0), startPos=Point3(150, -242, 0))))\n bossTrack = Track(\n (0.0, Sequence(\n Func(base.camera.reparentTo, render),\n Func(base.camera.setPosHpr, 105, -280, 20, -158, -3, 0),\n Func(self.reparentTo, render),\n Func(self.show),\n Func(self.clearChat),\n Func(self.setPosHpr, *ToontownGlobals.CashbotBossBattleThreePosHpr),\n Func(self.reverseHead),\n ActorInterval(self, 'Fb_firstHit'),\n ActorInterval(self, 'Fb_down2Up'))),\n (1.0, Func(self.setChatAbsolute, hadEnough, ChatGlobals.CFSpeech)),\n (5.5, Parallel(\n Func(base.camera.setPosHpr, 100, -315, 16, -20, 0, 0),\n Func(self.hideBattleThreeObjects),\n Func(self.forwardHead),\n Func(self.loop, 'Ff_neutral'),\n rollTrack,\n self.door3.posInterval(2.5, Point3(0, 0, 25), startPos=Point3(0, 0, 18)))),\n (5.5, Func(self.setChatAbsolute, outtaHere, ChatGlobals.CFSpeech)),\n (5.5, SoundInterval(trainPassingSfx)),\n (8.1, Func(self.clearChat)),\n (9.4, Sequence(\n Func(loco.reparentTo, render),\n Func(car1.reparentTo, render),\n Func(car2.reparentTo, render),\n trainTrack,\n Func(loco.detachNode),\n Func(car1.detachNode),\n Func(car2.detachNode),\n Wait(2))),\n (9.5, SoundInterval(boomSfx)),\n (9.5, Sequence(\n self.posInterval(0.4, Point3(0, -250, 0)),\n Func(self.stash))))\n return bossTrack\n\n def makeIntroductionMovie(self, delayDeletes):\n for toonId in self.involvedToons:\n toon = self.cr.doId2do.get(toonId)\n if toon:\n delayDeletes.append(DelayDelete(toon, 'CashbotBoss.makeIntroductionMovie'))\n\n rtTrack = Sequence()\n startPos = Point3(ToontownGlobals.CashbotBossOffstagePosHpr[0], ToontownGlobals.CashbotBossOffstagePosHpr[1], ToontownGlobals.CashbotBossOffstagePosHpr[2])\n battlePos = Point3(ToontownGlobals.CashbotBossBattleOnePosHpr[0], 
ToontownGlobals.CashbotBossBattleOnePosHpr[1], ToontownGlobals.CashbotBossBattleOnePosHpr[2])\n battleHpr = VBase3(ToontownGlobals.CashbotBossBattleOnePosHpr[3], ToontownGlobals.CashbotBossBattleOnePosHpr[4], ToontownGlobals.CashbotBossBattleOnePosHpr[5])\n bossTrack = Sequence()\n bossTrack.append(Func(self.reparentTo, render))\n bossTrack.append(Func(self.getGeomNode().setH, 180))\n bossTrack.append(Func(self.pelvis.setHpr, self.pelvisForwardHpr))\n bossTrack.append(Func(self.loop, 'Ff_neutral'))\n track, hpr = self.rollBossToPoint(startPos, None, battlePos, None, 0)\n bossTrack.append(track)\n track, hpr = self.rollBossToPoint(battlePos, hpr, battlePos, battleHpr, 0)\n bossTrack.append(track)\n bossTrack.append(Func(self.getGeomNode().setH, 0))\n bossTrack.append(Func(self.pelvis.setHpr, self.pelvisReversedHpr))\n goonTrack = self._DistributedCashbotBoss__makeGoonMovieForIntro()\n attackToons = TTLocalizer.CashbotBossCogAttack\n rToon = self.resistanceToon\n rToon.setPosHpr(*ToontownGlobals.CashbotRTBattleOneStartPosHpr)\n track = Sequence(\n Func(base.camera.setPosHpr, 82, -219, 5, 267, 0, 0),\n Func(rToon.setChatAbsolute, TTLocalizer.BrutalResistanceToonWelcome, ChatGlobals.CFSpeech),\n Wait(3),\n Sequence(goonTrack, duration=0),\n Parallel(\n base.camera.posHprInterval(4, Point3(108, -244, 4), VBase3(211.5, 0, 0)),\n Sequence(\n Func(rToon.suit.setPlayRate, 1.4, 'walk'),\n Func(rToon.suit.loop, 'walk'),\n Parallel(\n rToon.hprInterval(1, VBase3(180, 0, 0)),\n rToon.posInterval(3, VBase3(120, -255, 0)),\n Sequence(\n Wait(2),\n Func(rToon.clearChat))),\n Func(rToon.suit.loop, 'neutral'),\n self.door2.posInterval(3, VBase3(0, 0, 30)))),\n Func(rToon.setHpr, 0, 0, 0),\n Func(rToon.setChatAbsolute, TTLocalizer.ResistanceToonTooLate, ChatGlobals.CFSpeech),\n Func(base.camera.reparentTo, render),\n Func(base.camera.setPosHpr, 61.1, -228.8, 10.2, -90, 0, 0),\n self.door1.posInterval(2, VBase3(0, 0, 30)),\n Parallel(\n bossTrack,\n Sequence(\n Wait(3),\n Func(rToon.clearChat),\n self.door1.posInterval(3, VBase3(0, 0, 0)))),\n Func(self.setChatAbsolute, TTLocalizer.CashbotBossDiscoverToons1, ChatGlobals.CFSpeech),\n base.camera.posHprInterval(1.5, Point3(93.3, -230, 0.7), VBase3(-92.9, 39.7, 8.3)),\n Func(self.setChatAbsolute, TTLocalizer.BrutalCashbotBossDiscoverToons2, ChatGlobals.CFSpeech),\n Wait(4),\n Func(self.clearChat),\n self.loseCogSuits(self.toonsA + self.toonsB, render, (113, -228, 10, 90, 0, 0)),\n Wait(1),\n Func(rToon.setHpr, 0, 0, 0),\n self.loseCogSuits([rToon], render, (133, -243, 5, 143, 0, 0), True),\n Func(rToon.setChatAbsolute, TTLocalizer.BrutalResistanceToonKeepHimBusy, ChatGlobals.CFSpeech),\n Wait(1),\n Func(self._DistributedCashbotBoss__showResistanceToon, False),\n Sequence(\n Func(rToon.animFSM.request, 'run'),\n rToon.hprInterval(1, VBase3(180, 0, 0)),\n Parallel(\n Sequence(\n rToon.posInterval(1.5, VBase3(109, -294, 0)),\n Parallel(Func(rToon.animFSM.request, 'jump')),\n rToon.posInterval(1.5, VBase3(93.935, -341.065, 2))),\n self.door2.posInterval(3, VBase3(0, 0, 0))),\n Func(rToon.animFSM.request, 'neutral')),\n self.toonNormalEyes(self.involvedToons),\n self.toonNormalEyes([self.resistanceToon], True),\n Func(rToon.clearChat),\n Func(base.camera.setPosHpr, 93.3, -230, 0.7, -92.9, 39.7, 8.3),\n Func(self.setChatAbsolute, attackToons, ChatGlobals.CFSpeech),\n Wait(2),\n Func(self.clearChat))\n return Sequence(Func(base.camera.reparentTo, render), track)\n\n\n def makePrepareBattleThreeMovie(self, delayDeletes):\n for toonId in self.involvedToons:\n toon = 
self.cr.doId2do.get(toonId)\n if toon:\n delayDeletes.append(DelayDelete(toon, 'CashbotBoss.makePrepareBattleThreeMovie'))\n\n startPos = Point3(ToontownGlobals.CashbotBossBattleOnePosHpr[0], ToontownGlobals.CashbotBossBattleOnePosHpr[1], ToontownGlobals.CashbotBossBattleOnePosHpr[2])\n battlePos = Point3(ToontownGlobals.CashbotBossBattleThreePosHpr[0], ToontownGlobals.CashbotBossBattleThreePosHpr[1], ToontownGlobals.CashbotBossBattleThreePosHpr[2])\n startHpr = Point3(ToontownGlobals.CashbotBossBattleOnePosHpr[3], ToontownGlobals.CashbotBossBattleOnePosHpr[4], ToontownGlobals.CashbotBossBattleOnePosHpr[5])\n battleHpr = VBase3(ToontownGlobals.CashbotBossBattleThreePosHpr[3], ToontownGlobals.CashbotBossBattleThreePosHpr[4], ToontownGlobals.CashbotBossBattleThreePosHpr[5])\n finalHpr = VBase3(135, 0, 0)\n bossTrack = Sequence()\n bossTrack.append(Func(self.reparentTo, render))\n bossTrack.append(Func(self.getGeomNode().setH, 180))\n bossTrack.append(Func(self.pelvis.setHpr, self.pelvisForwardHpr))\n bossTrack.append(Func(self.loop, 'Ff_neutral'))\n track, hpr = self.rollBossToPoint(startPos, startHpr, startPos, battleHpr, 0)\n bossTrack.append(track)\n track, hpr = self.rollBossToPoint(startPos, None, battlePos, None, 0)\n bossTrack.append(track)\n track, hpr = self.rollBossToPoint(battlePos, battleHpr, battlePos, finalHpr, 0)\n bossTrack.append(track)\n rToon = self.resistanceToon\n rToon.setPosHpr(93.935, -341.065, 0, -45, 0, 0)\n goon = self.fakeGoons[0]\n crane = self.cranes[0]\n track = Sequence(\n Func(self._DistributedCashbotBoss__hideToons),\n Func(crane.request, 'Movie'),\n Func(crane.accomodateToon, rToon),\n Func(goon.request, 'Stunned'),\n Func(goon.setPosHpr, 104, -316, 0, 165, 0, 0),\n Parallel(\n self.door2.posInterval(4.5, VBase3(0, 0, 30)),\n self.door3.posInterval(4.5, VBase3(0, 0, 30)),\n bossTrack),\n Func(rToon.loop, 'leverNeutral'),\n Func(base.camera.reparentTo, self.geom),\n Func(base.camera.setPosHpr, 105, -326, 5, 136.3, 0, 0),\n Func(rToon.setChatAbsolute, TTLocalizer.BrutalResistanceToonWatchThis, ChatGlobals.CFSpeech),\n Wait(2),\n Func(rToon.clearChat),\n Func(base.camera.setPosHpr, 105, -326, 20, -45.3, 11, 0),\n Func(self.setChatAbsolute, TTLocalizer.BrutalCashbotBossGetAwayFromThat, ChatGlobals.CFSpeech),\n Wait(2),\n Func(self.clearChat),\n base.camera.posHprInterval(1.5, Point3(105, -326, 5), Point3(136.3, 0, 0), blendType='easeInOut'),\n Func(rToon.setChatAbsolute, TTLocalizer.ResistanceToonCraneInstructions1, ChatGlobals.CFSpeech),\n Wait(4),\n Func(rToon.setChatAbsolute, TTLocalizer.ResistanceToonCraneInstructions2, ChatGlobals.CFSpeech),\n Wait(4),\n Func(rToon.setChatAbsolute, TTLocalizer.BrutalResistanceToonCraneInstructions3, ChatGlobals.CFSpeech),\n Wait(4),\n Func(rToon.setChatAbsolute, TTLocalizer.BrutalResistanceToonCraneInstructions4, ChatGlobals.CFSpeech),\n Wait(4),\n Func(rToon.clearChat),\n Func(base.camera.setPosHpr, 102, -323.6, 0.9, -10.6, 14, 0),\n Func(goon.request, 'Recovery'),\n Wait(2),\n Func(base.camera.setPosHpr, 95.4, -332.6, 4.2, 167.1, -13.2, 0),\n Func(rToon.setChatAbsolute, TTLocalizer.ResistanceToonGetaway, ChatGlobals.CFSpeech),\n Func(rToon.animFSM.request, 'jump'),\n Wait(1.8),\n Func(rToon.clearChat),\n Func(base.camera.setPosHpr, 109.1, -300.7, 13.9, -15.6, -13.6, 0),\n Func(rToon.animFSM.request, 'run'),\n Func(goon.request, 'Walk'),\n Parallel(\n self.door3.posInterval(3, VBase3(0, 0, 0)),\n rToon.posHprInterval(3, Point3(136, -212.9, 0), VBase3(-14, 0, 0), startPos=Point3(110.8, -292.7, 0), startHpr=VBase3(-14, 0, 
0)),\n goon.posHprInterval(3, Point3(125.2, -243.5, 0), VBase3(-14, 0, 0), startPos=Point3(104.8, -309.5, 0), startHpr=VBase3(-14, 0, 0))),\n Func(self._DistributedCashbotBoss__hideFakeGoons),\n Func(crane.request, 'Free'),\n Func(self.getGeomNode().setH, 0),\n self.moveToonsToBattleThreePos(self.involvedToons),\n Func(self._DistributedCashbotBoss__showToons))\n return Sequence(Func(base.camera.reparentTo, self), Func(base.camera.setPosHpr, 0, -27, 25, 0, -18, 0), track)\n","sub_path":"toontown/suit/DistributedBrutalCashbotBoss.py","file_name":"DistributedBrutalCashbotBoss.py","file_ext":"py","file_size_in_byte":15254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"396946458","text":"# Copyright 2013, Big Switch Networks, Inc.\n#\n# LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with\n# the following special exception:\n#\n# LOXI Exception\n#\n# As a special exception to the terms of the EPL, you may distribute libraries\n# generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided\n# that copyright and licensing notices generated by LoxiGen are not altered or removed\n# from the LoxiGen Libraries and the notice provided below is (i) included in\n# the LoxiGen Libraries, if distributed in source code form and (ii) included in any\n# documentation for the LoxiGen Libraries, if distributed in binary form.\n#\n# Notice: \"Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler.\"\n#\n# You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain\n# a copy of the EPL at:\n#\n# http://www.eclipse.org/legal/epl-v10.html\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# EPL for the specific language governing permissions and limitations\n# under the EPL.\n\n\"\"\"\n@brief Utilities involving LOXI naming conventions\n\nUtility functions for OpenFlow class generation \n\nThese may need to be sorted out into language specific functions\n\"\"\"\n\nimport of_g\nimport tenjin\n\ndef class_signature(members):\n \"\"\"\n Generate a signature string for a class in canonical form\n\n @param cls The class whose signature is to be generated\n \"\"\"\n return \";\".join([\",\".join([x[\"m_type\"], x[\"name\"], str(x[\"offset\"])])\n for x in members])\n\ndef type_dec_to_count_base(m_type):\n \"\"\"\n Resolve a type declaration like uint8_t[4] to a count (4) and base_type\n (uint8_t)\n\n @param m_type The string type declaration to process\n \"\"\"\n count = 1\n chk_ar = m_type.split('[')\n if len(chk_ar) > 1:\n count_str = chk_ar[1].split(']')[0]\n if count_str in of_g.ofp_constants:\n count = of_g.ofp_constants[count_str]\n else:\n count = int(count_str)\n base_type = chk_ar[0]\n else:\n base_type = m_type\n return count, base_type\n\n##\n# Class types:\n#\n# Virtual\n# A virtual class is one which does not have an explicit wire\n# representation. 
For example, an inheritance super class\n# or a list type.\n#\n# List\n# A list of objects of some other type\n#\n# TLV16\n# The wire represenation starts with 16-bit type and length fields\n#\n# OXM\n# An extensible match object\n#\n# Message\n# A top level OpenFlow message\n#\n#\n\ndef class_is_message(cls):\n \"\"\"\n Return True if cls is a message object based on info in unified\n \"\"\"\n return \"xid\" in of_g.unified[cls][\"union\"] and cls != \"of_header\"\n\ndef class_is_tlv16(cls):\n \"\"\"\n Return True if cls_name is an object which uses uint16 for type and length\n \"\"\"\n if cls.find(\"of_action\") == 0: # Includes of_action_id classes\n return True\n if cls.find(\"of_instruction\") == 0:\n return True\n if cls.find(\"of_queue_prop\") == 0:\n return True\n if cls.find(\"of_table_feature_prop\") == 0:\n return True\n # *sigh*\n if cls.find(\"of_meter_band_stats\") == 0: # NOT A TLV\n return False\n if cls.find(\"of_meter_band\") == 0:\n return True\n if cls.find(\"of_hello_elem\") == 0:\n return True\n if cls == \"of_match_v3\":\n return True\n if cls == \"of_match_v4\":\n return True\n return False\n\ndef class_is_u16_len(cls):\n \"\"\"\n Return True if cls_name is an object which uses initial uint16 length\n \"\"\"\n return cls in [\"of_group_desc_stats_entry\", \"of_group_stats_entry\",\n \"of_flow_stats_entry\", \"of_bucket\", \"of_table_features\"]\n\ndef class_is_oxm(cls):\n \"\"\"\n Return True if cls_name is an OXM object\n \"\"\"\n if cls.find(\"of_oxm\") == 0:\n return True\n return False\n\ndef class_is_action(cls):\n \"\"\"\n Return True if cls_name is an action object\n\n Note that action_id is not an action object, though it has\n the same header. It looks like an action header, but the type\n is used to identify a kind of action, it does not indicate the\n type of the object following.\n \"\"\"\n if cls.find(\"of_action_id\") == 0:\n return False\n if cls.find(\"of_action\") == 0:\n return True\n\n # For each vendor, check for vendor specific action\n for exp in of_g.experimenter_name_to_id:\n if cls.find(\"of_action\" + exp) == 0:\n return True\n\n return False\n\ndef class_is_action_id(cls):\n \"\"\"\n Return True if cls_name is an action object\n\n Note that action_id is not an action object, though it has\n the same header. 
It looks like an action header, but the type\n is used to identify a kind of action, it does not indicate the\n type of the object following.\n \"\"\"\n if cls.find(\"of_action_id\") == 0:\n return True\n\n # For each vendor, check for vendor specific action\n for exp in of_g.experimenter_name_to_id:\n if cls.find(\"of_action_id_\" + exp) == 0:\n return True\n\n return False\n\ndef class_is_instruction(cls):\n \"\"\"\n Return True if cls_name is an instruction object\n \"\"\"\n if cls.find(\"of_instruction\") == 0:\n return True\n\n # For each vendor, check for vendor specific action\n for exp in of_g.experimenter_name_to_id:\n if cls.find(\"of_instruction_\" + exp) == 0:\n return True\n\n return False\n\ndef class_is_meter_band(cls):\n \"\"\"\n Return True if cls_name is an instruction object\n \"\"\"\n # meter_band_stats is not a member of meter_band class hierarchy\n if cls.find(\"of_meter_band_stats\") == 0:\n return False\n if cls.find(\"of_meter_band\") == 0:\n return True\n return False\n\ndef class_is_hello_elem(cls):\n \"\"\"\n Return True if cls_name is an instruction object\n \"\"\"\n if cls.find(\"of_hello_elem\") == 0:\n return True\n return False\n\ndef class_is_queue_prop(cls):\n \"\"\"\n Return True if cls_name is a queue_prop object\n \"\"\"\n if cls.find(\"of_queue_prop\") == 0:\n return True\n\n # For each vendor, check for vendor specific action\n for exp in of_g.experimenter_name_to_id:\n if cls.find(\"of_queue_prop_\" + exp) == 0:\n return True\n\n return False\n\ndef class_is_table_feature_prop(cls):\n \"\"\"\n Return True if cls_name is a queue_prop object\n \"\"\"\n if cls.find(\"of_table_feature_prop\") == 0:\n return True\n return False\n\ndef class_is_stats_message(cls):\n \"\"\"\n Return True if cls_name is a message object based on info in unified\n \"\"\"\n\n return \"stats_type\" in of_g.unified[cls][\"union\"]\n\ndef class_is_list(cls):\n \"\"\"\n Return True if cls_name is a list object\n \"\"\"\n return (cls.find(\"of_list_\") == 0)\n\ndef type_is_of_object(m_type):\n \"\"\"\n Return True if m_type is an OF object type\n \"\"\"\n # Remove _t from the type id and see if key for unified class\n if m_type[-2:] == \"_t\":\n m_type = m_type[:-2]\n return m_type in of_g.unified\n\ndef list_to_entry_type(cls):\n \"\"\"\n Return the entry type for a list\n \"\"\"\n slen = len(\"of_list_\")\n return \"of_\" + cls[slen:] \n\ndef type_to_short_name(m_type):\n if m_type in of_g.of_base_types:\n tname = of_g.of_base_types[m_type][\"short_name\"]\n elif m_type in of_g.of_mixed_types:\n tname = of_g.of_mixed_types[m_type][\"short_name\"]\n else:\n tname = \"unknown\"\n return tname\n\ndef type_to_name_type(cls, member_name):\n \"\"\"\n Generate the root name of a member for accessor functions, etc\n @param cls The class name\n @param member_name The member name\n \"\"\"\n members = of_g.unified[cls][\"union\"]\n if not member_name in members:\n debug(\"Error: %s is not in class %s for acc_name defn\" %\n (member_name, cls))\n os.exit()\n\n mem = members[member_name]\n m_type = mem[\"m_type\"]\n id = mem[\"memid\"]\n tname = type_to_short_name(m_type)\n\n return \"o%d_m%d_%s\" % (of_g.unified[cls][\"object_id\"], id, tname)\n\n\ndef member_to_index(m_name, members):\n \"\"\"\n Given a member name, return the index in the members dict\n @param m_name The name of the data member to search for\n @param members The dict of members\n @return Index if found, -1 not found\n\n Note we could generate an index when processing the original input\n \"\"\"\n count = 0\n for d in 
members:\n if d[\"name\"] == m_name:\n return count\n count += 1\n return -1\n\ndef member_base_type(cls, m_name):\n \"\"\"\n Map a member to its of_ type\n @param cls The class name\n @param m_name The name of the member being gotten\n @return The of_ type of the member\n \"\"\"\n rv = of_g.unified[cls][\"union\"][m_name][\"m_type\"]\n if rv[-2:] == \"_t\":\n return rv\n return rv + \"_t\"\n\ndef member_type_is_octets(cls, m_name):\n return member_base_type(cls, m_name) == \"of_octets_t\"\n\ndef member_returns_val(cls, m_name):\n \"\"\"\n Should get accessor return a value rather than void\n @param cls The class name\n @param m_name The member name\n @return True if of_g config and the specific member allow a \n return value. Otherwise False\n \"\"\"\n m_type = of_g.unified[cls][\"union\"][m_name][\"m_type\"]\n return (config_check(\"get_returns\") ==\"value\" and \n m_type in of_g.of_scalar_types)\n\ndef config_check(str, dictionary = of_g.code_gen_config):\n \"\"\"\n Return config value if in dictionary; else return False.\n @param str The lookup index\n @param dictionary The dict to check; use code_gen_config if None\n \"\"\"\n\n if str in dictionary:\n return dictionary[str]\n\n return False\n\ndef h_file_to_define(name):\n \"\"\"\n Convert a .h file name to the define used for the header\n \"\"\"\n h_name = name[:-2].upper()\n h_name = \"_\" + h_name + \"_H_\"\n return h_name\n\ndef type_to_cof_type(m_type):\n if m_type in of_g.of_base_types:\n if \"cof_type\" in of_g.of_base_types[m_type]:\n return of_g.of_base_types[m_type][\"cof_type\"]\n return m_type\n\n \ndef member_is_scalar(cls, m_name):\n return of_g.unified[cls][\"union\"][m_name][\"m_type\"] in of_g.of_scalar_types\n\ndef type_is_scalar(m_type):\n return m_type in of_g.of_scalar_types\n\ndef skip_member_name(name):\n return name.find(\"pad\") == 0 or name in of_g.skip_members\n\ndef enum_name(cls):\n \"\"\"\n Return the name used for an enum identifier for the given class\n @param cls The class name\n \"\"\"\n return cls.upper()\n\ndef class_in_version(cls, ver):\n \"\"\"\n Return boolean indicating if cls is defined for wire version ver\n \"\"\"\n\n return (cls, ver) in of_g.base_length\n\ndef instance_to_class(instance, parent):\n \"\"\"\n Return the name of the class for an instance of inheritance type parent\n \"\"\"\n return parent + \"_\" + instance\n\ndef sub_class_to_var_name(cls):\n \"\"\"\n Given a subclass name like of_action_output, generate the\n name of a variable like 'output'\n @param cls The class name\n \"\"\"\n pass\n\ndef class_is_var_len(cls, version):\n # Match is special case. 
Only version 1.2 (wire version 3) is var\n if cls == \"of_match\":\n return version == 3\n\n return not (cls, version) in of_g.is_fixed_length\n\ndef base_type_to_length(base_type, version):\n if base_type + \"_t\" in of_g.of_base_types:\n inst_len = of_g.of_base_types[base_type + \"_t\"][\"bytes\"]\n else:\n inst_len = of_g.base_length[(base_type, version)]\n\ndef version_to_name(version):\n \"\"\"\n Convert an integer version to the C macro name\n \"\"\"\n return \"OF_\" + of_g.version_names[version]\n\n##\n# Is class a flow modify of some sort?\n\ndef cls_is_flow_mod(cls):\n return cls in [\"of_flow_modify\", \"of_flow_add\", \"of_flow_delete\",\n \"of_flow_modify_strict\", \"of_flow_delete_strict\"]\n\n\ndef all_member_types_get(cls, version):\n \"\"\"\n Get the members and list of types for members of a given class\n @param cls The class name to process\n @param version The version for the class\n \"\"\"\n member_types = []\n\n if not version in of_g.unified[cls]:\n return ([], [])\n\n if \"use_version\" in of_g.unified[cls][version]:\n v = of_g.unified[cls][version][\"use_version\"]\n members = of_g.unified[cls][v][\"members\"]\n else:\n members = of_g.unified[cls][version][\"members\"]\n # Accumulate variables that are supported\n for member in members:\n m_type = member[\"m_type\"]\n m_name = member[\"name\"]\n if skip_member_name(m_name):\n continue\n if not m_type in member_types:\n member_types.append(m_type)\n\n return (members, member_types)\n\ndef list_name_extract(list_type):\n \"\"\"\n Return the base name for a list object of the given type\n @param list_type The type of the list as appears in the input,\n for example list(of_port_desc_t).\n @return A pair, (list-name, base-type) where list-name is the\n base name for the list, for example of_list_port_desc, and base-type\n is the type of list elements like of_port_desc_t\n \"\"\"\n base_type = list_type[5:-1]\n list_name = base_type\n if list_name.find(\"of_\") == 0:\n list_name = list_name[3:]\n if list_name[-2:] == \"_t\":\n list_name = list_name[:-2]\n list_name = \"of_list_\" + list_name\n return (list_name, base_type)\n\ndef version_to_name(version):\n \"\"\"\n Convert an integer version to the C macro name\n \"\"\"\n return \"OF_\" + of_g.version_names[version]\n\ndef gen_c_copy_license(out):\n \"\"\"\n Generate the top comments for copyright and license\n \"\"\"\n out.write(\"\"\"\\\n/* Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior University */\n/* Copyright (c) 2011, 2012 Open Networking Foundation */\n/* Copyright (c) 2012, 2013 Big Switch Networks, Inc. 
*/\n\n\"\"\")\n\ndef accessor_returns_error(a_type, m_type):\n    is_var_len = (not type_is_scalar(m_type)) and \\\n        [x for x in of_g.of_version_range if class_is_var_len(m_type[:-2], x)] != []\n    if a_type == \"set\" and is_var_len:\n        return True\n    elif m_type == \"of_match_t\":\n        return True\n    else:\n        return False\n\ndef render_template(out, name, path, context):\n    \"\"\"\n    Render a template using tenjin.\n    out: a file-like object\n    name: name of the template\n    path: array of directories to search for the template\n    context: dictionary of variables to pass to the template\n    \"\"\"\n    pp = [ tenjin.PrefixedLinePreprocessor() ] # support \"::\" syntax\n    template_globals = { \"to_str\": str, \"escape\": str } # disable HTML escaping\n    engine = tenjin.Engine(path=path, pp=pp)\n    out.write(engine.render(name, context, template_globals))\n\ndef render_static(out, name, path):\n    \"\"\"\n    Write out a static template.\n    out: a file-like object\n    name: name of the template\n    path: array of directories to search for the template\n    \"\"\"\n    # Reuse the tenjin logic for finding the template\n    template_filename = tenjin.FileSystemLoader().find(name, path)\n    if not template_filename:\n        raise ValueError(\"template %s not found\" % name)\n    with open(template_filename) as infile:\n        out.write(infile.read())\n","sub_path":"loxi_utils/loxi_utils.py","file_name":"loxi_utils.py","file_ext":"py","file_size_in_byte":15259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} {"seq_id":"60879690","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 24 10:44:49 2017\n\n@author: alessandro\n\"\"\"\n\nimport numpy as np\nimport numpy.matlib  # explicit import required for np.matlib.repmat used below\nfrom sklearn.gaussian_process import GaussianProcessRegressor\nfrom sklearn.gaussian_process.kernels import RBF, ConstantKernel as C\nfrom matplotlib import pyplot as plt\nimport time as time\n\n\nclass Agent:\n\n    def __init__(self, budgetTot, deadline, nCampaigns, nIntervals, nBids, maxBudget=100.0, maxBid=1.0):\n        self.budgetTot = budgetTot\n        self.deadline = deadline\n        self.nCampaigns = nCampaigns\n        self.costs = np.array([])\n        self.revenues = np.array([])\n        self.t = 0\n        self.gps = []\n        self.prevBudgets = np.array([])\n        self.prevBids = np.array([])\n        self.prevClicks = np.array([])\n        self.prevConversions = np.array([])\n        self.prevHours = np.array([])\n        self.valuesPerClick = np.zeros(nCampaigns)\n        self.maxTotDailyBudget = maxBudget\n        self.maxBid = maxBid\n        self.nBudget = nIntervals\n        self.nBids = nBids\n\n        self.budgets = np.linspace(0, maxBudget, nIntervals)\n        self.bids = np.linspace(0, maxBid, nBids)\n        self.optimalBidPerBudget = np.zeros((nCampaigns, nIntervals))\n\n        self.campaignsValues = []\n        self.yMax = np.ones((nCampaigns))\n\n    def updateValuesPerClick(self):\n        \"\"\"\n        Computation of the mean value per click\n        :return: sets the value per click for each campaign stored in the agent\n        \"\"\"\n        for c in range(0, self.nCampaigns):\n            value = np.sum(self.prevConversions[:, c]) / np.sum(self.prevClicks[:, c])\n            if(np.isnan(value) or np.isinf(value)):\n                self.valuesPerClick[c] = 0\n            else:\n                self.valuesPerClick[c] = value\n\n    def initGPs(self):\n        for c in range(0, self.nCampaigns):\n            #C(1.0, (1e-3, 1e3))\n            #l= np.array([200,200])\n            #kernel = C(1, (1e-3, 1e1))*RBF(l, ((100, 300),(100,300)))\n            l = np.array([1.0, 1.0])\n            kernel = C(1.0, (1e-3, 1e3)) * RBF(l, ((1e-3, 1e3),(1e-3, 1e3)))\n            #l=1.0\n            #kernel = C(1.0, (1e-3, 1e3)) * RBF(l, (1e-3, 1e3))\n            alpha = 200\n            self.gps.append(GaussianProcessRegressor(kernel=kernel, alpha=alpha, n_restarts_optimizer=10, 
normalize_y=True))\n\n    def dividePotentialClicks(self, numerator, denominator):\n        div = numerator / denominator\n        div[np.isnan(div)] = 0\n        div[np.isinf(div)] = 0\n        return div\n\n    def updateGP(self, c):\n        \"\"\"\n\n        :param c: index of the campaign to be updated\n        :return:\n        \"\"\"\n        self.prevBids = np.atleast_2d(self.prevBids)\n        self.prevBudgets = np.atleast_2d(self.prevBudgets)\n        self.prevClicks = np.atleast_2d(self.prevClicks)\n        X = np.array([self.prevBids.T[c, :], self.prevBudgets.T[c, :]])\n        X = np.atleast_2d(X).T\n        X = self.normalize(X)\n        #potentialClicks = self.dividePotentialClicks(self.prevClicks * 24.0, self.prevHours)\n        #y = potentialClicks.T[c,:].ravel()\n        y = self.prevClicks.T[c, :].ravel()\n        y = self.normalizeOutput(y, c)\n        self.gps[c].fit(X, y)\n\n    def updateMultiGP(self):\n        for c in range(0, self.nCampaigns):\n            self.updateGP(c)\n\n    def updateState(self, bids, budgets, clicks, conversions, costs, revenues, hours):\n        bids = np.atleast_2d(bids)\n        budgets = np.atleast_2d(budgets)\n        clicks = np.atleast_2d(clicks)\n        conversions = np.atleast_2d(conversions)\n        hours = np.atleast_2d(hours)\n        costs = np.atleast_2d(costs)\n        revenues = np.atleast_2d(revenues)\n        \"\"\"\n        self.prevBids=np.atleast_2d(self.prevBids)\n        self.prevBudgets=np.atleast_2d(self.prevBudgets)\n        self.prevClicks=np.atleast_2d(self.prevClicks)\n        self.prevConversions=np.atleast_2d(self.prevConversions)\n        self.prevHours=np.atleast_2d(self.prevHours)\n        \"\"\"\n        if self.t == 0:\n            self.prevBudgets = budgets\n            self.prevBids = bids\n            self.prevClicks = clicks\n            self.prevConversions = conversions\n            self.prevHours = hours\n            self.costs = costs\n            self.revenues = revenues\n        else:\n            self.prevBudgets = np.append(self.prevBudgets, budgets, axis=0)\n            self.prevBids = np.append(self.prevBids, bids, axis=0)\n            self.prevClicks = np.append(self.prevClicks, clicks, axis=0)\n            self.prevConversions = np.append(self.prevConversions, conversions, axis=0)\n            self.prevHours = np.append(self.prevHours, hours, axis=0)\n            self.costs = np.append(self.costs, costs, axis=0)\n            self.revenues = np.append(self.revenues, revenues, axis=0)\n        self.updateMultiGP()\n        self.updateValuesPerClick()\n        self.t += 1\n\n    def valueForBudget(self, itemIdx, budget, values):\n        idx = np.isclose(budget, self.budgets)\n        if len(idx) > 0:\n            return values[itemIdx, idx]\n        idx = np.argwhere(budget >= self.budgets)\n        return values[itemIdx, idx.max()]\n\n    def firstRow(self, values):\n        firstRow = np.zeros(len(self.budgets)).tolist()\n        for i, b in enumerate(self.budgets):\n            firstRow[i] = [[values[0, i]], [0], [b]]\n        return firstRow\n\n    def optimize(self, values):\n        #start = time.time()\n        valIdx = 0\n        itIdx = 1\n        bIdx = 2\n        h = np.zeros(shape=(self.nCampaigns, len(self.budgets)))\n        h=h.tolist()\n        h[0] = self.firstRow(values)\n        for i in range(1, self.nCampaigns):\n            for j,b in enumerate(self.budgets):\n                h[i][j] = h[i-1][j][:]\n                maxVal = 0\n                for bi in range(0,j+1):\n                    if ((np.sum(h[i-1][bi][valIdx]) + self.valueForBudget(i,b - self.budgets[bi],values)) >maxVal):\n                        val = h[i-1][bi][valIdx][:]\n                        val.append(self.valueForBudget(i,b - self.budgets[bi],values))\n                        newValues = val[:]\n                        items = h[i-1][bi][itIdx][:]\n                        items.append(i)\n                        newItems = items[:]\n                        selBudgets = h[i-1][bi][bIdx][:]\n                        selBudgets.append(b - self.budgets[bi])\n                        newSelBudgets = selBudgets[:]\n                        h[i][j]=[newValues,newItems,newSelBudgets]\n                        maxVal = np.sum(newValues)\n        newBudgets=h[-1][-1][2]\n        newCampaigns=h[-1][-1][1]\n        # if some campaigns were not listed, list them anyway with budget 0!\n        if len(newBudgets) < self.nCampaigns:\n            temp = 
np.zeros(self.nCampaigns)\n temp[newCampaigns] = newBudgets\n newBudgets = temp\n newCampaigns = np.array(range(0, self.nCampaigns))\n return [newBudgets,newCampaigns]\n\n def valuesForCampaigns(self,sampling=False ,bidSampling = True):\n estimatedClicks = np.zeros(shape=(self.nCampaigns, len(self.budgets)))\n if( sampling==False):\n for c in range(0, self.nCampaigns):\n for j,b in enumerate(self.budgets):\n x= np.array([self.bids.T,np.matlib.repmat(b,1,self.nBids).reshape(-1)])\n x=np.atleast_2d(x).T\n x = self.normalize(x)\n if(bidSampling==False):\n estimatedClicksforBids=self.denormalizeOutput(self.gps[c].predict(x),c)\n idxs = np.argwhere(estimatedClicksforBids == estimatedClicksforBids.max()).reshape(-1)\n idx = np.random.choice(idxs)\n self.optimalBidPerBudget[c,j] = self.bids[idx]\n estimatedClicks[c,j] = estimatedClicksforBids.max()\n else:\n [meanEstimatedClicksForBids,sigmaEstimatedClicksForBids] = self.denormalizeOutput(self.gps[c].predict(x,return_std = True),c)\n estimatedClicksforBids = np.random.normal(meanEstimatedClicksForBids,sigmaEstimatedClicksForBids)\n idxs = np.argwhere(estimatedClicksforBids == estimatedClicksforBids.max()).reshape(-1)\n idx = np.random.choice(idxs)\n self.optimalBidPerBudget[c,j] = self.bids[idx]\n estimatedClicks[c,j] = estimatedClicksforBids.max()\n else:\n for c in range(0, self.nCampaigns):\n for j, b in enumerate(self.budgets):\n x= np.array([self.bids.T,np.matlib.repmat(b,1,self.nBids).reshape(-1)])\n x = np.atleast_2d(x).T\n x = self.normalize(x)\n [means,sigmas] = self.gps[c].predict(x,return_std=True)\n\n means = self.denormalizeOutput(means,c)\n sigmas = self.denormalizeOutput(sigmas,c)\n estimatedClicksforBids = np.random.normal(means,sigmas)\n idxs = np.argwhere(estimatedClicksforBids == estimatedClicksforBids.max()).reshape(-1)\n idx = np.random.choice(idxs)\n self.optimalBidPerBudget[c, j] = self.bids[idx]\n estimatedClicks[c, j] = estimatedClicksforBids.max()\n\n self.campaignsValues=estimatedClicks*self.valuesPerClick.reshape((self.nCampaigns, 1))\n return estimatedClicks*self.valuesPerClick.reshape((self.nCampaigns, 1))\n\n\n def chooseAction(self,sampling=False, fixedBid=False, fixedBudget=False, fixedBidValue=1.0, fixedBudgetValue=1000.0):\n\n \"\"\"\n finalBudgets = np.zeros(self.ncampaigns)\n finalBids = np.zeros(self.ncampaigns)\n\n for i in range(0,self.ncampaigns):\n finalBudgets[i] = np.random.choice(self.budgets)\n finalBids[i] = np.random.choice(self.bids)\n\n if (fixedBid == True):\n finalBids = np.ones(self.ncampaigns) * fixedBidValue\n\n if (fixedBudget == True):\n finalBudgets = np.ones(self.ncampaigns)* fixedBudgetValue\n \"\"\"\n values = self.valuesForCampaigns(sampling=sampling)\n\n #values = self.valuesCorrection(values)\n\n [newBudgets,newCampaigns] = self.optimize(values)\n finalBudgets = np.zeros(self.nCampaigns)\n finalBids = np.zeros(self.nCampaigns)\n for i,c in enumerate(newCampaigns):\n finalBudgets[c] = newBudgets[i]\n idx = np.argwhere(np.isclose(self.budgets,newBudgets[i])).reshape(-1)\n finalBids[c] = self.optimalBidPerBudget[c,idx]\n return [finalBudgets,finalBids]\n\n\n def valuesCorrection(self,values):\n for c in range(0, self.nCampaigns):\n for b in range(1, self.nBudget):\n if(values[c,b] 0:\n tmpEntry = DocEntry.get_by_id(int(testid), \n generic_key(dStore['dstore'], \n dStore['dname']))\n else:\n tmpEntry = DocEntry(parent=generic_key(dStore['dstore'], dStore['dname']))\n tmpEntry.test_text = text\n tmpEntry.test_questions = questions\n try:\n tmpEntry.put()\n req.response.write(\"Test case 
successfully added to the database.\")\n    except:\n        req.response.write(\"ERROR: Failed to add test case to the database.\")\n\ndef DocParams(test_id, test_templates):\n    if test_id == -1:\n        return {}\n    tmp_dstore = test_templates['doc']['datastore']\n    tmpEntity = tmp_dstore['entry'].get_by_id(int(test_id), \n                                              generic_key(tmp_dstore['dstore'],\n                                                          tmp_dstore['dname']))\n    params = {}\n    params['text'] = base64.decodestring(tmpEntity.test_text)\n    params['pid'] = test_id\n    test_questions = base64.decodestring( tmpEntity.test_questions ).split(\",\")\n    params['params'] = {}\n    for i in range(0, len(test_questions)):\n        params[\"params\"][i] = {}\n        params[\"params\"][i]['question'] = base64.decodestring( test_questions[i] )\n    return params\n\ndef DocGenerator(number, test_templates, offset):\n    ret = {}\n    dStore = test_templates['doc']['datastore']\n    doc_query = DocEntry.query(\n        ancestor=generic_key(dStore['dstore'], dStore['dname']))\n    test_cases = doc_query.fetch()\n    if number > len(test_cases):\n        number = len(test_cases)\n    cnt = 0\n    #for test_case in test_cases:\n    while cnt < number:\n        test_case = choice( test_cases)\n        test_cases.remove(test_case)\n        tmpStr = base64.decodestring(test_case.test_text)\n        test_questions = base64.decodestring(test_case.test_questions).split(\",\")\n        qLen = len(test_questions)\n        if qLen > 3:\n            qLen = 3\n        for cnt2 in range(0, qLen):\n            cnt2 = cnt2\n            tmpQuestion = choice (test_questions)\n            tmpStr = tmpStr + \"\\n\\n\\t\" + base64.decodestring( tmpQuestion )\n            test_questions.remove(tmpQuestion)\n        ret[cnt+offset] = tmpStr\n        cnt += 1\n        if cnt >= number:\n            break\n    return ret","sub_path":"TestGenerator/documentation.py","file_name":"documentation.py","file_ext":"py","file_size_in_byte":2792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} {"seq_id":"43656268","text":"'''\nWe have a string whose elements are digits between '0' and '9' (inclusive) that represents the structure of a word.\nThe word contains at most 10 distinct letters (but it can be longer than 10 letters if some are repeated), \nand the structure is obtained from the word by replacing each letter with a digit, according to these rules:\n- equal letters correspond to equal digits\n- different letters correspond to different digits\n\nExample: 'cappello' -> '93447228'\nExample: 'cappello' -> '12334556'\n\nSuppose we are given a \"structure\" and a set of words. 
\nWe want to obtain the subset of the given words that are compatible with the given structure.\n\nExample: if the structure is '1234' and the words are { 'cane', 'gatto', 'nasa', 'oca', 'pino'}\nthe words of the set that are compatible with the structure are {'pino', 'cane'}\n\nWrite a function decod( pfile, struttura) that takes as input:\n- the path of a file (pfile), containing text organized in lines, each consisting of a single word\n- a string of at least 1 character, composed only of digits (the structure of the words to look for)\n\nThe function must return the set of words in pfile that are compatible with the given structure.\n\nFor the examples see the file grade03.txt\n\nWARNINGS: \n\tdo not use non-ASCII characters, such as accented letters;\n\tdo not use modules that are not in the standard library.\nNOTE: the encoding of the file is 'utf-8'\nWARNING: If a grader test does not finish within 10 seconds, the score for that test is zero.\n'''\n\n\ndef decod(pfile, codice):\n    op_file= open(pfile,\"r\")\n    lungc=str(codice)\n    lungc=len(lungc)\n    strcodice=str(codice)\n    tst_file=0\n    ins=[]\n    while tst_file!='':\n        tst_file=op_file.readline()\n        parola=tst_file[0:-1]\n        lengh=len(parola)\n        if(lungc==lengh):\n            ins+=codicecodi(parola,strcodice,lengh)\n    ins=set(ins)\n    return(ins)\n\n\ndef codicecodi(parola,strcodice,lengh):\n    c=0\n    al=[]\n    while c HttpResponse:\n    \"\"\"Execute a blast of sequence against all sequences in the database\"\"\"\n    blast = BLASTFull('full', voucher_code, gene_code)\n    blast.save_seqs_to_file()\n\n    if not blast.is_blast_db_up_to_date():\n        blast.create_blast_db()\n\n    blast.save_query_to_file()\n    blast.do_blast()\n    result = blast.parse_blast_output()\n    blast.delete_query_output_files()\n    context = get_context(request)\n    context['result'] = result\n    return render(request, 'blast_local/index.html', context)\n","sub_path":"blast_local_full/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} {"seq_id":"300911086","text":"import numpy as np\nimport math as mt\n#import visualization as v\nimport dictionary_surfacearea as dicsurfarea\n#import timeit as timeit\n\n#start = timeit.default_timer()\n\nclass coordinate:\n    def __init__(self,x_coord,y_coord,z_coord):\n        self.x = x_coord\n        self.y = y_coord\n        self.z = z_coord\n\nclass tetra:\n    def __init__(self,point1,point2,point3,point4,material,position):\n        self.p1 = point1\n        self.p2 = point2\n        self.p3 = point3\n        self.p4 = point4\n        self.mat = material\n        self.pos = position\n\ndecimal = 5\na = 0.10\nb = 0.10\nc = 0.10\n#R = 0.1\ndx = dy = dz = 0.004\ndr = mt.sqrt(dx**2 + dy**2 + dz**2)\n\n# 1 for Yes 0 for No\ntetramode = 1\nsurfaceareacalc = 1\nvolumecalc = 1\nvisualization = 0\n\nne = 3\nnx = int(2*a/dx + ne)\nny = int(2*b/dy + ne)\nnz = int(2*c/dz + ne)\n\ncx = int(nx/2)\ncy = int(ny/2)\ncz = int(nz/2)\n\nvoxel = np.zeros((nx,ny,nz),dtype=int)\nvoxel_count = np.copy(voxel)\ncount = 0\nfor k in range(nz):\n    for j in range(ny):\n        for i in range(nx):\n            voxel_count[i,j,k] = count \n            count = count + 1\n            if((i-cx)**2/(a/dx)**2 + (j-cy)**2/(b/dy)**2 +(k-cz)**2/(c/dz)**2 <= 1):\n#            if(i>(0.5*a/dx) and i<2*a/(dx) and j>(0.5*b/dx) and j<2*b/dx and k>0.5*c/dz and k<2*c/dz):\n                voxel[i,j,k] = 1\n\n# Smoothening\n# tagging the voxel on sides exposed\nside_exposed = np.copy(voxel)\n\nfor k in range (nz):\n    for j in range(ny):\n        for i in range(nx):\n            if(voxel[i,j,k] != 0):\n                count = 0\n                if(voxel[i-1,j,k] == 0):\n                    count = 
count + 1\n if(voxel[i+1,j,k] == 0):\n count = count + 1\n if(voxel[i,j-1,k] == 0):\n count = count + 1\n if(voxel[i,j+1,k] == 0):\n count = count + 1\n if(voxel[i,j,k-1] == 0):\n count = count + 1\n if(voxel[i,j,k+1] == 0):\n count = count + 1\n side_exposed[i,j,k] = count\n\n# Removing 5 sides exposed cubes\nfor k in range(nz):\n for j in range(ny):\n for i in range(nx):\n if(side_exposed[i,j,k] == 5):\n voxel[i,j,k] = 0\n side_exposed[i,j,k] = 0\n\n# Recounting\nfor k in range (nz):\n for j in range(ny):\n for i in range(nx):\n if(voxel[i,j,k] != 0):\n count = 0\n if(voxel[i-1,j,k] == 0):\n count = count + 1\n if(voxel[i+1,j,k] == 0):\n count = count + 1\n if(voxel[i,j-1,k] == 0):\n count = count + 1\n if(voxel[i,j+1,k] == 0):\n count = count + 1\n if(voxel[i,j,k-1] == 0):\n count = count + 1\n if(voxel[i,j,k+1] == 0):\n count = count + 1\n side_exposed[i,j,k] = count\n \nvoxel_n = nx*ny*nz\n\n# Creating the coordinate matrix\n# 0 : voxel(x,y,z)\n# 1 : Material tag\n\n# 2 : N1\n# 3 : N2\n# 4 : N3\n# 5 : N4\n\n# 6 : S1\n# 7 : S2\n# 8 : S3\n# 9 : S4\n\n# 10 : W1\n# 11 : W2\n# 12 : W3\n# 13 : W4\n\n# 14 : E1\n# 15 : E2\n# 16 : E3\n# 17 : E4\n\n# 18 : F1\n# 19 : F2 \n# 20 : F3\n# 21 : F4\n\n# 22 : B1\n# 23 : B2\n# 24 : B3\n# 25 : B4\n\nvoxel_db = np.empty((voxel_n,26),dtype=object)\n\nv_count = 0\nfor k in range(nz):\n for j in range(ny):\n for i in range(nx):\n voxel_db[v_count,0] = coordinate(i*dx,j*dy,k*dz)\n voxel_db[v_count,1] = voxel[i,j,k]\n \n # Breaking down the i,j,k in vertex coordinates\n A = coordinate(round((i*dx - dx/2.0),decimal), round((j*dy + dy/2.0),decimal), round((k*dz + dz/2.0),decimal))\n B = coordinate(round((i*dx - dx/2.0),decimal), round((j*dy - dy/2.0),decimal), round((k*dz + dz/2.0),decimal))\n C = coordinate(round((i*dx + dx/2.0),decimal), round((j*dy - dy/2.0),decimal), round((k*dz + dz/2.0),decimal))\n D = coordinate(round((i*dx + dx/2.0),decimal), round((j*dy + dy/2.0),decimal), round((k*dz + dz/2.0),decimal))\n E = coordinate(round((i*dx - dx/2.0),decimal), round((j*dy + dy/2.0),decimal), round((k*dz - dz/2.0),decimal))\n F = coordinate(round((i*dx - dx/2.0),decimal) ,round((j*dy - dy/2.0),decimal), round((k*dz - dz/2.0),decimal))\n G = coordinate(round((i*dx + dx/2.0),decimal), round((j*dy - dy/2.0),decimal), round((k*dz - dz/2.0),decimal))\n H = coordinate(round((i*dx + dx/2.0),decimal), round((j*dy + dy/2.0),decimal), round((k*dz - dz/2.0),decimal))\n I = coordinate(round((i*dx),decimal), round(j*dy,decimal), round(k*dz,decimal))\n Nc = coordinate(round(i*dx,decimal), round(j*dy,decimal), round((k*dz + dz/2.0),decimal))\n Sc = coordinate(round(i*dx,decimal), round(j*dy,decimal), round((k*dz - dz/2.0),decimal))\n Wc = coordinate(round(i*dx,decimal), round((j*dy - dy/2.0),decimal), round(k*dz,decimal))\n Ec = coordinate(round(i*dx,decimal),round((j*dy + dy/2.0),decimal), round(k*dz,decimal))\n Fc = coordinate(round((i*dx + dx/2.0),decimal), round(j*dy,decimal), round(k*dz,decimal))\n Bc = coordinate(round((i*dx - dx/2.0),decimal), round(j*dy,decimal), round(k*dz,decimal))\n mat_tag = voxel[i,j,k]\n \n # North Tetras\n \n voxel_db[v_count,2] = tetra(Nc,A,B,I,mat_tag,'N1') # N1 coordinates 2 \n voxel_db[v_count,3] = tetra(Nc,B,C,I,mat_tag,'N2') # N2 coordinates 3\n voxel_db[v_count,4] = tetra(Nc,C,D,I,mat_tag,'N3') # N3 coordinates 4\n voxel_db[v_count,5] = tetra(Nc,D,A,I,mat_tag,'N4') # N4 coordinates 5\n \n # South Tetras\n voxel_db[v_count,6] = tetra(Sc,E,F,I,mat_tag,'S1') # S1 coordinates 6\n voxel_db[v_count,7] = tetra(Sc,F,G,I,mat_tag,'S2') # S2 
coordinates 7\n voxel_db[v_count,8] = tetra(Sc,G,H,I,mat_tag,'S3') # S3 coordinates 8\n voxel_db[v_count,9] = tetra(Sc,H,E,I,mat_tag,'S4') # S4 coordinates 9\n \n # West Tetras\n voxel_db[v_count,10] = tetra(Wc,C,B,I,mat_tag,'W1') # W1 coordinates 10\n voxel_db[v_count,11] = tetra(Wc,B,F,I,mat_tag,'W2') # W2 coordinates 11\n voxel_db[v_count,12] = tetra(Wc,F,G,I,mat_tag,'W3') # W3 coordinates 12\n voxel_db[v_count,13] = tetra(Wc,G,C,I,mat_tag,'W4') # W4 coordinates 13\n \n # East Tetras\n voxel_db[v_count,14] = tetra(Ec,A,D,I,mat_tag,'E1') # E1 coordinates 14\n voxel_db[v_count,15] = tetra(Ec,D,H,I,mat_tag,'E2') # E2 coordinates 15\n voxel_db[v_count,16] = tetra(Ec,H,E,I,mat_tag,'E3') # E3 coordinates 16\n voxel_db[v_count,17] = tetra(Ec,E,A,I,mat_tag,'E4') # E4 coordinates 17\n \n # Front Tetras\n voxel_db[v_count,18] = tetra(Fc,D,C,I,mat_tag,'F1') # F1 coordinates 18\n voxel_db[v_count,19] = tetra(Fc,C,G,I,mat_tag,'F2') # F2 coordinates 19\n voxel_db[v_count,20] = tetra(Fc,G,H,I,mat_tag,'F3') # F3 coordinates 20\n voxel_db[v_count,21] = tetra(Fc,H,D,I,mat_tag,'F4') # F4 coordinates 21\n \n # Back Tetras\n voxel_db[v_count,22] = tetra(Bc,B,A,I,mat_tag,'B1') # B1 coordinates 22\n voxel_db[v_count,23] = tetra(Bc,A,E,I,mat_tag,'B2') # B2 coordinates 23\n voxel_db[v_count,24] = tetra(Bc,E,F,I,mat_tag,'B3') # B3 coordinates 24\n voxel_db[v_count,25] = tetra(Bc,F,B,I,mat_tag,'B4') # B4 coordinates 25\n \n v_count = v_count + 1\n\n# Add centroid\n\nG = np.zeros((voxel_n,24),dtype = object)\nfor vc in range(voxel_n):\n for i in range(24):\n gx = (voxel_db[vc,i+2].p1.x + voxel_db[vc,i+2].p2.x + voxel_db[vc,i+2].p3.x + voxel_db[vc,i+2].p4.x)/4\n gy = (voxel_db[vc,i+2].p1.y + voxel_db[vc,i+2].p2.y + voxel_db[vc,i+2].p3.y + voxel_db[vc,i+2].p4.y)/4\n gz = (voxel_db[vc,i+2].p1.z + voxel_db[vc,i+2].p2.z + voxel_db[vc,i+2].p3.z + voxel_db[vc,i+2].p4.z)/4\n G[vc,i] = coordinate(gx,gy,gz)\n\n \n# Triangle smoothening\nif (tetramode == 1):\n v_count = 0\n for k in range(nz):\n for j in range(ny):\n for i in range(nx):\n # Three Sides Exposed\n if (side_exposed[i,j,k] > 1):\n \n # North Side\n if(voxel[i,j,k+1] == 0):\n voxel_db[v_count,2].mat = 0 \n voxel_db[v_count,3].mat = 0 \n voxel_db[v_count,4].mat = 0 \n voxel_db[v_count,5].mat = 0 \n \n if(side_exposed[i,j+1,k] > 1):\n voxel_db[v_count,14].mat = 0\n if(side_exposed[i,j-1,k] > 1):\n voxel_db[v_count,10].mat = 0\n if(side_exposed[i+1,j,k] > 1):\n voxel_db[v_count,18].mat = 0\n if(side_exposed[i-1,j,k] > 1):\n voxel_db[v_count,22].mat = 0\n \n # South Side\n if(voxel[i,j,k-1] == 0):\n voxel_db[v_count,6].mat = 0 \n voxel_db[v_count,7].mat = 0 \n voxel_db[v_count,8].mat = 0 \n voxel_db[v_count,9].mat = 0 \n \n if(side_exposed[i,j+1,k] > 1):\n voxel_db[v_count,16].mat = 0\n if(side_exposed[i,j-1,k] > 1):\n voxel_db[v_count,12].mat = 0\n if(side_exposed[i+1,j,k] > 1):\n voxel_db[v_count,20].mat = 0\n if(side_exposed[i-1,j,k] > 1):\n voxel_db[v_count,24].mat = 0\n \n # West Side\n if(voxel[i,j-1,k] == 0):\n voxel_db[v_count,10].mat = 0 \n voxel_db[v_count,11].mat = 0 \n voxel_db[v_count,12].mat = 0 \n voxel_db[v_count,13].mat = 0 \n \n if(side_exposed[i,j,k+1] > 1):\n voxel_db[v_count,3].mat = 0\n if(side_exposed[i,j,k-1] > 1):\n voxel_db[v_count,7].mat = 0\n if(side_exposed[i+1,j,k] > 1):\n voxel_db[v_count,19].mat = 0\n if(side_exposed[i-1,j,k] > 1):\n voxel_db[v_count,25].mat = 0\n \n # East Side\n if(voxel[i,j+1,k] == 0):\n voxel_db[v_count,14].mat = 0 \n voxel_db[v_count,15].mat = 0 \n voxel_db[v_count,16].mat = 0 \n voxel_db[v_count,17].mat = 0 \n \n 
if(side_exposed[i,j,k+1] > 1):\n voxel_db[v_count,5].mat = 0\n if(side_exposed[i,j,k-1] > 1):\n voxel_db[v_count,9].mat = 0\n if(side_exposed[i+1,j,k] > 1):\n voxel_db[v_count,21].mat = 0\n if(side_exposed[i-1,j,k] > 1):\n voxel_db[v_count,23].mat = 0\n \n # Front Side\n if(voxel[i+1,j,k] == 0):\n voxel_db[v_count,18].mat = 0 \n voxel_db[v_count,19].mat = 0 \n voxel_db[v_count,20].mat = 0 \n voxel_db[v_count,21].mat = 0 \n \n if(side_exposed[i,j+1,k] > 1):\n voxel_db[v_count,15].mat = 0\n if(side_exposed[i,j-1,k] > 1):\n voxel_db[v_count,13].mat = 0\n if(side_exposed[i,j,k+1] > 1):\n voxel_db[v_count,4].mat = 0\n if(side_exposed[i,j,k-1] > 1):\n voxel_db[v_count,8].mat = 0\n \n \n # Back Side\n if(voxel[i-1,j,k] == 0):\n voxel_db[v_count,22].mat = 0 \n voxel_db[v_count,23].mat = 0 \n voxel_db[v_count,24].mat = 0 \n voxel_db[v_count,25].mat = 0\n \n if(side_exposed[i,j+1,k] > 1):\n voxel_db[v_count,17].mat = 0\n if(side_exposed[i,j-1,k] > 1):\n voxel_db[v_count,11].mat = 0\n if(side_exposed[i,j,k+1] > 1):\n voxel_db[v_count,2].mat = 0\n if(side_exposed[i,j,k-1] > 1):\n voxel_db[v_count,6].mat = 0\n \n \n v_count = v_count + 1\n\n# Visualization\nif(visualization == 1):\n v.visualize(voxel_db,side_exposed,nx,ny,nz,cx,cy,cz)\n\n# Calculate the surface area\nif(surfaceareacalc == 1):\n \n d = 0\n areasum1 = 0.0\n for k in range(nz):\n for j in range(ny):\n for i in range(nx):\n if(side_exposed[i,j,k] != 0):\n for m in range(2,26):\n if(voxel_db[d,m].mat != 0):\n areasum1 = areasum1 + dicsurfarea.func(voxel_db[d,m].pos,voxel_db,d,nx,ny,nz,dx,voxel_db[d,m].mat)\n d = d + 1\n print(\"Tetra Area = \", areasum1)\n \n voxelarea = 0\n for k in range(nz):\n for j in range(ny):\n for i in range(nx):\n if(voxel[i,j,k] == 1):\n voxelarea = side_exposed[i,j,k] + voxelarea\n actualarea = 4*mt.pi*(((a*b)**1.6+(a*c)**1.6+(b*c)**1.6)/3.0)**(1/1.6)\n print(\"Actual Area = \", actualarea)\n voxelarea = voxelarea*dx*dx\n print(\"voxel area = \", voxelarea)\n \n# Calculate volume\nif(volumecalc == 1):\n \n volume = 0\n for vc in range(voxel_n):\n for i in range(2,26):\n if(voxel_db[vc,i].mat == 1):\n volume = volume + 1\n \n tetra_volume = volume * dx*dy*dz / 24\n print(\"\\ntetra volume = \",tetra_volume)\n \n voxel_volume = 0\n for k in range(nz):\n for j in range(ny):\n for i in range(nx):\n if(voxel[i,j,k] == 1):\n voxel_volume = voxel_volume + dx*dy*dz\n \n print(\"voxel volume = \", voxel_volume)\n \n actualvolume = 4/3*mt.pi*a*b*c\n print(\"actual volume = \",actualvolume,\"\\n\")\n\n#stop = timeit.default_timer()\n#print(\"dx = \",dx,\"time = \",stop-start,\"seconds\")\n#print(\"domain size =\",voxel_n*24)\n#print(\"matrix size =\",voxel_n*24*voxel_n*24)\n\n# Steady State Heat Transfer Solver\n\ndef voxeldatabase():\n return(voxel_db,voxel,dx,dy,dz,voxel_n,G)\n \n","sub_path":"original_code/Ellipsoid.py","file_name":"Ellipsoid.py","file_ext":"py","file_size_in_byte":15087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"568600559","text":"#!/usr/bin/python\n#\n# File name: feature_selection.py\n# Student Name: Mengyi Xu\n# SID: 861051501\n# Email: mxu008@ucr.edu \n\nimport sys\nimport math\nimport time\n\n# Function of calculating the nearest neighbor\n# Input: instances, current feature list, and two indexs\n# Return nearest distance\ndef nearestNeighbor(ins, currFeatures, x, y):\n\tnearest = 0\n\tminDis = float(\"inf\")\n\t#For each neighbor\n\tfor i in range(0, len(ins)):\n\t\tdistance = 0.0\n\t\tif (i != y):\n\t\t\t#Calculate distane\n\t\t\tfor j 
in range(0, x):\n\t\t\t\tdistance += (ins[y][currFeatures[j]] - ins[i][currFeatures[j]]) * (ins[y][currFeatures[j]] - ins[i][currFeatures[j]])\n\t\t\t# Set min distance and nearest index\n\t\t\tif (distance < minDis):\n\t\t\t\tminDis = distance\n\t\t\t\tnearest = i\n\n\treturn nearest\n\n# Function of forward selection\n# Input: instances\n# Return best subset, best accuracy\ndef forwardSelection(ins):\n\tcurrFeatures = [0] * (len(ins[0]) - 1)\n\tbestSubset = [0] * (len(ins[0]) - 1)\n\trestFeatures = []\n\tbestFeature = 0\n\tbestAccuracy = 0.0\n\t#Initialize rest feature list to fill it up\n\tfor i in range(1, len(ins[0])):\n\t\trestFeatures.append(i)\n\t#From first feature to the last feature\n\tfor i in range(1, len(ins[0])):\n\t\ttemp = 0.0\n\t\t#For each feature in the rest feature list\n\t\tfor j in range(0, len(ins[0]) - i):\n\t\t\tcurrFeatures[i - 1] = restFeatures[j]\n\t\t\t#Calculate accuracy\n\t\t\taccuracy = 0.0\n\t\t\tfor k in range(0, len(ins)):\n\t\t\t\tnearest = nearestNeighbor(ins, currFeatures, i, k)\n\t\t\t\tif (ins[k][0] == ins[nearest][0]):\n\t\t\t\t\taccuracy+=1\n\t\t\taccuracy = accuracy / len(ins) * 100\n\n\t\t\tprint(\"Using feature(s) \"+ str(currFeatures) +\" accuracy is \" + str(accuracy) + \"%\\n\")\n\t\t\t#Set best feature and current accuracy\n\t\t\tif (accuracy > temp):\n\t\t\t\ttemp = accuracy\n\t\t\t\tbestFeature = j\n\n\t\tcurrFeatures[i - 1] = restFeatures[bestFeature]\n\t\trestFeatures[bestFeature] = restFeatures[len(ins[0]) - 1 - i]\n\t\t#Set best accuracy and subset\n\t\tif (temp > bestAccuracy):\n\t\t\tbestAccuracy = temp\n\t\t\tfor j in range(0, i):\n\t\t\t\tbestSubset[j] = currFeatures[j]\n\t\telse:\n\t\t\tprint(\"(Warning, accuracy has decreased! Continuing search in case of local maximum)\")\n\t\t\n\t\tprint(\"Feature set \" + str(currFeatures) + \" was best, accuracy is \" + str(temp) + \"%\\n\")\n\n\treturn bestSubset, bestAccuracy\n\n# Function of Backward Elimination\n# Input: instances\n# Return best subset, best accuracy\ndef backwardElimination(ins):\n\tcurrFeatures = [0] * (len(ins[0]) - 1)\n\tbestSubset = [0] * (len(ins[0]) - 1)\n\tend = 0\n\teliminator = 0\n\tbestFeature = 0\n\tbestAccuracy = 0.0\n\t#Initialize current feature list to fill it up\n\tfor i in range(0, len(ins[0]) - 1):\n\t\tcurrFeatures[i] = i + 1\n\t#From last feature to the first feature\n\tfor i in range(len(ins[0]) - 2, -1, -1):\n\t\ttemp = 0.0\n\t\tend = currFeatures[i]\n\t\tfor j in range(i, -1, -1):\n\t\t\teliminator = currFeatures[j]\n\t\t\tcurrFeatures[j] = end\n\t\t\t#Calculate accuracy\n\t\t\taccuracy = 0.0\n\t\t\tfor k in range(0, len(ins)):\n\t\t\t\tnearest = nearestNeighbor(ins, currFeatures, i, k)\n\t\t\t\tif (ins[k][0] == ins[nearest][0]):\n\t\t\t\t\taccuracy+=1\n\t\t\taccuracy = accuracy / len(ins) * 100\n\n\t\t\tcurrFeatures[i] = 0\n\t\t\tprint(\"Using feature(s) \"+ str(currFeatures) +\" accuracy is \" + str(accuracy) + \"%\\n\")\n\t\t\t#Set best feature and current accuracy\n\t\t\tif (accuracy > temp):\n\t\t\t\ttemp = accuracy\n\t\t\t\tbestFeature = j\n\t\t\tcurrFeatures[j] = eliminator\n\t\t\t\n\t\t\tif (i == len(ins[0]) - 1):\n\t\t\t\tj = -1\n\t\t\n\t\tcurrFeatures[bestFeature] = end\n\t\t#Set best accuracy and subset\n\t\tif (temp > bestAccuracy):\n\t\t\tbestAccuracy = temp\n\t\t\tfor j in range(0, len(ins[0]) - 1):\n\t\t\t\tbestSubset[j] = currFeatures[j]\n\t\telse:\n\t\t\tprint(\"(Warning, accuracy has decreased! Continuing search in case of local maximum)\")\n\t\tcurrFeatures[i] = 0\n\t\tprint(\"Feature set \" + str(currFeatures) + \" was best, 
accuracy is \" + str(temp) + \"%\\n\")\n\t\n\treturn bestSubset, bestAccuracy\n\n# Function of my algorithm by combined with FS and BE\n# Input: instances\n# Return best subset, best accuracy\ndef specialAlgorithm(ins):\n\tcurr1 = [0] * (len(ins[0]) - 1)\n\tcurr2 = [0] * (len(ins[0]) - 1)\n\tbestSubset1 = [0] * (len(ins[0]) - 1)\n\tbestSubset2 = [0] * (len(ins[0]) - 1)\n\trestFeatures = []\n\tend = 0\n\teliminator = 0\n\tbestFeature1 = 0\n\tbestFeature2 = 0\n\tbestAccuracy1 = 0.0\n\tbestAccuracy2 = 0.0\n\t#Initialize rest feature list and the feature list of backforward elimination to fill them up\n\tfor i in range(1, len(ins[0])):\n\t\trestFeatures.append(i)\n\t\tcurr2[i - 1] = i\n\t#First run the Forward_Selection\n\tprint(\"Start Forward_Selection: \")\n\t#From first feature to the feature of mid position in the list\n\tfor i in range(1, len(ins[0]) / 2):\n\t\ttemp = 0.0\n\t\tfor j in range(0, len(ins[0]) - i):\n\t\t\tcurr1[i - 1] = restFeatures[j]\n\t\t\t#Calculate accuracy\n\t\t\taccuracy = 0.0\n\t\t\tfor k in range(0, len(ins)):\n\t\t\t\tnearest = nearestNeighbor(ins, curr1, i, k)\n\t\t\t\tif (ins[k][0] == ins[nearest][0]):\n\t\t\t\t\taccuracy+=1\n\t\t\taccuracy = accuracy / len(ins) * 100\n\n\t\t\tprint(\"Using feature(s) \"+ str(curr1) +\" accuracy is \" + str(accuracy) + \"%\\n\")\n\t\t\t#Set best feature and current accuracy\n\t\t\tif (accuracy > temp):\n\t\t\t\ttemp = accuracy\n\t\t\t\tbestFeature1 = j\n\n\t\tcurr1[i - 1] = restFeatures[bestFeature1]\n\t\trestFeatures[bestFeature1] = restFeatures[len(ins[0]) - 1 - i]\n\t\t#Set best accuracy and subset\n\t\tif (temp > bestAccuracy1):\n\t\t\tbestAccuracy1 = temp\n\t\t\tfor j in range(0, i):\n\t\t\t\tbestSubset1[j] = curr1[j]\n\t\telse:\n\t\t\tbreak\n\t\t\t\t\n\t\tprint(\"Feature set \" + str(curr1) + \" was best, accuracy is \" + str(temp) + \"%\\n\")\n\t#Then run the Backward elimination\n\tprint(\"Start Backward_Elimination: \")\n\t#From the feature of the mid position in the list to the last feature\n\tfor i in range(len(ins[0]) - 2, len(ins[0]) / 2 - 1, -1):\n\t\ttemp = 0.0\n\t\tend = curr2[i]\n\t\tfor j in range(i, -1, -1):\n\t\t\teliminator = curr2[j]\n\t\t\tcurr2[j] = end\n\t\t\t#Calculate accuracy\n\t\t\taccuracy = 0.0\n\t\t\tfor k in range(0, len(ins)):\n\t\t\t\tnearest = nearestNeighbor(ins, curr2, i, k)\n\t\t\t\tif (ins[k][0] == ins[nearest][0]):\n\t\t\t\t\taccuracy+=1\n\t\t\taccuracy = accuracy / len(ins) * 100\n\n\t\t\tcurr2[i] = 0\n\t\t\tprint(\"Using feature(s) \"+ str(curr2) +\" accuracy is \" + str(accuracy) + \"%\\n\")\n\t\t\t#Set best feature and current accuracy\n\t\t\tif (accuracy > temp):\n\t\t\t\ttemp = accuracy\n\t\t\t\tbestFeature2 = j\n\t\t\tcurr2[j] = eliminator\n\t\t\t\n\t\t\tif (i == len(ins[0]) - 1):\n\t\t\t\tj = -1\n\t\t\n\t\tcurr2[bestFeature2] = end\n\t\t#Set best accuracy and subset\n\t\tif (temp > bestAccuracy2):\n\t\t\tbestAccuracy2 = temp\n\t\t\tfor j in range(0, len(ins[0]) - 1):\n\t\t\t\tbestSubset2[j] = curr2[j]\n\t\telse:\n\t\t\tbreak\n\t\tcurr2[i] = 0\n\t\tprint(\"Feature set \" + str(curr2) + \" was best, accuracy is \" + str(temp) + \"%\\n\")\n\t#Compare the bestAccuracys between FS and BE, then return a better result\n\tif (bestAccuracy1 > bestAccuracy2):\n\t\treturn bestSubset1, bestAccuracy1\n\treturn bestSubset2, bestAccuracy2\n\nif __name__ == '__main__':\n\tprint(\"\\nWelcome to Mengyi's Feature Selection Algorithm.\")\n\tfilename = raw_input(\"Type in the name of the file to test: \")\n\t# Choose the algorithm\n\twhile 1:\t\t\t\n\t\tchoice = raw_input(\"\\nType the number of the 
algorithm you want to run.\\n\\n\\t1. Forward Selection\\n\\t2. Backward Elimination\\n\\t3. Mengyi's Special Algorithm\\n\\t4. Exit\\n\")\n\t\tif (choice == '1' or choice == '2' or choice == '3'):\n\t\t\tbreak\n\t\t# Exit the program\n\t\telif (choice == '4'):\n\t\t\tprint(\"Bye!\\n\")\n\t\t\tsys.exit(0)\n\t\t# Check the validity of input\n\t\telse:\n\t\t\tprint(\"Input is invalid, please try again!\")\n\n\t# Read input file\n\tf = open(filename)\n\tins = []\n\tfor line in f:\n\t\tsFeature = line.split()\n\t\tfeature = [float(n) for n in sFeature]\n\t\tins.append(feature)\n\tf.close()\n\n\tprint(\"\\nThis dataset has \" + str(len(ins[0]) - 1) + \" features (not including the class attribute), with \" + str(len(ins)) + \" instances.\\n\")\n\t# Normalize the data\n\tprint(\"Please wait while I normalize the data...\")\n\tfor j in range(1, len(ins[0]) - 1):\n\t\tmean = 0.0\n\t\tstandDev = 0.0\n\t\t# Compute mean \n\t\tfor i in range(0, len(ins)):\n\t\t\tmean += ins[i][j]\n\t\tmean = mean / len(ins)\n\t\t# Compute standard deviation\n\t\tfor i in range(0, len(ins)):\n\t\t\tstandDev += (ins[i][j] - mean) * (ins[i][j] - mean)\n\t\tstandDev = math.sqrt(standDev / (len(ins) - 1))\n\t\tfor i in range(0, len(ins)):\n\t\t\tins[i][j] = (ins[i][j] - mean) / standDev\n\n\tprint(\"Done!\\n\")\n\tprint(\"Running nearest neighbor with all \" + str(len(ins[0]) - 1) + \" features, using 'leaving-one-out' evalution.\\n\")\n\tprint(\"Beginning search.\\n\")\n\t#Start timer\n\tstart = time.time()\n\t# Forward Selection\n\tif (choice == '1'):\n\t\tbestSubset, bestAccuracy = forwardSelection(ins)\n\t# Backward Elimination\n\tif (choice == '2'):\n\t\tbestSubset, bestAccuracy = backwardElimination(ins)\n\t# Mengyi's Special Algorithm\n\tif (choice == '3'):\n\t\tbestSubset, bestAccuracy = specialAlgorithm(ins)\n\t#End timer\n\tend = time.time()\n\telapsed = end - start\n\tprint(\"Finished search!! 
The best feature subset is \" + str(bestSubset) + \", which has an accuracy of \" + str(bestAccuracy) + \"%\\n\")\n\tprint(\"Elapsed time: \" + str(elapsed))\n\n","sub_path":"cs170/projects/pro2/feature_selection.py","file_name":"feature_selection.py","file_ext":"py","file_size_in_byte":8755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"643770731","text":"# coding: utf-8\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport tensorflow as tf\n\nminist = input_data.read_data_sets('../materials/MINIST', one_hot=True)\n\nx = tf.placeholder(tf.float32, [None, 784])\nw = tf.Variable(tf.zeros([784, 10]))\nb = tf.Variable(tf.zeros([10]))\ny = tf.matmul(x, w) + b\n\ny_ = tf.placeholder(tf.float32, [None, 10])\ncross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))\ntrain_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)\n\nif __name__ == '__main__':\n sess = tf.InteractiveSession()\n tf.global_variables_initializer().run()\n\n for _ in range(1000):\n batch_xs, batch_ys = minist.train.next_batch(100)\n sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})\n\n correct_prediction = tf.equal(tf.argmax(y, 1), tf.arg_max(y_, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n print(sess.run(accuracy, feed_dict={x: minist.test.images, y_: minist.test.labels}))\n","sub_path":"tensorflow/minist_softmax.py","file_name":"minist_softmax.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"435790215","text":"# *********************************************************\n\n# Digital Robotic Simulation Gym Space\n# Welcome to the code\n# Hope you enjoy! :D\n\n# *********************************************************\n\n\"\"\"\nDigital Robotic Simulation Gym Space\nAuthors: Jacob Lagares and Sergi Valverde\nContact at jlagarespo@iebesalu.cat or sergivalv@gmail.com\nStarted date: Some day in April of 2018\nLast Release date:??/??/??\n\nDigital Robotic Simulation Gym is a tool for simulating robotic behaviours, such as an\nautomated controller what needs to avoid a set of obstacles, an agent what wants to get\nout of a maze, or similar.\n\nFor doing this, we program the brain.py class in what you change the behaviour of everything\nand we can test a lot of algorythms, from the most simplistic ones, like random direction\nchoosing, to more advanced things, like Machine Learning or this kind of things(what we\ngoing to start working in it in the future!). 
Perfect place to test cool stuff.\n\nControls:\nESC - exit\nR - reset everything\n\"\"\"\n\n# start\n\nimport sys\nsys.path.insert(0, \"../../\")\n\nimport os.path\nimport pygame\nimport numpy as np\nfrom pygame.locals import *\nfrom brain import Brain as Agent\nfrom obstacle import Obstacle as Obstacle\nfrom problemMap import ProblemMap as ProblemMap\nfrom goal import Goal as Goal\nfrom PIL import Image\n\n# *********************************************************\n# IMPLEMENTATION\n# *********************************************************\n\n# Main simulator constants\n\n# problem variables\nproblemW = 600\nproblemH = 400\nframerate = 10\n\n# the number of obstacles can be set\nNUM_OBSTACLES = 5\n\n# agent\n# inverting x and y with w and h because np uses cols and rows system\nagentPosR = np.random.randint(0, problemH)\nagentPosC = np.random.randint(0, problemW)\n\nsensorW = 50\nsensorH = 50\nspeed = 5\n\n# goal\n# same as the agent definition\ngoalR = problemH / 2\ngoalC = problemW / 2\n\n# *********************************************************\n\n# Get the rect of the screen\nSCREENRECT = Rect(0, 0, problemW, problemH)\n\n# main dir has to be re-defined if new exercises are called from the\n# examples/name/ folder\nmain_dir = os.path.split(os.path.abspath('../'))[0]\nprint(main_dir)\nclock = pygame.time.Clock()\n\n# see if we can load more than standard BMP\nif not pygame.image.get_extended():\n raise SystemExit(\"Sorry, extended image module required\")\n\n\ndef load_image(file):\n \"\"\"\n This would load an image from files\n (PNG, JPEG or BITMAP)\n Used as sprites base\n \"\"\"\n print(\"Loading: \" + file + \" images\")\n file = os.path.join(main_dir, 'data', file)\n try:\n surface = pygame.image.load(file)\n except pygame.error:\n raise SystemExit('Could not load image \"%s\" %s' %(file, pygame.get_error()))\n return surface.convert()\n\n\n# Load image\"s\"\ndef load_images(*files):\n \"\"\"\n Uses load_image(file) to load multiple images\n \"\"\"\n\n print(\"Loading every: \" + files + \" images\")\n imgs = []\n for file in files:\n imgs.append(load_image(file))\n\n return imgs\n\n# Initialize pygame\npygame.init()\n\n# Set the display mode\nwinstyle = 0 # FULLSCREEN\nbestdepth = pygame.display.mode_ok(SCREENRECT.size, winstyle, 32)\nscreen = pygame.display.set_mode(SCREENRECT.size, winstyle, bestdepth)\n\n# Load images, assign to sprite classes\n# (do this before the classes are used, after screen setup)\nimg = load_image('player1.gif')\nimgObstacle = load_image('obstacle.png')\nimgGoal = load_image(\"goal.png\")\n# imgGoal = load_image('obstacle.png')\nAgent.images = [img, pygame.transform.flip(img, 1, 0)]\nObstacle.images = [imgObstacle, pygame.transform.flip(img, 1, 0)]\nGoal.images = [imgGoal, pygame.transform.flip(img, 1, 0)]\n\n# decorate the game window\nicon = pygame.transform.scale(Agent.images[0], (32, 32))\npygame.display.set_icon(icon)\npygame.display.set_caption('Gym 10.0')\npygame.mouse.set_visible(0)\n\n# create the background, tile the bgd image\nbgdtile = load_image('background.gif')\nbackground = pygame.Surface(SCREENRECT.size)\n\n# Render the background\nfor x in range(0, SCREENRECT.width, bgdtile.get_width()):\n for y in range(0, SCREENRECT.height, bgdtile.get_height()):\n background.blit(bgdtile, (x, y))\n\n# Blit the background\nscreen.blit(background, (0, 0))\npygame.display.flip()\n\n# assign default groups to each sprite class\nall = pygame.sprite.RenderUpdates()\nAgent.containers = all\nObstacle.containers = all\nGoal.containers = all\n\nagent = 
Agent(sensorW, sensorH, speed)\n\n# generate the map\nmp = ProblemMap()\nmp.setMapSize(SCREENRECT.width, SCREENRECT.height)\n\n# generate obstacles and assign them to random positions\nobstacles = [Obstacle() for obs in range(NUM_OBSTACLES)]\n\n# set goal\ngoal = Goal()\ngoal.setPos(goalR, goalC, 50, 50)\nmp.setGoal(goalR, goalC, 50, 50)\n\n# assign positions to obstacles and add them to the map\nfor obs in obstacles:\n    x_pos = np.random.randint(0, problemW)\n    y_pos = np.random.randint(0, problemH)\n    obs.setPos(x_pos, y_pos, 10, 10)\n    mp.setObstacle(obs.getX(),\n                   obs.getY(),\n                   obs.getW(),\n                   obs.getH())\n\nagent.setPos(agentPosR, agentPosC, 10, 10)\n\n# the agent is iteratively asking for a new position\n# while it's finding the goal. If it touches an obstacle\n# the game is finished\n\nagent_r, agent_c = agent.getSize()\n\nprint(\"--------------------------------------------------\")\nprint(\"AGENT POS:\", agent.getPos())\nprint(\"SENSOR POS:\", sensorW, sensorH)\nprint(\"--------------------------------------------------\")\nprint(agent_r, agent_c)\n\nif agent_r > sensorW:\n    print(\"Agent too big!\")\n    exit()\n\nif agent_c > sensorH:\n    print(\"Agent too big!\")\n    exit()\n\n# save map as bitmap\ndata = mp.getMap(0, 0, problemW, problemH)\nprint(data.shape)\n\ndataRGB = np.stack([data, np.zeros_like(data), data], axis=2)\n\nimg = Image.fromarray(dataRGB.astype(\"uint8\") * 255, \"RGB\")\nimg.save(\"map.png\")\nimg.show()\n\nwhile agent.alive():\n    # get current state\n    agentPosR, agentPosC = agent.getPos()\n    sensor_info = mp.getMap(agentPosR - sensorW / 2,\n                            agentPosC - sensorH / 2,\n                            sensorW,\n                            sensorH)\n\n    if mp.evaluate_goal(agentPosR, agentPosC, agent_r, agent_c):\n        print(\"PARTYYYY :D\")\n        pygame.time.wait(2000)\n        pygame.quit()\n        exit()  # leave the loop cleanly; the display is gone after pygame.quit()\n\n    # evaluate the agent position\n    # if the agent is touching one of the obstacles, finish\n    # the problem. 
If still alive, get sensory information and move\n    if mp.evaluate_map(agentPosR, agentPosC, agent_r, agent_c, 5):\n        print(\"Agent messed up :(\")\n        pygame.time.wait(2000)\n        pygame.quit()\n        exit()  # stop after hitting an obstacle; the display is gone after pygame.quit()\n    else:\n        agent.getSensor(sensor_info)\n        agent.nextState(speed)\n\n    # update all the sprites and draw the scene\n    all.clear(screen, background)\n    all.update()\n    dirty = all.draw(screen)\n    pygame.display.update(dirty)\n\n    # cap the framerate\n    clock.tick(framerate)\n\n    # finish the game if ESC is pressed\n    for event in pygame.event.get():\n        if event.type == pygame.KEYUP:\n            if event.key == K_ESCAPE:\n                exit()\n            if event.key == K_r:\n                # Reset game\n                print(\"reset\")\n\n# END\n","sub_path":"models/stupid/problem.py","file_name":"problem.py","file_ext":"py","file_size_in_byte":7160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} {"seq_id":"99200568","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nDownload the latest CAL-ACCESS database ZIP.\n\"\"\"\n# Files\nimport os\nimport shutil\nimport requests\nfrom hurry.filesize import size\n\n# Strings\nfrom django.template.loader import render_to_string\nfrom django.contrib.humanize.templatetags.humanize import naturaltime\n\n# Time\nimport time\nfrom datetime import datetime\nfrom django.utils import timezone\n\n# Django stuff\nfrom django.conf import settings\nfrom django.core.files import File\nfrom calaccess_raw.models.tracking import RawDataVersion\n\n# Commands\nfrom clint.textui import progress\nfrom django.core.management.base import CommandError\nfrom calaccess_raw.management.commands import CalAccessCommand\n\n# Logging\nimport logging\nlogger = logging.getLogger(__name__)\n\n\nclass Command(CalAccessCommand):\n    \"\"\"\n    Download the latest CAL-ACCESS database ZIP.\n    \"\"\"\n    help = \"Download the latest CAL-ACCESS database ZIP\"\n\n    def add_arguments(self, parser):\n        \"\"\"\n        Adds custom arguments specific to this command.\n        \"\"\"\n        super(Command, self).add_arguments(parser)\n        parser.add_argument(\n            \"--noinput\",\n            action=\"store_true\",\n            dest=\"noinput\",\n            default=False,\n            help=\"Download the ZIP archive without asking permission\"\n        )\n        parser.add_argument(\n            \"--force-restart\",\n            \"--restart\",\n            action=\"store_true\",\n            dest=\"restart\",\n            default=False,\n            help=\"Force re-start (overrides auto-resume).\"\n        )\n\n    def handle(self, *args, **options):\n        \"\"\"\n        Make it happen.\n        \"\"\"\n        super(Command, self).handle(*args, **options)\n\n        self.download_metadata = self.get_download_metadata()\n        logger.debug('Server: %s <-- (from HEAD)' % self.download_metadata['server'])\n        logger.debug('ETag: %s <-- (from HEAD)' % self.download_metadata['etag'])\n        logger.debug('Last-Modified: %s <-- (from HEAD)' % self.download_metadata['last-modified'])\n        logger.debug('Content-Length: %s <-- (from HEAD)' % self.download_metadata['content-length'])\n\n        self.last_modified_from_head = self.parse_imf_datetime_str(self.download_metadata['last-modified'])\n\n        # get or create the RawDataVersion\n        self.version, created = self.get_or_create_version(\n            self.download_metadata['content-length'],\n            self.last_modified_from_head,\n        )\n\n        # if not called from command-line, assume called by update command\n        if not self._called_from_command_line:\n            # get the version that the update command is working on\n            try:\n                last_update_started = RawDataVersion.objects.filter(\n                    update_start_datetime__isnull=False\n                ).latest('update_start_datetime')\n            except RawDataVersion.DoesNotExist:\n                pass\n            else:\n                # confirm that update and download 
commands are working with same version\n if self.version != last_update_started:\n raise CommandError(\n 'Version available to download ({0}) does not match version '\n 'of update ({1}).'.format(\n self.version,\n last_update_started.release_datetime,\n )\n )\n\n # log if a new version was found\n if created:\n logger.info('New CAL-ACCESS version available.')\n\n # if download is stalled, zip file is there, and user did not invoke restart\n if (\n self.version.download_stalled\n and os.path.exists(self.zip_path)\n and not options['restart']\n ):\n # enable resuming\n self.resume = True\n # set current size to partially downloaded zip\n self.local_file_size = os.path.getsize(self.zip_path)\n # set the datetime of last download to last modified date of zip file\n timestamp = os.path.getmtime(self.zip_path)\n self.local_file_datetime = datetime.fromtimestamp(\n timestamp,\n timezone.utc\n )\n else:\n self.resume = False\n self.local_file_size = 0\n self.local_file_datetime = None\n\n if not options['noinput']:\n # get downloaded versions\n downloaded_versions = RawDataVersion.objects.filter(\n download_finish_datetime__isnull=False\n )\n # if there are any\n if downloaded_versions:\n # get the last version downloaded datetime\n last_download_datetime = downloaded_versions.latest(\n 'download_finish_datetime'\n ).download_finish_datetime\n since_last_download = naturaltime(last_download_datetime)\n else:\n last_download_datetime = None\n\n # setting up the prompt\n prompt_context = dict(\n latest_version=self.version,\n resume=self.resume,\n local_file_size=size(self.local_file_size),\n local_file_datetime=self.local_file_datetime,\n download_dir=self.data_dir,\n since_previous_download=since_last_download,\n )\n\n prompt = render_to_string(\n 'calaccess_raw/downloadcalaccessrawdata.txt',\n prompt_context,\n )\n\n # if the user doesn't confirm initially\n if not self.confirm_proceed(prompt):\n # but the user can resume\n if self.resume:\n # prompt to restart\n confirm_restart = self.confirm_proceed(\n 'Do you want re-start your update?\\n'\n )\n # if user confirms restart, do not resume\n if confirm_restart:\n self.resume = False\n else:\n # if the user doesn't confirm restart, cancel\n raise CommandError(\"Download cancelled\")\n # if the user doesn't confirm initial and can't resume\n elif not self.resume:\n # cancel\n raise CommandError(\"Download cancelled\")\n\n # if not resuming, store the download start time\n if not self.resume:\n self.version.download_start_datetime = timezone.now()\n # either way, reset the finish time\n self.version.download_finish_datetime = None\n # save here in case the command doesn't finish\n self.version.save()\n\n # check if local zip file is already completely downloaded before trying\n if self.local_file_size < self.version.expected_size:\n self.download()\n\n logger.debug('Download zip size: %s bytes' % os.path.getsize(self.zip_path))\n # log warning if downloaded zip size is not same as expected size\n if self.version.expected_size != os.path.getsize(self.zip_path):\n raise CommandError(\n 'Expected {0} byte zip, but downloaded {1} byte zip.'.format(\n self.version.expected_size,\n os.path.getsize(self.zip_path)\n )\n )\n\n if getattr(settings, 'CALACCESS_STORE_ARCHIVE', False):\n self.archive()\n\n # store download finish time\n self.version.download_finish_datetime = timezone.now()\n # and save the RawDataVersion\n self.version.save()\n\n def download(self):\n \"\"\"\n Download the ZIP file in pieces.\n \"\"\"\n if self.verbosity:\n if self.resume:\n 
self.header(\n \"Resuming download of {:%m-%d-%Y %H:%M:%S} ZIP\".format(\n self.version.release_datetime\n )\n )\n else:\n self.header(\n \"Downloading {:%m-%d-%Y %H:%M:%S} ZIP\".format(\n self.version.release_datetime\n )\n )\n\n # Prep\n expected_size = self.version.expected_size\n headers = dict()\n\n if self.resume:\n headers['Range'] = 'bytes=%d-' % self.local_file_size\n expected_size = expected_size - self.local_file_size\n else:\n # flush previous download\n if os.path.exists(self.download_dir):\n shutil.rmtree(self.download_dir)\n os.mkdir(self.download_dir)\n\n # Stream the download\n chunk_size = 1024\n resp = requests.get(self.url, stream=True, headers=headers)\n logger.debug(\n 'Response status {0.status_code} ({0.reason}) from GET request.'.format(resp)\n )\n if not resp.ok:\n resp.raise_for_status()\n\n logger.debug('Server: %s <-- (from GET)' % resp.headers['server'])\n logger.debug('ETag: %s <-- (from GET)' % resp.headers['etag'])\n logger.debug('Last-Modified: %s <-- (from GET)' % resp.headers['last-modified'])\n logger.debug('Content-Length: %s <-- (from GET)' % resp.headers['content-length'])\n\n # Calculate absolute value of diff between last-modifed in HEAD and GET\n last_modified_from_request = self.parse_imf_datetime_str(\n resp.headers['last-modified']\n )\n last_modified_diff = abs(\n self.last_modified_from_head - last_modified_from_request\n )\n # Quit if diff greater than five minutes\n if last_modified_diff.total_seconds() > 300:\n raise CommandError(\n \"Last-modified of HEAD and GET are more than five minutes apart.\"\n )\n\n # in Python 2, need to convert this to long int\n try:\n divisor = long(chunk_size + 1)\n except NameError:\n # no long() in Python 3, all ints are long ints\n divisor = chunk_size + 1\n\n n_iters = float(expected_size) / divisor\n\n with open(self.zip_path, 'ab') as fp:\n for chunk in progress.bar(resp.iter_content(chunk_size=chunk_size), expected_size=n_iters):\n fp.write(chunk)\n fp.flush()\n\n def archive(self, suffix=None):\n \"\"\"\n Save a copy of the download zip file and each file inside.\n \"\"\"\n if self.verbosity:\n self.log(\" Archiving {}\".format(os.path.basename(self.zip_path)))\n # Store the actual download zip file size\n self.version.download_zip_size = os.path.getsize(self.zip_path)\n # Open up the zipped file so we can wrap it in the Django File obj\n release_datetime = self.version.release_datetime\n identifier = \"ccdc-raw-data-{dt:%Y-%m-%d_%H-%M-%S}\".format(dt=release_datetime)\n if suffix:\n identifier += suffix\n with open(self.zip_path, 'rb') as f:\n # Save the zip on the raw data version\n try:\n self.version.download_zip_archive.save(\n identifier,\n File(f),\n metadata=dict(\n title=f\"CAL-ACCESS raw data ({self.version.release_datetime})\"\n )\n )\n except FileExistsError:\n self.version.download_zip_archive.delete()\n time.sleep(60)\n self.version.download_zip_archive.save(\n identifier,\n File(f),\n metadata=dict(\n title=f\"CAL-ACCESS raw data ({self.version.release_datetime})\"\n )\n )\n return identifier\n","sub_path":"calaccess_raw/management/commands/downloadcalaccessrawdata.py","file_name":"downloadcalaccessrawdata.py","file_ext":"py","file_size_in_byte":11731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"132637015","text":"from django.conf import settings\r\nfrom django.core.mail import send_mail\r\nfrom django.contrib.auth import login as auth_login, authenticate, logout\r\nfrom django.shortcuts import render, get_object_or_404, 
redirect,reverse,HttpResponse,HttpResponseRedirect\r\nfrom django.db.models import Q\r\nfrom .models import Category,Product, Banner,Offer,Front_Page_Section,Highlights,Sub_Category\r\nfrom cart.forms import CartAddProductForm\r\nfrom django.contrib.auth.forms import UserCreationForm, AuthenticationForm\r\nfrom django.contrib.auth.models import User\r\nfrom django.contrib.auth.decorators import login_required\r\nfrom .forms import SignUpForm, ResetForm\r\nfrom django.contrib.sites.shortcuts import get_current_site\r\nfrom django.utils.encoding import force_bytes, force_text\r\nfrom django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode\r\nfrom django.template.loader import render_to_string\r\nfrom .tokens import account_activation_token\r\nfrom django.views.generic import CreateView\r\nfrom django import forms\r\nfrom django.core.mail import EmailMessage\r\nfrom django.contrib import messages\r\nfrom django.db.models import Model\r\nfrom django import template\r\nfrom django.template.defaultfilters import stringfilter\r\nfrom orders.models import Review, Order, OrderItem\r\nimport random\r\nimport stripe\r\n\r\nregister = template.Library()\r\n\r\n\r\n\r\n\r\n\r\ndef product_list(request, category_slug=None):\r\n \r\n section=Front_Page_Section.objects.filter(activate=True)\r\n offer=Offer.objects.filter(available=True,)\r\n category=None\r\n categories=Category.objects.all()\r\n products=Product.objects.filter(available=True)\r\n username=request.session.get('username')\r\n count=1\r\n \r\n \r\n \r\n if category_slug:\r\n category=get_object_or_404(Category, slug=category_slug)\r\n products=Product.objects.filter(Category=category)\r\n\r\n context={\r\n 'category':category,\r\n # 'categories':categories,\r\n 'products':products,\r\n 'username':username,\r\n \r\n }\r\n\r\n return render(request,'shop/product/query.html',context)\r\n\r\n query=request.GET.get(\"query\")\r\n if query:\r\n products=Product.objects.filter(\r\n Q(name__icontains=query) |\r\n Q(description__icontains=query) |\r\n Q(specifications__contains=query) |\r\n Q(Category__name__icontains=query)\r\n \r\n ).distinct()\r\n\r\n context={\r\n 'category':category,\r\n # 'categories':categories,\r\n 'products':products,\r\n 'username':username,\r\n }\r\n\r\n return render(request,'shop/product/query.html',context)\r\n \r\n context={\r\n 'category':category,\r\n # 'categories':categories,\r\n 'products':products,\r\n 'username':username,\r\n 'offer':offer,\r\n 'section':section,\r\n 'count':count,\r\n }\r\n \r\n return render(request,'shop/product/list.html', context)\r\n\r\n# @login_required\r\ndef product_detail(request,id,slug):\r\n specific=\"\"\r\n product=get_object_or_404(Product, id=id, slug=slug, available=True)\r\n cat=product.Category\r\n similar=Product.objects.filter(Category=cat,available=True)\r\n cart_product_form=CartAddProductForm()\r\n specification=product.specifications\r\n highlight=Highlights.objects.filter(product=product)\r\n reviews=Review.objects.filter(product_id=id)\r\n orders=Order.objects.all()\r\n if specification is not None:\r\n for specific in specification:\r\n specific = specification.split(\"\\n\")\r\n # name,value=highligh.split(\"-\")\r\n \r\n \r\n \r\n context={\r\n 'product':product,\r\n 'cart_product_form': cart_product_form,\r\n 'similar':similar,\r\n 'specification':specific,\r\n 'highlight':highlight,\r\n 'reviews': reviews,\r\n 'orders':orders,\r\n }\r\n return render(request,'shop/product/detail.html', context)\r\n\r\ndef signup(request):\r\n if request.method == 
\"POST\":\r\n ue=User.objects.all()\r\n for u in ue:\r\n if u.email == request.POST['email']:\r\n print(u.email)\r\n messages.error(request,\"Email address already exists\")\r\n form = SignUpForm()\r\n return render(request,'registration/signup.html',{'form':form})\r\n \r\n form = SignUpForm(data=request.POST)\r\n if form.is_valid(): \r\n form.save()\r\n nu=User.objects.filter(username=request.POST['username'])\r\n for n in nu:\r\n n.is_active=False\r\n n.save()\r\n reciever=request.POST['email']\r\n sender=settings.EMAIL_HOST_USER\r\n for x in range(1):\r\n rannum=random.randint(10000,99999)\r\n request.session['otp']=str(rannum)\r\n request.session['user_id']=request.POST['username']\r\n send_mail(\r\n 'Activate your Quality account',\r\n str(rannum),\r\n sender,\r\n [reciever],\r\n fail_silently=False,\r\n )\r\n\r\n return render(request, 'registration/otp_enter.html')\r\n\r\n # else:\r\n # #raise forms.ValidationError(\"Can't validate\")\r\n # messages.error(request,\"error\")\r\n\r\n else:\r\n form = SignUpForm()\r\n return render(request,'registration/signup.html',{'form':form})\r\n\r\n\r\ndef login(request):\r\n if request.method ==\"POST\":\r\n form=AuthenticationForm(data=request.POST)\r\n username=request.POST['username']\r\n password=request.POST['password']\r\n user=authenticate(username=username,password=password)\r\n if user is not None:\r\n if user.is_active:\r\n auth_login(request,user)\r\n if request.POST.get('remember_me'):\r\n request.session.set_expiry(60*20*24*30)\r\n return redirect('shop:product_list')\r\n #return HttpResponse(\"logged in\")\r\n request.session['username']=username\r\n\r\n else:\r\n messages.error(request,\"Username or Password is wrong, Please try again!\")\r\n \r\n \r\n \r\n\r\n else:\r\n form=AuthenticationForm()\r\n #return HttpResponse(\"wrong\")\r\n return render(request, 'registration/login.html',{'form':form})\r\n\r\ndef logouts(request):\r\n #if request.method==\"POST\":\r\n logout(request)\r\n return redirect('shop:product_list')\r\n\r\n\r\n# def test_mail(request):\r\n# for x in range(1):\r\n# rannum=random.randint(1000,9999)\r\n# send_mail(\r\n# 'Subject here',\r\n# str(rannum),\r\n# 'adityanathtiwari25@gmail.com',\r\n# ['adityanathtiwari62@gmail.com'],\r\n# fail_silently=False,\r\n# )\r\n# return redirect('shop:product_list')\r\n\r\ndef otp_test(request):\r\n if request.method == 'POST':\r\n if request.POST['otp'] == request.session['otp']:\r\n nu=User.objects.filter(username=request.session['user_id'])\r\n for n in nu:\r\n n.is_active=True\r\n n.save()\r\n return redirect('shop:product_list')\r\n else:\r\n user_id=request.session['user_id']\r\n print(user_id)\r\n idst=str(id)\r\n User.objects.filter(username=user_id).delete()\r\n return redirect('shop:product_list')\r\n\r\ndef otp_render(request):\r\n return render(request, 'registration/otp_enter.html')\r\n\r\n\r\ndef offer(request,id):\r\n offer=Offer.objects.filter(id=id)\r\n for off in offer:\r\n title=off.title\r\n products=Product.objects.filter(offer=off)\r\n return render(request,'shop/product/query.html',{'products':products,'title':title})\r\n\r\n\r\ndef section(request, id):\r\n section=Front_Page_Section.objects.filter(id=id)\r\n for sec in section:\r\n cat=sec.category\r\n title=sec.title\r\n products=Product.objects.filter(Category=cat)\r\n return render(request,'shop/product/query.html',{'products':products,'title':title})\r\n #return HttpResponse(\"section\")\r\n\r\n@login_required\r\ndef review(request, pid, slug):\r\n if request.method == \"POST\":\r\n u_id = 
request.user.id\r\n u_name = request.user.username\r\n next = request.POST.get('next')\r\n print(next)\r\n product_name = Product.objects.filter(id=pid)\r\n review=request.POST['review']\r\n for product in product_name:\r\n pn=product.name\r\n r=Review.objects.all()\r\n r=Review(user_id=u_id,user_name=u_name,product_id=pid,product_name=pn,review=review)\r\n r.save()\r\n return HttpResponseRedirect(next)\r\n\r\n\r\ndef reset_p(request):\r\n return render(request, 'registration/enter_email.html')\r\n\r\n\r\ndef pass_reset_check(request, uidb64, token):\r\n try:\r\n uid = force_text(urlsafe_base64_decode(uidb64))\r\n user = User.objects.get(pk=uid)\r\n except(TypeError, ValueError, OverflowError, User.DoesNotExist):\r\n user = None\r\n if user is not None and account_activation_token.check_token(user, token):\r\n user.is_active = True\r\n user.save()\r\n login(request, user)\r\n # return redirect('home')\r\n return HttpResponse('Thank you for your email confirmation. Now you can login your account.')\r\n else:\r\n return HttpResponse('Activation link is invalid!')\r\n\r\ndef my_orders(request):\r\n price=0\r\n length=0\r\n orderitem=OrderItem.objects.all()\r\n for ord in orderitem:\r\n if ord.order is not None:\r\n if ord.order.user_id == request.user.id: \r\n price=price+ord.price\r\n length=length+1\r\n\r\n return render(request, 'shop/product/my_orders.html',{'orderitem':orderitem,'price':price,'length':length})\r\n\r\n\r\ndef delete_order(request,cid,oid,orid,price):\r\n refund = stripe.Refund.create(\r\n charge=cid,\r\n amount=price,\r\n )\r\n uid=request.user.id \r\n if OrderItem.objects.filter(user_id=uid).count() == 1:\r\n Order.objects.filter(id=orid,user_id=uid).delete()\r\n else:\r\n OrderItem.objects.filter(id=oid,user_id=uid).delete()\r\n return redirect('shop:my_orders')\r\n\r\n\r\ndef sub_category(request, id):\r\n sec=Sub_Category.objects.get(id=id)\r\n cat=sec.category\r\n title=sec.title\r\n t=title+\" \"+cat.name\r\n products=Product.objects.filter(brand=sec.title,Category=sec.category)\r\n return render(request,'shop/product/query.html',{'products':products,'title':t})\r\n","sub_path":"website/shop/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"70257326","text":"# Imports\nfrom mapper import *\nfrom gui import GUI\n\n# Driver function\nif __name__ == '__main__':\n\n\t# Read Map\n\tdims, butters, goals, robot, matrix = get_map('maps/map (2).txt')\n\n\t# Define model\n\tmodel = Astar()\n\t# model = IDS()\n\t# model = BI_BFS()\n\n\t# view = 'basic'\n\tview = 'extended'\n\n\t# Build GUI\n\tgui = GUI(\n\t\tmodel = model,\n\t\tmatrix = matrix.copy(),\n\t\tbutters = butters.copy(),\n\t\tgoals = goals.copy(),\n\t\trobot = robot,\n\t)\n\n\t# Get pairs\n\tpairs = get_pairs (\n\t\tmatrix = matrix,\n\t\tbutters = butters,\n\t\tgoals = goals,\n\t\tmodel = model,\n\t)\n\n\t# Print pairs to route\n\tprint('Routes:', pairs, end = '\\n' * 2)\n\n\tpaths = []\n\tfor pair in pairs:\n\n\t\t# Print current route\n\t\tprint(f'From {pair[0]} to {pair[1]}:')\n\n\t\t# Get route\n\t\tpath = get_route(\n\t\t\tmatrix = matrix,\n\t\t\tpair = pair,\n\t\t\tbutters = butters,\n\t\t\tgoals = goals,\n\t\t\trobot = robot,\n\t\t\tmodel = model,\n\t\t)\n\n\t\t# Save path for animation\n\t\tpaths.append((pair, path))\n\n\t\t# update robot's latest location if route was completed successfully\n\t\tif len(path) != 0:\n\t\t\trobot = path[-2][0]\n\n\t\t# Remove the butter and 
goal\n\t\tbutters.remove(pair[0])\n\t\tgoals.remove(pair[1])\n\n\t\tif path == []:\n\t\t\tprint('Can’t pass the butter ^_^')\n\t\telse:\n\t\t\tview_route(route = path, matrix = matrix, type = view)\n\n\n\t# Animate GUI\n\tgui.animate(routes = paths)","sub_path":"Projects/P1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"42885364","text":"class Settings():\n \"\"\"存储《外星人入侵》的所有设置类\"\"\"\n def __init__(self):\n \"\"\"初始化游戏设置\"\"\"\n #屏幕设置\n self.screen_width=1200\n self.screen_height=600\n #背景颜色\n self.bg_color=(87,250,255)\n\n #飞船速度\n self.ship_speed_factor=1.5\n\n\n #子弹设置\n self.bullet_speed_factor=1\n self.bullet_width=3\n self.bullet_height=15\n self.bullet_color=60,60,60\n","sub_path":"wuzhuangfeichuan_pycharm/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"623681283","text":"import Labyrinthe\n\nlaby= Labyrinthe.creer(11,15)\n# ~ for i in laby:\n\t# ~ print(i)\n\t\ndef entree(lst):\n\t\n\tfor (i,e) in enumerate(laby):\n\t\tfor (ind,elem) in enumerate(laby[i]):\n\t\t\tif(elem==2):\n\t\t\t\treturn (ind , i)\n\ndef sortie(lst):\n\t\n\tfor (i,e) in enumerate(laby):\n\t\tfor (ind,elem) in enumerate(laby[i]):\n\t\t\tif(elem==3):\n\t\t\t\treturn (ind , i)\n\t\t\t\t\ndef taille(lst):\n\tfor (i,e) in enumerate(lst):\n\t\tfor (ind,elem) in enumerate(lst[i]):\n\t\t\ta=(i+1, ind+1)\n\t\t\t\n\treturn a\n\t\t\ndef nbLignes(lst):\n\tfor (i,e) in enumerate(lst):\n\t\ta= (i)\n\t\n\treturn a\n\t\t\ndef nbColonnes(lst):\n\tfor (i,e) in enumerate(lst):\n\t\tfor (ind,elem) in enumerate(lst[i]):\n\t\t\ta=(ind)\n\t\n\treturn a\n\t\ndef voisin_laby(f,lst):\n\t(lgn,col)=f\n\tl=[]\n\t\n\tif (lgn != 0):\n\t\tl +=[(lgn-1,col)]\n\t\n\tif(lgn != nbLignes):\n\t\tl+=[(lgn+1,col)]\n\t\t\n\t\n\tif(col != 0):\t\n\t\tl +=[(lgn,col-1)]\n\t\n\tif(col != nbColonnes):\n\t\tl+=[(lgn,col+1)]\n\telse:\n\t\tl+=[]\n\t\t\n\t\n\t# ~return \"voisinDroite= \",voisinDroite,\"voisinGauche= \",voisinGauche,\"voisinDessus= \",voisinDessus,\"voisinDessous= \",voisinDessous,l\n\treturn l\n\t\ndef voisin_laby_final(f,lst):\n\t(lgn,col)=f\n\tif(lgn==0):\n\t\tvoisinDessus=\"pas de voisin dessus\"\n\telse:\n\t\tvoisinDessus=(lst[lgn-1][col])\n\tif(lgn==nbLignes(lst)):\n\t\tvoisinDessous=\"pas de voisin dessous\"\n\telse:\n\t\tvoisinDessous=(lst[lgn+1][col])\n\t\t\n\tif(col==0):\n\t\tvoisinGauche=\"pas de voisin Gauche\"\n\telse:\t\n\t\tvoisinGauche=(lst[lgn][col-1])\n\tif(col==nbColonnes(lst)):\n\t\tvoisinDroite=\"pas de voisin droite\"\n\telse:\n\t\tvoisinDroite=(lst[lgn][col+1])\n\t\t\n\tl=[voisinDessus,voisinDessous,voisinDroite,voisinGauche]\n\t# ~return \"voisinDroite= \",voisinDroite,\"voisinGauche= \",voisinGauche,\"voisinDessus= \",voisinDessus,\"voisinDessous= \",voisinDessous,l\n\treturn l\n\ndef voisin_laby_acc(f,lst):\n\t(lgn, col)=f\n\tvoisins=voisin_laby(f,lst)\n\tl=[]\n\tfor (i,e) in enumerate(voisins):\n\t\t(lgn,col)= voisins[i]\n\t\tif(lst[lgn][col] !=0 and lst[lgn][col]!=4):\n\t\t\tl += [voisins[i]]\n\t\t\t\n\t\t\n\t\t\n\treturn l\n\t\n# ~ print(position((5,5),laby))\t\t\t\n# ~ print(entree(laby))\n# ~ print(sortie(laby))\n# ~ print(taille(laby))\n# ~ print(nbLignes(laby))\n# ~ print(nbColonnes(laby))\n# ~ print(voisin_laby(0,0,laby))\n# ~ print(voisin_laby(nbLignes(laby),nbColonnes(laby),laby))\n# ~ print(voisin_laby(nbLignes(laby),0,laby))\n# ~ 
print(voisin_laby((1,1),laby))\n# ~ print(voisin_laby((2,2),laby))\n# ~ print(voisin_laby_final((0,0),laby))\n# ~ print(voisin_laby_acc((1,1),laby))\nprint(99999%100)\n\t\t\t\n\t\t\t\ndef test12():\n\tlst1 = []\n\tfor i in range(3):\n\t\tfor j in range(4):\n\t\t\t\n\t\t\tlst1[i][j] += [i+1]\n\treturn lst1\n\t\n# ~ print(test12())\n\t","sub_path":"old/abby.py","file_name":"abby.py","file_ext":"py","file_size_in_byte":2505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"148198189","text":"from ...log import LogBase\r\n\r\nfrom .common import sxor\r\nfrom .aesecb import AESECB\r\n\r\n\r\nclass AESCBC(LogBase):\r\n '''Class for performing AES CBC cipher operations.'''\r\n\r\n def __init__(self, key, iv=b\"\\x00\"*0x10):\r\n self.aes = AESECB(key)\r\n if len(iv) != self.aes.block_size:\r\n raise ValueError('IV must be of size %X!' % self.aes.block_size)\r\n self.iv = iv\r\n\r\n def encrypt(self, data, iv=None):\r\n '''Encrypts some data in CBC mode.'''\r\n if iv is None:\r\n iv = self.iv\r\n out = b''\r\n while data:\r\n encb = self.aes.encrypt_block_ecb(sxor(data[:0x10], iv))\r\n out += encb\r\n iv = encb\r\n data = data[0x10:]\r\n return out\r\n\r\n def decrypt(self, data, iv=None):\r\n '''Decrypts some data in CBC mode.'''\r\n if len(data) % self.aes.block_size:\r\n raise ValueError('Data is not aligned to block size!')\r\n if iv is None:\r\n iv = self.iv\r\n out = b''\r\n while data:\r\n decb = sxor(self.aes.decrypt_block_ecb(data[:0x10]), iv)\r\n out += decb\r\n iv = data[:0x10]\r\n data = data[0x10:]\r\n return out\r\n\r\n def decrypt_static_iv(self, data, *, iv : bytes=b\"\\x00\" * 0x10, bs : int=0x10) -> bytes:\r\n out = b\"\"\r\n while data:\r\n out += self.decrypt(data[:bs], iv)\r\n data = data[bs:]\r\n return out\r\n\r\n def set_iv(self, iv):\r\n if len(iv) != self.aes.block_size:\r\n raise ValueError('IV must be of size %X!' 
% self.aes.block_size)\r\n self.iv = iv\r\n","sub_path":"pyhac/common/crypto/aes128/aescbc.py","file_name":"aescbc.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"219340067","text":"# Used to generate various different network models\nfrom keras import Input, Model\nfrom keras.callbacks import EarlyStopping, CSVLogger\nfrom keras.layers import BatchNormalization, Conv2D, LeakyReLU, MaxPooling2D, Flatten, Dropout, Dense, Concatenate, \\\n Reshape, Add, Activation, AveragePooling2D, ZeroPadding2D\nfrom keras.regularizers import l2\n\n\nclass NerualNetworkModel:\n\n def __init__(self):\n pass\n\n # testmodel\n def test_model(self, input_size):\n # model_data\n X_input = Input(shape=input_size)\n X = Conv2D(filters=32, kernel_size=(1, 1), padding='same')(X_input)\n X = BatchNormalization()(X)\n X = LeakyReLU()(X)\n X = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(X)\n\n X = Flatten()(X)\n X = Dropout(rate=0.5)(X)\n X1 = Dense(62, activation='softmax')(X)\n X2 = Dense(62, activation='softmax')(X)\n X3 = Dense(62, activation='softmax')(X)\n X4 = Dense(62, activation='softmax')(X)\n X = Concatenate(axis=-1)([X1, X2, X3, X4])\n X = Reshape(target_shape=(4, 62))(X)\n\n model = Model(X_input, X)\n return model\n\n # ResNet\n def ResNet50(self, input_size=(64, 64, 3), regularizer=0):\n \"\"\"\n Implementation of the popular ResNet50 with the following architecture:\n CONV2D -> BATCHNORM -> RELU -> MAXPOOL -> CONVBLOCK -> IDBLOCK*2 -> CONVBLOCK -> IDBLOCK*3\n -> CONVBLOCK -> IDBLOCK*5 -> CONVBLOCK -> IDBLOCK*2 -> AVGPOOL -> TOPLAYER\n\n Arguments:\n input_shape -- shape of the images of the dataset\n classes -- integer, number of classes\n\n Returns:\n model_data -- a Model() instance in Keras\n \"\"\"\n\n # Define the input as a tensor with shape input_shape\n X_input = Input(input_size)\n\n # Zero-Padding\n X = ZeroPadding2D((3, 3))(X_input)\n\n # Stage 1\n X = Conv2D(filters=64, kernel_size=(7, 7), strides=(2, 2), name='conv1')(X)\n X = BatchNormalization(axis=3, name='bn_conv1')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((3, 3), strides=(2, 2))(X)\n\n # Stage 2\n X = self.__convolutional_block(X, f=3, filters=[64, 64, 256], stage=2, block='a', s=1)\n X = self.__identity_block(X, 3, [64, 64, 256], stage=2, block='b')\n X = self.__identity_block(X, 3, [64, 64, 256], stage=2, block='c')\n\n # Stage 3\n X = self.__convolutional_block(X, f=3, filters=[128, 128, 512], stage=3, block='a', s=2)\n X = self.__identity_block(X, 3, [128, 128, 512], stage=3, block='b')\n X = self.__identity_block(X, 3, [128, 128, 512], stage=3, block='c')\n X = self.__identity_block(X, 3, [128, 128, 512], stage=3, block='d')\n\n # Stage 4\n X = self.__convolutional_block(X, f=3, filters=[256, 256, 1024], stage=4, block='a', s=2)\n X = self.__identity_block(X, 3, [256, 256, 1024], stage=4, block='b')\n X = self.__identity_block(X, 3, [256, 256, 1024], stage=4, block='c')\n X = self.__identity_block(X, 3, [256, 256, 1024], stage=4, block='d')\n X = self.__identity_block(X, 3, [256, 256, 1024], stage=4, block='e')\n X = self.__identity_block(X, 3, [256, 256, 1024], stage=4, block='f')\n\n # Stage 5\n X = self.__convolutional_block(X, f=3, filters=[512, 512, 2048], stage=5, block='a', s=2)\n X = self.__identity_block(X, 3, [512, 512, 2048], stage=5, block='b')\n X = self.__identity_block(X, 3, [512, 512, 2048], stage=5, block='c')\n\n # AVGPOOL\n X = AveragePooling2D((2, 2), name='avg_pool')(X)\n\n # output layer\n X = Flatten()(X)\n X = Dropout(rate=0.5)(X)\n X1 
= Dense(62, kernel_regularizer=l2(regularizer), activation='softmax')(X)\n X2 = Dense(62, kernel_regularizer=l2(regularizer), activation='softmax')(X)\n X3 = Dense(62, kernel_regularizer=l2(regularizer), activation='softmax')(X)\n X4 = Dense(62, kernel_regularizer=l2(regularizer), activation='softmax')(X)\n X = Concatenate(axis=-1)([X1, X2, X3, X4])\n predicts = Reshape(target_shape=(4, 62))(X)\n # Create model_data\n model = Model(inputs=X_input, outputs=predicts, name='ResNet50')\n\n return model\n\n def __identity_block(self, X, f, filters, stage, block):\n # defining name basis\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n # Retrieve Filters\n F1, F2, F3 = filters\n\n # Save the input value. You'll need this later to add back to the main path.\n X_shortcut = X\n\n # First component of main path\n X = Conv2D(filters=F1, kernel_size=(1, 1), strides=(1, 1), padding='valid', name=conv_name_base + '2a')(X)\n X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)\n X = Activation('relu')(X)\n\n ### START CODE HERE ###\n\n # Second component of main path (≈3 lines)\n X = Conv2D(filters=F2, kernel_size=(f, f), strides=(1, 1), padding='same', name=conv_name_base + '2b')(X)\n X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)\n X = Activation('relu')(X)\n\n # Third component of main path (≈2 lines)\n X = Conv2D(filters=F3, kernel_size=(1, 1), strides=(1, 1), padding='valid', name=conv_name_base + '2c')(X)\n X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)\n\n # Final step: Add shortcut value to main path, and pass it through a RELU activation (≈2 lines)\n X = Add()([X, X_shortcut])\n X = Activation('relu')(X)\n\n ### END CODE HERE ###\n\n return X\n\n def __convolutional_block(self, X, f, filters, stage, block, s=2):\n \"\"\"\n Implementation of the convolutional block as defined in Figure 4\n\n Arguments:\n X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)\n f -- integer, specifying the shape of the middle CONV's window for the main path\n filters -- python list of integers, defining the number of filters in the CONV layers of the main path\n stage -- integer, used to name the layers, depending on their position in the network\n block -- string/character, used to name the layers, depending on their position in the network\n s -- Integer, specifying the stride to be used\n\n Returns:\n X -- output of the convolutional block, tensor of shape (n_H, n_W, n_C)\n \"\"\"\n\n # defining name basis\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n # Retrieve Filters\n F1, F2, F3 = filters\n\n # Save the input value\n X_shortcut = X\n\n ##### MAIN PATH #####\n # First component of main path\n X = Conv2D(F1, kernel_size=(1, 1), strides=(s, s), padding='valid', name=conv_name_base + '2a')(X)\n X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)\n X = Activation('relu')(X)\n\n # Second component of main path\n X = Conv2D(F2, kernel_size=(f, f), strides=(1, 1), padding='same', name=conv_name_base + '2b')(X)\n X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)\n X = Activation('relu')(X)\n\n # Third component of main path\n X = Conv2D(F3, kernel_size=(1, 1), strides=(1, 1), padding='valid', name=conv_name_base + '2c')(X)\n X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)\n\n # SHORTCUT PATH\n X_shortcut = Conv2D(F3, kernel_size=(1, 1), strides=(s, s), padding='valid', name=conv_name_base + '1')(\n X_shortcut)\n 
X_shortcut = BatchNormalization(axis=3, name=bn_name_base + '1')(X_shortcut)\n\n # Final step: Add shortcut value to main path, and pass it through a RELU activation\n X = Add()([X, X_shortcut])\n X = Activation('relu')(X)\n\n return X\n\n # LeNet\n def LeNet(self, input_size, regularizer=0, droprate=0):\n X_input = Input(shape=input_size)\n X = X_input\n for i, n_cnn in enumerate([2, 2, 2, 2, 2]):\n for j in range(n_cnn):\n X = Conv2D(32 * 2 ** min(i, 3), kernel_size=3, padding='same')(X)\n X = BatchNormalization()(X)\n X = Activation('relu')(X)\n X = MaxPooling2D(2)(X)\n\n X = Flatten()(X)\n X = Dropout(rate=droprate)(X)\n X1 = Dense(62, kernel_regularizer=l2(regularizer), activation='softmax')(X)\n X2 = Dense(62, kernel_regularizer=l2(regularizer), activation='softmax')(X)\n X3 = Dense(62, kernel_regularizer=l2(regularizer), activation='softmax')(X)\n X4 = Dense(62, kernel_regularizer=l2(regularizer), activation='softmax')(X)\n X = Concatenate(axis=-1)([X1, X2, X3, X4])\n X = Reshape(target_shape=(4, 62))(X)\n\n model = Model(X_input, X)\n return model\n\n\nif __name__ == '__main__':\n model = NerualNetworkModel().LeNet(input_size=(128, 128, 3))\n model.summary()\n","sub_path":"backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":8942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"502130510","text":"\nimport pyblish.api\n\n\nclass CollectDelegatedInstance(pyblish.api.ContextPlugin):\n \"\"\"Collect delegated instances from Context\n\n This plugin will set `instance.data[\"publish\"] = False` if that instance\n is not delegated.\n\n This plugin should run after normal instance collector.\n\n \"\"\"\n\n order = pyblish.api.CollectorOrder + 0.3\n label = \"Delegated Instance\"\n\n def process(self, context):\n if not context.data.get(\"contractorAccepted\"):\n return\n\n assignment = context.data[\"contractorAssignment\"]\n\n collected_count = 0\n for instance in context:\n name = instance.data[\"subset\"]\n if name in assignment:\n # version lock\n instance.data[\"versionNext\"] = assignment[name]\n self.log.info(\"{} collected.\".format(name))\n collected_count += 1\n else:\n # Remove not assigned subset instance\n instance.data[\"publish\"] = False\n self.log.info(\"{} skipped.\".format(name))\n\n self.log.info(\"Collected {} instances.\".format(collected_count))\n\n if collected_count == 0:\n raise ValueError(\"No instance to publish, this is a bug.\")\n\n if not collected_count == len(assignment):\n self.log.warning(\"Subset count did not match, this is a bug.\")\n","sub_path":"plugins/global/publish/collect_delegated_instance.py","file_name":"collect_delegated_instance.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"161654562","text":"\nfrom pydht.Dht import Dht\nfrom pydht.placements.Placements import Placements\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport time\nimport multiprocessing as mp\n\nclass Experiment:\n\n baseDir = './experiments/results/'\n filename = ''\n name = ''\n\n def __init__(self, name, size, filename):\n plt.style.use('seaborn-whitegrid')\n self.filename = filename\n self.data = pd.DataFrame()\n self.lastStart = 0\n self.collectTime = []\n self.execTimes = []\n self.runCount = 0\n self.workers = []\n self.tasks = []\n\n def __addDhtAndStart(self, name, bits, placementStrategie, iteration, numNodes, keys, error, phi, multiple, virt):\n start = 
time.process_time()\n df = pd.DataFrame()\n dht = Dht(name, bits, verbose=False)\n p = Placements(dht, placementStrategie,phi)\n if placementStrategie == 'y0':\n p.start((numNodes,error))\n elif placementStrategie == 'virtual':\n p.start((numNodes, virt))\n elif placementStrategie == 'binWeighted' or placementStrategie == 'binRealWeighted':\n p.start((numNodes,virt))\n else:\n p.start((numNodes,))\n for i in range(0, keys[0]):\n\n dht.addKeys(keys[1], multiple=multiple)\n flatKeyCount = dht.flatKeyCount()\n shareList = dht.nodeList.getShares()\n vNodeMinMax = dht.getVirtualNodesExtrema()\n data = {'name': [name],\n 'iteration': iteration,\n '#nodes': dht.getMasterNodeCount(),\n 'virtNodes': virt,\n 'keys': dht.getKeyCount()+keys[1],\n 'share': dht.getImbalance(),\n 'share_min': dht.getMinIdSpace()[1],\n 'share_max': dht.getMaxIdSpace()[1],\n 'keys/node': dht.getAverageKeyPerNode(),\n 'keys/node_0': np.percentile(flatKeyCount,0),\n 'keys/node_10': np.percentile(flatKeyCount,10),\n 'keys/node_50': np.percentile(flatKeyCount,50),\n 'keys/node_90': np.percentile(flatKeyCount,90),\n 'keys/node_100':np.percentile(flatKeyCount,100),\n 'real/virt': dht.getVirtualNodesAverage(),\n 'real/virt_min': vNodeMinMax[0],\n 'real/virt_max': vNodeMinMax[1],\n 'share_0': np.percentile(shareList,0),\n 'share_100': np.percentile(shareList,100),\n 'share_50': np.percentile(shareList,50),\n 'maxShareY0': np.percentile(shareList, 100),\n 'time': time.process_time()-start}\n df = df.append(pd.DataFrame(data), ignore_index=True)\n\n end = time.process_time()\n return (df, end -start)\n\n def __collect(self, result):\n aE = time.time()\n self.data = self.data.append(result[0], ignore_index=True)\n self.collectTime.append(time.time()-aE)\n self.runCount += 1\n self.execTimes.append(result[1])\n if self.runCount % (self.sumRuns / 10) == 0:\n self.lastEnd = time.time()\n tpn = (self.lastEnd-self.lastStart)/(self.sumRuns // 10)\n print(\"done: \" + str(self.runCount) + \"/\" + str(self.sumRuns) + \" (\" + str((100 / (self.sumRuns)) * self.runCount) + \"%) in \" +\n str(self.lastEnd - self.lastStart) + \" s (\" + str((self.sumRuns - self.runCount) * tpn) + \" remaining) collect: \" + str(sum(self.collectTime)))\n print(\"average exec time: \" + str(sum(self.execTimes)/len(self.execTimes)))\n self.lastStart = time.time()\n self.collectTime = []\n\n def __worker(self, inQueue, outQueue):\n tA = []\n done = 0\n start = time.time()\n while (True):\n args = inQueue.get()\n if args == None:\n break\n done += 1\n (df, t) = self.__addDhtAndStart(*args)\n tA.append(t)\n outQueue.put((df, t))\n print(\"Worker done \" + str(done) + \" tasks in \" + str(time.time() - start) + \" seconds, of which \" + str(sum(tA)) + \" s was execution\")\n outQueue.put(None)\n\n def execute(self, processes=mp.cpu_count()):\n inQueue = mp.Queue()\n outQueues = []\n self.sumRuns = len(self.tasks)\n self.lastStart = time.time()\n for x in self.tasks:\n args = x['p']\n inQueue.put(args)\n\n for i in range(processes):\n inQueue.put(None)\n\n for i in range(processes):\n outQueues.append(mp.Queue())\n self.workers.append(mp.Process(target=self.__worker,args = (inQueue,outQueues[-1])))\n self.workers[-1].start()\n\n while (self.runCount < self.sumRuns):\n for x in outQueues:\n val = x.get()\n if val != None:\n self.__collect(val)\n else:\n outQueues.remove(x)\n\n for x in self.workers:\n x.join()\n\n assert(self.runCount > 0)\n print(\"exec time total: \" + str(sum(self.execTimes)) + \" s\\navg exec time/task: \" + str(sum(self.execTimes) / len(self.execTimes)) + \" 
s\\nruns: \" + str(self.runCount))\n return self.data\n\n def addTask(self, task):\n self.tasks.append(task)\n\n def percentile(self,n):\n def _percentile(x):\n return np.percentile(x, n)\n _percentile.__name__ = '{}-th percentile'.format(n)\n return _percentile\n\n def getPlt(self):\n return plt\n\n def setAxisLabel(self, xLabel, yLabel):\n plt.xlabel(xLabel)\n plt.ylabel(yLabel)\n\n def saveOrShow(self, save):\n if (save):\n plt.savefig(self.baseDir + self.filename)\n else:\n plt.show()","sub_path":"src/experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":5829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"460520239","text":"from uuid import uuid4\nfrom testify import (TestCase, assert_equal, assert_not_equal, class_setup,\n class_teardown, setup, teardown)\nimport righteous\nfrom ConfigParser import SafeConfigParser\n\nclass RighteousTestCase(TestCase):\n envs = []\n\n @class_setup\n def initialise_righteous(self):\n config = SafeConfigParser()\n config.read('righteous.config')\n if not config.has_section('auth'):\n raise Exception('Please create a righteous.config file with appropriate credentials')\n\n self.auth = dict((key,config.get('auth', key)) for key in config.options('auth'))\n self.server = dict((key,config.get('server-defaults', key)) for key in config.options('server-defaults'))\n righteous.init(self.auth['username'], self.auth['password'], self.auth['account_id'], **self.server)\n\n if not righteous.config.settings.cookies:\n righteous.login()\n\n @setup\n def prepare_test(self):\n self.delete_server = True\n self.env = 'env-%s' % uuid4().hex\n self.username = self.auth['username']\n\n def _create_server(self, instance_type='m1.small'):\n parameters = dict(envname=self.env, email=self.username, mode='unattended', branches='none')\n successful, location = righteous.create_and_start_server(self.env, instance_type, server_template_parameters=parameters)\n assert successful\n assert location is not None\n if self.delete_server:\n self.envs.append(self.env)\n\n def test_list_servers(self):\n servers = righteous.list_servers()\n assert 'servers' in servers\n self.delete_server = False\n\n def test_server_status(self):\n self._create_server()\n\n server = righteous.find_server(self.env)\n assert server is not None\n server_settings = righteous.server_settings(server['href'])\n assert server_settings is not None\n server_info = righteous.server_info(server['href'])\n assert server_info is not None\n assert_equal(server_settings['ec2-instance-type'], 'm1.small')\n assert_equal(server['state'], 'pending')\n\n def test_create_server(self):\n location = righteous.create_server(self.env, 'm1.small')\n assert_not_equal(location, None)\n self.envs.append(self.env)\n\n def test_create_and_start_server(self):\n self._create_server()\n\n def test_stop_server(self):\n self._create_server()\n\n server = righteous.find_server(self.env)\n successful = righteous.stop_server(server['href'])\n assert successful\n\n def test_delete_server(self):\n self._create_server()\n server = righteous.find_server(self.env)\n successful = righteous.stop_server(server['href'])\n assert successful\n\n stopped = False\n while not stopped:\n server = righteous.find_server(self.env)\n stopped = server['state'] == 'stopped'\n successful = righteous.delete_server(server['href'])\n assert successful\n\n self.delete_server = False\n\n @teardown\n def stop_servers(self):\n if self.delete_server:\n server = righteous.find_server(self.env)\n 
if server:\r\n righteous.stop_server(server['href'])\r\n\r\n @class_teardown\r\n def delete_servers(self):\r\n for env in self.envs:\r\n stopped = False\r\n while not stopped:\r\n server = righteous.find_server(env)\r\n if server:\r\n stopped = server['state'] == 'stopped'\r\n else:\r\n stopped = True\r\n righteous.delete_server(server['href'])\r\n\r\n","sub_path":"tests/integration.py","file_name":"integration.py","file_ext":"py","file_size_in_byte":3638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"233287462","text":"# Reading csv file with trajectories of Athens area\r\n# Making list of all trajectory dataframes --> from separate files to one file\r\n# Calculating bearing for every datapoint in individual trajectory\r\nimport pandas as pd\r\nimport geopandas as gpd\r\nfrom geopandas import GeoDataFrame\r\nfrom shapely.geometry import Point, LineString\r\nimport pickle\r\nimport matplotlib.pyplot as plt\r\nimport compassbearing\r\nfrom tqdm import tqdm\r\nfrom tqdm.contrib import tenumerate\r\n\r\npath_to_export = \"~/Desktop/Joachim/Master Logistics and Traffic/Erasmus Lausanne/\" \\\r\n \"Masters Project/athens_project/dataset_csv/\"\r\ntrajectories = []\r\ncrs = 'WGS84' # Lat-Lon in right crs, used for geodataframe column\r\nnumber_of_csvfiles = 10660\r\nfor id in tqdm(range(1, number_of_csvfiles)):\r\n # range dependent on csv file, saving the index of python file with trajectory extraction\r\n t = pd.read_csv(f'{path_to_export}trajectory{id}.csv')\r\n t = t.rename(columns={'Latitude [deg]': 'Lat', 'Longitude [deg]': 'Lon'})\r\n geometry = [Point(yx) for yx in zip(t.Lon, t.Lat)]\r\n # t=t.drop(['Lat','Lon'], axis=1) X and Y needed for get_nearest_edge\r\n t = GeoDataFrame(t, crs=crs, geometry=geometry)\r\n t = t[:-1]\r\n t['Tracked Vehicle'].fillna(t['Tracked Vehicle'].values[0], inplace=True)\r\n trajectories.append(t)\r\n\r\ncolumn_number = 14 # Specific number puts column at preferred place in dataframe\r\nfor i, j in tenumerate(trajectories):\r\n bearing = []\r\n traj_latlon = j[['Lat', 'Lon']].values\r\n for e, f in j.iterrows():\r\n if e < len(j) - 1:\r\n A = (traj_latlon[e][0], traj_latlon[e][1])\r\n B = (traj_latlon[e + 1][0], traj_latlon[e + 1][1])\r\n comp = compassbearing.calculate_initial_compass_bearing(A, B)\r\n if comp == 0:\r\n if not bearing:\r\n r = 2\r\n while A == (traj_latlon[e + r][0], traj_latlon[e + r][1]) \\\r\n and r + e + 1 < len(j):\r\n r = r + 1\r\n if A == (traj_latlon[e + r][0], traj_latlon[e + r][1]):\r\n bearing.append(999) # Static vehicle\r\n else:\r\n C = (traj_latlon[e + r][0], traj_latlon[e + r][1])\r\n comp_1 = compassbearing.calculate_initial_compass_bearing(A, C)\r\n bearing.append(comp_1)\r\n else:\r\n comp_2 = bearing[e - 1]\r\n bearing.append(comp_2)\r\n else:\r\n bearing.append(comp)\r\n elif len(j) > 1:\r\n comp = bearing[e - 1]\r\n bearing.append(comp)\r\n else:\r\n bearing.append(999) # Static vehicle\r\n j.insert(column_number, \"bearing\", bearing)\r\n\r\nstatic_vehicle = []\r\ntrajectories_moving = []\r\nfor i, j in tenumerate(trajectories):\r\n j = j.rename(columns={'Traveled Dist.[m]': 'traveled_dist',\r\n 'Speed[km / h]': 'speed', 'Tan.Accel.[ms - 2]': 'tan_accel',\r\n 'Lat.Accel.[ms - 2]': 'lat_accel', 'Time[ms]': 'time'})\r\n if j['bearing'].nunique() == 1:\r\n static_vehicle.append(j)\r\n else:\r\n trajectories_moving.append(j)\r\n\r\n\"\"\"\r\nwith open('trajects.pkl', 'wb') as f:\r\n pickle.dump(trajectories, f)\r\nwith open('trajectories_moving.pkl', 'wb') as f:\r\n pickle.dump(trajectories_moving, f)\r\nwith open('static_vehicles.pkl', 'wb') as f:\r\n pickle.dump(static_vehicle, 
f)\n\"\"\"","sub_path":"trajectories_dataset.py","file_name":"trajectories_dataset.py","file_ext":"py","file_size_in_byte":3395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"143879621","text":"import discord\nfrom discord.ext import commands\nfrom utilities import get_res, fetch_res, get_string, feature_users\n\nclass Verification(commands.Cog):\n def __init__(self, bot: commands.Bot) -> None:\n self.bot = bot\n\n @commands.Cog.listener()\n async def on_member_join(self, member: discord.Member) -> None:\n guild_id = member.guild.id\n\n if guild_id not in feature_users(\"verification\"):\n return\n\n gcs = fetch_res(\"local/guild_configurations\")\n\n guild_config = gcs[str(guild_id)]\n guild_lang = guild_config[\"language\"]\n \n if \"verificationMessage\" in guild_config[\"strings\"]:\n await member.send(guild_config[\"strings\"][\"verificationMessage\"])\n else:\n await member.send(get_string(\"verificationMessage\", guild_lang))\n\n response = await self.bot.wait_for(\n # pyre-ignore[16]: Pyre lacks information about the member class, causing faulty errors\n \"message\", check=lambda m: m.channel.id == member.dm_channel.id and m.author.id == member.id\n )\n\n if \"verificationChannel\" in guild_config[\"channels\"]:\n verification_channel = self.bot.get_channel(guild_config[\"channels\"][\"verificationChannel\"])\n else:\n verification_channel = self.bot.get_guild(guild_id).owner.dm_channel\n\n await verification_channel.send(\n get_string(\"verificationReport\", guild_lang) + f\"{member.mention}:\\n\\n{response.content}\"\n )\n\ndef setup(bot: commands.Bot) -> None:\n bot.add_cog(Verification(bot))\n","sub_path":"src/cogs/verification.py","file_name":"verification.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"317694000","text":"# O(n)\ndef findMode(self, root):\n def dfs(node):\n if not node:\n return\n count[node.val] += 1\n dfs(node.left)\n dfs(node.right)\n\n if not root:\n return []\n\n count = collections.Counter()\n dfs(root)\n max_val = max(count.values())\n return [k for k, v in count.items() if v == max_val]\n","sub_path":"501_mode_BST.py","file_name":"501_mode_BST.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"436351756","text":"import django\nfrom django.conf import settings\nfrom tracker.models import parse_query, Tracker\nfrom tracker.models import Statistic, make_daily_report\nfrom tracker.models import make_monthly_report_country\nfrom django.test import TestCase\n\nimport datetime\n\nclass UnitTestCase(TestCase):\n\n def test_query_string(self):\n\n query = \"label1\"\n self.assertEqual(parse_query(query), [{'lbl': 'label1'}])\n query = \"label1|label2\"\n self.assertEqual(parse_query(query), [{'lbl': 'label1'},\n {'lbl': 'label2'}])\n\n query = \"label1:cat|label2\"\n self.assertEqual(parse_query(query), [{'lbl': 'label1', 'cat': 'cat'},\n {'lbl': 'label2'}])\n\n query = \"label1:cat:ele1|label2\"\n self.assertEqual(parse_query(query),\n [{'lbl': 'label1', 'cat': 'cat', 'domid': 'ele1'},\n {'lbl': 'label2'}])\n\n def test_traker(self):\n\n tracker = Tracker()\n\n tracker.incr_labels('label1')\n self.assertEqual(tracker.labels, {'label1': {'lbl': 'label1'}})\n tracker.incr_labels('label1')\n self.assertEqual(tracker.flush_label('label1'), 2)\n self.assertEqual(tracker.flush_label('label1'), 0)\n 
tracker.save()\n\n tracker = Tracker()\n self.assertEqual(tracker.labels, {'label1': {'lbl': 'label1'}})\n tracker.reset_cache()\n\n def test_report(self):\n\n tracker = Tracker()\n tracker.incr_labels('label1:cat1:dom1')\n tracker.incr_labels('label1:cat1:dom1')\n\n self.assertEqual(Statistic.objects.count(), 0)\n\n make_daily_report()\n\n self.assertEqual(tracker.flush_label('label1'), 0)\n\n self.assertEqual(Statistic.objects.count(), 1)\n\n stat = Statistic.objects.all()[0]\n self.assertEqual(stat.counter, 2)\n self.assertEqual(stat.category, 'cat1')\n self.assertEqual(stat.dom_id, 'dom1')\n\n tracker.incr_labels('label1:cat1:dom1')\n tracker.incr_labels('label1:cat1:dom1')\n\n stat = Statistic.objects.all()[0]\n self.assertEqual(stat.counter, 2)\n\n make_daily_report()\n\n stat = Statistic.objects.all()[0]\n self.assertEqual(stat.counter, 4)\n\n tracker.reset_cache()\n\n def test_montlyreport(self):\n tracker = Tracker()\n def add_static(labels, **kwargs):\n for label in parse_query(labels):\n s = Statistic(label=label.get('lbl'), category=label.get('cat'),\n dom_id=label.get('domid', ''), **kwargs)\n\n s.save()\n add_static(\"us_link1.com:links|es_link2.com:links|us_link2.com:links\",\n counter=5)\n\n add_static(\"us_link1.com:links|es_link4.com:links|us_link3.com:links\",\n counter=10)\n\n report = make_monthly_report_country()\n def ListEqual(list1, list2):\n if len(list1) != len(list2):\n return False\n for ele in list1:\n if ele not in list2:\n return False\n return True\n self.assertTrue(ListEqual(report['us'].keys(), ['link1.com', 'link2.com',\n 'link3.com']))\n self.assertTrue(ListEqual(report['es'].keys(), ['link4.com',\n 'link2.com']))\n self.assertEqual(report['us']['link1.com'], 15)\n self.assertEqual(report['es']['link2.com'], 5)\n self.assertEqual(report['us']['link3.com'], 10)\n self.assertEqual(report['fr'], dict())\n","sub_path":"tracker/tests/unit_test.py","file_name":"unit_test.py","file_ext":"py","file_size_in_byte":3444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"416437303","text":"# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom unittest import mock\n\nfrom delfin.drivers import fake_storage\nfrom delfin.task_manager.tasks import resources\nfrom delfin.task_manager.tasks.resources import StorageDeviceTask\n\nfrom delfin import test, context, coordination\n\nstorage = {\n 'id': '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',\n 'name': 'fake_driver',\n 'description': 'it is a fake driver.',\n 'vendor': 'fake_vendor',\n 'model': 'fake_model',\n 'status': 'normal',\n 'serial_number': '2102453JPN12KA000011',\n 'firmware_version': '1.0.0',\n 'location': 'HK',\n 'total_capacity': 1024 * 1024,\n 'used_capacity': 3126,\n 'free_capacity': 1045449,\n}\n\npools_list = [{\n 'id': '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',\n \"name\": \"fake_pool_\" + str(id),\n \"storage_id\": '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',\n \"native_storage_pool_id\": \"fake_original_id_\" + str(id),\n 
\"description\": \"Fake Pool\",\n \"status\": \"normal\",\n \"total_capacity\": 1024 * 1024,\n \"used_capacity\": 3126,\n \"free_capacity\": 1045449,\n}\n]\n\nvols_list = [{\n 'id': '12c2d52f-01bc-41f5-b73f-7abf6f38a340',\n \"name\": \"fake_vol_\" + str(id),\n \"storage_id\": '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',\n \"description\": \"Fake Volume\",\n \"status\": \"normal\",\n \"native_volume_id\": \"fake_original_id_\" + str(id),\n \"wwn\": \"fake_wwn_\" + str(id),\n \"total_capacity\": 1024 * 1024,\n \"used_capacity\": 3126,\n \"free_capacity\": 1045449,\n}\n]\n\n\nclass TestStorageDeviceTask(test.TestCase):\n def setUp(self):\n super(TestStorageDeviceTask, self).setUp()\n self.driver_api = mock.Mock()\n self.task_manager = StorageDeviceTask(\n context, \"12c2d52f-01bc-41f5-b73f-7abf6f38a2a6\")\n self.mock_object(self.task_manager, 'driver_api', self.driver_api)\n\n @mock.patch.object(coordination.LOCK_COORDINATOR, 'get_lock')\n @mock.patch('delfin.drivers.api.API.get_storage')\n @mock.patch('delfin.db.storage_update')\n @mock.patch('delfin.db.storage_get')\n @mock.patch('delfin.db.storage_delete')\n @mock.patch('delfin.db.access_info_delete')\n @mock.patch('delfin.db.alert_source_delete')\n def test_sync_successful(self, alert_source_delete, access_info_delete,\n mock_storage_delete, mock_storage_get,\n mock_storage_update, mock_get_storage, get_lock):\n storage_obj = resources.StorageDeviceTask(\n context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n\n storage_obj.sync()\n self.assertTrue(get_lock.called)\n self.assertTrue(mock_storage_get.called)\n self.assertTrue(mock_storage_delete.called)\n self.assertTrue(access_info_delete.called)\n self.assertTrue(alert_source_delete.called)\n self.assertTrue(mock_storage_update.called)\n mock_get_storage.assert_called_with(\n context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n\n fake_storage_obj = fake_storage.FakeStorageDriver()\n mock_get_storage.return_value = fake_storage_obj.get_storage(context)\n storage_obj.sync()\n\n @mock.patch('delfin.db.storage_delete')\n @mock.patch('delfin.db.alert_source_delete')\n def test_successful_remove(self, mock_alert_del, mock_strg_del):\n storage_obj = resources.StorageDeviceTask(\n context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n storage_obj.remove()\n\n mock_strg_del.assert_called_with(\n context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n mock_alert_del.assert_called_with(\n context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n\n\nclass TestStoragePoolTask(test.TestCase):\n @mock.patch.object(coordination.LOCK_COORDINATOR, 'get_lock')\n @mock.patch('delfin.drivers.api.API.list_storage_pools')\n @mock.patch('delfin.db.storage_pool_get_all')\n @mock.patch('delfin.db.storage_pools_delete')\n @mock.patch('delfin.db.storage_pools_update')\n @mock.patch('delfin.db.storage_pools_create')\n def test_sync_successful(self, mock_pool_create, mock_pool_update,\n mock_pool_del, mock_pool_get_all,\n mock_list_pools, get_lock):\n pool_obj = resources.StoragePoolTask(\n context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n pool_obj.sync()\n\n self.assertTrue(mock_list_pools.called)\n self.assertTrue(mock_pool_get_all.called)\n self.assertTrue(get_lock.called)\n\n # collect the pools from fake_storage\n fake_storage_obj = fake_storage.FakeStorageDriver()\n\n # add the new pool to DB\n mock_list_pools.return_value = fake_storage_obj.list_storage_pools(\n context)\n mock_pool_get_all.return_value = list()\n pool_obj.sync()\n self.assertTrue(mock_pool_create.called)\n\n # update the new pool of DB\n mock_list_pools.return_value = 
pools_list\n mock_pool_get_all.return_value = pools_list\n pool_obj.sync()\n self.assertTrue(mock_pool_update.called)\n\n # delete the new pool to DB\n mock_list_pools.return_value = list()\n mock_pool_get_all.return_value = pools_list\n pool_obj.sync()\n self.assertTrue(mock_pool_del.called)\n\n @mock.patch('delfin.db.storage_pool_delete_by_storage')\n def test_remove(self, mock_pool_del):\n pool_obj = resources.StoragePoolTask(\n context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n pool_obj.remove()\n self.assertTrue(mock_pool_del.called)\n\n\nclass TestStorageVolumeTask(test.TestCase):\n @mock.patch.object(coordination.LOCK_COORDINATOR, 'get_lock')\n @mock.patch('delfin.drivers.api.API.list_volumes')\n @mock.patch('delfin.db.volume_get_all')\n @mock.patch('delfin.db.volumes_delete')\n @mock.patch('delfin.db.volumes_update')\n @mock.patch('delfin.db.volumes_create')\n def test_sync_successful(self, mock_vol_create, mock_vol_update,\n mock_vol_del, mock_vol_get_all, mock_list_vols,\n get_lock):\n vol_obj = resources.StorageVolumeTask(\n context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n vol_obj.sync()\n self.assertTrue(mock_list_vols.called)\n self.assertTrue(mock_vol_get_all.called)\n self.assertTrue(get_lock.called)\n\n # collect the volumes from fake_storage\n fake_storage_obj = fake_storage.FakeStorageDriver()\n\n # add the volumes to DB\n mock_list_vols.return_value = fake_storage_obj.list_volumes(context)\n mock_vol_get_all.return_value = list()\n vol_obj.sync()\n self.assertTrue(mock_vol_create.called)\n\n # update the volumes to DB\n mock_list_vols.return_value = vols_list\n mock_vol_get_all.return_value = vols_list\n vol_obj.sync()\n self.assertTrue(mock_vol_update.called)\n\n # delete the volumes to DB\n mock_list_vols.return_value = list()\n mock_vol_get_all.return_value = vols_list\n vol_obj.sync()\n self.assertTrue(mock_vol_del.called)\n\n @mock.patch('delfin.db.volume_delete_by_storage')\n def test_remove(self, mock_vol_del):\n vol_obj = resources.StorageVolumeTask(\n context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n vol_obj.remove()\n self.assertTrue(mock_vol_del.called)\n","sub_path":"delfin/tests/unit/task_manager/test_resources.py","file_name":"test_resources.py","file_ext":"py","file_size_in_byte":7828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"13552524","text":"from subprocess import call\nimport argparse\nimport utils\nimport os\nimport traceback\nimport numpy as np\ndef get_leg_files(side,path):\n '''\n finds all leg files for a side: right or left\n :param side:\n :param path:\n :return:\n '''\n return filter(lambda x: side in x, utils.find_all_files(path,'.csv'))\n\ndef get_seqs_by_annotator(annotator_name, status_file_name=utils.DEFAULT_ANNOTATION_STATUS_FILE):\n '''\n :returns the list of p-nums for a given annotator that haven't been dropped\n :param annotator_name:\n :param status_file_name:\n :return:\n '''\n annotators = utils.read_column(status_file_name, 'WHO?')\n # get column p-num\n p_nums = utils.read_column(status_file_name, 'p-num')\n subsequences = utils.read_column(status_file_name, 'subsequence')\n # filter by annotator\n indices = [i for i, x in enumerate(annotators) if x == annotator_name]\n seqs_annotator = [os.path.join('%03d' % int(p_nums[i]),subsequences[i]) for i in indices if p_nums[i] != '']\n # filtering out the people\n seqs_non_dropped = set(get_nondroped_seqs())\n p_nums_final = [seq for seq in seqs_annotator if seq in seqs_non_dropped]\n return p_nums_final\n\ndef 
apply_fix(rules, files, foot, invert=False):\n print(files)\n for f in files:\n frames = utils.read_column(f, 1)\n print('rules')\n print(rules)\n\n labels = utils.read_column(f, 0)\n print('labels read: ')\n print(labels)\n\n values = list(map(lambda x: int(x), frames))\n print('values read: ')\n print(values)\n if invert:\n print('inverting....')\n initial_value = values[0]\n final_value = values[-1]\n i = 0\n for value, label in zip(values, labels):\n # if value is not\n if value - initial_value > 1 and final_value - value > 1:\n # apply inversion\n if label == 'g':\n labels[i] = 'a'\n elif label == 'a':\n labels[i] = 'g'\n i += 1\n\n N = len(labels)\n for i, label in enumerate(labels):\n if i < N-1:\n next_label = labels[i+1]\n # transicion g to a\n cond1 = label == 'g' and next_label == 'a'\n # transition a to g\n cond2 = label == 'a' and next_label == 'g'\n if cond1:\n values[i] = values[i]-rules[1]\n elif cond2:\n values[i] = values[i]-rules[0]\n\n # move the final values\n values[0] -= rules[0]\n values[-1] -= rules[3]\n # correct overlaps\n for i, label_value in enumerate(zip(labels,values)):\n if i < len(values)-1:\n current_value = label_value[1]\n next_value = values[i+1]\n if current_value >= next_value:\n values.pop(i + 1)\n labels.pop(i + 1)\n\n print('corrected labels')\n print(labels)\n print('corrected values')\n print(values)\n values = map(lambda x: str(x), values)\n rows = [labels,values]\n print('final result')\n print(rows)\n print('writing leg file: ' + f)\n utils.write_csv(f,rows)\n\n\ndef correct_annotations(manual_annotation_path):\n rules = {'Yiyang':[2,0,1,0], 'Prateek':[1,1,0,3], 'Benjamin':[2,1,1,0]}\n print('Applying corrections....')\n for annotator_name in rules:\n # special case for benjamin\n if annotator_name == 'Benjamin':\n invert = True\n else:\n invert = False\n people = get_seqs_by_annotator(annotator_name)\n if not people:\n print('No data has been found for annotator: ' + annotator_name)\n continue\n print('Corrections for the annotator: ' + annotator_name + '...')\n for person in people:\n # Look in the folder for all the csv and apply rule for the given annotator\n path_to_annotations = os.path.join(manual_annotation_path,person)\n right_files = get_leg_files('right', path_to_annotations)\n apply_fix(rules[annotator_name], right_files, 'right', invert = invert)\n left_files = get_leg_files('left', path_to_annotations)\n apply_fix(rules[annotator_name], left_files, 'left', invert=invert)\n\ndef get_nondroped_people(status_file_name=utils.DEFAULT_ANNOTATION_STATUS_FILE):\n notes = utils.read_column(status_file_name, 'Notes')\n p_nums = utils.read_column(status_file_name, 'p-num')\n p_nums = [p_num for p_num, note in zip(p_nums, notes) if not note == 'dropped' and not p_num == '']\n p_nums = ['%03d' % int(p_num) for p_num in p_nums]\n return sorted(list(set(p_nums)))\n\n\ndef get_nondroped_seqs(status_file_name = utils.DEFAULT_ANNOTATION_STATUS_FILE):\n notes = utils.read_column(status_file_name, 'Notes')\n p_nums = utils.read_column(status_file_name, 'p-num')\n subsequences = utils.read_column(status_file_name, 'subsequence')\n p_nums = [os.path.join('%03d' % int(p_num) ,ss) for p_num, note, ss in zip(p_nums, notes, subsequences) if not note == 'dropped' and not p_num == '']\n return sorted(list(set(p_nums)))\n\n\n\ndef runGeneration(manual_annotation_path,output_path, fixing):\n\n try:\n if fixing:\n print('=====================================')\n print('=====================================')\n print(' Fixing annotations... 
')\n print('=====================================')\n print('=====================================')\n correct_annotations(manual_annotation_path)\n p_nums = get_nondroped_people()\n except AttributeError as e:\n traceback.print_exc()\n print(e)\n print('ERROR: sources/annotationsStatu.csv is not valid')\n return \n people_path = [os.path.join(manual_annotation_path, s) for s in p_nums]\n print('=====================================')\n print('=====================================')\n print('Executing annotation genererator jar... ')\n print('=====================================')\n print('=====================================')\n for person_path in people_path:\n gen_command = [\"java\", \"-cp\", \"./sources/annotation-generator-v-3.jar\", \"com.deeplearning.app.App\", \"-m\",\n person_path,\"-o\",output_path]\n call(gen_command)\n\n print('=====================================')\n print('=====================================')\n print(' FINISH :) ')\n print('=====================================')\n print('=====================================')\n\ndef runHelp():\n call([\"java\", \"-cp\", \"./sources/annotation-generator-v-2.jar\",\n \"com.deeplearning.app.App\", \"-h\"])\n\n\ndef main():\n parser = argparse.ArgumentParser(description='run the annotation-generation.jar that converts cvs seq into a ods')\n parser.add_argument(\"annotation_path\", metavar='path to annotations csv',\n help=\"folder of the csv sequences (example ./90_degree_annotations)\")\n parser.add_argument(\"output_path\", metavar='path output annotations ods',\n help=\"folder to save the final annotation sheets (example ./final_annotations)\")\n parser.add_argument('-f', '--fix', help='flag that activates the fixings', action='store_true')\n\n args = parser.parse_args()\n manual_annotation_path = str(args.annotation_path)\n ######################\n # Configurations\n #########################\n if not args.fix:\n Fixing = False\n else:\n Fixing = True\n ####################\n output_path = args.output_path\n runGeneration(manual_annotation_path, output_path, fixing = Fixing)\n\n\nif __name__ == \"__main__\":\n # execute only if run as a script\n main()\n","sub_path":"src/annotation_generator.py","file_name":"annotation_generator.py","file_ext":"py","file_size_in_byte":7853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"116994585","text":"# coding: utf-8\n\nfrom __future__ import absolute_import\n\nfrom flask import json\nfrom six import BytesIO\n\nfrom app.openapi_server.models.coded_error import CodedError # noqa: E501\nfrom app.openapi_server.models.invalid_message_error import InvalidMessageError # noqa: E501\nfrom app.openapi_server.models.json_success import JsonSuccess # noqa: E501\nfrom app.openapi_server.models.json_success_base import JsonSuccessBase # noqa: E501\nfrom app.openapi_server.models.one_ofobjectobject import OneOfobjectobject # noqa: E501\nfrom app.openapi_server.models.one_ofstringinteger import OneOfstringinteger # noqa: E501\nfrom openapi_server.test import BaseTestCase\n\n\nclass TestMessagesController(BaseTestCase):\n \"\"\"MessagesController integration test stubs\"\"\"\n\n def test_add_reaction(self):\n \"\"\"Test case for add_reaction\n\n Add an emoji reaction\n \"\"\"\n query_string = [('emoji_name', 'octopus'),\n ('emoji_code', '1f419'),\n ('reaction_type', 'unicode_emoji')]\n response = self.client.open(\n '/api/v1/messages/{message_id}/reactions'.format(message_id=42),\n method='POST',\n query_string=query_string)\n 
self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))\n\n def test_check_messages_match_narrow(self):\n \"\"\"Test case for check_messages_match_narrow\n\n Check if messages match a narrow\n \"\"\"\n query_string = [('msg_ids', [56]),\n ('narrow', None)]\n response = self.client.open(\n '/api/v1/messages/matches_narrow',\n method='GET',\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))\n\n def test_delete_message(self):\n \"\"\"Test case for delete_message\n\n Delete a message\n \"\"\"\n response = self.client.open(\n '/api/v1/messages/{message_id}'.format(message_id=42),\n method='DELETE')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))\n\n def test_get_file_temporary_url(self):\n \"\"\"Test case for get_file_temporary_url\n\n Get public temporary URL\n \"\"\"\n response = self.client.open(\n '/api/v1/user_uploads/{realm_id_str}/{filename}'.format(realm_id_str=1, filename='4e/m2A3MSqFnWRLUf9SaPzQ0Up_/zulip.txt'),\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))\n\n def test_get_message_history(self):\n \"\"\"Test case for get_message_history\n\n Get a message's edit history\n \"\"\"\n response = self.client.open(\n '/api/v1/messages/{message_id}/history'.format(message_id=42),\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))\n\n def test_get_messages(self):\n \"\"\"Test case for get_messages\n\n Get messages\n \"\"\"\n query_string = [('anchor', openapi_server.OneOfstringinteger()),\n ('num_before', 4),\n ('num_after', 8),\n ('narrow', []),\n ('client_gravatar', False),\n ('apply_markdown', True),\n ('use_first_unread_anchor', False)]\n response = self.client.open(\n '/api/v1/messages',\n method='GET',\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))\n\n def test_get_raw_message(self):\n \"\"\"Test case for get_raw_message\n\n Get a message's raw Markdown\n \"\"\"\n response = self.client.open(\n '/api/v1/messages/{message_id}'.format(message_id=42),\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))\n\n def test_mark_all_as_read(self):\n \"\"\"Test case for mark_all_as_read\n\n Mark all messages as read\n \"\"\"\n response = self.client.open(\n '/api/v1/mark_all_as_read',\n method='POST')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))\n\n def test_mark_stream_as_read(self):\n \"\"\"Test case for mark_stream_as_read\n\n Mark messages in a stream as read\n \"\"\"\n query_string = [('stream_id', 42)]\n response = self.client.open(\n '/api/v1/mark_stream_as_read',\n method='POST',\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))\n\n def test_mark_topic_as_read(self):\n \"\"\"Test case for mark_topic_as_read\n\n Mark messages in a topic as read\n \"\"\"\n query_string = [('stream_id', 42),\n ('topic_name', 'new coffee machine')]\n response = self.client.open(\n '/api/v1/mark_topic_as_read',\n method='POST',\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))\n\n def test_remove_reaction(self):\n \"\"\"Test case for remove_reaction\n\n Remove an emoji reaction\n \"\"\"\n query_string = [('emoji_name', 'octopus'),\n ('emoji_code', '1f419'),\n ('reaction_type', 'unicode_emoji')]\n response = 
self.client.open(\n '/api/v1/messages/{message_id}/reactions'.format(message_id=42),\n method='DELETE',\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))\n\n def test_render_message(self):\n \"\"\"Test case for render_message\n\n Render message\n \"\"\"\n query_string = [('content', 'Hello')]\n response = self.client.open(\n '/api/v1/messages/render',\n method='POST',\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))\n\n def test_send_message(self):\n \"\"\"Test case for send_message\n\n Send a message\n \"\"\"\n query_string = [('type', 'private'),\n ('to', [56]),\n ('content', 'Hello'),\n ('topic', 'Castle'),\n ('queue_id', '1593114627:0'),\n ('local_id', '100.01')]\n response = self.client.open(\n '/api/v1/messages',\n method='POST',\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))\n\n def test_update_message(self):\n \"\"\"Test case for update_message\n\n Edit a message\n \"\"\"\n query_string = [('topic', 'Castle'),\n ('propagate_mode', 'change_one'),\n ('send_notification_to_old_thread', True),\n ('send_notification_to_new_thread', True),\n ('content', 'Hello'),\n ('stream_id', 42)]\n response = self.client.open(\n '/api/v1/messages/{message_id}'.format(message_id=42),\n method='PATCH',\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))\n\n def test_update_message_flags(self):\n \"\"\"Test case for update_message_flags\n\n Update personal message flags\n \"\"\"\n query_string = [('messages', [56]),\n ('op', 'add'),\n ('flag', 'read')]\n response = self.client.open(\n '/api/v1/messages/flags',\n method='POST',\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))\n\n def test_upload_file(self):\n \"\"\"Test case for upload_file\n\n Upload a file\n \"\"\"\n data = dict(filename='/path/to/file')\n response = self.client.open(\n '/api/v1/user_uploads',\n method='POST',\n data=data,\n content_type='multipart/form-data')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))\n\n\nif __name__ == '__main__':\n import unittest\n unittest.main()\n","sub_path":"python-blueplanet/app/openapi_server/test/test_messages_controller.py","file_name":"test_messages_controller.py","file_ext":"py","file_size_in_byte":8735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"246572476","text":"# -*- coding: utf-8 -*-\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport proto # type: ignore\n\n\n__protobuf__ = proto.module(\n package='google.cloud.securitycenter.v1',\n manifest={\n 'Indicator',\n },\n)\n\n\nclass Indicator(proto.Message):\n r\"\"\"Represents what's commonly known as an Indicator of compromise (IoC)\n in computer forensics. 
This is an artifact observed on a network or\n in an operating system that, with high confidence, indicates a\n computer intrusion. Reference:\n https://en.wikipedia.org/wiki/Indicator_of_compromise\n\n Attributes:\n ip_addresses (Sequence[str]):\n List of ip addresses associated to the\n Finding.\n domains (Sequence[str]):\n List of domains associated to the Finding.\n \"\"\"\n\n ip_addresses = proto.RepeatedField(\n proto.STRING,\n number=1,\n )\n domains = proto.RepeatedField(\n proto.STRING,\n number=2,\n )\n\n\n__all__ = tuple(sorted(__protobuf__.manifest))\n","sub_path":"google/cloud/securitycenter/v1/securitycenter-v1-py/google/cloud/securitycenter_v1/types/indicator.py","file_name":"indicator.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"313844234","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\n# C1 related functions\n# *******************************************************\n# derivative of cost func 1: C_1(a,b,c) = SUM max(di)\n# ========================================================\ndef df_cost1(a_b_c, x_y):\n [a, b, c] = a_b_c\n [x, y] = x_y\n d_a = (x * math.sqrt(a ** 2 + b ** 2) - a * (a * x + b * y + c)) / (a ** 2 + b ** 2)\n d_b = (y * math.sqrt(a ** 2 + b ** 2) - b * (a * x + b * y + c)) / (a ** 2 + b ** 2)\n d_c = (1 / math.sqrt(a ** 2 + b ** 2))\n df = np.asarray([d_a, d_b, d_c])\n return df\n\n# adjust (a,b,c) based on h and df\n# ========================================================\ndef update_coef(a_b_c, d_abc):\n a_updated = a_b_c[0] + d_abc[0]\n b_updated = a_b_c[1] + d_abc[1]\n c_updated = a_b_c[2] + d_abc[2]\n a_b_c_updated = [a_updated, b_updated, c_updated]\n return a_b_c_updated\n\n# distance between point (x,y) to line, ax+by+c=0\n# ========================================================\ndef normalDistance(a_b_c, x_y):\n a = a_b_c[0]\n b = a_b_c[1]\n c = a_b_c[2]\n x = x_y[0]\n y = x_y[1]\n # compute vertical distance between (x,y) and the line(a,b,c)\n a_b_c = np.asarray([a, b, c])\n d = (a * x + b * y + c) / math.sqrt(a ** 2 + b ** 2)\n return d\n\n# maximum of d (in-batch)\n# ============================\ndef sort_topN_max(d, N_top):\n d_sorted_big2small = np.sort(d)[::-1][0:N_top]\n return d_sorted_big2small\n\n\n# C2 related function\n# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n# derivative of cost func 2: C_2(a,b,c) = SUM (-ax/b-c/b-y)^2\n# ========================================================\ndef df_cost2(a_b_c, x_y):\n [a, b, c] = a_b_c\n [x, y] = x_y\n da = (-x / b) * 2 * (-a * x / b - c / b - y)\n db = (a * x + c) / b * 2 * (-a * x / b - c / b - y)\n dc = (-1 / b) * 2 * (-a * x / b - c / b - y)\n df = np.asarray([da, db, dc])\n return df\n\n\n# Top_N minimum of a numpy array\n# ========================================================\ndef sort_topN_min(d, N_top):\n d_sorted_small2big = np.sort(d)[::-1][0:N_top]\n return d_sorted_small2big\n\n\n# epsilon\n# ========================================================\ndef epsilon_cost2(a_b_c, x_y):\n [a, b, c] = a_b_c\n [x, y] = x_y\n epsilon = (-a * x / b - c / b - y)\n return epsilon\n\n\n# Plot\n# ###############################################\n# get y from ax+by+c=0\ndef get_y_from_a_b_c(x, a_b_c_final):\n # [a, b, c] = a_b_c_final\n a = a_b_c_final[0]\n b = a_b_c_final[1]\n c = a_b_c_final[2]\n # y = -ax/b -c/b\n y_regression_line = -a * x / b - c / b\n return y_regression_line\n\n\n# Plot\n# 
========================================================\ndef plot_data(x_y, a_b_c_final):\n # [x, y] = x_y.reshape(N_batches*N_xy_perBatch,2)\n x = x_y.reshape(N_batches * N_xy_perBatch, 2)[:, 0]\n y = x_y.reshape(N_batches * N_xy_perBatch, 2)[:, 1]\n # [a, b, c] = a_b_c_final\n a = a_b_c_final[0]\n b = a_b_c_final[1]\n c = a_b_c_final[2]\n # get y on the regression line\n y_regression_line = get_y_from_a_b_c(x, a_b_c_final)\n # plot\n plt.figure()\n plt.plot(x, y, 'bo', x, y_regression_line, 'k')\n plt.yscale('linear')\n plt.xlabel('x')\n plt.ylabel('y')\n plt.title('Liner Regression Classifies Input Data')\n plt.grid(True)\n # plt.legend(loc='lower right')\n plt.show()\n\n# Plot\n# ========================================================\ndef plot_data_compare2line(x_y, a_b_c_final, a_b_c_final_2):\n # [x, y] = x_y.reshape(N_batches*N_xy_perBatch,2)\n x = x_y.reshape(N_batches * N_xy_perBatch, 2)[:, 0]\n y = x_y.reshape(N_batches * N_xy_perBatch, 2)[:, 1]\n # [a, b, c] = a_b_c_final\n a = a_b_c_final[0]\n b = a_b_c_final[1]\n c = a_b_c_final[2]\n # get y on the regression line\n y_regression_line = get_y_from_a_b_c(x, a_b_c_final)\n y_regression_line_2 = get_y_from_a_b_c(x, a_b_c_final_2)\n # plot\n plt.figure()\n plt.plot(x, y, 'bo', label='Input Points')\n plt.plot(x, y_regression_line, 'k', label='$Mean(\\epsilon^2)$')\n plt.plot(x, y_regression_line_2, 'g*', label='Sum $\\epsilon^2/N_perBatch$')\n plt.yscale('linear')\n plt.xlabel('x')\n plt.ylabel('y')\n plt.title('Liner Regression Classifies Input Data')\n plt.grid(True)\n plt.legend(loc='lower right')\n plt.show()\n\n# ###############################################\n# Processing\n# ###############################################\n\n# Initializing\n# ===============================================\nN_batches = 7\nN_xy_perBatch = 5\nN_top = 3\nN_iter = 49\nN_epoch = 7\n\n# initial value of a,b,c and input data x_y\n# -------------------------------------------------------\na_b_c_initial_value = np.random.rand(1, 3)\nx_y = np.random.rand(N_batches, N_xy_perBatch, 2)\n\n# initialization of min c1\n# *******************************************************\na_b_c_is = np.zeros((N_batches, N_xy_perBatch, 3))\nd = np.zeros((N_batches, N_xy_perBatch, 1))\nd_mean_list = [] # list\nd_max_index = np.zeros((N_batches, 1))\nd_sortedTopN_big2small = np.zeros((N_batches, N_top))\nd_mean = np.zeros((N_batches, 1))\nd_max_index_list = []\na_b_c_final = np.zeros((1, 3))\nx_y_final = np.zeros((1, 2))\n\n# Initialization of min c2\n# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\na_b_c_is_c2 = np.empty((N_batches, N_xy_perBatch, 3))\nepsilon = np.zeros((N_batches, N_xy_perBatch, 1))\nepsilon_min_index = np.zeros((N_batches, 1))\nepsilon_min_index_list = []\nepsilon_sortedTopN_small2big = np.zeros((N_batches, N_top))\nepsilon_mean = np.zeros((N_batches, 1))\nepsilon_mean_list = []\na_b_c_final_c2 = np.zeros((1, 3))\nx_y_final_c2 = np.zeros((1, 2))\n\n# Initialization of min c3\n# ######################################################\nepsilon_sum_perBatch = np.zeros((N_batches,1))\nepsilon_sum_perBatch_list = []\nepsilon_sum_perBatch_min_index = np.zeros((N_batches,1))\nepsilon_sum_perBatch_min_index_list = []\nepsilon_sum_mean = np.zeros((N_batches,1))\nepsilon_sum_mean_list = []\na_b_c_final_c3 = np.zeros((1,3))\nx_y_final_c3 = np.zeros((1,2))\n\n\ncount = 0\n\nprint('Starting ........................................................')\nfor k in range(N_epoch):\n print('Epoch = ', k)\n # minimize max(d[i])\n # 
*******************************************************\n for i in range(N_iter):\n print('Iteration = ', i)\n print('Iteration C1')\n print('======================================================')\n # Parallel processing cross all batches\n # ========================================================\n a_b_c = a_b_c_initial_value[0]\n for j in range(N_batches):\n # Serial Max Processing inside Each Batch\n # -------------------------------------------------------\n h_list = []\n for i in range(N_xy_perBatch):\n count = count + 1\n # derivative and based input(x,y) to update (a,b,c)\n df = df_cost1(a_b_c, x_y[j][i])\n a_b_c_is[j][i] = update_coef(a_b_c, df)\n # compute the normal distance\n d[j][i] = normalDistance(a_b_c_is[j][i], x_y[j][i])\n h_list.append(float(d[j][i]))\n # update a,b,c\n a_b_c = a_b_c_is[j][i]\n # the index of maximum d per batch\n d_max_index[j] = np.argmax(np.asarray(h_list))\n d_max_index_list.append(int(d_max_index[j]))\n # pick up the N_top maximum elements in h_list\n # d_sortedTopN_big2small[j] = sort_topN_max(h_list, N_top)\n d_sortedTopN_big2small[j] = np.sort(h_list)[::-1][0:N_top]\n # mean of the top_N h\n d_mean[j] = np.mean(d_sortedTopN_big2small[j])\n d_mean_list.append(float(d_mean[j]))\n print('len(d_max_index_list) = ', len(d_max_index_list))\n print('d_max_index_list = ', d_max_index_list)\n # -------------------------------------------------------\n # index of the maximum of d_mean of all batches\n d_max_index_allBatches = np.argmax(np.asarray(d_mean_list))\n # minimum value of d\n d_max = max(np.asarray(d_mean_list))\n # based d_max_index_allBatches to reach the final a,b,c\n print('d_max_index_allBatches =', d_max_index_allBatches)\n a_b_c_final = a_b_c_is[d_max_index_allBatches][np.asarray(d_max_index_list)[d_max_index_allBatches]]\n # get the point (x,y)\n x_y_final = x_y[d_max_index_allBatches, np.asarray(d_max_index_list)[d_max_index_allBatches]]\n # *******************************************************\n\n\n\n print('Epoch C2')\n print('======================================================')\n a_b_c = a_b_c_final\n # minimize (-a*x[i]/b-c/b-y[i])^2\n # @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n for jj in range(N_batches):\n epsilon_list = []\n epsilon_sum = 0\n for ii in range(N_xy_perBatch):\n # derivative and based input(x,y) to update (a,b,c)\n df_c2 = df_cost2(a_b_c, x_y[jj][ii])\n a_b_c_is_c2[jj][ii] = update_coef(a_b_c, df_c2)\n # epsilon of cost function 2\n epsilon[jj][ii] = epsilon_cost2(a_b_c, x_y[jj][ii])\n epsilon_list.append(float(epsilon[jj][ii]))\n # sum of epsilon\n epsilon_sum = epsilon_sum + epsilon[jj][ii]\n # update a_b_c_c2\n a_b_c = a_b_c_is_c2[jj][ii]\n # epsilon\n # ---------------------------------------------------------------------------\n # the index of minimum of epsilon per batch\n epsilon_min_index[jj] = np.argmin(np.asarray(epsilon_list))\n # save index\n epsilon_min_index_list.append(int(epsilon_min_index[jj][0]))\n # pick up the N_top minimum elements in epsilon_list\n epsilon_sortedTopN_small2big[jj] = np.sort(epsilon_list)[0:N_top]\n # mean of the top_N minimum epsilon\n epsilon_mean[jj] = np.mean(epsilon_sortedTopN_small2big[jj])\n epsilon_mean_list.append(float(epsilon_mean[jj]))\n\n # sum epsilon\n epsilon_sum_perBatch[jj] = epsilon_sum\n epsilon_sum_mean[jj] = epsilon_sum_perBatch[jj]/N_xy_perBatch\n # ---------------------------------------------------------------------------\n print('Epsilon')\n print('-----------------------------------------')\n print('len(epsilon_min_index_list) = ', 
len(epsilon_min_index_list))\n print('epsilon_min_index_list = ', epsilon_min_index_list)\n # index of the minimum of epsilon of all\n epsilon_min_index_allBatches = np.argmin(epsilon_mean)\n epsilon_min = min(epsilon_mean)\n a_b_c_final_c2 = a_b_c_is_c2[epsilon_min_index_allBatches][np.asarray(epsilon_min_index_list)[epsilon_min_index_allBatches]]\n x_y_final_c2 = x_y[epsilon_min_index_allBatches][np.asarray(epsilon_min_index_list)[epsilon_min_index_allBatches]]\n\n a_b_c_final_c2 = a_b_c_is_c2[epsilon_min_index_allBatches][int(epsilon_min_index[epsilon_min_index_allBatches])]\n x_y_final_c2 = x_y[epsilon_min_index_allBatches][int(epsilon_min_index[epsilon_min_index_allBatches])]\n\n # ---------------------------------------------------------------------------\n print('Epsilon sum')\n print('-----------------------------------------')\n # index of the minimum of epsilon_sum of all\n epsilon_sum_min_index_allBatches = np.argmin(epsilon_sum_mean)\n epsilon_sum_perBatch_min_index_list.append(epsilon_sum_min_index_allBatches)\n epsilon_sum_min = min(np.asarray(epsilon_sum_mean))\n a_b_c_final_c3 = a_b_c_is_c2[epsilon_sum_min_index_allBatches][np.asarray(epsilon_min_index_list)[epsilon_min_index_allBatches]]\n x_y_final_c3 = x_y[epsilon_sum_min_index_allBatches][np.asarray(epsilon_min_index_list)[epsilon_min_index_allBatches]]\n print((len(epsilon_sum_perBatch_min_index_list)), len(epsilon_sum_perBatch_min_index_list))\n print('epsilon_sum_perBatch_min_index_list = ', epsilon_sum_perBatch_min_index_list)\n # @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n print('Iteration End !!!\\n')\nprint('a_b_c_final_c2 = ', a_b_c_final_c2)\na_b_c_finalResult = a_b_c_final_c2\nplot_data(x_y, a_b_c_finalResult)\n\nprint('a_b_c_final_c3 = ', a_b_c_final_c3)\na_b_c_finalResult = a_b_c_final_c3\nplot_data(x_y, a_b_c_finalResult)\n\nplot_data_compare2line(x_y,a_b_c_final_c2, a_b_c_final_c3)\nprint('End ......................................................................')\n","sub_path":"ml_BatchParallelLinearRegression/linearRegression_minimax_mls_c3_v2_good.py","file_name":"linearRegression_minimax_mls_c3_v2_good.py","file_ext":"py","file_size_in_byte":12322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"408961946","text":"import logging\nimport copy\nimport numpy as np\nimport pandas as pd\n\nfrom supervised.models.learner import Learner\nfrom supervised.tuner.registry import ModelsRegistry\nfrom supervised.tuner.registry import (\n BINARY_CLASSIFICATION,\n MULTICLASS_CLASSIFICATION,\n REGRESSION,\n)\n\nimport xgboost as xgb\nimport operator\n\nlog = logging.getLogger(__name__)\n\n\nclass XgbLearnerException(Exception):\n def __init__(self, message):\n super(XgbLearnerException, self).__init__(message)\n log.error(message)\n\n\nclass XgbLearner(Learner):\n \"\"\"\n This is a wrapper over xgboost algorithm.\n \"\"\"\n\n algorithm_name = \"Extreme Gradient Boosting\"\n algorithm_short_name = \"Xgboost\"\n\n def __init__(self, params):\n super(XgbLearner, self).__init__(params)\n self.library_version = xgb.__version__\n self.model_file = self.uid + \".xgb.model\"\n self.model_file_path = \"/tmp/\" + self.model_file\n\n self.boosting_rounds = additional.get(\n \"one_step\", 50\n ) # params.get(\"boosting_rounds\", 50)\n self.max_iters = additional.get(\"max_steps\", 3)\n self.learner_params = {\n \"booster\": self.params.get(\"booster\", \"gbtree\"),\n \"objective\": self.params.get(\"objective\"),\n \"eval_metric\": 
self.params.get(\"eval_metric\"),\n \"eta\": self.params.get(\"eta\", 0.01),\n \"max_depth\": self.params.get(\"max_depth\", 1),\n \"min_child_weight\": self.params.get(\"min_child_weight\", 1),\n \"subsample\": self.params.get(\"subsample\", 0.8),\n \"colsample_bytree\": self.params.get(\"colsample_bytree\", 0.8),\n \"silent\": self.params.get(\"silent\", 1),\n }\n \"\"\"\n mandatory_params = {\n \"objective\": [\"binary:logistic\"],\n \"eval_metric\": [\"auc\", \"logloss\"],\n }\n for p, v in mandatory_params.items():\n if self.learner_params[p] is None:\n msg = \"Please specify the {0}, it should be one from {1}\".format(p, v)\n raise XgbLearnerException(msg)\n \"\"\"\n log.debug(\"XgbLearner __init__\")\n\n def update(self, update_params):\n # Dont need to update boosting rounds, it is adding rounds incrementally\n pass\n\n def fit(self, X, y):\n dtrain = xgb.DMatrix(X, label=y, missing=np.NaN)\n self.model = xgb.train(\n self.learner_params, dtrain, self.boosting_rounds, xgb_model=self.model\n )\n\n def predict(self, X):\n if self.model is None:\n raise XgbLearnerException(\"Xgboost model is None\")\n dtrain = xgb.DMatrix(X, missing=np.NaN)\n return self.model.predict(dtrain)\n\n def copy(self):\n return copy.deepcopy(self)\n\n def save(self):\n self.model.save_model(self.model_file_path)\n\n json_desc = {\n \"library_version\": self.library_version,\n \"algorithm_name\": self.algorithm_name,\n \"algorithm_short_name\": self.algorithm_short_name,\n \"uid\": self.uid,\n \"model_file\": self.model_file,\n \"model_file_path\": self.model_file_path,\n \"params\": self.params,\n }\n\n log.debug(\"XgbLearner save model to %s\" % self.model_file_path)\n return json_desc\n\n def load(self, json_desc):\n\n self.library_version = json_desc.get(\"library_version\", self.library_version)\n self.algorithm_name = json_desc.get(\"algorithm_name\", self.algorithm_name)\n self.algorithm_short_name = json_desc.get(\n \"algorithm_short_name\", self.algorithm_short_name\n )\n self.uid = json_desc.get(\"uid\", self.uid)\n self.model_file = json_desc.get(\"model_file\", self.model_file)\n self.model_file_path = json_desc.get(\"model_file_path\", self.model_file_path)\n self.params = json_desc.get(\"params\", self.params)\n\n log.debug(\"XgbLearner load model from %s\" % self.model_file_path)\n self.model = xgb.Booster() # init model\n self.model.load_model(self.model_file_path)\n\n def importance(self, column_names, normalize=True):\n return None\n\n def get_params_key(self):\n params_key = \"key\"\n for p, v in self.params.items():\n params_key += \"_{}_{}\".format(p, str(v))\n return params_key\n\n\n# For binary classification target should be 0, 1. 
There should be no NaNs in target.\nXgbLearnerBinaryClassificationParams = {\n \"booster\": [\"gbtree\", \"gblinear\"],\n \"objective\": [\"binary:logistic\"],\n \"eval_metric\": [\"auc\", \"logloss\"],\n \"eta\": [0.0025, 0.005, 0.0075, 0.01, 0.025, 0.05, 0.075, 0.1],\n \"max_depth\": [1, 2, 3, 4, 5, 6, 7, 8, 9],\n \"min_child_weight\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n \"subsample\": [0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],\n \"colsample_bytree\": [0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],\n}\n\nXgbLearnerRegressionParams = dict(XgbLearnerBinaryClassificationParams)\nXgbLearnerRegressionParams[\"booster\"] = [\"gbtree\"]\nXgbLearnerRegressionParams[\"objective\"] = [\"reg:linear\", \"reg:log\"]\nXgbLearnerRegressionParams[\"eval_metric\"] = [\"rmse\", \"mae\"]\n\nXgbLearnerMulticlassClassificationParams = dict(XgbLearnerBinaryClassificationParams)\n\nadditional = {\n \"one_step\": 50,\n \"train_cant_improve_limit\": 5,\n \"max_steps\": 500,\n \"max_rows_limit\": None,\n \"max_cols_limit\": None,\n}\nrequired_preprocessing = [\n \"missing_values_inputation\",\n \"convert_categorical\",\n \"target_preprocessing\",\n]\n\nModelsRegistry.add(\n BINARY_CLASSIFICATION,\n XgbLearner,\n XgbLearnerBinaryClassificationParams,\n required_preprocessing,\n additional,\n)\nModelsRegistry.add(\n MULTICLASS_CLASSIFICATION,\n XgbLearner,\n XgbLearnerMulticlassClassificationParams,\n required_preprocessing,\n additional,\n)\nModelsRegistry.add(\n REGRESSION,\n XgbLearner,\n XgbLearnerRegressionParams,\n required_preprocessing,\n additional,\n)\n","sub_path":"supervised/models/learner_xgboost.py","file_name":"learner_xgboost.py","file_ext":"py","file_size_in_byte":5864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"530523847","text":"import datetime\nimport jwt\n\nfrom functools import wraps\nfrom flask import request\n\nfrom ..app import db\nfrom ..service.user import get_logged_in_auth\n\n\ndef rate_limit(reqs_per_sec):\n def decorator(f):\n def decorated(*args, **kwargs):\n\n auth = get_logged_in_auth(request)\n if not auth:\n response_object = {\n 'status': 'error',\n 'message': 'Provide a valid auth token.'\n }\n return response_object, 401\n curr_time = datetime.datetime.utcnow()\n interval = datetime.timedelta(seconds=(1 / reqs_per_sec))\n if curr_time - auth.last_modified < interval:\n auth.brute_check += 1\n db.session.commit()\n if auth.brute_check > 60:\n db.session.delete(auth)\n db.session.commit()\n response_object = {\n 'status': 'error',\n 'message': 'You\\'ve been banned'\n }\n return response_object, 429\n attempts = 60 - auth.brute_check\n response_object = {\n 'status': 'error',\n 'message': 'Will ban in %d more attempts' % attempts\n }\n return response_object, 429\n\n return f(*args, **kwargs)\n return decorated\n\n return decorator\n\n\ndef token_required():\n def decorator(f):\n def decorated(*args, **kwargs):\n\n auth = get_logged_in_auth(request)\n if not auth:\n response_object = {\n 'status': 'error',\n 'message': 'Provide a valid auth token.'\n }\n return response_object, 401\n\n return f(*args, **kwargs)\n return decorated\n return decorator\n\n\ndef admin_token_required():\n def decorator(f):\n def decorated(*args, **kwargs):\n auth = get_logged_in_auth(request)\n if auth and auth.admin:\n return f(*args, **kwargs)\n\n response_object = {\n 'status': 'error',\n 'message': 'admin token required'\n }\n return response_object, 401\n return decorated\n return 
decorator\n","sub_path":"Web/Adrift/clicker2.0/clicker/util/decorator.py","file_name":"decorator.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"181139614","text":"from __future__ import division\n\nimport numpy as np\n\n\ndef _sample_from_gaussian(width, height, c_x, c_y, sigma_exp):\n sigma = 2 ** sigma_exp\n # center of the Gaussian (in frame space) is the center of the GT bbox\n gaussian_mean = np.array([c_x, c_y])\n # covariance (diagonal) matrix defined by w and h of GT bbox\n gaussian_cov = np.array([[sigma * width ** 2, 0], [0, sigma * height ** 2]])\n # sample one seed from the bivariate Gaussian\n seedX, seedY = np.random.multivariate_normal(gaussian_mean, gaussian_cov, 1).T\n return seedX[0], seedY[0]\n\n\ndef seed_generator(mode, width, height, c_x, c_y, sigma_exp):\n \"\"\"\n Generates seed-points given a mode and a bounding-box\n\n Parameters\n ----------\n mode: str\n Strategy used for seed generation. Only elongated_gaussian implemented for the moment\n width: int\n width of the bounding-box enclosing the original object\n height: int\n height of the bounding-box enclosing the original object\n c_x: float\n center-x coordinate of the bounding-box enclosing the original object\n c_y: float\n center-y coordinate of the bounding-box enclosing the original object\n sigma_exp: float\n Defines the variance of the Gaussian from which generating the seeds.\n The more negative, the closer to the center the samples will be.\n\n Returns\n -------\n seed_x: float\n x-coordinate of the seed\n seed_y: float\n y-coordinate of the seed\n\n \"\"\"\n if mode == 'elongated_gaussian':\n seed_x, seed_y = _sample_from_gaussian(width, height, c_x, c_y, sigma_exp)\n elif mode == 'gaussian':\n raise NotImplementedError()\n else:\n raise ValueError('Unknown seed generator mode.')\n\n return seed_x, seed_y\n","sub_path":"seedpoint_tracking/seeds.py","file_name":"seeds.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"218754234","text":"#!/usr/bin/env python3\n\nfrom http.server import HTTPServer, BaseHTTPRequestHandler\nfrom optparse import OptionParser\nfrom socketserver import ThreadingMixIn\nfrom cgi import parse_header, parse_multipart, parse_qs\nimport threading\nimport json\n\nuserdb_dict = dict()\nuserlist = \"\"\nmessagedb = []\n\ndef do_LOGIN(username, ip):\n global userdb_dict\n global userlist\n if username not in userdb_dict:\n userdb_dict[username] = ip\n userlist += username\n userlist += \" \"\n print(\"userlist: \", userlist)\n return userlist\n\ndef do_send_message(sender, receiver, codebook, message):\n global messagedb\n messagedb.append([sender,receiver,codebook,message])\n print (\"sender: \" + sender + \" receiver: \" + receiver\n + \" codebook: \" + str(codebook) + \" message: \" + str(message))\n retval = json.dumps({'sent':'true'}, indent=4)\n return retval\n\ndef do_receive_message(receivername):\n global messagedb\n json_message = json.dumps({'nomessage':'nomessage' }, indent=4)\n for i in range(0, len(messagedb)):\n if messagedb[i][1] == receivername:\n json_message = json.dumps({'sender': messagedb[i][0], 'receiver': receivername, 'codebook':str(messagedb[i][2]), 'message':str(messagedb[i][3]) }, indent=4)\n print(json_message)\n return json_message\n return json_message\n\ndef parse_FORM(form, ip):\n\n print(form)\n if \"username\" in form:\n username = 
form[\"username\"]\n print(\"Username: \" + username + \" IP: \" + ip)\n retval = do_LOGIN(username, ip)\n\n elif \"checkmessage\" in form:\n if \"receivername\" in form:\n receivername = form[\"receivername\"]\n retval = do_receive_message(receivername)\n\n\n elif \"sender\" in form:\n sender = form[\"sender\"]\n\n if \"receiver\" in form:\n receiver = form[\"receiver\"]\n if \"codebook\" in form:\n codebook = form[\"codebook\"]\n if \"message\" in form:\n message = form[\"message\"]\n\n retval = do_send_message(sender, receiver, codebook, message)\n\n else:\n print(\"ERROR: Wrong form parameter(s)\")\n retval = \"ERROR: Wrong form parameter(s)\"\n return retval\n\n\n\nclass RequestHandler(BaseHTTPRequestHandler):\n\n def _set_headers(self):\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n\n def do_HEAD(self):\n self._set_headers()\n\n\n def do_GET(self):\n\n request_path = self.path\n\n print(\"\\n----- Request Start ----->\\n\")\n print(\"Request path:\", request_path)\n print(\"Request headers:\", self.headers)\n print(\"<----- Request End -----\\n\")\n\n self.send_response(200)\n self.send_header(\"Set-Cookie\", \"foo=bar\")\n self.end_headers()\n\n def parse_POST(self):\n ctype, pdict = parse_header(self.headers['content-type'])\n #pdict['boundary'] = bytes(pdict['boundary'], \"utf-8\")\n if ctype == 'multipart/form-data':\n postvars = parse_multipart(self.rfile, pdict)\n elif ctype == 'application/x-www-form-urlencoded':\n length = int(self.headers['content-length'])\n postvars = parse_qs(\n self.rfile.read(length),\n keep_blank_values=1)\n else:\n postvars = {}\n\n newdict = {}\n print(postvars)\n for key, value in postvars.items():\n newkey = str(key)[2:-1]\n newval = str(value)[3:-2]\n newdict[newkey] = newval\n\n return newdict\n\n\n def do_POST(self):\n global userlist\n form = self.parse_POST()\n request_path = self.path\n\n print(\"\\n----- Request Start ----->\\n\")\n print(\"Request path:\", request_path)\n\n request_headers = self.headers\n content_length = request_headers.get('Content-Length')\n length = int(content_length) if content_length else 0\n\n print(\"Content Length:\", length)\n print(\"Request headers:\", request_headers)\n\n # payload_raw = self.rfile.read(length)\n # payload = str(payload_raw)[2:-1]\n\n # print(\"PAYLOAD: \", payload)\n print(\"<----- Request End -----\\n\")\n ip = self.client_address[0]\n print (\"IP address and port number is : \", ip)\n\n self.send_response(200)\n self.end_headers()\n\n retval = parse_FORM(form, ip)\n\n self.wfile.write(bytes(retval, \"utf-8\"))\n self.wfile.write(bytes('\\n', \"utf-8\"))\n\n do_PUT = do_POST\n do_DELETE = do_GET\n\nclass ThreadedHTTPServer(ThreadingMixIn, HTTPServer):\n pass\n \"\"\"Handle requests in a separate thread.\"\"\"\n\ndef main():\n print(\"IP: \", end='')\n ip = input()\n port = 8080\n server = ThreadedHTTPServer((ip, port), RequestHandler)\n print('Listening on %s:%s' % (ip,port))\n server.serve_forever()\n\n\nif __name__ == \"__main__\":\n parser = OptionParser()\n parser.usage = (\"Creates an http-server that will echo out any GET or POST parameters\\n\"\n \"Run:\\n\\n\"\n \" reflect\")\n (options, args) = parser.parse_args()\n\n main()\n","sub_path":"HServer.py","file_name":"HServer.py","file_ext":"py","file_size_in_byte":5101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"405043349","text":"# -*- coding: utf-8 -*-\r\nimport json\r\nimport os\r\nimport re\r\nimport 
urllib.request\r\n\r\nfrom bs4 import BeautifulSoup\r\nfrom slackclient import SlackClient\r\nfrom flask import Flask, request, make_response, render_template\r\n\r\napp = Flask(__name__)\r\n\r\nslack_token = \"xoxp-506652758256-507485013618-507374980436-58849ee2e59fe08a9ea036bb8b905981\"\r\nslack_client_id = \"506652758256.506940430913\"\r\nslack_client_secret = \"714110f0bb0e67434431bd1501272f96\"\r\nslack_verification = \"7w9uJemGJuqlLMlwdgx5hxgp\"\r\nsc = SlackClient(slack_token)\r\n\r\n###입력 예시 : 남자,1992,10,22,양력,평달\r\n\r\n# 크롤링 함수 구현하기\r\ndef _crawl_naver_keywords(text):\r\n text = text.split(',')\r\n text[0] = text[0][-2:]\r\n\r\n if '남자' in text : text[0] = '1'\r\n else : text[0] = '2'\r\n\r\n if '양력' in text : text[4] = '01'\r\n else : text[4] = '02'\r\n\r\n if '평달' in text : text[5] = '01'\r\n else : text[5] = '02'\r\n\r\n url = \"http://freeunsesite.co.kr/index.php?unse_yy=2019&unse1_sex=\" + text[0] + \"&unse1_yy=\" + text[1] + \"&unse1_mm=\" + text[2] + \"&unse1_dd=\" + text[3] + \"&unse1_hh=N&unse1_solun=\" + text[4] + \"&unse1_lun_yn=\" + text[5]\r\n req = urllib.request.Request(url)\r\n\r\n sourcecode = urllib.request.urlopen(url).read().decode('cp949')\r\n soup = BeautifulSoup(sourcecode, \"html.parser\")\r\n\r\n res = []\r\n \r\n res.append(\"2019년 총운\")\r\n\r\n for i in soup.find_all(\"td\", style = \"padding:10px; line-height:16px\") :\r\n res.append(i.get_text().strip())\r\n \r\n \r\n # 한글 지원을 위해 앞에 unicode u를 붙혀준다.\r\n return u'\\n'.join(res)\r\n\r\n# 이벤트 핸들하는 함수\r\ndef _event_handler(event_type, slack_event):\r\n print(slack_event[\"event\"])\r\n\r\n if event_type == \"app_mention\":\r\n channel = slack_event[\"event\"][\"channel\"]\r\n text = slack_event[\"event\"][\"text\"]\r\n\r\n keywords = _crawl_naver_keywords(text)\r\n sc.api_call(\r\n \"chat.postMessage\",\r\n channel=channel,\r\n text=keywords\r\n )\r\n\r\n return make_response(\"App mention message has been sent\", 200,)\r\n\r\n # ============= Event Type Not Found! ============= #\r\n # If the event_type does not have a handler\r\n message = \"You have not added an event handler for the %s\" % event_type\r\n # Return a helpful error message\r\n return make_response(message, 200, {\"X-Slack-No-Retry\": 1})\r\n\r\n@app.route(\"/listening\", methods=[\"GET\", \"POST\"])\r\ndef hears():\r\n slack_event = json.loads(request.data)\r\n\r\n if \"challenge\" in slack_event:\r\n return make_response(slack_event[\"challenge\"], 200, {\"content_type\":\r\n \"application/json\"\r\n })\r\n\r\n if slack_verification != slack_event.get(\"token\"):\r\n message = \"Invalid Slack verification token: %s\" % (slack_event[\"token\"])\r\n make_response(message, 403, {\"X-Slack-No-Retry\": 1})\r\n\r\n if \"event\" in slack_event:\r\n event_type = slack_event[\"event\"][\"type\"]\r\n return _event_handler(event_type, slack_event)\r\n\r\n # If our bot hears things that are not events we've subscribed to,\r\n # send a quirky but helpful error response\r\n return make_response(\"[NO EVENT IN SLACK REQUEST] These are not the droids\\\r\n you're looking for.\", 404, {\"X-Slack-No-Retry\": 1})\r\n\r\n@app.route(\"/\", methods=[\"GET\"])\r\ndef index():\r\n return \"
    Server is ready.
    \"\r\n\r\nif __name__ == '__main__':\r\n app.run('0.0.0.0', port=8080)","sub_path":"term.py","file_name":"term.py","file_ext":"py","file_size_in_byte":3471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"164875434","text":"# https://github.com/python-engineer/pytorchTutorial\n\n## from youtube\n# https://www.youtube.com/watch?v=EMXfZB8FVUA&list=PLqnslRFeH2UrcDBWF5mfPGpqQDSta6VK4\n# default type for python is float64, but float32 for pytorch and tensorflow\nimport torch\n\nx = torch.rand(2, 2)\ny = torch.rand(2, 2)\ny.add_(x) # function with \"_\" means in place manipulation\nz = x * y\nprint(x[:, 0])\nprint(x[0, 0].item())\nx = torch.rand(4, 4)\n# reshape\ny = x.view(16)\na = torch.ones(5)\nb = a.numpy()\na = torch.from_numpy(b)\n\nx = torch.randn(3, requires_grad=True)\ny = x + 2\n\nz = y * y * 2\nz = z.mean() # scalar\nz.backward()\nprint(x.grad)\nx.requires_grad_(False)\nx.detach()\nwith torch.no_grad():\n y = x + 2\n print(y)\n","sub_path":"src/torch/pytorch_tutorial.py","file_name":"pytorch_tutorial.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"97939764","text":"import matplotlib.pyplot as plt\nimport torch\nimport numpy as np\nimport scipy.stats\nimport seaborn as sns\n\n# sns.set()\n\noption_sac = torch.load('option_sac_cartpole100Hz_experiments.pkl')\nsac = torch.load('sac_cartpole100Hz_experiments.pkl')\n\nos_means = np.zeros((len(option_sac[0]['J_results'])))\ns_means = np.zeros((len(sac[0]['J_results'])))\nos_confidence = np.zeros((len(option_sac[0]['J_results'])))\ns_confidence = np.zeros((len(sac[0]['J_results'])))\n\nfor j in range(os_means.shape[0]):\n os_tmp = np.array([option_sac[i]['J_results'][j]['J_mean'] for i in range(len(option_sac))])\n s_tmp = np.array([sac[i]['J_results'][j]['J_mean'] for i in range(len(option_sac))])\n os_means[j] = np.mean(os_tmp)\n s_means[j] = np.mean(s_tmp)\n os_confidence[j] = scipy.stats.sem(os_tmp) * scipy.stats.t.ppf((1 + 0.95) / 2., os_tmp.shape[0] - 1)\n s_confidence[j] = scipy.stats.sem(s_tmp) * scipy.stats.t.ppf((1 + 0.95) / 2., s_tmp.shape[0] - 1)\n\nfig, ax = plt.subplots()\nax.grid(alpha=0.5, linestyle='-')\nax.plot(os_means, color='green', label='rARHMM SAC')\nax.fill_between(np.arange(os_means.shape[0]), os_means - os_confidence, os_means + os_confidence, alpha=0.2, color='green',\n )\n\nax.plot(s_means, color='orange', label='SAC')\nax.fill_between(np.arange(s_means.shape[0]), s_means - s_confidence, s_means + s_confidence, alpha=0.2, color='orange',\n )\n\nax.set_title(\"Cartpole100Hz - evaluation\")\nax.legend()\nax.set_xlabel('Episode')\nax.set_ylabel('Expected return')\n\nplt.show()\n","sub_path":"results/evaluate_cartpole_results.py","file_name":"evaluate_cartpole_results.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"255858180","text":"#!/usr/bin/python3\n\nimport sys\nimport os\nimport datetime\nimport shutil\n\nnumargs = len(sys.argv) - 1\nprint(\"numargs = %d\" % (numargs))\nif numargs != 3:\n print(\"Useage: %s [start(YYYYMMDDHHMM)] [end(YYYYMMDDHHMM)] [sector(1 or 2)]\" % (sys.argv[0]))\n exit()\nelse:\n print(\"You are good to go\")\n \nstart = sys.argv[1]\nstartObj = datetime.datetime.strptime(start,'%Y%m%d%H%M%S')\nstartUnix = int(startObj.strftime(\"%s\"))\nstartDate = startObj.strftime(\"%Y%m%d\")\nstartYear = startObj.strftime(\"%Y\")\nend = 
sys.argv[2]\nendObj = datetime.datetime.strptime(end,'%Y%m%d%H%M%S')\nendUnix = int(endObj.strftime(\"%s\"))\nendDate = endObj.strftime(\"%Y%m%d\")\nsector = sys.argv[3] \nprint(\"startUnix = %s, endUnix = %s, sector = %s\" % (startUnix,endUnix,sector))\n\nbase_in_dir = '/home/disk/data/satellite/GOES/GRB16/ABI/Mesoscale-'+sector\nbase_save_dir = '/home/disk/bob/impacts/goes-meso/Mesoscale-'+sector\n#channels = ['Channel01','Channel13']\nchannels = ['Channel01','Channel02','Channel03','Channel04','Channel05','Channel06','Channel07','Channel08','Channel09','Channel10','Channel11','Channel12','Channel13','Channel14','Channel15','Channel16']\n#channels = ['Channel09','Channel10','Channel11','Channel12','Channel14','Channel15','Channel16']\nprefix = 'OR_ABI-L1b-RadM'\n\nfor ichan in range(0,len(channels)):\n indir = base_in_dir+'/'+channels[ichan]\n outdir = base_save_dir+'/'+channels[ichan]\n if not os.path.isdir(outdir):\n os.makedirs(outdir)\n print (\"ichan = %d and indir = %s and outdir = %s\" % (ichan,indir,outdir))\n dirlist = os.listdir(indir)\n for dir in dirlist:\n if dir.startswith(startYear):\n dirDate = dir\n dirDateObj = datetime.datetime.strptime(dir,'%Y%m%d')\n dirDateUnix = int(dirDateObj.strftime(\"%s\"))\n if dirDate >= startDate and dirDate <= endDate:\n if not os.path.isdir(outdir+'/'+dirDate):\n os.mkdir(outdir+'/'+dirDate)\n os.chdir(indir+'/'+dirDate)\n filelist = os.listdir('.')\n for file in filelist:\n if file.startswith(prefix):\n parts = file.split('_')\n for part in parts:\n if part.startswith('s'):\n fileDateTime = part[1:-1] # format sYYYYJJJHHMMSSs\n fileDateTimeObj = datetime.datetime.strptime(fileDateTime,'%Y%j%H%M%S')\n fileDateTimeUnix = int(fileDateTimeObj.strftime(\"%s\"))\n if fileDateTimeUnix >= startUnix and fileDateTimeUnix <= endUnix:\n shutil.copy(file,outdir+'/'+dirDate)\n \n \n \n","sub_path":"2022_VERSIONS/get_goes_meso_sector.py","file_name":"get_goes_meso_sector.py","file_ext":"py","file_size_in_byte":2773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"147846750","text":"import os\n\ntry: \n with open('c:/work/pyfile.txt', mode='w')as file:\n file.write('파일 이름 수정과 삭제')\n\n os.rename('c:/work/pyfile.txt','파일수정삭제.txt')\n os.remove('./파일수정삭제.txt')\nexcept Exception as e:\n print('예외 발생: ',e)\nelse:\n print('파일 수정 삭제 성공!')\n\ndname = os.getcwd()\nprint('현재 폴더:', dname)\n\nfs = os.listdir(dname)\nfor f in fs:\n if os.path.isfile(f):\n print('\\t파일',f)\n elif os.path.isdir(f):\n print('\\t폴더:',f)","sub_path":"renamefile.py","file_name":"renamefile.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"582905249","text":"import sys\nfrom pathlib import Path\nimport argparse\n\ndata_folder_path = str(Path('.').absolute().parent.parent)+\"/Data/rule-reasoning-dataset-V2020.2.4\"\nparent_folder_path = str(Path('.').absolute().parent)\nexperiment_folder_path = parent_folder_path+\"/ExperimentClass\"\ndata_processing_folder_path = parent_folder_path+\"/DataProcessing\"\nsys.path+=[data_folder_path, parent_folder_path, experiment_folder_path, data_processing_folder_path]\n\nimport random\nfrom transformers import T5Tokenizer, T5ForConditionalGeneration\nfrom torch import optim\nimport torch\nimport os\nimport json\nimport math\n\nimport csv\nimport editdistance\nimport re\nimport time\nimport numpy as np\nimport pickle\n\nfrom T5Vanilla import T5Vanilla\n\nEXP_NUM = sys.argv[1]\nDATA_OPTION = 
sys.argv[2]\nDATA_DEPTH = sys.argv[3]\nTASK_NAME = sys.argv[4]\nTRAIN_AMOUNT = sys.argv[5]\nFACT_BUFFER_SIZE = sys.argv[6]\nRULE_BUFFER_SIZE = sys.argv[7]\n\nCONTROL_NN = sys.argv[8]\nFACT_NN = sys.argv[9]\nRULE_NN = sys.argv[10]\n\ndataset_name = \"depth-5\" if DATA_OPTION == \"0\" else \"birds-electricity\"\n\ndevice = torch.device(\"cuda:0\") if torch.cuda.is_available() else torch.device(\"cpu\")\n\nprint(\"=\"*40+\"\\n\"+\"=\"*40)\nprint(\"device: %s, exp name: %s, dataset name: %s, inference depth: %s. \" % (device, EXP_NUM, dataset_name, DATA_DEPTH))\nprint(\"controller dir:\", \"saved_models/20201118_t5_small/task_\"+TASK_NAME+\"_module_\"+CONTROL_NN+\n \"_amount_\"+TRAIN_AMOUNT+\"_fbs_\"+FACT_BUFFER_SIZE+\"_rbs_\"+RULE_BUFFER_SIZE+\"_seed_0\")\nprint(\"fact nn dir:\", \"saved_models/20201118_t5_small/task_\"+TASK_NAME+\"_module_\"+FACT_NN+\n \"_amount_\"+TRAIN_AMOUNT+\"_fbs_\"+FACT_BUFFER_SIZE+\"_rbs_\"+RULE_BUFFER_SIZE+\"_seed_0\")\nprint(\"rule nn dir:\", \"saved_models/20201118_t5_small/task_\"+TASK_NAME+\"_module_\"+RULE_NN+\n \"_amount_\"+TRAIN_AMOUNT+\"_fbs_\"+FACT_BUFFER_SIZE+\"_rbs_\"+RULE_BUFFER_SIZE+\"_seed_0\")\n\nclass Dataset():\n def __init__(self, fact_buffer_size, rule_buffer_size):\n self.fact_buffer_size = int(fact_buffer_size)\n self.rule_buffer_size = int(rule_buffer_size)\n\n self.instances = self._load_dataset()\n\n def _load_dataset(self):\n data_file_path = data_folder_path+\"/\"+dataset_name+\"/meta-test.jsonl\"\n\n with open(data_file_path, \"r\") as f:\n raw_jsons = list(f)\n\n instances = []\n for raw_json in raw_jsons:\n item = json.loads(raw_json)\n question_tuples = list(item[\"questions\"].items())\n\n n_fact = str(item[\"NFact\"])\n n_rule = str(item[\"NRule\"])\n n_fact_buffer = math.ceil(int(n_fact)/self.fact_buffer_size)\n n_rule_buffer = math.ceil(int(n_rule)/self.rule_buffer_size)\n\n all_facts = [\"fact \"+str(idx+1)+\": \"+triple[1][\"text\"].lower() for idx, triple in enumerate(list(item[\"triples\"].items()))]\n all_rules = [\"rule \"+str(idx+1)+\": \"+rule[1][\"text\"].lower() for idx, rule in enumerate(list(item[\"rules\"].items()))]\n\n fact_buffers = []\n for fact_buffer_index in range(n_fact_buffer):\n fact_buffer_key = \"FACT_BUFFER_\"+str(fact_buffer_index+1)\n fact_buffer_values = \" \".join(all_facts[fact_buffer_index*self.fact_buffer_size:min(len(all_facts), (fact_buffer_index+1)*self.fact_buffer_size)])\n fact_buffers.append((fact_buffer_key, fact_buffer_values))\n\n rule_buffers = []\n for rule_buffer_index in range(n_rule_buffer):\n rule_buffer_key = \"RULE_BUFFER_\"+str(rule_buffer_index+1)\n rule_buffer_values = \" \".join(all_rules[rule_buffer_index*self.rule_buffer_size:min(len(all_rules), (rule_buffer_index+1)*self.rule_buffer_size)])\n rule_buffers.append((rule_buffer_key, rule_buffer_values))\n\n for question_tuple in question_tuples:\n\n #print(int(question_tuple[1][\"QDep\"]))\n if int(question_tuple[1][\"QDep\"]) == int(DATA_DEPTH):\n proof_strategy = question_tuple[1][\"strategy\"]\n proofs = self._get_proofs(question_tuple[1][\"proofs\"])\n\n question_text = question_tuple[1][\"question\"].lower()\n answer_text = question_tuple[1][\"answer\"]\n instance_dict = {\"question\":question_text, \"answer\":answer_text,\n \"n_fact\": n_fact, \"n_rule\": n_rule,\n \"facts_text\": all_facts, \"rules_text\":all_rules,\n \"strategy\": proof_strategy, \"proofs\": proofs}\n\n for buffer_tuple in fact_buffers:\n instance_dict[buffer_tuple[0]] = buffer_tuple[1]\n for buffer_tuple in rule_buffers:\n instance_dict[buffer_tuple[0]] = 
buffer_tuple[1]\n\n instances.append(instance_dict)\n\n return instances\n\n\n def _get_proofs(self, proofs_string):\n cleaned_proof = []\n\n for raw_proof in proofs_string[2:-2].split(\"OR\"):\n while raw_proof[0]==\" \":\n raw_proof = raw_proof[1:]\n while raw_proof[-1]==\" \":\n raw_proof = raw_proof[:-1]\n\n cleaned_proof.append(raw_proof)\n\n return cleaned_proof\n\n\n @staticmethod\n def print_problem(instance):\n print(\"facts:\")\n for fact in instance[\"facts_text\"]:\n print(\"\\t\"+fact)\n print(\"rules:\")\n for rule in instance[\"rules_text\"]:\n print(\"\\t\"+rule)\n print(\"question and answer:\")\n print(\"\\t\"+instance[\"question\"]+\" \"+str(instance[\"answer\"]))\n\nclass NeuralBackwardChainer:\n\n def __init__(self, device, print_inference_steps = False):\n t5Exp = T5Vanilla(0.0001, device)\n\n self.tokenizer = t5Exp.tokenizer\n self.t5_c = torch.load(\"saved_models/20201118_t5_small/task_\"+TASK_NAME+\"_module_\"+CONTROL_NN+\n \"_amount_\"+TRAIN_AMOUNT+\"_fbs_\"+FACT_BUFFER_SIZE+\"_rbs_\"+RULE_BUFFER_SIZE+\"_seed_0\")\n # self.t5_c = torch.load(\"saved_models/20210407_t5_small_train_d5/task_\"+TASK_NAME+\"_module_\"+CONTROL_NN+\n # \"_amount_\"+TRAIN_AMOUNT+\"_fbs_\"+FACT_BUFFER_SIZE+\"_rbs_\"+RULE_BUFFER_SIZE+\"_seed_0\") # d5 means trained on DU5 data\n\n # This is for debugging on alix only:\n #self.t5_c = torch.load(\"saved_models/20201021_t5_small_ruletaker_multitask_type_c\")\n #self.t5_c = torch.load(\"saved_models/20210407_t5_small_train_d5/task_3nn_module_c_amount_70k_fbs_5_rbs_3_seed_0\")\n self.t5_c.to(device)\n\n self.t5_f = torch.load(\"saved_models/20201118_t5_small/task_\"+TASK_NAME+\"_module_\"+FACT_NN+\n \"_amount_\"+TRAIN_AMOUNT+\"_fbs_\"+FACT_BUFFER_SIZE+\"_rbs_\"+RULE_BUFFER_SIZE+\"_seed_0\")\n # self.t5_f = torch.load(\"saved_models/20210407_t5_small_train_d5/task_\"+TASK_NAME+\"_module_\"+FACT_NN+\n # \"_amount_\"+TRAIN_AMOUNT+\"_fbs_\"+FACT_BUFFER_SIZE+\"_rbs_\"+RULE_BUFFER_SIZE+\"_seed_0\")\n\n # This is for debugging on alix only:\n #self.t5_f = torch.load(\"saved_models/20201021_t5_small_ruletaker_multitask_type_f\")\n #self.t5_f = torch.load(\n # \"saved_models/20210407_t5_small_train_d5/task_3nn_module_f_amount_70k_fbs_5_rbs_3_seed_0\")\n self.t5_f.to(device)\n\n self.t5_r = torch.load(\"saved_models/20201118_t5_small/task_\"+TASK_NAME+\"_module_\"+RULE_NN+\n \"_amount_\"+TRAIN_AMOUNT+\"_fbs_\"+FACT_BUFFER_SIZE+\"_rbs_\"+RULE_BUFFER_SIZE+\"_seed_0\")\n # self.t5_r = torch.load(\"saved_models/20210407_t5_small_train_d5/task_\"+TASK_NAME+\"_module_\"+RULE_NN+\n # \"_amount_\"+TRAIN_AMOUNT+\"_fbs_\"+FACT_BUFFER_SIZE+\"_rbs_\"+RULE_BUFFER_SIZE+\"_seed_0\")\n\n # This is for debugging on alix only:\n #self.t5_r = torch.load(\"saved_models/20201111_t5_small_ruletaker_multitask_type_r\")\n #self.t5_r = torch.load(\n # \"saved_models/20210407_t5_small_train_d5/task_3nn_module_r_amount_70k_fbs_5_rbs_3_seed_0\")\n self.t5_r.to(device)\n\n # This learning rate 0.0001 is used in one of the tutorial, but might not be the best choice.\n self.device = device\n self.depth_limit = 25\n\n self.computation_count = 0\n self.computation_limit = 10000\n\n self.print_inference_steps = print_inference_steps\n\n def neural_backward_chaining(self, episodic_buffer, instance):\n\n depth_count = 0\n self.computation_count = 0\n start_time = time.time()\n final_answer, full_proof = self._one_step_inference(episodic_buffer, instance, depth_count+1)\n end_time = time.time()\n\n return final_answer, full_proof, end_time-start_time\n\n def _one_step_inference(self, 
episodic_buffer, instance, depth_count, label_return_option = \"standard\"):\n # We don't need computation limit anymore.\n\n #print(\"depth count:\", depth_count)\n\n operation = self._t5_c_forward(\" \".join(episodic_buffer)+\" \")\n\n if self.print_inference_steps:\n print(\"\\t\"+\"-\"*20)\n print(\"\\tepisodic buffer\", episodic_buffer)\n print(\"\\tgenerated operation:\",operation)\n\n if \"GENERATE_SUBGOALS\" in operation:\n proof_to_return = \"\"\n\n if depth_count\")\n\n if self.print_inference_steps:\n print(\"\\tgenerated subgoal:\"+subgoals_text)\n\n if label_return_option!=\"flip\":\n for or_branch_idx, or_branch in enumerate(subgoals_text.split(\" OR \")):\n and_branch_results = []\n and_branch_proofs = []\n for and_branch in or_branch.split(\" AND \"):\n episodic_buffer_ = [episodic_buffer[0], \"episodic buffer: \"+and_branch]\n branch_result, proof_string = self._one_step_inference(episodic_buffer_, instance, depth_count+1)\n and_branch_results.append(branch_result)\n if \"not\" not in and_branch:\n if branch_result==True:\n and_branch_proofs.append(proof_string)\n else:\n if branch_result==False:\n proof_to_return = proof_string\n else:\n and_branch_proofs.append(\"NAF\")\n\n if branch_result == False:\n break\n\n if False not in and_branch_results:\n if \"according to\" in episodic_buffer[-1]:\n episodic_buffer_of_interest = re.findall(r\"according to.+\", episodic_buffer[-1])[0]\n rules_buffer_parsed = [re.findall(r\"\\d+\", matched_rule_)[0]\n for matched_rule_ in\n episodic_buffer_of_interest.split(\" or \")]\n proof_to_return = \"((\"+ \" \".join(and_branch_proofs) +\") -> rule\"+rules_buffer_parsed[or_branch_idx]+\")\"\n\n return True, proof_to_return\n else:\n if \"facts do not contradict\" in subgoals_text and \\\n \"rules do not contradict\" in subgoals_text:\n return True, \"NAF\"\n else:\n return True, \" \".join(and_branch_proofs)\n\n else:\n # The outer loop handles different matched rules.\n # The inner loop handles the preconditions of each matched rule.\n # Logic: in each inner loop, not all preconditions should be true.\n # Logic: in the outer loop, none of the rule should return true.\n and_out_branch_results = []\n for and_out_branch_idx, and_out_branch in enumerate(subgoals_text.split(\" )AND( \")):\n\n and_branch_proofs = []\n and_branch_results = []\n for and_branch in and_out_branch.split(\" AND \"):\n episodic_buffer_ = [episodic_buffer[0], \"episodic buffer: \" + and_branch]\n branch_result, proof_string = self._one_step_inference(episodic_buffer_, instance, depth_count + 1)\n and_branch_results.append(branch_result)\n if proof_string != \"\":\n # If the result of the branch is true, then the returned proof should be either\n # an actual proof of NAF, and should not be \"\". 
\"\" proof should only appear in false answer.\n and_branch_proofs.append(proof_string)\n\n if branch_result == False:\n and_out_branch_results.append(False)\n break\n\n if False not in and_branch_results:\n if \"according to\" in episodic_buffer[-1]:\n episodic_buffer_of_interest = re.findall(r\"according to.+\", episodic_buffer[-1])[0]\n rules_buffer_parsed = [re.findall(r\"\\d+\", matched_rule_)[0]\n for matched_rule_ in\n episodic_buffer_of_interest.split(\" )and( \")]\n proof_to_return = \"((\"+ \" \".join(and_branch_proofs) +\") -> rule\"+rules_buffer_parsed[and_out_branch_idx]+\")\"\n\n return False, proof_to_return\n else:\n # This condition should never be reached.\n return False, \" \".join(and_branch_proofs) if len(and_branch_proofs)>0 else \"\"\n\n return True, \"NAF\"\n\n # This is to handle the situation of hitting the depth limit.\n if label_return_option==\"standard\":\n if \"not\" not in episodic_buffer[-1]:\n return False, \"\"\n else:\n return False, proof_to_return\n else:\n return True, \"\"\n\n else:\n try: # This is to handle the situation where the operations are not successfully generated.\n operators = operation.split(\" THEN \")\n episodic_buffer_ = episodic_buffer\n buffer_key = operators[0].split(\"GET\")[-1][2:-2]\n if \"GET\" in operators[0] and \"FACT_BUFFER\" in operators[0]:\n episodic_buffer_ = episodic_buffer +[instance[buffer_key]]\n if \"GET\" in operators[0] and \"RULE_BUFFER\" in operators[0]:\n episodic_buffer_ = episodic_buffer+ [instance[buffer_key]]\n if \"RUN\" in operators[1]:\n\n if \"FACT\" in buffer_key:\n answer = self._t5_f_forward(\" \".join(episodic_buffer_+[\"operator: RUN \"])+\" \")\n else:\n answer = self._t5_r_forward(\" \".join(episodic_buffer_ + [\"operator: RUN \"]) + \" \")\n\n if self.print_inference_steps:\n print(\"\\tbuffer input:\"+\" \".join(episodic_buffer_+[\"operator: RUN \"])+\" \")\n print(\"\\tgenerated answer:\"+answer)\n\n if \"true\" in answer:\n if bool(re.findall(r\"confirmed\", answer)):\n text_to_return = \"triple\"+re.findall(r\"\\d+\", answer)[0]\n return True, text_to_return\n else:\n return True, \"\"\n elif \"false\" in answer:\n if bool(re.findall(r\"contradicted\", answer)):\n text_to_return = \"triple\" + re.findall(r\"\\d+\", answer)[0]\n return False, text_to_return\n else:\n return False, \"\"\n else:\n if depth_count 0.50: #aprovacao\n\t\t\tdna_approvedSequences.append(sequence)\n\t\tseqId += 1\n\t\n\t#print(\"#Approved :\",len(dna_approvedSequences))\n\treturn dna_approvedSequences\n\t\t\ndef consensusMotif(positionCountMatrix):\n\tconsensus = []\n\ti = 0\n\n\twhile i < len(positionCountMatrix[0]):\n\t\tw_consensus = [] #para evitar bias para determinada base\n\t\twindow = [positionCountMatrix[0][i],positionCountMatrix[1][i],positionCountMatrix[2][i],positionCountMatrix[3][i]]\n\t\tif window[0] >= window[1] and window[0] >= window[2] and window[0] >= window[3]:\n\t\t\tw_consensus.append('A')\n\t\tif window[1] >= window[0] and window[1] >= window[2] and window[1] >= window[3]:\n\t\t\tw_consensus.append('C')\n\t\tif window[2] >= window[0] and window[2] >= window[1] and window[2] >= window[3]:\n\t\t\tw_consensus.append('G')\n\t\tif window[3] >= window[0] and window[3] >= window[1] and window[3] >= window[2]:\n\t\t\tw_consensus.append('T')\n\t\t\n\t\tconsensus.append(random.choice(w_consensus)) # um dos empatantes eh escolhido com probabilidades equivalentes\n\t\t\n\t\t#print (consensus[i], end = '')\n\t\ti += 1\n\t#print('\\n')\t\t\n\treturn consensus\n\"\"\"\ndef printDNAMatrix(DNAMatrix):\n\tnames 
= ['A','C','G','T']\n\ti = 0\n\tfor baseVector in DNAMatrix:\n\t\tprint(\"#\",names[i],\":\",end=\" \")\n\t\tfor base in baseVector:\n\t\t\tprint(format(round(base,3),'.2f'), end=\" \")\n\t\tprint('\\n')\n\t\ti += 1\n\treturn\n\"\"\"\n\ndef positionCountMatrix(dna_subsequences): \n #col_1 col_2 ... col_n\n #Linha A\n #Linha C\n #Linha G\n #Linha T\n #o tamanho de todas as sequencias eh igual\n sequenceSize = len(dna_subsequences[0])\n #print(dna_subsequences)\n positionCountMatrix = np.zeros([4,sequenceSize],dtype=int)\n #print(len(dna_subsequences))\n for j in range(len(dna_subsequences)):\n sequence = dna_subsequences[j]\n for i in range(len(sequence)):\n if sequence[i] == 'A':\n positionCountMatrix[0][i] += 1\n elif sequence[i] == 'C':\n positionCountMatrix[1][i] += 1\n elif sequence[i] == 'G':\n positionCountMatrix[2][i] += 1\n elif sequence[i] == 'T':\n positionCountMatrix[3][i] += 1\n \n #print ('PCM')\n #printDNAMatrix(positionCountMatrix)\n return positionCountMatrix\n\ndef evaluator(vector):\n\t\"\"\"\n\tA n-dimensional Rastrigin's function is defined as:\n\n\t\t\t\t\t\t\tn\n\t\t\tf(x) = 10*n + Sigma { x_i^2 - 10*cos(2*PI*x_i) }\n\t\t\t\t\t\t i=1\n\n\twhere -5.12 <= x_i <= 5.12.\n\n\tThus the global minima of the function being f(x) = 0 at all x_i = 0.\n\n\t\"\"\"\n\n\tvector = np.array(vector)\n\n\treturn 10 * vector.size + sum(vector*vector - 10 * np.cos(2 * np.pi * vector))\n\ndef sequenceFromSolution(dna_sequences, solutionVector):\n dna_solutionInstance = []\n dna_subSequences = []\n sequenceSize = len(dna_sequences[0])\n lowerLimit = 7\n higherLimit = 64\n if sequenceSize < higherLimit: #ajusta o tamanho da sequencia para um menor valor\n higherLimit = sequenceSize\n if sequenceSize >= lowerLimit: #uma sequencia nao eh considerada como motivo se for menor que 7\n motifSize = solutionVector[0] #tamanho do motivo\n i = 1\n #motifSize = 7 #tamanho do motivo\n #print(\"candidate size = \",motifSize)\n for sequence in dna_sequences:\n motifStart = solutionVector[i] #-1 pois o vetor comeca no 0\n #print(\"start = \",motifStart)\n subSequence = sequence[motifStart:motifStart+motifSize]\n dna_subSequences.append(subSequence)\n #print(subSequence)\n i += 1\n else:\n print(\"Sequencias de tamanho insuficiente:\",sequenceSize,\"<\",lowerLimit)\n return dna_subSequences\ndef readFasta(filePath):\n\t\n\tdna_sequences = []\n\tfasta_sequences = SeqIO.parse(open(filePath),'fasta')\n\ti = 0\n\tfor fasta in fasta_sequences:\n\t\tname,sequence = fasta.id,str(fasta.seq)\n\t\tdna_sequences.append(sequence)\n\t\t#print(\">Seq\",i,dna_sequences[len(dna_sequences)-1])\n\t\ti += 1\n\treturn dna_sequences\n\t\ndef randomSubSequences(dna_sequences):\n dna_sequences = dna_sequences\n dna_solutionInstance = []\n dna_subSequences = []\n sequenceSize = len(dna_sequences[0])\n lowerLimit = 7\n higherLimit = 64\n if sequenceSize < higherLimit: #ajusta o tamanho da sequencia para um menor valor\n higherLimit = sequenceSize\n\n if sequenceSize >= lowerLimit: #uma sequencia nao eh considerada como motivo se for menor que 7\n motifSize = random.randint(lowerLimit,higherLimit) #tamanho do motivo\n #motifSize = 7 #tamanho do motivo\n #print(\"candidate size = \",motifSize)\n dna_solutionInstance.append(motifSize)\n\n for sequence in dna_sequences:\n motifStart = random.randint(0,sequenceSize-motifSize) #-1 pois o vetor comeca no 0\n #print(\"start = \",motifStart)\n subSequence = sequence[motifStart:motifStart+motifSize]\n dna_subSequences.append(subSequence)\n #print(subSequence)\n 
dna_solutionInstance.append(motifStart)\n #print(\"Sequencias de tamanho insuficiente:\",sequenceSize,\"<\",lowerLimit)\n\n returnValues = []\n returnValues.append(dna_solutionInstance)\n returnValues.append(dna_subSequences)\n return returnValues\n\ndef printDNAMatrix(DNAMatrix):\n names = ['A','C','G','T']\n i = 0\n for baseVector in DNAMatrix:\n print(\"#\",names[i],\":\",end=\" \")\n for base in baseVector:\n print(base, end=\" \")\n #print(format(round(base,3),'.2f'), end=\" \")\n print('\\n')\n i += 1\n return\ndef positionFrequencyMatrix(positionCountMatrix): #calcula as frequencias\n i = 0\n sumWindow = 0.0\n window = []\n sequenceSize = len(positionCountMatrix[0])\n positionFrequencyMatrix = np.zeros([4,sequenceSize]) #cria uma matriz de floats\n for i in range(sequenceSize):\n window = [positionCountMatrix[0][i],positionCountMatrix[1][i],positionCountMatrix[2][i],positionCountMatrix[3][i]]\n sumWindow = window[0] + window[1] + window[2] + window[3] #somatorio de nucleotideos na posicao i\n positionFrequencyMatrix[0][i] = float(window[0]/sumWindow)\n positionFrequencyMatrix[1][i] = float(window[1]/sumWindow)\n positionFrequencyMatrix[2][i] = float(window[2]/sumWindow)\n positionFrequencyMatrix[3][i] = float(window[3]/sumWindow)\n #print ('PFM')\n #printDNAMatrix(positionFrequencyMatrix)\n return positionFrequencyMatrix\n\ndef similarity(positionFrequencyMatrix):\n\ti = 0 \n\tsequenceSize = len(positionFrequencyMatrix[0])\n\tmaxSum = 0.0\n\twhile i < sequenceSize:\n\t\twindow = [positionFrequencyMatrix[0][i],positionFrequencyMatrix[1][i],positionFrequencyMatrix[2][i],positionFrequencyMatrix[3][i]]\n\t\tmaxSum += max(window)\n\t\ti += 1\n\n\tsimilarity = maxSum/sequenceSize\n\treturn similarity\n\ndef complexity(motif):\n\tmotifSize = len(motif)\n\n\tnumA = 0\n\tnumC = 0\n\tnumG = 0\n\tnumT = 0\n\tmotifSizeFactorial = treefactorial(motifSize)\n\ti = 0\n\twhile i < motifSize: #conta a quantidade de cada base\n\t\tif motif[i] == 'A':\n\t\t\tnumA += 1\n\t\t\t\t\n\t\telif motif[i] == 'C':\n\t\t\tnumC += 1\n\n\t\telif motif[i] == 'G':\n\t\t\tnumG += 1\n\n\t\telif motif[i] == 'T':\n\t\t\tnumT += 1\n\t\ti += 1\n\n\tproductBasesFactorial = treefactorial(numA)*treefactorial(numC)*treefactorial(numG)*treefactorial(numT)\n\t\t\t\t\t\t\t#produtorio do fatorial do numero de cada base\n\n\tcomplexity = math.log(motifSizeFactorial/productBasesFactorial,4) #logaritmo base 4 (numero de bases) \n\treturn complexity\n\ndef isBiased(support,totalSequences):\n\tif totalSequences <= 4 :#minimo dado pelos autores (Alvarez)\n\t\tif support >= 2 :\n\t\t\tisBiased = False\n\t\telse :\n\t\t\tisBiased = True\n\telse :\n\t\tif support >=3 :\n\t\t\tisBiased = False\n\t\telse :\n\t\t\tisBiased = True\n\n\treturn isBiased\n\t\n\n\t\n\n\n\n# ---- BEE CLASS\nclass CandidateMotif(object):\n # representacao do motivo candidato\n def __init__(self,motif,solution,similarity,complexity,support,consensus):\n self.motif = motif\n self.solution = solution\n self.similarity = similarity\n self.support = support\n self.complexity = complexity\n self.consensus = consensus\n\n def _printSolution(self,sequence):\n finalSubSequences = sequenceFromSolution(sequence,self.solution) \n dna_approvedSequences = thresholdConsensus(finalSubSequences,self.consensus)\n \n negSolution = self._negativateSolution(sequence)\n \n \n print(\"=========================\")\n print(\"Final motif:\",end = \"\")\n for base in self.motif:\n print(base, end = \"\")\n print(\"\")\n \"\"\"\n print(self.solution)\n print(negSolution)\n \"\"\"\n i = 1\n 
while i < len(self.solution):\n currSeq = sequence[i-1]\n substring = currSeq[self.solution[i]:self.solution[i]+self.solution[0]]\n for approved in dna_approvedSequences: \n #print(approved,\"?\")\n if approved == substring:\n #print(approved)\n print(i-1,\",\",negSolution[i],\",\",substring, sep='') #[self.solution[i]:self.solution[i]+self.solution[0]]\n #print(i-1,\",\",negSolution[i],\",\",currSeq[negSolution[i]:negSolution[i]+self.solution[0]-1], sep='') #[self.solution[i]:self.solution[i]+self.solution[0]]\n #print(negSolution[i],\":\",negSolution[i]+self.solution[0]-1)\n i += 1\n print(\"Support\", self.support)\n print(\"Similarity\",self.similarity)\n print(\"Complexity\",self.complexity)\n print(\"=========================\")\n \n def _negativateSolution(self,sequence):\n from copy import deepcopy\n negSolution = deepcopy(self.solution)\n seqLen = len(sequence[0])\n i = 1\n while i < len(self.solution):\n negSolution[i]-=seqLen\n i += 1\n \n return negSolution \n\nclass Bee(object):\n \"\"\" Creates a bee object. \"\"\"\n\n def __init__(self, lower, upper, dna_sequences,obj, funcon=None):\n \"\"\"\n\n Instantiates a bee object randomly.\n\n Parameters:\n ----------\n :param list lower : lower bound of solution vector\n :param list upper : upper bound of solution vector\n :param def fun : evaluation function\n :param def funcon : constraints function, must return a boolean\n\n \"\"\"\n self.obj = obj\n self.dna_sequences = dna_sequences\n #print(dna_sequences)\n biased = True #enquanto nao houver solucao valida, instancia uma nova\n resultVector = []\n solution = -1\n self.dna_subSequence = []\n consensus = []\n pcm = []\n motifSupport = []\n dna_approvedSequences = []\n\n while biased == True:\n resultVector = randomSubSequences(self.dna_sequences)\n solution = resultVector[0]\n self.dna_subSequences = resultVector[1]\n pcm = positionCountMatrix(self.dna_subSequences)\n #print(self.dna_subSequences)\n #printDNAMatrix(pcm)\n consensus = consensusMotif(pcm)\n \n dna_approvedSequences = thresholdConsensus(self.dna_subSequences,consensus)\n \n motifSupport= len(dna_approvedSequences)\n biased = isBiased(motifSupport,len(self.dna_subSequences))\n \n finalPcm = positionCountMatrix(dna_approvedSequences)\n finalPfm = positionFrequencyMatrix(finalPcm)\n finalMotif = consensusMotif(finalPcm)\n motifSimilarity = similarity(finalPfm)\n motifComplexity = complexity(finalMotif)\n \n self.solutionVector = solution\t \n self.candidate = CandidateMotif(finalMotif,solution,motifSimilarity,motifComplexity,motifSupport,consensus)\n self.vector = solution\n self.valid = biased\n #self.candidate._printSolution()\n\n # creates a random solution vector\n #self._random(lower, upper)\n \"\"\"\n # checks if the problem constraint(s) are satisfied\n if not funcon:\n self.valid = True\n else:\n self.valid = funcon(self.vector)\n\n # computes fitness of solution vector\n if (fun != None):\n self.value = fun(self.vector)\n else:\n self.value = sys.float_info.max\n \n \"\"\"\n if(self.obj == 'similarity'):\n self.value = self.candidate.similarity\n elif(self.obj == 'support'):\n self.value = self.candidate.support\n elif(self.obj == 'complexity'):\n self.value = self.candidate.complexity\n \n self._fitness()\n # initialises trial limit counter - i.e. abandonment counter\n \n self.counter = 0\n \n\n \n\n\n def _random(self, lower, upper):\n \"\"\" Initialises a solution vector randomly. 
\"\"\"\n\n self.vector = []\n for i in range(len(lower)):\n self.vector.append( lower[i] + random.random() * (upper[i] - lower[i]) )\n\n def _fitness(self):\n \"\"\"\n\n Evaluates the fitness of a solution vector.\n\n The fitness is a measure of the quality of a solution.\n\n \"\"\"\n \"\"\"\n if (self.value >= 0):\n self.fitness = 1 / (1 + self.value)\n else:\n self.fitness = 1 + abs(self.value)\n \"\"\"\n self.fitness = self.value\nclass BeeHive(object):\n \"\"\"\n\n Creates an Artificial Bee Colony (ABC) algorithm.\n\n The population of the hive is composed of three distinct types\n of individuals:\n\n 1. \"employees\",\n 2. \"onlookers\",\n 3. \"scouts\".\n\n The employed bees and onlooker bees exploit the nectar\n sources around the hive - i.e. exploitation phase - while the\n scouts explore the solution domain - i.e. exploration phase.\n\n The number of nectar sources around the hive is equal to\n the number of actively employed bees and the number of employees\n is equal to the number of onlooker bees.\n\n \"\"\"\n\n def run(self):\n \"\"\" Runs an Artificial Bee Colony (ABC) algorithm. \"\"\"\n\n cost = {}; cost[\"best\"] = []; cost[\"mean\"] = []\n \n for itr in range(self.max_itrs):\n\n # employees phase\n for index in range(self.size):\n self.send_employee(index)\n \n # onlookers phase\n self.send_onlookers()\n\n # scouts phase\n self.send_scout()\n\n # computes best path\n self.find_best()\n\n # stores convergence information\n cost[\"best\"].append( self.best )\n cost[\"mean\"].append( sum( [ bee.value for bee in self.population ] ) / self.size )\n\n # prints out information about computation\n if self.verbose:\n self._verbose(itr, cost)\n #print(\"\\n\")\n #print(\"****BEST SOLUTION****\")\n #self.bestSolution._printSolution()\n\n #sequenceFromSolution(self.dna_sequences,self.bestSolution.solution)\n \n #for best in cost[\"best\"]:\n #print(best)\n return cost\n\n def __init__(self ,\n lower, upper, dna_sequences,\n numb_bees, max_itrs, max_trials,obj,\n fun = None ,\n selfun = None ,\n seed = None ,\n verbose = False ,\n extra_params = None ,):\n \"\"\"\n\n Instantiates a bee hive object.\n\n 1. 
INITIALISATION PHASE.\n -----------------------\n\n The initial population of bees should cover the entire search space as\n much as possible by randomizing individuals within the search\n space constrained by the prescribed lower and upper bounds.\n\n Parameters:\n ----------\n\n :param list lower : lower bound of solution vector\n :param list upper : upper bound of solution vector\n :param def fun : evaluation function of the optimal problem\n :param def numb_bees : number of active bees within the hive\n :param int max_trials : max number of trials without any improvment\n :param def selfun : custom selection function\n :param int seed : seed of random number generator\n :param boolean verbose : makes computation verbose\n :param dict extra_params : optional extra arguments for selection function selfun\n\n \"\"\"\n\n # checks input\n assert (len(upper) == len(lower)), \"'lower' and 'upper' must be a list of the same length.\"\n\n \"\"\"# generates a seed for the random number generator\n if (seed == None):\n self.seed = random.randint(0, 1000)\n else:\n self.seed = seed\n random.seed(self.seed)\n \"\"\"\n self.obj = obj\n self.dna_sequences = dna_sequences\n #print(\"AAA\",len(self.dna_sequences))\n \n #if(len(self.dna_sequences)<3):\n #\n # computes the number of employees\n self.size = int((numb_bees + numb_bees % 2))\n\n # assigns properties of algorithm\n self.dim = len(dna_sequences)+1 \n self.max_itrs = max_itrs\n #if (max_trials == None):\n # self.max_trials = 0.6 * self.size * self.dim\n #else:\n # self.max_trials = max_trials\n self.max_trials = max_trials\n self.selfun = selfun\n self.extra_params = extra_params\n\n # assigns properties of the optimisation problem\n self.evaluate = fun\n self.lower = lower\n self.upper = upper\n\n # initialises current best and its a solution vector\n self.best = 0\n self.solution = None\n\n # creates a bee hive\n \n #sequenceSize-motifSize !!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n \n self.population = [ Bee(lower, upper, self.dna_sequences,self.obj) for i in range(self.size) ]\n\n # initialises best solution vector to food nectar\n self.find_best()\n\n # computes selection probability\n self.compute_probability()\n\n # verbosity of computation\n self.verbose = verbose\n\n def find_best(self):\n \"\"\" Finds current best bee candidate. \"\"\"\n\n values = [ bee.value for bee in self.population ]\n index = values.index(max(values)) #Maximize\n if (values[index] > self.best):\n self.best = values[index]\n self.bestSolution = self.population[index].candidate\n\n def compute_probability(self):\n \"\"\"\n\n Computes the relative chance that a given solution vector is\n chosen by an onlooker bee after the Waggle dance ceremony when\n employed bees are back within the hive.\n\n \"\"\"\n\n # retrieves fitness of bees within the hive\n values = [bee.fitness for bee in self.population]\n max_values = max(values)\n\n # computes probalities the way Karaboga does in his classic ABC implementation\n if (self.selfun == None):\n self.probas = [0.9 * v / max_values + 0.1 for v in values]\n else:\n if (self.extra_params != None):\n self.probas = self.selfun(list(values), **self.extra_params)\n else:\n self.probas = self.selfun(values)\n\n # returns intervals of probabilities\n return [sum(self.probas[:i+1]) for i in range(self.size)]\n\n def send_employee(self, index):\n \"\"\"\n\n 2. 
SEND EMPLOYED BEES PHASE.\n        ---------------------------\n\n        During this 2nd phase, new candidate solutions are produced for\n        each employed bee by cross-over and mutation of the employees.\n\n        If the modified vector of the mutant bee solution is better than\n        that of the original bee, the new vector is assigned to the bee.\n\n        \"\"\"\n\n        # deepcopies current bee solution vector\n        zombee = copy.deepcopy(self.population[index])\n\n        # draws a dimension to be crossed-over and mutated\n        d = random.randint(0, self.dim-1)\n\n        # selects another bee\n        bee_ix = index;\n        while (bee_ix == index): bee_ix = random.randint(0, self.size-1)\n\n        # produces a mutant based on current bee and bee's friend\n        zombee.vector[d] = self._mutate(d, index, bee_ix)\n\n        # computes fitness of mutant\n        if(zombee.obj == 'similarity'):\n            zombee.value = zombee.candidate.similarity\n        elif(zombee.obj == 'support'):\n            zombee.value = zombee.candidate.support\n        elif(zombee.obj == 'complexity'):\n            zombee.value = zombee.candidate.complexity\n        zombee._fitness()\n\n        # deterministic crowding\n        if (zombee.fitness > self.population[index].fitness):\n            self.population[index] = copy.deepcopy(zombee)\n            self.population[index].counter = 0\n        else:\n            self.population[index].counter += 1\n\n    def send_onlookers(self):\n        \"\"\"\n\n        3. SEND ONLOOKERS PHASE.\n        -----------------------\n\n        We define as many onlooker bees as there are employed bees in\n        the hive since onlooker bees will attempt to locally improve the\n        solution path of the employed bee they have decided to follow\n        after the waggle dance phase.\n\n        If they improve it, they will communicate their findings to the bee\n        they initially watched \"waggle dancing\".\n\n        \"\"\"\n\n        # sends onlookers\n        numb_onlookers = 0; beta = 0\n        while (numb_onlookers < self.size):\n\n            # draws a random number from U[0,1]\n            phi = random.random()\n\n            # increments roulette wheel parameter beta\n            beta += phi * max(self.probas)\n            beta %= max(self.probas)\n\n            # selects a new onlooker based on waggle dance\n            index = self.select(beta)\n\n            # sends new onlooker\n            self.send_employee(index)\n\n            # increments number of onlookers\n            numb_onlookers += 1\n\n    def select(self, beta):\n        \"\"\"\n\n        4. WAGGLE DANCE PHASE.\n        ---------------------\n\n        During this 4th phase, onlooker bees are recruited using a roulette\n        wheel selection.\n\n        This phase represents the \"waggle dance\" of honey bees (i.e. figure-\n        eight dance). By performing this dance, successful foragers\n        (i.e. \"employed\" bees) can share, with other members of the\n        colony, information about the direction and distance to patches of\n        flowers yielding nectar and pollen, to water sources, or to new\n        nest-site locations.\n\n        During the recruitment, the bee colony is re-sampled in order to mostly\n        keep, within the hive, the solution vector of employed bees that have a\n        good fitness as well as a small number of bees with lower fitnesses to\n        enforce diversity.\n\n        Parameter(s):\n        ------------\n            :param float beta : \"roulette wheel selection\" parameter - i.e. 0 <= beta <= max(probas)\n\n        \"\"\"\n\n        # computes probability intervals \"online\" - i.e. re-computed after each onlooker\n        probas = self.compute_probability()\n\n        # selects a new potential \"onlooker\" bee\n        for index in range(self.size):\n            if (beta < probas[index]):\n                return index\n\n    def send_scout(self):\n        \"\"\"\n\n        5. 
SEND SCOUT BEE PHASE.\n -----------------------\n\n Identifies bees whose abandonment counts exceed preset trials limit,\n abandons it and creates a new random bee to explore new random area\n of the domain space.\n\n In real life, after the depletion of a food nectar source, a bee moves\n on to other food sources.\n\n By this means, the employed bee which cannot improve their solution\n until the abandonment counter reaches the limit of trials becomes a\n scout bee. Therefore, scout bees in ABC algorithm prevent stagnation\n of employed bee population.\n\n Intuitively, this method provides an easy means to overcome any local\n optima within which a bee may have been trapped.\n\n \"\"\"\n\n # retrieves the number of trials for all bees\n trials = [ self.population[i].counter for i in range(self.size) ]\n\n # identifies the bee with the greatest number of trials\n index = trials.index(max(trials))\n\n # checks if its number of trials exceeds the pre-set maximum number of trials\n if (trials[index] > self.max_trials):\n\n # creates a new scout bee randomly\n self.population[index] = Bee(self.lower, self.upper, self.dna_sequences,self.obj)\n\n # sends scout bee to exploit its solution vector\n self.send_employee(index)\n\n def _mutate(self, dim, current_bee, other_bee):\n \"\"\"\n\n Mutates a given solution vector - i.e. for continuous\n real-values.\n\n Parameters:\n ----------\n\n :param int dim : vector's dimension to be mutated\n :param int current_bee : index of current bee\n :param int other_bee : index of another bee to cross-over\n\n \"\"\"\n return self.population[current_bee].vector[dim] + \\\n random.choice([-1,1]) * \\\n (self.population[current_bee].vector[dim] - self.population[other_bee].vector[dim])\n\n\n def _verbose(self, itr, cost):\n \"\"\" Displays information about computation. 
\"\"\"\n\n msg = \"# Iter = {} | Best Evaluation Value = {} | Mean Evaluation Value = {} \"\n print(msg.format(int(itr), cost[\"best\"][itr], cost[\"mean\"][itr]))\n\n# ---- END\n","sub_path":"Hive/HiveMotif.py","file_name":"HiveMotif.py","file_ext":"py","file_size_in_byte":27439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"256782509","text":"from util.logger import Logger\nfrom util.utils import Utils, Region\n\n\nclass EnhancementModule(object):\n\n def __init__(self, config, stats):\n \"\"\"Initializes the Enhancement module.\n\n Args:\n config (Config): ALAuto Config instance\n stats (Stats): ALAuto stats instance\n \"\"\"\n self.config = config\n self.stats = stats\n self.last_enhance = 0\n self.region = {\n 'button_favorite': Region(1014, 19, 170, 42),\n 'button_go_back': Region(54, 57, 67, 67),\n 'dock_tab': Region(297, 1015, 155, 40),\n 'first_favorite_ship': Region(209, 209, 80, 120),\n 'fill_button': Region(1467, 917, 140, 38),\n 'enhance_tab_normal_ship': Region(31, 188, 91, 91),\n 'enhance_tab_retro_ship': Region(31, 329, 91, 91),\n 'enhance_orange_button': Region(1705, 916, 167, 40),\n 'confirm_selected_equipment_button': Region(1320, 785, 232, 62),\n 'disassemble_button': Region(1099, 827, 225, 58),\n 'tap_to_continue': Region(661, 840, 598, 203)\n }\n\n def enhancement_logic_wrapper(self):\n \"\"\"Method that fires off the necessary child methods that encapsulates\n the entire action of enhancing a ship\n \"\"\"\n if self.need_to_enhance:\n self.last_enhance = self.stats.combat_done\n Logger.log_msg(\"Opening dock to enhance ship.\")\n\n while True:\n Utils.update_screen()\n\n if Utils.find(\"menu/button_battle\"):\n Utils.touch_randomly(self.region['dock_tab'])\n Utils.script_sleep(1)\n continue\n if Utils.find(\"enhancement/button_favorite\", 0.99):\n self.enhance_ship()\n Utils.touch_randomly(self.region['button_favorite'])\n Utils.touch_randomly(self.region['button_go_back'])\n return\n if Utils.find(\"menu/dock\"):\n Utils.touch_randomly(self.region['button_favorite'])\n continue\n else:\n Utils.touch_randomly(self.region['button_go_back'])\n Utils.script_sleep(2)\n\n def enhance_ship(self):\n \"\"\"\n Method that selects the first (leftmost of the first row) favorite ship and proceeds to enhance her.\n \"\"\"\n\n #selects ship\n Utils.touch_randomly(self.region['first_favorite_ship'])\n Utils.script_sleep(1)\n\n while True:\n Utils.update_screen()\n\n if Utils.find(\"enhancement/menu_enhance\"):\n Logger.log_debug(\"Filling with ships.\")\n #taps the \"fill\" button\n Utils.touch_randomly(self.region['fill_button'])\n Utils.update_screen()\n if Utils.find(\"enhancement/alert_no_items\", 0.85):\n Logger.log_warning(\"Not enough ships to enhance.\")\n break\n if Utils.find(\"enhancement/menu_level\", 0.8):\n self.handle_retirement()\n Logger.log_msg(\"Successfully finished enhancing.\")\n break\n if Utils.find(\"enhancement/menu_details\"):\n Logger.log_debug(\"Opening enhance menu.\")\n if not Utils.find(\"enhancement/menu_retrofit\", 0.9):\n Utils.touch_randomly(self.region['enhance_tab_normal_ship'])\n else:\n Utils.touch_randomly(self.region['enhance_tab_retro_ship'])\n continue\n\n Utils.touch_randomly(self.region['button_go_back'])\n Utils.script_sleep(1)\n return\n\n def handle_retirement(self):\n \"\"\"\n Method that handles the disassembling of the ship materials used in the enhancement process.\n \"\"\"\n\n #tap the \"enhance\" button\n Utils.touch_randomly(self.region['enhance_orange_button'])\n #the 
enhanced alert lasts about three seconds, so there's enough time to catch it\n #even if the scripts sleeps for a little bit. This pause ensures the script does not take\n #the screenshot before the alert is shown.\n Utils.script_sleep(0.5)\n Utils.update_screen()\n\n if not Utils.find(\"enhancement/alert_enhanced\", 0.85):\n Logger.log_debug(\"Didn't find enhanced alert.\")\n return\n else:\n Logger.log_debug(\"Successfully enhanced ship.\")\n\n while True:\n Utils.update_screen()\n\n if Utils.find(\"menu/alert_info\"):\n Utils.touch_randomly(self.region['confirm_selected_equipment_button'])\n Utils.script_sleep(1)\n continue\n if Utils.find(\"retirement/button_disassemble\"):\n Utils.touch_randomly(self.region['disassemble_button'])\n Utils.script_sleep(1)\n continue\n if Utils.find(\"menu/item_found\"):\n Utils.touch_randomly(self.region['tap_to_continue'])\n Utils.script_sleep(1)\n return\n\n @property\n def need_to_enhance(self):\n \"\"\"Checks whether the script needs to retire ships\n\n Returns:\n bool: True if the script needs to retire ships\n \"\"\"\n # check if it has already retired with current combat count so it doesn't enter a loop\n if self.config.combat['enabled'] and self.stats.combat_done > self.last_enhance:\n return self.stats.combat_done % self.config.combat['retire_cycle'] == 0","sub_path":"modules/enhancement.py","file_name":"enhancement.py","file_ext":"py","file_size_in_byte":5540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"601486287","text":"#Fabric file\n\nfrom fabric.api import local\nfrom fabric.api import run\nfrom fabric.api import cd\nfrom fabric.api import env\nfrom fabric.operations import put\nfrom fabric.context_managers import settings\nfrom fabric.context_managers import lcd\n\nenv.hosts = [\n #'zhangnu@112.124.10.19:22',\n #'zhangnu@42.121.114.19:22',\n #'zhangnu@114.112.172.219:9191',\n #'zhangnu@54.251.110.86:22',\n\n 'root@14.18.206.3:22',\n]\n\nenv.passwords = {\n #'zhangnu@112.124.10.19:22': '$eDBi43#',\n #'zhangnu@42.121.114.19:22': '$eDBi43#',\n #'zhangnu@114.112.172.219:9191': '$eDBi43#',\n #'zhangnu@54.251.110.86:22': '',\n 'zhangnu@112.124.10.19:22': '$eDBi43#',\n}\n\nenv.key_filename = '~/.ssh/zhangnu_id_rsa'\ndef setservers(app):\n if app == 'clientroot':\n env.hosts = [\n 'root@192.241.207.26:22',\n 'root@106.186.116.170:22',\n 'root@14.18.206.3:22',\n 'root@110.34.240.58:22',\n ]\n\n env.passwords = {\n 'root@192.241.207.26:22': 'xvovxbetkreb',\n 'root@106.186.116.170:22': 'Elementary)(17',\n 'root@14.18.206.3:22': 'ofidc.com1010',\n 'root@110.34.240.58:22': 'TrevupRAW8at',\n }\n elif app == 'clientzhangnu':\n env.hosts = [\n 'zhangnu@112.124.10.19:22',\n #'zhangnu@42.121.114.19:22',\n #'zhangnu@114.112.172.219:9191',\n #'zhangnu@54.251.110.86:9191',\n ]\n\n env.passwords = {\n 'zhangnu@112.124.10.19:22': '$eDBi43#',\n #'zhangnu@42.121.114.19:22': '$eDBi43#',\n #'zhangnu@114.112.172.219:9191': '$eDBi43#',\n #'zhangnu@54.251.110.86:9191': '$eDBi43#',\n }\n elif app == 'supervisor':\n env.hosts = [\n 'root@14.18.206.3:22',\n ]\n\n env.passwords = {\n 'root@14.18.206.3:22': 'ofidc.com1010',\n }\n elif app == 'webserver':\n env.hosts = [\n 'root@14.18.206.3:22',\n ]\n\n env.passwords = {\n 'root@14.18.206.3:22': 'ofidc.com1010',\n }\n\ndef pack(app):\n if app == 'clientroot' or app == 'clientzhangnu':\n with lcd('../client'):\n local('rm -f monclient.tar.gz')\n local('mkdir bin')\n local('cp ../deploy/run_monclient.sh bin')\n local('cp mon_client.py util.py bin')\n local('tar -zcf 
monclient.tar.gz bin/')\n local('rm -rf bin')\n elif app == 'supervisor':\n with lcd('../supervisor'):\n local('rm -f monsupervisor.tar.gz')\n local('mkdir monsupervisor')\n local('cp ../deploy/run_monsupervisor.sh monsupervisor')\n local('cp mon_supervisor.py config.py util.py monsupervisor')\n local('tar -zcf monsupervisor.tar.gz monsupervisor')\n local('rm -rf monsupervisor')\n elif app == 'webserver':\n with lcd('../webserver'):\n local('rm -f monwebserver.tar.gz')\n local('mkdir monwebserver')\n local('cp ../deploy/run_webserver.sh monwebserver')\n local('cp mon_webserver.py config.py util.py monwebserver')\n local('cp -r static/ monwebserver')\n local('cp -r templates/ monwebserver')\n local('tar -zcf monwebserver.tar.gz monwebserver')\n local('rm -rf monwebserver')\n\n\ndef packwin(app):\n if app == 'client':\n with lcd('../client'):\n local('rm -f monclient.tar')\n local('mkdir bin')\n local('cp ../deploy/run_monclient.bat bin')\n local('cp mon_client.py util.py bin')\n local('tar -cf monclient.tar bin/')\n local('rm -rf bin')\n\n\ndef clean(app):\n if app == 'clientroot':\n #kill mon_client\n with settings(warn_only=True):\n run('ps aux | grep mon_client | grep python | awk \\'{print $2}\\' | xargs kill -9')\n run('rm -rf /root/monkk/bin')\n elif app == 'clientzhangnu':\n with settings(warn_only=True):\n run('ps aux | grep mon_client | grep python | awk \\'{print $2}\\' | xargs kill -9')\n run('rm -rf /home/zhangnu/monkk/bin')\n elif app == 'supervisor':\n with settings(warn_only=True):\n run('ps aux | grep mon_supervisor.py | grep python | awk \\'{print $2}\\' | xargs kill -9')\n run('rm -rf /root/monsupervisor')\n elif app == 'webserver':\n with settings(warn_only=True):\n run('ps aux | grep mon_webserver.py | grep python | awk \\'{print $2}\\' | xargs kill -9')\n run('rm -rf /root/monwebserver')\n\n\ndef upload(app):\n if app == 'clientroot':\n put('monclient.tar.gz', '/root/monkk/monclient.tar.gz')\n with cd('monkk'):\n run('tar zxf monclient.tar.gz')\n elif app == 'clientzhangnu':\n with lcd('../client'):\n put('monclient.tar.gz', '/home/zhangnu/monkk/monclient.tar.gz')\n with cd('/home/zhangnu/monkk'):\n run('tar zxf monclient.tar.gz')\n elif app == 'supervisor':\n with lcd('../supervisor/'):\n put('monsupervisor.tar.gz', '/root/monsupervisor.tar.gz')\n run('tar zxf monsupervisor.tar.gz')\n elif app == 'webserver':\n with lcd('../webserver/'):\n put('monwebserver.tar.gz', '/root/monwebserver.tar.gz')\n run('tar zxf monwebserver.tar.gz')\n\n\ndef update_env(app):\n local('')\n pass\n\n\ndef launch(app):\n if app == 'clientroot':\n run('ls')\n with cd('/root/monkk/bin'):\n run('ls')\n run('./run_monclient.sh', pty=False)\n elif app == 'clientzhangnu':\n with cd('/home/zhangnu/monkk/bin'):\n run('./run_monclient.sh', pty=False)\n elif app == 'supervisor':\n with cd('/root/monsupervisor'):\n run('./run_monsupervisor.sh', pty=False)\n elif app == 'webserver':\n with cd('/root/monwebserver'):\n run('./run_webserver.sh', pty=False)\n\ndef afterrun(app):\n pass\n\n\ndef deploy(app):\n #set servers\n setservers(app)\n #local\n pack(app)\n #remote\n clean(app)\n upload(app)\n #update_env(app)\n launch(app)\n afterrun(app)\n\ndef install_pip_package(app):\n if app == 'client':\n with settings(warn_only=True):\n #install setuptools pip\n run('sudo apt-get update')\n run('sudo apt-get install -y python-setuptools')\n run('sudo apt-get install -y python-pip')\n #install package\n run('sudo pip install cron.py==0.0.5')\n\ndef removeoldclient():\n with settings(warn_only=True):\n run('rm 
-rf archive')\n run('cp -r monkk/archive ./archive')\n run('rm -rf monkk')\n run('mkdir monkk')\n with settings(warn_only=True):\n run('cp -r ./archive monkk/archive')\n run('rm -rf archive')\n\ndef test():\n run('ls')\n with cd('monkk'):\n run('ls')\n run('ls')\n #with settings(warn_only=True):\n # run('cat /etc/issue')\n\ndef killclient():\n with settings(warn_only=True):\n run('ps aux | grep mon_client | grep python | awk \\'{print $2}\\' | xargs kill -9')\n","sub_path":"deploy/fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":6968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"322415376","text":"from raw_tiles.formatter.gzip import Gzip\nfrom raw_tiles.formatter.msgpack import Msgpack\nfrom raw_tiles.gen import RawrGenerator\nfrom raw_tiles.sink.local import LocalSink\nfrom raw_tiles.source.conn import ConnectionContextManager\nfrom raw_tiles.source.osm import OsmSource\nfrom raw_tiles.tile import Tile\n\n\ndef parse_range(z, args):\n \"\"\"\n Parse args, a string representing a range of tile coordinates (either x or\n y), at zoom level z.\n\n Supported formats are:\n - '*' for all coordinates at that zoom.\n - A single number for a single coordinate.\n - A range of numbers separated by a dash, inclusive of both ends.\n\n Returns a generator over the coordinates.\n \"\"\"\n\n assert len(args) == 1\n arg = args[0]\n\n if arg == \"*\":\n return xrange(0, 2**z - 1)\n r = map(int, arg.split('-'))\n if len(r) == 1:\n r = [r[0], r[0]]\n elif len(r) != 2:\n raise RuntimeError('Expected either a single value or a range '\n 'separated by a dash. Did not understand %r' %\n (arg,))\n # range is inclusive, but xrange is exclusive of the last parameter, so\n # need to shift it by one.\n lo, hi = r\n return xrange(lo, hi + 1)\n\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser(description='Generate RAWR tiles')\n parser.add_argument('zoom', metavar='Z', type=int, nargs=1,\n help='The zoom level.')\n parser.add_argument('x', metavar='X', nargs=1,\n help='The x coordinate, or coordinate range '\n '(e.g: 0-8). Use * to indicate the whole range.')\n parser.add_argument('y', metavar='Y', nargs=1,\n help='The y coordinate, or coordinate range '\n '(e.g: 0-8). 
Use * to indicate the whole range.')\n\n    parser.add_argument('--dbparams', help='Database parameters')\n\n    args = parser.parse_args()\n\n    z = int(args.zoom[0])\n    x_range = parse_range(z, args.x)\n    y_range = parse_range(z, args.y)\n\n    conn_ctx = ConnectionContextManager(args.dbparams)\n    src = OsmSource(conn_ctx)\n    fmt = Gzip(Msgpack())\n    sink = LocalSink('tiles', '.msgpack.gz')\n    rawr_gen = RawrGenerator(src, fmt, sink)\n\n    for x in x_range:\n        for y in y_range:\n            tile = Tile(z, x, y)\n            rawr_gen(tile)\n","sub_path":"raw_tiles/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"217113793","text":"TRUE_LABEL = 'B'\nFALSE_LABEL = 'O'\n\ndef span_Fscore(predicts, golds, print_result=True):\n    def calc_F(n_match, n_predict, n_gold):\n        if n_match != 0:\n            p = n_match / n_predict\n            r = n_match / n_gold\n            f = 2*p*r / (p + r)\n        else:\n            p = r = f = 0\n        return p, r, f\n\n    def get_span(labels):\n        if type(labels) == str:\n            labels = labels.split()\n        spans = []\n        start_index = 0\n        for i, label in enumerate(labels):\n            if label == TRUE_LABEL:\n                spans.append((start_index, i))\n                start_index = i\n        spans.append((start_index, len(labels)))\n        spans = spans[1:] # drop the leading (0, 0) entry\n        return spans\n\n    def count(predict_spans, gold_spans):\n        n_predict = len(predict_spans)\n        n_gold = len(gold_spans)\n        n_match = len(set(predict_spans) & set(gold_spans))\n        return n_match, n_predict, n_gold\n\n    N_match, N_pred, N_gold = [], [], []\n    for pred, gold in zip(predicts, golds):\n        pred_span = get_span(pred)\n        gold_span = get_span(gold)\n        n_match, n_pred, n_gold = count(pred_span, gold_span)\n        N_match.append(n_match)\n        N_pred.append(n_pred)\n        N_gold.append(n_gold)\n\n    macro_p, macro_r, macro_f = [sum(l) / len(l) for l in list(\n        zip(*[calc_F(n_match,n_pred,n_gold) for n_match,n_pred,n_gold in zip(N_match,N_pred,N_gold)]))]\n    micro_p, micro_r, micro_f = calc_F(sum(N_match), sum(N_pred), sum(N_gold))\n    if print_result:\n        print('macro p/r/f : {} / {} / {}'.format(macro_p, macro_r, macro_f))\n        print('micro p/r/f : {} / {} / {}'.format(micro_p, micro_r, micro_f))\n    score = {\n        'micro': {'precision': micro_p,\n                  'recall'   : micro_r,\n                  'f-score'  : micro_f,\n                 },\n        'macro': {'precision': macro_p,\n                  'recall'   : macro_r,\n                  'f-score'  : macro_f,\n                 }\n    }\n    return score\n
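\n# Usage sketch (illustrative inputs, not taken from the original file): with\n# TRUE_LABEL = 'B', get_span('B O B O') yields the spans [(0, 2), (2, 4)], so\n# span_Fscore(['B O B O'], ['B O B O']) returns micro and macro\n# precision/recall/f-score of 1.0, since predicted and gold spans match exactly.\n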
\ndef boundary_Fscore(predicts, golds, print_result=True):\n    def calc_F(n_match, n_predict, n_gold):\n        if n_match != 0:\n            p = n_match / n_predict\n            r = n_match / n_gold\n            f = 2*p*r / (p + r)\n        else:\n            p = r = f = 0\n        return p, r, f\n\n    def bio2bound(labels):\n        if type(labels) == str:\n            labels = labels.split()\n        index = []\n        for i, label in enumerate(labels):\n            if label == TRUE_LABEL and i != 0: # ignore a boundary at the first position\n                index.append(i)\n        return index\n\n    def count(predict, gold):\n        n_predict = len(predict)\n        n_gold = len(gold)\n        n_match = len(set(predict) & set(gold))\n        return n_match, n_predict, n_gold\n\n    N_match, N_pred, N_gold = [], [], []\n    for pred, gold in zip(predicts, golds):\n        pred_bound = bio2bound(pred)\n        gold_bound = bio2bound(gold)\n        n_match, n_pred, n_gold = count(pred_bound, gold_bound)\n        if n_pred == n_gold == 0:\n            continue\n        N_match.append(n_match)\n        N_pred.append(n_pred)\n        N_gold.append(n_gold)\n\n    macro_p, macro_r, macro_f = [sum(l) / len(l) for l in list(\n        zip(*[calc_F(n_match,n_pred,n_gold) for n_match,n_pred,n_gold in zip(N_match,N_pred,N_gold)]))]\n    micro_p, micro_r, micro_f = calc_F(sum(N_match), sum(N_pred), sum(N_gold))\n    if print_result:\n        print('macro p/r/f : {} / {} / {}'.format(macro_p, macro_r, macro_f))\n        print('micro p/r/f : {} / {} / {}'.format(micro_p, micro_r, micro_f))\n    score = {\n        'micro': {'precision': micro_p,\n                  'recall'   : micro_r,\n                  'f-score'  : micro_f,\n                 },\n        'macro': {'precision': macro_p,\n                  'recall'   : macro_r,\n                  'f-score'  : macro_f,\n                 }\n    }\n    return score\n\n\ndef load_tag(file_name):\n    with open(file_name) as f:\n        dataset = [line.strip().split('\\t')[-1] for line in f.readlines()]\n    return dataset\n\n\nimport sys\nif __name__ == '__main__':\n    args = sys.argv\n    predict = args[2]\n    gold = args[1]\n    predict = load_tag(predict)\n    gold = load_tag(gold)\n\n    print('boundary')\n    boundary_Fscore(predict, gold)\n    print('span')\n    span_Fscore(predict, gold)\n","sub_path":"src/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":4251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"548031660","text":"import structlog\nfrom celery.decorators import periodic_task\nfrom celery.task.schedules import crontab\nfrom city.utils import get_city_weather_by_id\nfrom city.models import CityWeather\n\nlogger = structlog.get_logger(__name__)\n\n\n@periodic_task(run_every=crontab(minute='*/10'), name=\"update_weather\")\ndef update_city_weather():\n    weather_list_obj = get_city_weather_by_id()\n    if weather_list_obj is not None:\n        try:\n            CityWeather.objects.all().delete()\n            CityWeather.objects.bulk_create(weather_list_obj)\n            response = dict(message='Refreshed Database for weather')\n            return response\n        except Exception as e:\n            response = dict(error=str(e))\n            return response\n    else:\n        response = dict(error='API Downtime')\n        return response\n\n\n\n","sub_path":"city/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"193449774","text":"import numpy as np\r\nfrom ..Math import mmMath as mm\r\n\r\n# ===============================================================================\r\n# Reference : Appendix A of Momentum Control for Balance, SIGGRAPH 2009\r\n# ===============================================================================\r\n\r\n\r\ndef get_P(masses, positions, CM, inertias):\r\n    P = np.empty((6, 6*len(masses)))\r\n    for i in range(len(masses)):\r\n        P[:, 6*i:6*i+6] = _get_P(masses[i], positions[i], CM, inertias[i])\r\n    return P\r\n\r\n\r\ndef _get_P(mass, position, CM, inertia):\r\n    _P = np.empty((6, 6))\r\n    _P[:3, :3] = mass * np.eye(3)\r\n    _P[:3, 3:] = np.zeros((3, 3))\r\n    _P[3:, :3] = mass * mm.getCrossMatrixForm(position - CM)\r\n    _P[3:, 3:] = inertia\r\n    return _P\r\n\r\n\r\ndef make_TO(masses):\r\n    O = np.zeros((3,3)) \r\n    TOs = [None]*len(masses)\r\n    for i in range(len(masses)):\r\n        TOs[i] = np.concatenate((mm.I_SO3()*masses[i], O), 1)\r\n    return np.concatenate(TOs, 1)\r\n\r\n\r\ndef _make_Us(masses, positions, CM):\r\n    Us = [None]*len(masses)\r\n    for i in range(len(masses)):\r\n        Us[i] = masses[i] * mm.getCrossMatrixForm(positions[i] - CM) \r\n#        Us[i] = masses[i] * mm.getCrossMatrixForm(CM - positions[i]) \r\n    return Us\r\n\r\n\r\n# pure inertia matrix\r\n# CM : CM or origin about angular momentum\r\ndef getPureInertiaMatrix(TO, masses, positions, CM, inertias):\r\n    Us = _make_Us(masses, positions, CM)\r\n    Vs = inertias\r\n    UVs = [None]*len(masses)\r\n    for i in range(len(masses)):\r\n        UVs[i] = np.concatenate((Us[i], Vs[i]), 1)\r\n    return np.concatenate((TO, np.concatenate(UVs, 1)), 0)\r\n\r\n\r\ndef make_dTO(linkNum):\r\n    O = np.zeros((3, 
linkNum*3))\r\n return np.concatenate((O, O), 1)\r\n\r\n\r\ndef _make_dUs(masses, velocities, dCM):\r\n dUs = [None]*len(masses)\r\n for i in range(len(masses)):\r\n dUs[i] = masses[i] * mm.getCrossMatrixForm(velocities[i] - dCM) \r\n# dUs[i] = masses[i] * mm.getCrossMatrixForm(dCM - velocities[i]) \r\n return dUs\r\n\r\n\r\ndef _make_dVs(angVels, inertias):\r\n dVs = [None]*len(angVels)\r\n for i in range(len(angVels)):\r\n dVs[i] = np.dot(mm.getCrossMatrixForm(angVels[i]), inertias[i])\r\n return dVs\r\n\r\n# time derivative of pure inertia matrix\r\ndef getPureInertiaMatrixDerivative(dTO, masses, velocities, dCM, angVels, inertias):\r\n dUs = _make_dUs(masses, velocities, dCM) \r\n dVs = _make_dVs(angVels, inertias)\r\n dUVs = [None]*len(masses)\r\n for i in range(len(masses)):\r\n dUVs[i] = np.concatenate((dUs[i], dVs[i]), 1)\r\n return np.concatenate((dTO, np.concatenate(dUVs, 1)), 0) \r\n\r\n# ===============================================================================\r\n# momentum calculation by standard method\r\n# ===============================================================================\r\n\r\ndef getLinearMomentum(masses, velocities):\r\n L = mm.v3(0.,0.,0.)\r\n for i in range(len(masses)):\r\n L += masses[i] * velocities[i]\r\n return L\r\n\r\ndef getAngularMomentum(origin, inertias, angVelocities, positions, masses, velocities):\r\n H = mm.v3(0.,0.,0.)\r\n for i in range(len(masses)):\r\n H += np.dot(inertias[i], angVelocities[i]) + np.cross(positions[i]-origin, masses[i]*velocities[i])\r\n return H\r\n\r\n\r\nif __name__=='__main__':\r\n from fltk import *\r\n \r\n import Resource.ysMotionLoader as yf\r\n import Simulator.ysPhysConfig as ypc\r\n import Renderer.ysRenderer as yr\r\n import Renderer.csVpRenderer as cvr\r\n import Simulator.csVpWorld as cvw\r\n import Simulator.csVpModel as cvm\r\n import GUI.ysSimpleViewer as ysv\r\n import Optimization.csEQP as ceq\r\n import ArticulatedBody.ysJacobian as yjc\r\n import Util.ysPythonEx as ype\r\n import Motion.ysSkeletonEdit as yme\r\n import ArticulatedBody.ysReferencePoints as yrp\r\n\r\n \r\n def test_momentum_matrix_build(): \r\n bvhFilePath = '../samples/chain_2.bvh'\r\n motion = yf.readBvhFile(bvhFilePath)\r\n \r\n mcfg = ypc.ModelConfig()\r\n mcfg.defaultDensity = 1000.\r\n mcfg.defaultBoneRatio = .8\r\n for i in range(motion[0].skeleton.getElementNum()):\r\n mcfg.addNode(motion[0].skeleton.getElementName(i))\r\n \r\n wcfg = ypc.WorldConfig()\r\n wcfg.planeHeight = -1.\r\n wcfg.gravity = (0,0,0)\r\n stepsPerFrame = 30\r\n wcfg.timeStep = (1/30.)/stepsPerFrame\r\n \r\n vpWorld = cvw.VpWorld(wcfg)\r\n controlModel = cvm.VpControlModel(vpWorld, motion[0], mcfg)\r\n vpWorld.initialize()\r\n\r\n controlModel.fixBody(0)\r\n# controlModel.setJointAngVelocityLocal(0, (.5,0,0))\r\n \r\n linkMasses = controlModel.getBodyMasses()\r\n totalMass = controlModel.getTotalMass()\r\n \r\n totalDOF = controlModel.getTotalDOF()\r\n jointDOFs = controlModel.getDOFs()\r\n J = yjc.makeEmptyJacobian(jointDOFs, controlModel.getBodyNum())\r\n jointPositions = [None]*totalDOF\r\n \r\n VERBOSE = True\r\n# VERBOSE = False\r\n \r\n if VERBOSE:\r\n np.set_printoptions(precision=1, linewidth=200) \r\n \r\n TO = make_TO(linkMasses)\r\n dTO = make_dTO(len(linkMasses))\r\n if VERBOSE:\r\n print('TO')\r\n print(TO)\r\n print('dTO')\r\n print(dTO)\r\n \r\n viewer = ysv.SimpleViewer()\r\n viewer.record(False)\r\n# viewer.doc.addRenderer('motion', yr.JointMotionRenderer(motion, (0,0,255), yr.LINK_WIREBOX))\r\n# viewer.doc.addObject('motion', 
motion)\r\n viewer.doc.addRenderer('model', cvr.VpModelRenderer(controlModel, (255,240,255), yr.POLYGON_LINE))\r\n \r\n def simulateCallback(frame):\r\n linkPositions = controlModel.getBodyPositionsGlobal()\r\n CM = yrp.getCM(linkPositions, linkMasses, totalMass)\r\n inertias = controlModel.getBodyInertiasGlobal()\r\n P = getPureInertiaMatrix(TO, linkMasses, linkPositions, CM, inertias)\r\n \r\n linkVelocities = controlModel.getBodyVelocitiesGlobal()\r\n dCM = yrp.getCM(linkVelocities, linkMasses, totalMass)\r\n dP = getPureInertiaMatrixDerivative(dTO, linkMasses, linkVelocities, dCM, controlModel.getBodyAngVelocitiesGlobal(), inertias)\r\n \r\n if VERBOSE:\r\n print('P')\r\n print(P)\r\n print('dP')\r\n print(dP)\r\n\r\n for i in range(stepsPerFrame):\r\n vpWorld.step()\r\n viewer.setSimulateCallback(simulateCallback)\r\n \r\n viewer.startTimer(1/30.)\r\n viewer.show()\r\n \r\n Fl.run()\r\n \r\n def test_momentum_standard(): \r\n np.set_printoptions(precision=2, linewidth=200) \r\n \r\n bvhFilePath = '../samples/chain_1_long.bvh'\r\n motion1 = yf.readBvhFile(bvhFilePath)\r\n \r\n bvhFilePath = '../samples/chain_2.bvh'\r\n motion2 = yf.readBvhFile(bvhFilePath)\r\n \r\n mcfg1 = ypc.ModelConfig()\r\n mcfg1.defaultDensity = 1000.\r\n mcfg1.defaultBoneRatio = 1.\r\n for i in range(motion1[0].skeleton.getElementNum()):\r\n mcfg1.addNode(motion1[0].skeleton.getElementName(i))\r\n mcfg2 = ypc.ModelConfig()\r\n mcfg2.defaultDensity = 1000.\r\n mcfg2.defaultBoneRatio = 1.\r\n for i in range(motion2[0].skeleton.getElementNum()):\r\n mcfg2.addNode(motion2[0].skeleton.getElementName(i))\r\n \r\n wcfg = ypc.WorldConfig()\r\n wcfg.planeHeight = 0.\r\n wcfg.gravity = (0,0,0)\r\n stepsPerFrame = 30\r\n wcfg.timeStep = (1/30.)/stepsPerFrame\r\n \r\n vpWorld = cvw.VpWorld(wcfg)\r\n m1 = cvm.VpControlModel(vpWorld, motion1[0], mcfg1)\r\n m2 = cvm.VpControlModel(vpWorld, motion2[0], mcfg2)\r\n vpWorld.initialize()\r\n\r\n force = 1000\r\n# force = 0\r\n torque = 400\r\n\r\n m1.translateByOffset((0,1,1))\r\n m1.applyBodyTorqueGlobal(0, (0,0,torque))\r\n m1.applyBodyForceGlobal(0, (force,0,0))\r\n \r\n m2.translateByOffset((0,1,0))\r\n m2.applyBodyTorqueGlobal(0, (0,0,torque/2.))\r\n m2.applyBodyTorqueGlobal(1, (0,0,torque/2.))\r\n m2.applyBodyForceGlobal(0, (force/2.,0,0))\r\n m2.applyBodyForceGlobal(1, (force/2.,0,0))\r\n \r\n \r\n masses_m1 = m1.getBodyMasses()\r\n masses_m2 = m2.getBodyMasses()\r\n totalMass_m1 = m1.getTotalMass()\r\n totalMass_m2 = m2.getTotalMass()\r\n \r\n p = []\r\n v = []\r\n\r\n CM = []\r\n L_std = []\r\n H_std = []\r\n \r\n viewer = ysv.SimpleViewer()\r\n viewer.record(False)\r\n viewer.doc.addRenderer('model', cvr.VpModelRenderer(m2, (255,240,255), yr.POLYGON_LINE))\r\n viewer.doc.addRenderer('model2', cvr.VpModelRenderer(m1, (255,240,255), yr.POLYGON_LINE))\r\n# viewer.doc.addRenderer('v', yr.VectorsRenderer(v, p, (0,255,0)))\r\n\r\n viewer.doc.addRenderer('L_std', yr.VectorsRenderer(L_std, CM, (255,0,0)))\r\n viewer.doc.addRenderer('H_std', yr.VectorsRenderer(H_std, CM, (255,0,0)))\r\n \r\n viewer.setMaxFrame(100)\r\n \r\n def simulateCallback(frame):\r\n for i in range(stepsPerFrame):\r\n vpWorld.step()\r\n \r\n velocities_m1 = m1.getBodyVelocitiesGlobal()\r\n velocities_m2 = m2.getBodyVelocitiesGlobal()\r\n \r\n positions_m1 = m1.getBodyPositionsGlobal()\r\n positions_m2 = m2.getBodyPositionsGlobal()\r\n CM_m1 = m1.getBodyPositionGlobal(0)\r\n CM_m2 = yrp.getCM(positions_m2, masses_m2, totalMass_m2)\r\n inertias_m1 = m1.getBodyInertiasGlobal()\r\n inertias_m2 = 
m2.getBodyInertiasGlobal()\r\n angVelocities_m1 = m1.getBodyAngVelocitiesGlobal()\r\n angVelocities_m2 = m2.getBodyAngVelocitiesGlobal()\r\n \r\n L1_std = getLinearMomentum(masses_m1, velocities_m1)\r\n L2_std = getLinearMomentum(masses_m2, velocities_m2)\r\n H1_std = getAngularMomentum(CM_m1, inertias_m1, angVelocities_m1, positions_m1, masses_m1, velocities_m1)\r\n H2_std = getAngularMomentum(CM_m2, inertias_m2, angVelocities_m2, positions_m2, masses_m2, velocities_m2)\r\n \r\n \r\n #===============================================================================\r\n # for rendering\r\n #===============================================================================\r\n p[:] = m1.getBodyPositionsGlobal() + m2.getBodyPositionsGlobal() \r\n v[:] = m1.getBodyVelocitiesGlobal() + m2.getBodyVelocitiesGlobal()\r\n \r\n CM[:] = [yrp.getCM(m1.getBodyPositionsGlobal(), m1.getBodyMasses()), CM_m2]\r\n L_std[:] = [L1_std, L2_std]\r\n H_std[:] = [H1_std, H2_std]\r\n \r\n viewer.setSimulateCallback(simulateCallback)\r\n \r\n viewer.startTimer(1/30.)\r\n viewer.show()\r\n \r\n Fl.run()\r\n \r\n def test_momentum_matrix(): \r\n np.set_printoptions(precision=2, linewidth=200) \r\n \r\n# bvhFilePath = '../samples/chain_1_long.bvh'\r\n bvhFilePath = '../samples/chain_3_rotate.bvh'\r\n motion1 = yf.readBvhFile(bvhFilePath)\r\n \r\n mcfg1 = ypc.ModelConfig()\r\n mcfg1.defaultDensity = 1000.\r\n mcfg1.defaultBoneRatio = 1.\r\n for i in range(motion1[0].skeleton.getElementNum()):\r\n mcfg1.addNode(motion1[0].skeleton.getElementName(i))\r\n wcfg = ypc.WorldConfig()\r\n wcfg.planeHeight = -1.\r\n wcfg.gravity = (0,0,0)\r\n stepsPerFrame = 30\r\n wcfg.timeStep = (1/30.)/stepsPerFrame\r\n \r\n vpWorld = cvw.VpWorld(wcfg)\r\n m1 = cvm.VpControlModel(vpWorld, motion1[0], mcfg1)\r\n vpWorld.initialize()\r\n\r\n # momentum matrix information\r\n masses = m1.getBodyMasses()\r\n totalMass = m1.getTotalMass()\r\n TO = make_TO(masses)\r\n v_sol = ype.makeNestedList([6]*m1.getBodyNum())\r\n\r\n # jacobian for internal joints\r\n DOFs_internal = m1.getInternalJointDOFs()\r\n totalDOF_internal = m1.getTotalInternalJointDOF()\r\n\r\n J_internal = yjc.makeEmptyJacobian(DOFs_internal, m1.getBodyNum())\r\n linkJointMasks_internal = yjc.getAllLinkInternalJointMasks(motion1[0].skeleton)\r\n\r\n dth_flat_internal = ype.makeFlatList(totalDOF_internal)\r\n\r\n # momentum matrix for all joints\r\n DOFs_all = m1.getDOFs()\r\n totalDOF_all = m1.getTotalDOF()\r\n \r\n J_all = yjc.makeEmptyJacobian(DOFs_all, m1.getBodyNum())\r\n linkJointMasks_all = yjc.getAllLinkJointMasks(motion1[0].skeleton)\r\n \r\n dth_flat_all = ype.makeFlatList(totalDOF_all)\r\n \r\n \r\n p = []\r\n v = []\r\n \r\n rd_CM = []\r\n rd_L_std = []\r\n rd_L_jacob_internal = []\r\n rd_L_jacob_all = []\r\n rd_H_std = []\r\n rd_H_jacob_internal = []\r\n rd_H_jacob_all = []\r\n \r\n viewer = ysv.SimpleViewer()\r\n viewer.record(False)\r\n viewer.doc.addRenderer('model', cvr.VpModelRenderer(m1, (255,240,255), yr.POLYGON_LINE))\r\n# viewer.doc.addRenderer('v', yr.VectorsRenderer(v, p, (0,255,0)))\r\n\r\n viewer.doc.addRenderer('L_std', yr.VectorsRenderer(rd_L_std, rd_CM, (255,0,0)))\r\n# viewer.doc.addRenderer('L_jacob_internal', yr.VectorsRenderer(rd_L_jacob_internal, rd_CM, (0,255,0)))\r\n viewer.doc.addRenderer('L_jacob_all', yr.VectorsRenderer(rd_L_jacob_all, rd_CM, (255,255,0)))\r\n\r\n viewer.doc.addRenderer('H_std', yr.VectorsRenderer(rd_H_std, rd_CM, (255,0,0)))\r\n# viewer.doc.addRenderer('H_jacob_internal', yr.VectorsRenderer(rd_H_jacob_internal, rd_CM, 
(0,255,0)))\r\n viewer.doc.addRenderer('H_jacob_all', yr.VectorsRenderer(rd_H_jacob_all, rd_CM, (255,255,0)))\r\n \r\n viewer.setMaxFrame(100)\r\n \r\n # force\r\n m1.applyBodyTorqueGlobal(0, (0,0,1000))\r\n m1.applyBodyForceGlobal(0, (1000,0,0))\r\n m1.applyBodyTorqueGlobal(0, (0,1000,0))\r\n \r\n def simulateCallback(frame):\r\n for i in range(stepsPerFrame):\r\n vpWorld.step()\r\n \r\n #===============================================================================\r\n # momentum calculation by standard method\r\n #===============================================================================\r\n velocities = m1.getBodyVelocitiesGlobal()\r\n positions = m1.getBodyPositionsGlobal()\r\n CM = yrp.getCM(positions, masses, totalMass)\r\n inertias = m1.getBodyInertiasGlobal()\r\n angVelocities = m1.getBodyAngVelocitiesGlobal()\r\n \r\n L_std = getLinearMomentum(masses, velocities)\r\n H_std = getAngularMomentum(CM, inertias, angVelocities, positions, masses, velocities)\r\n \r\n #===============================================================================\r\n # momentum calculation by centroidal momentum matrix\r\n #===============================================================================\r\n P = getPureInertiaMatrix(TO, masses, positions, CM, inertias)\r\n\r\n # momentum matrix for internal joints and addition of total momentum about CM\r\n# jointPositions_internal = m1.getInternalJointPositionsGlobal()\r\n#\r\n# Rs = m1.getInternalJointOrientationsGlobal()\r\n# jointAxeses_internal = [Rs[i].transpose() for i in range(0,len(Rs))]\r\n# \r\n# yjc.computeJacobian2(J_internal, DOFs_internal, jointPositions_internal, jointAxeses_internal, positions, linkJointMasks_internal)\r\n# \r\n# dth = m1.getInternalJointAngVelocitiesLocal()\r\n# ype.flatten(dth, dth_flat_internal)\r\n# \r\n# PJ_internal = np.dot(P, J_internal)\r\n# LH_internal = np.dot(PJ_internal, dth_flat_internal)\r\n# L2_jacob_internal, H2_jacob_internal = np.hsplit(LH_internal, 2)\r\n# \r\n# p_root = m1.getBodyPositionGlobal(0)\r\n# v_root = m1.getBodyVelocityGlobal(0)\r\n# w_root = m1.getBodyAngVelocityGlobal(0)\r\n# \r\n# L_jacob_internal = mm.v3(0.,0.,0.)\r\n# L_jacob_internal += (totalMass * v_root)\r\n# L_jacob_internal += (-totalMass * np.cross( (CM - p_root), w_root))\r\n#\r\n L_jacob_internal = None\r\n H_jacob_internal = None\r\n\r\n # momentum matrix for all joints\r\n jointPositions_all = m1.getJointPositionsGlobal()\r\n jointAxeses_all = m1.getDOFAxeses()\r\n \r\n yjc.computeJacobian2(J_all, DOFs_all, jointPositions_all, jointAxeses_all, positions, linkJointMasks_all)\r\n \r\n dth = m1.getDOFVelocities()\r\n ype.flatten(dth, dth_flat_all)\r\n \r\n PJ_all= np.dot(P, J_all)\r\n LH_all= np.dot(PJ_all, dth_flat_all)\r\n L_jacob_all, H_jacob_all= np.hsplit(LH_all, 2)\r\n\r\n #===============================================================================\r\n # for rendering\r\n #===============================================================================\r\n p[:] = positions \r\n v[:] = velocities\r\n \r\n rd_CM[:] = [CM]\r\n rd_L_std[:] = [L_std]\r\n rd_L_jacob_internal[:] = [L_jacob_internal]\r\n rd_L_jacob_all[:] = [L_jacob_all]\r\n \r\n rd_H_std[:] = [H_std]\r\n rd_H_jacob_internal[:] = [H_jacob_internal]\r\n rd_H_jacob_all[:] = [H_jacob_all]\r\n\r\n \r\n viewer.setSimulateCallback(simulateCallback)\r\n \r\n viewer.startTimer(1/30.)\r\n viewer.show()\r\n \r\n Fl.run()\r\n \r\n pass\r\n# test_momentum_matrix_build()\r\n# test_momentum_standard()\r\n 
test_momentum_matrix()\r\n","sub_path":"PyCommon/modules/ArticulatedBody/ysMomentum.py","file_name":"ysMomentum.py","file_ext":"py","file_size_in_byte":17933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"575208427","text":"\"\"\"Demo with Tracking Processor\"\"\"\nimport os\nimport cv2\nfrom trueface.recognition import FaceRecognizer\nfrom trueface.video import VideoStream, QVideoStream\nfrom trueface.spoofv4 import SpoofDetector as SP4\nimport os\n\n#init spoof detector\nsp = SP4(\n './spoofv4/spoofv4.trueface',\n './spoofv4/spoofv4.params',\n os.environ['TF_TOKEN'],\n ctx='gpu')\nthreshold = 0.5\n\n#initialize video capture from your webcam\ncap = VideoStream(src=0).start()\n\ncounter = 0\nwhile(True):\n frame = cap.read()\n result = sp.predict(frame, 0.5)\n print(result)\n counter += 1\n label = \"Real:%0.2f Fake:%0.2f Pred: %s\" % (\n result[\"real\"], \n result[\"fake\"], \n result[\"prediction\"]) \n\n sp.draw_label(frame, \n (int(20), \n int(20)), \n label)\n cv2.imshow('Trueface.ai', frame)\n if cv2.waitKey(33) == ord('q'):\n break\n","sub_path":"python_sdk_deprecated/spoof-detection/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"168259845","text":"\"\"\"Test the mynames module.\"\"\"\nimport pytest\n\nfrom scanner import Scanner\nfrom names import Names\n\n#scan = Scanner(\"scan_test_doc.txt\", name)\n\n@pytest.fixture\ndef init_scanner(data):\n with open('test_file.txt', 'w') as f:\n f.write(data)\n name = Names()\n #scan = Scanner('scan_test_doc.txt', name)\n scan = Scanner('test_file.txt', name)\n return scan\n\n# def test_scanner_keyword(test_scanner):\n# assert test_scanner.get_symbol() == [3, 0]\n\n\n@pytest.mark.parametrize(\"data, expected_output\", [\n (',', [0, None]),\n (';', [1, None]),\n (':', [2, None]),\n ('NAND', [3, 6]),\n ('DEVICES',[3, 0]),\n ('CONNECTIONS',[3, 1]),\n ('7', [4, 7]),\n ('SW1', [5,13]),\n ('->', [6, None]),\n ('', [7, None]),\n ('.', [9, None]),\n ('?', [None, None]),\n ('\\\\*7*\\\\.', [9, None]),\n ('\\\\*7*\\\\\\\\', [None, None]),\n ('''\\\\\\\\fsdfsdf\\nDEVICES''', [3, 0]),\n ('\\\\', [None, None]),\n ('1BC', [4, 1]),\n ('--', [None, None]),\n ('-->', [None, None]),\n ('<-', [None, None]),\n ('\\\\ABC', [None, None]),\n ('B1C', [5, 13]),\n ('*\\\\', [None, None]),\n ('\\\\\\\\ \\n\\\\', [None, None]),\n ('\\\\\\\\ \\n\\\\*ABC*\\\\.', [9, None]),\n ('\\\\\\\\ \\n\\\\*ABC*\\\\\\\\', [None, None]),\n ('\\\\\\\\ \\n\\\\*A\\\\\\\\BC*\\\\.', [9, None]),\n ('\\\\\\\\ \\n\\\\*A*BC*\\\\ .', [9, None]),\n ('\\\\\\\\ \\n\\\\*A\\\\BC*\\\\\\t.', [9, None]),\n ('\\\\\\\\ \\n\\\\*A\\\\\\\\BC**\\\\.', [9, None]),\n ('\\\\\\\\ \\n\\\\*A\\\\\\\\BC\\\\*\\\\.', [9, None]),\n ('\\\\\\\\', [7, None]),\n ('\\\\*', [7, None]),\n ('NOT', [3, 12]),\n ('RC', [3, 11])\n])\n\n\ndef test_scanning(data, expected_output):\n scanner = init_scanner(data)\n assert scanner.get_symbol() == expected_output\n\n\n@pytest.mark.parametrize(\"data, expected_output\", [\n ('109', 109),\n ('60', 60),\n ('463 ', 463),\n ('410asdfg', 410),\n ('407-', 407),\n ('612.', 612),\n ('154,', 154),\n ('157;.plp', 157),\n ('15:17', 15),\n ('asd15', -1),\n ('\\n 15', 15),\n ('\\n sfd15', -1)\n])\n\ndef test_get_number(data, expected_output):\n scanner = init_scanner(data)\n assert scanner.get_number() == expected_output\n\n@pytest.mark.parametrize(\"data, expected_output\", [\n ('PART', 'PART'),\n ('CLK', 'CLK'),\n 
('FOUR1', 'FOUR1'),\n ('X26', 'X26'),\n ('CT ', 'CT'),\n ('SW4.3', 'SW4'),\n ('BTD5_I', 'BTD5'),\n ('AH4;BST', 'AH4'),\n ('\\n 12395', ''),\n ('\\n\\t\\rABC', 'ABC'),\n ('2ABC', ''),\n])\n\ndef test_get_name(data, expected_output):\n scanner = init_scanner(data)\n assert scanner.get_name() == expected_output\n\n@pytest.mark.parametrize(\"data, expected_output\", [\n\n ('abcdef', 'b'),\n ('45678', '5'),\n (',.-l', '.'),\n ('l dac', ' '),\n ('\\n 12', '2'),\n ('', '')\n])\n\ndef test_advance(data, expected_output):\n scanner = init_scanner(data)\n assert scanner.advance() == expected_output\n\n\n@pytest.mark.parametrize(\"data, expected_output\", [\n ('ABC:DEVICES 24;',\n [[5, 13], [2, None], [3, 0], [4, 24], [1, None]]),\n ('''\\\\\\\\Comment\\nDEVICES: SW1 -> A1''',\n [[3, 0], [2, None], [5, 13], [6, None], [5, 14]]),\n ('''\\\\\\\\Comment\\nCONNECTION: SW1 -> A1''',\n [[5, 13], [2, None], [5, 14], [6, None], [5, 15]]),\n ('''\\\\*Comment*\\\\DEVICES: SW1 -> A1''',\n [[3, 0], [2, None], [5, 13], [6, None], [5, 14]]),\n ('NAND N! 4,',\n [[3, 6], [5, 13], [None, None], [4, 4], [0, None]]),\n ('\\t123\\nABC\\rDEF\\t\\n\\rDEVICES :',\n [[4, 123], [5, 13], [5, 14], [3, 0], [2, None]]),\n ('''\\\\*Comment\\n*\\\\DEVICES: SW1 -> A1''',\n [[3, 0], [2, None], [5, 13], [6, None], [5, 14]]),\n ('''\\\\*Comment\\n\\\\\\\\anothercomment*\\\\DEVICES: SW1 -> A1''',\n [[3, 0], [2, None], [5, 13], [6, None], [5, 14]])\n])\ndef test_symbol_sequence(data, expected_output):\n \"\"\"Test if a sequence of symbols is correct.\"\"\"\n scanner = init_scanner(data)\n symbols = []\n for i in range(5):\n symbols.append(scanner.get_symbol())\n assert symbols == expected_output\n\n\n@pytest.mark.parametrize(\n \"data, expected_output, error_prev_symb, no_arrow, err_loc\", [\n (\"DEVICES:\\nCLOCK CL3 3,\", \"Line 2: CLOCK CL3 3,\\n\",\n False, True, 4),\n (\"DEVICES:\\nCLOCK\\nCL3\\n3,\", \"Line 3: CL3\\n\",\n False, True, 4),\n (\"DEVICES:\\nCLOCK\\n\\tCL3\\n3,\", \"Line 3: \\tCL3\\n\",\n False, True, 4),\n (\"DEVICES:\\nCLOCK\\n\\n\\n\\tCL3\\n3,\", \"Line 5: \\tCL3\\n\",\n False, True, 4),\n (\"DEVICES:\\nCLOCK CL3 3,\",\n \"Line 2: CLOCK CL3 3,\\n ^\\n\",\n False, False, 4),\n (\"DEVICES:\\n\\tCLOCK CL3 3,\",\n \"Line 2: \\tCLOCK CL3 3,\\n \\t ^\\n\",\n False, False, 4),\n (\"DEVICES:\\nCLOCK\\n\\nCL3 3,\",\n \"Line 4: CL3 3,\\n ^\\n\",\n False, False, 4),\n (\"DEVICES:\\nCLOCK CL3 3,\",\n \"Line 2: CLOCK CL3 3,\\n ^\\n\",\n True, False, 4),\n (\"DEVICES:\\nCLOCK\\tCL3 3,\",\n \"Line 2: CLOCK\\tCL3 3,\\n ^\\n\",\n True, False, 4),\n (\"DEVICES:\\n\\tCLOCK\\tCL3 3,\",\n \"Line 2: \\tCLOCK\\tCL3 3,\\n \\t ^\\n\",\n True, False, 4),\n (\"DEVICES:\\nCLOCK . CL3 3,\",\n \"Line 2: CLOCK . 
CL3 3,\\n ^\\n\",\n True, False, 5),\n (\"DEVICES:\\nCLOCK \\*Com*\\ CL3 3,\",\n \"Line 2: CLOCK \\*Com*\\ CL3 3,\\n ^\\n\",\n True, False, 4),\n (\"DEVICES:\\n\\tCLOCK \\*Com*\\ CL3 3,\",\n \"Line 2: \\tCLOCK \\*Com*\\ CL3 3,\\n \\t ^\\n\",\n True, False, 4),\n (\"DEVICES:\\n\\tCLOCK\\t\\*Com*\\CL3 3,\",\n \"Line 2: \\tCLOCK\\t\\*Com*\\CL3 3,\\n \\t ^\\n\",\n True, False, 4),\n (\"DEVICES:\\n\\tCLOCK\\*\\tCom*\\CL3 3,\",\n \"Line 2: \\tCLOCK\\*\\tCom*\\CL3 3,\\n \\t ^\\n\",\n True, False, 4),\n (\"DEVICES:\\nCLOCK \\*CLOCK*\\ CL3 3,\",\n \"Line 2: CLOCK \\*CLOCK*\\ CL3 3,\\n ^\\n\",\n True, False, 4),\n (\"DEVICES:\\nCLOCK *CLOCK* CL3 3,\",\n \"Line 2: CLOCK *CLOCK* CL3 3,\\n ^\\n\",\n True, False, 7),\n (\"DEVICES:\\nCLOCK \\CLOCK\\ CL3 3,\",\n \"Line 2: CLOCK \\CLOCK\\ CL3 3,\\n ^\\n\",\n True, False, 7),\n (\"DEVICES:\\nCLOCK CL3 3,\",\n \"Line 1: DEVICES:\\n ^\\n\",\n True, False, 3),\n (\"DEVICES:\\n\\nCLOCK CL3 3,\",\n \"Line 1: DEVICES:\\n ^\\n\",\n True, False, 3),\n (\"\\tDEVICES:\\nCLOCK CL3 3,\",\n \"Line 1: \\tDEVICES:\\n \\t ^\\n\",\n True, False, 3),\n (\"DEVICES:\\n\\\\\\\\Comment\\nCLOCK CL3 3,\",\n \"Line 1: DEVICES:\\n ^\\n\",\n True, False, 3),\n (\"DEVICES:\\n\\\\\\nCLOCK CL3 3,\",\n \"Line 2: \\\\\\n ^\\n\",\n True, False, 4),\n (\"DEVICES:\\n\\\\Comm\\nCLOCK CL3 3,\",\n \"Line 2: \\\\Comm\\n ^\\n\",\n True, False, 5),\n (\"DEVICES:\\\\*Com\\nment\\n*\\\\CLOCK CL3 3,\",\n \"Line 1: DEVICES:\\\\*Com\\n ^\\n\",\n True, False, 3),\n (\"DEVICES:\\\\*Com\\nment\\n*CLOCK *\\\\CLOCK CL3 3,\",\n \"Line 1: DEVICES:\\\\*Com\\n ^\\n\",\n True, False, 3),\n (\"DEVICES:\\\\*Com\\nment\\n\\\\CLOCK *\\\\CLOCK CL3 3,\",\n \"Line 1: DEVICES:\\\\*Com\\n ^\\n\",\n True, False, 3),\n (\"DEVICES:\\\\*Com\\nment\\n\\\\CLOCK *\\\\CLOCK*\\\\ CL3 3,\",\n \"Line 3: \\\\CLOCK *\\\\CLOCK*\\\\ CL3 3,\\n ^\\n\",\n True, False, 6),\n (\"DEVICES:\\\\*Com\\\\*\\nment\\n\\\\CLOCK *\\\\CLOCK*\\\\ CL3 3,\",\n \"Line 3: \\\\CLOCK *\\\\CLOCK*\\\\ CL3 3,\\n ^\\n\",\n True, False, 6),\n (\"DEVICES:\\\\*Com\\nment\\n\\\\CLOCK *\\\\*\\\\CLOCK CL3 3,\",\n \"Line 1: DEVICES:\\\\*Com\\n ^\\n\",\n True, False, 3),\n (\"DEVICES:\\\\*C\\nom\\n\\n*\\\\CLOCK CL3 3,\",\n \"Line 1: DEVICES:\\\\*C\\n ^\\n\",\n True, False, 3),\n (\"DEVICES:\\n\\\\\\\\DEVICES:com\\nCLOCK CL3 3,\",\n \"Line 1: DEVICES:\\n ^\\n\",\n True, False, 3),\n (\"DEVICES:\\n\\\\\\\\DEVICES:com\\n\\nCLOCK CL3 3,\",\n \"Line 1: DEVICES:\\n ^\\n\",\n True, False, 3),\n (\"\\\\*C\\nom*\\\\DEVICES:\\nCLOCK CL3 3,\",\n \"Line 2: om*\\\\DEVICES:\\n ^\\n\",\n True, False, 3),\n (\"DEVICES:\\\\*Com\\nment\\n*\\\\CLOCK CL3 3,\",\n \"Line 1: DEVICES:\\\\*Com\\n ^\\n\",\n True, False, 3),\n (\"DEVICES:\\n*Comm\\n*CLOCK CL3 3,\",\n \"Line 3: *CLOCK CL3 3,\\n ^\\n\",\n True, False, 6),\n (\"DEVICES:\\n*Comm\\n*CLOCK CL3 3,\",\n \"Line 2: *Comm\\n ^\\n\",\n True, False, 5),\n (\"DEVICES:\\\\\\\\Com\\nCLOCK CL3 3,\",\n \"Line 1: DEVICES:\\\\\\\\Com\\n ^\\n\",\n True, False, 3),\n (\"DEVICES:\\\\\\\\*Com\\nCLOCK CL3 3,\",\n \"Line 1: DEVICES:\\\\\\\\*Com\\n ^\\n\",\n True, False, 3)\n ]\n)\ndef test_get_line(capsys, data, expected_output, error_prev_symb,\n no_arrow, err_loc):\n \"\"\"Test printing out of the error line function.\"\"\"\n scanner = init_scanner(data)\n for i in range(err_loc):\n scanner.get_symbol()\n scanner.get_line(error_prev_symb, no_arrow)\n out, err = capsys.readouterr()\n assert out == expected_output\n\n\n@pytest.mark.parametrize(\"file\", [\n ('definitelynonexistentfile.txt'),\n ('definitelynonexistentfileordirectory'),\n ('wx') # Existent 
directory\n])\ndef test_open_file_fail(file):\n \"\"\"Test if file opening failures are handeled correctly.\"\"\"\n with pytest.raises(SystemExit):\n Scanner(file, Names())\n","sub_path":"Common/test_scanner.py","file_name":"test_scanner.py","file_ext":"py","file_size_in_byte":9657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"608527206","text":"\"\"\"\nThis package contains the GitHub implementations of the interfaces in\nserver.git.Interfaces.\n\"\"\"\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom typing import Optional\nfrom typing import Callable\nimport os\nimport logging\nimport time\nimport requests\n\nimport jwt\n\nfrom IGitt.Interfaces import _fetch, Token\nfrom IGitt.Utils import CachedDataMixin\n\n\nGH_INSTANCE_URL = os.environ.get('GH_INSTANCE_URL', 'https://github.com')\nif not GH_INSTANCE_URL.startswith('http'): # dont cover cause it'll be removed\n GH_INSTANCE_URL = 'https://' + GH_INSTANCE_URL\n logging.warning('Include the protocol in GH_INSTANCE_URL! Omitting it has '\n 'been deprecated.')\nBASE_URL = GH_INSTANCE_URL.replace('github.com', 'api.github.com')\n\n\nclass GitHubMixin(CachedDataMixin):\n \"\"\"\n Base object for things that are on GitHub.\n \"\"\"\n\n def _get_data(self):\n return get(self._token, self._url)\n\n @property\n def hoster(self):\n \"\"\"\n Returns `github`.\n \"\"\"\n return 'github'\n\n @property\n def url(self):\n \"\"\"\n Returns github API url.\n \"\"\"\n return BASE_URL + self._url\n\n @property\n def web_url(self):\n \"\"\"\n Returns the web link for GitHub.\n \"\"\"\n return self.data['html_url']\n\n def __repr__(self): # dont cover\n return '<{} object(url={}) at {}>'.format(self.__class__.__name__,\n self.url,\n hex(id(self)))\n\n\nclass GitHubToken(Token):\n \"\"\"\n Object representation of oauth tokens.\n \"\"\"\n\n def __init__(self, token):\n self._token = token\n\n @property\n def headers(self):\n \"\"\"\n GitHub Access token does not require any special headers.\n \"\"\"\n return {}\n\n @property\n def parameter(self):\n return {'access_token': self._token}\n\n @property\n def value(self):\n return self._token\n\n\nclass GitHubJsonWebToken(Token):\n \"\"\"\n Object representation of JSON Web Token.\n \"\"\"\n def __init__(self, private_key: str, app_id: int):\n self._key = private_key.strip()\n self._app_id = app_id\n self._payload = None\n self._jwt_token = None\n\n @property\n def payload(self):\n \"\"\"\n Returns the payload to be sent for JWT encoding.\n \"\"\"\n if not self._payload:\n self._payload = {\n # issued at time\n 'iat': int(datetime.now().timestamp()),\n # JWT expiration time (10 minute maximum), minus 5 seconds just\n # to be sure and cover up the request time\n 'exp': int(datetime.now().timestamp() + (10 * 60) - 5),\n # GitHub App's identifier\n 'iss': self._app_id\n }\n return self._payload\n\n # testing over recorded requests is unadvisable as it is dependent on the\n # time of execution of tests\n @property\n def is_expired(self): # dont cover\n \"\"\"\n Returns True if the JWT has expired.\n \"\"\"\n return self.payload['exp'] < datetime.now().timestamp()\n\n @property\n def headers(self):\n return {'Authorization': 'Bearer {}'.format(self.value),\n 'Accept': 'application/vnd.github.machine-man-preview+json'}\n\n @property\n def parameter(self):\n \"\"\"\n GitHub's JSON Web Token can only be authenticated via the\n ``Authorization`` header and so, all the nested requests have to be made\n in only that way.\n \"\"\"\n return {}\n\n 
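# Hypothetical usage sketch (added comment, not from the IGitt docs): a GitHub App\n    # would build the token as jwt_token = GitHubJsonWebToken(private_key=pem_str, app_id=123)\n    # and send jwt_token.headers with its requests (pem_str and 123 are placeholder values).\n    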
@property\n def value(self):\n if not self._jwt_token or self.is_expired:\n self._jwt_token = jwt.encode(self.payload, self._key, 'RS256')\n return self._jwt_token.decode('utf-8')\n\n\nclass GitHubInstallationToken(Token):\n \"\"\"\n Object representation of GitHub Installation Token.\n \"\"\"\n def __init__(self,\n installation_id: int,\n jwt_token: GitHubJsonWebToken,\n token: Optional[str]=None,\n expiry: Optional[datetime]=None):\n self._jwt = jwt_token\n self._expiry = expiry\n self._token = token\n self._id = installation_id\n\n @property\n def jwt(self):\n \"\"\"\n Retrieves the JWT being used.\n \"\"\"\n return self._jwt\n\n @property\n def headers(self):\n return {'Authorization': 'token {}'.format(self.value),\n 'Accept': 'application/vnd.github.machine-man-preview+json'}\n\n # testing over recorded requests is unadvisable as it is dependent on the\n # time of execution of tests\n @property\n def is_expired(self): # dont cover\n \"\"\"\n Returns true if the token has expired.\n \"\"\"\n if not self._expiry:\n return True\n return datetime.utcnow() > self._expiry\n\n def _get_new_token(self):\n data = post(self._jwt,\n '/installations/{}/access_tokens'.format(self._id),\n {})\n return data['token'], datetime.strptime(data['expires_at'],\n '%Y-%m-%dT%H:%M:%SZ')\n\n @property\n def value(self):\n if self.is_expired or not self._token:\n self._token, self._expiry = self._get_new_token()\n return self._token\n\n @property\n def parameter(self):\n \"\"\"\n GitHub Installation Token can only be authenticated via the\n ``Authorization`` header and so, all the nested requests have to be\n made in only that way.\n \"\"\"\n return {}\n\n\ndef get(token: Token,\n url: str,\n params: Optional[dict]=None,\n headers: Optional[dict]=None):\n \"\"\"\n Queries GitHub on the given URL for data.\n\n :param token: A Token object.\n :param url: E.g. ``/repo``\n :param params: The query params to be sent.\n :param headers: The request headers to be sent.\n :return:\n A dictionary or a list of dictionary if the response contains multiple\n items (usually in case of pagination) and the HTTP status code.\n :raises RunTimeError:\n If the response indicates any problem.\n \"\"\"\n return _fetch(BASE_URL, 'get', token,\n url, query_params={**dict(params or {}), 'per_page': 100},\n headers=headers)\n\nasync def lazy_get(url: str,\n callback: Callable,\n headers: Optional[dict]=None,\n timeout: Optional[timedelta]=timedelta(seconds=120),\n interval: Optional[timedelta]=timedelta(seconds=10)):\n \"\"\"\n Queries GitHub on the given URL for data, waiting while it\n returns HTTP 202.\n\n :param url: E.g. 
``/repo``\n :param callback:\n The function to callback with data after data is obtained.\n An empty dictionary is sent if nothing is returned by the API.\n :param timeout: datetime.timedelta object with time to keep re-trying.\n :param interval:\n datetime.timedelta object with time to keep in between tries.\n :param headers: The request headers to be sent.\n \"\"\"\n url = BASE_URL + url\n response = requests.get(url, headers=headers, timeout=3000)\n\n # Wait and re-request to allow github to process query\n while response.status_code == 202 and timeout.total_seconds() > 0:\n time.sleep(interval.total_seconds())\n timeout -= interval\n response = requests.get(url, headers=headers, timeout=3000)\n\n await callback(response.json())\n\ndef post(token: Token, url: str, data: dict, headers: Optional[dict]=None):\n \"\"\"\n Posts the given data onto GitHub.\n\n :param token: An OAuth token.\n :param url: The URL to access, e.g. ``/repo``.\n :param data: The data to post.\n :param headers: The request headers to be sent.\n :return:\n A dictionary or a list of dictionary if the response contains multiple\n items (usually in case of pagination) and the HTTP status code.\n :raises RunTimeError:\n If the response indicates any problem.\n \"\"\"\n return _fetch(BASE_URL, 'post', token, url, data, headers=headers)\n\n\ndef patch(token: Token, url: str, data: dict, headers: Optional[dict]=None):\n \"\"\"\n Patches the given data onto GitHub.\n\n :param token: An OAuth token.\n :param url: The URL to access, e.g. ``/repo``.\n :param data: The data to post.\n :param headers: The request headers to be sent.\n :return:\n A dictionary or a list of dictionary if the response contains multiple\n items (usually in case of pagination) and the HTTP status code.\n :raises RunTimeError:\n If the response indicates any problem.\n \"\"\"\n return _fetch(BASE_URL, 'patch', token, url, data, headers=headers)\n\n\ndef delete(token: Token,\n url: str,\n params: Optional[dict]=None,\n headers: Optional[dict]=None):\n \"\"\"\n Sends a delete request to the given URL on GitHub.\n\n :param token: An OAuth token.\n :param url: The URL to access, e.g. ``/repo``.\n :param params: The query params to be sent.\n :param headers: The request headers to be sent.\n :raises RuntimeError: If the response indicates any problem.\n \"\"\"\n _fetch(BASE_URL, 'delete', token, url, params, headers=headers)\n\n\ndef put(token: Token, url: str, data: dict, headers: Optional[dict]=None):\n \"\"\"\n Puts the given data onto GitHub.\n\n :param token: An OAuth token.\n :param url: The URL to access, e.g. 
``/repo``.\n :param data: The data to post.\n :param headers: The request headers to be sent.\n :return:\n A dictionary or a list of dictionary if the response contains multiple\n items (usually in case of pagination) and the HTTP status code.\n :raises RunTimeError:\n If the response indicates any problem.\n \"\"\"\n return _fetch(BASE_URL, 'put', token, url, data, headers=headers)\n","sub_path":"IGitt/GitHub/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":9881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"595923633","text":"def merge_the_tools(string, k):\n numberofsubstrings = (len(string))//k\n for i in range(1, numberofsubstrings+ 1):\n currentstring = string[(i -1) *k: k * i]\n currentstringnoduplicate = []\n [currentstringnoduplicate.append(x) for x in list(currentstring) if x not in currentstringnoduplicate]\n print(\"\".join(currentstringnoduplicate))\n\nif __name__ == '__main__':\n string, k = input(), int(input())\n merge_the_tools(string, k)","sub_path":"HackerRank/python_challenges/3_strings/mergethetools.py","file_name":"mergethetools.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"171647064","text":"\"\"\"\nStable recipes whose function signatures should almost never change in the future.\n\"\"\"\nfrom bokeh.layouts import row\nfrom hover.utils.bokeh_helper import servable\nfrom .subroutine import standard_annotator, standard_explorer\n\n\n@servable(title=\"Simple Annotator\")\ndef simple_annotator(dataset, height=600, width=600):\n \"\"\"\n The most basic recipe, which nonetheless can be useful with decent 2-d embedding.\n\n Layout:\n\n sidebar | [annotate here]\n \"\"\"\n annotator = standard_annotator(dataset, height=height, width=width)\n\n sidebar = dataset.view()\n layout = row(sidebar, annotator.view())\n return layout\n\n\n@servable(title=\"Linked Annotator\")\ndef linked_annotator(dataset, height=600, width=600):\n \"\"\"\n Leveraging CorpusExplorer which has the best search highlights.\n\n Layout:\n\n sidebar | [search here] | [annotate here]\n \"\"\"\n explorer = standard_explorer(dataset, height=height, width=width)\n annotator = standard_annotator(dataset, height=height, width=width)\n\n # link coordinates and selections\n explorer.link_xy_range(annotator)\n explorer.link_selection(\"raw\", annotator, \"raw\")\n\n sidebar = dataset.view()\n layout = row(sidebar, explorer.view(), annotator.view())\n return layout\n","sub_path":"hover/recipes/stable.py","file_name":"stable.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"377742428","text":"import sys\r\nfrom PyQt5.QtWidgets import QWidget, QApplication, QPushButton\r\nfrom PyQt5.QtGui import QPainter, QColor\r\nfrom random import randint as r\r\n\r\n\r\nclass Example(QWidget):\r\n def __init__(self):\r\n super().__init__()\r\n self.initUI()\r\n\r\n def initUI(self):\r\n self.setGeometry(300, 300, 600, 600)\r\n self.p = QPushButton(self)\r\n self.p.move(200, 200)\r\n self.p.resize(150, 50)\r\n self.p.clicked.connect(self.drawFlag)\r\n self.setWindowTitle('Рисование')\r\n self.show()\r\n\r\n def drawFlag(self, qp):\r\n qp = QPainter()\r\n qp.begin(self)\r\n qp.setBrush(QColor(r(0, 255), r(0, 255), r(0, 255)))\r\n size = r(0, 400)\r\n qp.drawEllipse(r(0, 100), r(0, 100), size, size)\r\n qp.end()\r\n\r\n\r\nif __name__ == 
'__main__':\r\n    app = QApplication(sys.argv)\r\n    ex = Example()\r\n    sys.exit(app.exec_())\r\n","sub_path":"main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"347352850","text":"from flask import Flask, jsonify, request\n\napp = Flask(__name__)\n\n@app.route(\"/name\", methods=[\"GET\"])\ndef name():\n\n    data = {\"name\": \"Jessica\"}\n\n    return jsonify(data)\n\n@app.route(\"/hello/<name>\", methods=[\"GET\"])\ndef hello(name):\n\n    data = {\"message\": \"Hello there, %s.\" % name}\n\n    return jsonify(data)\n\n@app.route(\"/distance\", methods=[\"POST\"])\ndef distance():\n\n    import numpy as np\n\n    input = request.get_json()\n    a = input[\"a\"]\n    b = input[\"b\"]\n\n    try:\n        a0 = float(a[0])\n        a1 = float(a[1])\n        b0 = float(b[0])\n        b1 = float(b[1])\n    except ValueError:\n        data = {\"distance\": None,\n                \"a\": a,\n                \"b\": b}\n        return jsonify(data)\n\n    dist = float(np.sqrt(np.square(a0 - b0) + np.square(a1 - b1)))\n    data = {\"distance\": dist,\n            \"a\": a,\n            \"b\": b}\n\n    return jsonify(data)\n","sub_path":"my_flask_service.py","file_name":"my_flask_service.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"627895391","text":"# File: xplane_ui.py\n# Creates the User Interface for all X-Plane settings.\n\nimport bpy\nfrom io_xplane2blender.xplane_ops import *\nfrom io_xplane2blender.xplane_config import *\n\n# Class: LAMP_PT_xplane\n# Adds X-Plane lamp settings to the lamp tab. Uses <lamp_layout> and <custom_layout>.\nclass LAMP_PT_xplane(bpy.types.Panel):\n    '''XPlane Lamp Panel'''\n    bl_label = \"XPlane\"\n    bl_space_type = \"PROPERTIES\"\n    bl_region_type = \"WINDOW\"\n    bl_context = \"data\"\n\n    def draw(self,context):\n        obj = context.object\n\n        if(obj.type == \"LAMP\"):\n            lamp_layout(self,obj.data)\n            custom_layout(self,obj.data,\"LAMP\")\n    \n# Class: MATERIAL_PT_xplane\n# Adds X-Plane Material settings to the material tab. Uses <material_layout> and <custom_layout>.\nclass MATERIAL_PT_xplane(bpy.types.Panel):\n    '''XPlane Material Panel'''\n    bl_label = \"XPlane\"\n    bl_space_type = \"PROPERTIES\"\n    bl_region_type = \"WINDOW\"\n    bl_context = \"material\"\n\n    @classmethod\n    def poll(self,context):\n        if context.material:\n            return True\n\n    def draw(self,context):\n        obj = context.object\n        version = int(context.scene.xplane.version)\n\n        if(obj.type == \"MESH\"):\n            material_layout(self,obj.active_material)\n            cockpit_layout(self,obj.active_material)\n            custom_layout(self,obj.active_material,\"MATERIAL\")\n\n            if version >= 1000:\n                conditions_layout(self, obj.active_material, \"MATERIAL\")\n\n\n# Class: SCENE_PT_xplane\n# Adds X-Plane Layer settings to the scene tab. Uses <scene_layout>.\nclass SCENE_PT_xplane(bpy.types.Panel):\n    '''XPlane Scene Panel'''\n    bl_label = \"XPlane\"\n    bl_space_type = \"PROPERTIES\"\n    bl_region_type = \"WINDOW\"\n    bl_context = \"scene\"\n\n    @classmethod\n    def poll(self,context):\n        return True\n\n    def draw(self,context):\n        scene = context.scene\n        scene_layout(self, scene)\n\n# Class: OBJECT_PT_xplane\n# Adds X-Plane settings to the object tab. 
Uses <animation_layout>, <mesh_layout>, and <custom_layout>.\nclass OBJECT_PT_xplane(bpy.types.Panel):\n    '''XPlane Object Panel'''\n    bl_label = \"XPlane\"\n    bl_space_type = \"PROPERTIES\"\n    bl_region_type = \"WINDOW\"\n    bl_context = \"object\"\n\n    @classmethod\n    def poll(self,context):\n        obj = context.object\n\n        if obj.type in (\"MESH\",\"EMPTY\",\"ARMATURE\",\"LAMP\"):\n            return True\n        else:\n            return False\n\n    def draw(self, context):\n        obj = context.object\n        version = int(context.scene.xplane.version)\n        \n        if obj.type in (\"MESH\",\"EMPTY\",\"ARMATURE\",\"LAMP\"):\n            animation_layout(self,obj)\n        if obj.type == \"MESH\":\n            mesh_layout(self,obj)\n            manipulator_layout(self,obj)\n        type = obj.type\n        if type==\"LAMP\":\n            type = \"OBJECT\"\n        lod_layout(self,obj)\n        weight_layout(self,obj)\n        custom_layout(self,obj,type)\n\n        # v1000\n        if version >= 1000:\n            conditions_layout(self, obj, \"OBJECT\")\n    \n\n# Class: BONE_PT_xplane\n# Adds X-Plane settings to the bone tab. Uses <animation_layout>.\nclass BONE_PT_xplane(bpy.types.Panel):\n    '''XPlane Bone Panel'''\n    bl_label = \"XPlane\"\n    bl_space_type = \"PROPERTIES\"\n    bl_region_type = \"WINDOW\"\n    bl_context = \"bone\"\n\n    @classmethod\n    def poll(self,context):\n        bone = context.bone\n        \n        if bone:\n            return True\n        else:\n            return False\n\n    def draw(self, context):\n        bone = context.bone\n        obj = context.object\n        weight_layout(self,bone)\n        animation_layout(self,bone,True)\n\n# Class: OBJECT_MT_xplane_datarefs\n# Adds the X-Plane datarefs search menu. This is not implemented yet.\nclass OBJECT_MT_xplane_datarefs(bpy.types.Menu):\n    '''XPlane Datarefs Search Menu'''\n    bl_label = \"XPlane Datarefs\"\n\n    def draw(self,context):\n        self.search_menu(xplane_datarefs,\"text.open\")\n\n# Function: scene_layout\n# Draws the UI layout for scene tabs. Uses <layer_layout>.\n#\n# Parameters:\n# self - Instance of current panel class.\n# scene - Blender scene.\ndef scene_layout(self, scene):\n    layout = self.layout\n    row = layout.row()\n    row.prop(scene.xplane,\"version\",text=\"X-Plane Version\")\n\n    row = layout.row()\n    row.prop(scene.xplane,\"optimize\",text=\"Optimize\")\n\n    row = layout.row()\n    row.prop(scene.xplane,\"debug\",text=\"Debug\")\n\n    if scene.xplane.debug:\n        box = layout.box()\n        box.prop(scene.xplane,\"profile\",text=\"Profiling\")\n        box.prop(scene.xplane,\"log\",text=\"Log\")\n\n    row = layout.row()\n\n    if len(scene.xplane.layers)!=0:\n        for i in range(0,len(scene.layers)):\n            row = layout.row()\n            layer_layout(self, scene, row, i)\n    else:\n        row.operator('scene.add_xplane_layers')\n\n# Function: layer_layout\n# Draws the UI layout for an <XPlaneLayer>. 
Uses <custom_layer_layout>.\n#\n# Parameters:\n# UILayout self - Instance of current UILayout.\n# scene - Blender scene\n# UILayout layout - Instance of sublayout to use.\n# int layer - <XPlaneLayer> index.\ndef layer_layout(self, scene, layout, layer):\n    version = int(scene.xplane.version)\n    box = layout.box()\n    li = str(layer+1)\n\n    if scene.xplane.layers[layer].expanded:\n        expandIcon = \"TRIA_DOWN\"\n        expanded = True\n    else:\n        expandIcon = \"TRIA_RIGHT\"\n        expanded = False\n    \n    box.prop(scene.xplane.layers[layer],\"expanded\", text=\"Layer \"+li, expand=True, emboss=False, icon=expandIcon)\n\n    if expanded:\n        column = box.column()\n        column.prop(scene.xplane.layers[layer],\"export\", text=\"Export\")\n        column.prop(scene.xplane.layers[layer],\"name\", text=\"Name\")\n        \n        if scene.xplane.layers[layer].cockpit:\n            checkboxIcon = \"CHECKBOX_HLT\"\n        else:\n            checkboxIcon = \"CHECKBOX_DEHLT\"\n\n        column.label('Textures')\n        tex_box = column.box()\n        tex_box.prop(scene.xplane.layers[layer], \"texture\", text=\"Default\")\n        tex_box.prop(scene.xplane.layers[layer], \"texture_lit\", text=\"Night\")\n        tex_box.prop(scene.xplane.layers[layer], \"texture_normal\", text=\"Normal / Specular\")\n        column.prop(scene.xplane.layers[layer], \"cockpit\", text=\"Cockpit\",icon=checkboxIcon, toggle=True)\n\n        # cockpit regions\n        if scene.xplane.layers[layer].cockpit:\n            cockpit_box = column.box()\n            #cockpit_box.prop(scene.xplane.layers[layer],\"panel_texture\", text=\"Panel Texture\")\n            cockpit_box.prop(scene.xplane.layers[layer],\"cockpit_regions\", text=\"Cockpit regions\")\n            num_regions = int(scene.xplane.layers[layer].cockpit_regions)\n\n            if num_regions>0:\n                if len(scene.xplane.layers[layer].cockpit_region) < num_regions:\n                    region_box = cockpit_box.box()\n                    region_box.operator(\"scene.add_xplane_layer_cockpit_regions\").index = layer\n                else:\n                    for i in range(0,num_regions):\n                        # get cockpit region or create it if not present\n                        if len(scene.xplane.layers[layer].cockpit_region)>i:\n                            cockpit_region = scene.xplane.layers[layer].cockpit_region[i]\n\n                            if cockpit_region.expanded:\n                                expandIcon = \"TRIA_DOWN\"\n                            else:\n                                expandIcon = \"TRIA_RIGHT\"\n\n                            region_box = cockpit_box.box()\n                            region_box.prop(cockpit_region,\"expanded\",text=\"Cockpit region %i\" % (i+1), expand=True, emboss=False, icon=expandIcon)\n\n                            if cockpit_region.expanded:\n                                region_box.prop(cockpit_region,\"left\")\n                                region_box.prop(cockpit_region,\"top\")\n                                region_split = region_box.split(percentage=0.5)\n                                region_split.prop(cockpit_region,\"width\")\n                                region_split.label(\"= %d\" % (2 ** cockpit_region.width))\n                                region_split = region_box.split(percentage=0.5)\n                                region_split.prop(cockpit_region,\"height\")\n                                region_split.label(\"= %d\" % (2 ** cockpit_region.height))\n\n            # v1010\n            # cockpit_lit\n            cockpit_lit_box = column.row()\n            cockpit_lit_box.prop(scene.xplane.layers[layer], \"cockpit_lit\", \"3D-Cockpit lighting\")\n\n        # LODs\n        else:\n            lods_box = column.box()\n            lods_box.prop(scene.xplane.layers[layer],\"lods\", text=\"Levels of detail\")\n            num_lods = int(scene.xplane.layers[layer].lods)\n\n            if num_lods > 0:\n\n                if len(scene.xplane.layers[layer].lod) < num_lods:\n                    lod_box = lods_box.box()\n                    lod_box.operator(\"scene.add_xplane_layer_lods\").index = layer\n                else:\n                    for i in range(0,num_lods):\n                        if len(scene.xplane.layers[layer].lod)>i:\n                            lod = scene.xplane.layers[layer].lod[i]\n\n                            if lod.expanded:\n                                expandIcon = \"TRIA_DOWN\"\n                            else:\n                                expandIcon = \"TRIA_RIGHT\"\n\n                            lod_box = lods_box.box()\n                            lod_box.prop(lod,\"expanded\",text=\"Level of detail %i\" % (i+1), expand=True, emboss=False, 
icon=expandIcon)\n\n                            if lod.expanded:\n                                lod_box.prop(lod,\"near\")\n                                lod_box.prop(lod,\"far\")\n\n        column.separator()\n        column.prop(scene.xplane.layers[layer], \"slungLoadWeight\", text=\"Slung Load weight\")\n\n        # v1000\n        if version >= 1000:\n            # blend\n            blend_box = column.box()\n            blend_box.prop(scene.xplane.layers[layer], \"blend\", text=\"Blend\")\n\n            if(scene.xplane.layers[layer].blend==\"off\"):\n                row = blend_box.row()\n                row.prop(scene.xplane.layers[layer], \"blendRatio\", text=\"Alpha cutoff ratio\")\n\n            # slope_limit\n            slope_box = column.box()\n            slope_box.prop(scene.xplane.layers[layer], \"slope_limit\", text=\"Slope limit\")\n\n            if(scene.xplane.layers[layer].slope_limit==True):\n                row = slope_box.row()\n                row.prop(scene.xplane.layers[layer], \"slope_limit_min_pitch\", text=\"Min. pitch\")\n                row = slope_box.row()\n                row.prop(scene.xplane.layers[layer], \"slope_limit_max_pitch\", text=\"Max. pitch\")\n                row = slope_box.row()\n                row.prop(scene.xplane.layers[layer], \"slope_limit_min_roll\", text=\"Min. roll\")\n                row = slope_box.row()\n                row.prop(scene.xplane.layers[layer], \"slope_limit_max_roll\", text=\"Max. roll\")\n\n            # tilted\n            tilted_box = column.row()\n            tilted_box.prop(scene.xplane.layers[layer], \"tilted\", text=\"Tilted\")\n\n            # require surface\n            require_box = column.row()\n            require_box.prop(scene.xplane.layers[layer], \"require_surface\", \"Require surface\")\n\n            # specular\n            specular_box = column.box()\n            specular_box.prop(scene.xplane.layers[layer], \"overrideSpecularity\", \"Override specularity\")\n\n            if scene.xplane.layers[layer].overrideSpecularity == True:\n                row = specular_box.row()\n                row.prop(scene.xplane.layers[layer], \"specular\", \"Specularity\")\n\n            # v1010\n            if version >= 1010:\n                # shadow\n                shadow_box = column.row()\n                shadow_box.prop(scene.xplane.layers[layer], \"shadow\", \"Cast shadows\")\n\n\n    custom_layer_layout(self, box, scene, layer)\n\n# Function: custom_layer_layout\n# Draws the UI layout for the custom attributes of a <XPlaneLayer>.\n#\n# Parameters:\n# UILayout self - Instance of current UILayout.\n# UILayout layout - Instance of sublayout to use.\n# scene - Blender scene\n# int layer - <XPlaneLayer> index.\ndef custom_layer_layout(self,layout, scene, layer):\n    layout.separator()\n    row = layout.row()\n    row.label(\"Custom Properties\")\n    row.operator(\"scene.add_xplane_layer_attribute\", text=\"Add Property\").index = layer\n    box = layout.box()\n    for i, attr in enumerate(scene.xplane.layers[layer].customAttributes):\n        subbox = box.box()\n        subrow = subbox.row()\n        subrow.prop(attr,\"name\")\n        subrow.prop(attr,\"value\")\n        subrow.operator(\"scene.remove_xplane_layer_attribute\",text=\"\",emboss=False,icon=\"X\").index = (layer,i)\n        if type in (\"MATERIAL\",\"MESH\"):  # note: 'type' is the builtin here, so this test never matches\n            subrow = subbox.row()\n            subrow.prop(attr,\"reset\")\n    \n# Function: mesh_layout\n# Draws the additional UI layout for Mesh-Objects. 
This includes light-level and depth-culling.\n#\n# Parameters:\n# UILayout self - Instance of current UILayout.\n# obj - Blender object.\ndef mesh_layout(self, obj):\n layout = self.layout\n row = layout.row()\n row.prop(obj.xplane, \"depth\", text=\"Use depth culling\")\n row = layout.row()\n row.prop(obj.xplane, \"export_mesh\", text=\"Export mesh in layers\")\n row = layout.row()\n\n# Function: lamp_layout\n# Draws the UI layout for lamps.\n#\n# Parameters:\n# UILayout self - Instance of current UILayout.\n# obj - Blender object.\ndef lamp_layout(self, obj):\n layout = self.layout\n row = layout.row()\n row.prop(obj.xplane, \"type\", text=\"Type\")\n\n if obj.xplane.type in (\"named\",\"param\"):\n row = layout.row()\n row.prop(obj.xplane,\"name\",text=\"Name\")\n if obj.xplane.type==\"param\":\n row = layout.row()\n row.prop(obj.xplane,\"params\",text=\"Parameters\")\n elif obj.xplane.type==\"custom\":\n row = layout.row()\n row.prop(obj.xplane,\"size\",text=\"Size\")\n row = layout.row()\n row.label(\"Texture coordinates:\")\n row = layout.row()\n row.prop(obj.xplane,\"uv\",text=\"\")\n row = layout.row()\n row.prop(obj.xplane,\"dataref\",text=\"Dataref\")\n row = layout.row()\n row.operator('xplane.dataref_search',text=\"Search dataref\",emboss=True,icon=\"VIEWZOOM\")\n\n# Function: material_layout\n# Draws the UI layout for materials.\n#\n# Parameters:\n# UILayout self - Instance of current UILayout.\n# obj - Blender object.\ndef material_layout(self, obj):\n version = int(bpy.context.scene.xplane.version)\n layout = self.layout\n\n row = layout.row()\n row.prop(obj.xplane, \"draw\", text=\"Draw\")\n\n if (obj.xplane.draw):\n row = layout.row()\n row.prop(obj.xplane, \"overrideSpecularity\", text=\"Override specularity\")\n\n if obj.xplane.overrideSpecularity:\n row = layout.row()\n row.prop(obj.xplane, \"shinyRatio\", text=\"Shiny ratio\")\n\n row = layout.row()\n\n # v1000 blend / v9000 blend\n if version >= 1000:\n row.prop(obj.xplane, \"blend_v1000\", text=\"Blend\")\n else:\n row.prop(obj.xplane, \"blend\", text=\"Use alpha cutoff\")\n\n if obj.xplane.blend==True or obj.xplane.blend_v1000 == 'off':\n row = layout.row()\n row.prop(obj.xplane, \"blendRatio\", text=\"Alpha cutoff ratio\")\n\n row = layout.row()\n row.prop(obj.xplane, \"surfaceType\", text=\"Surface type\")\n\n if obj.xplane.surfaceType!='none':\n row = layout.row()\n row.prop(obj.xplane,\"deck\",text=\"Deck\")\n\n row = layout.row()\n row.prop(obj.xplane,\"solid_camera\",text=\"Camera collision\")\n\n row = layout.row()\n row.prop(obj.xplane, \"poly_os\", text=\"Polygon offset\")\n row = layout.row()\n\n row.prop(obj.xplane,\"lightLevel\", text=\"Override light level\")\n \n if obj.xplane.lightLevel:\n box = layout.box()\n box.prop(obj.xplane,\"lightLevel_v1\",text=\"Value 1\")\n row = box.row()\n row.prop(obj.xplane,\"lightLevel_v2\",text=\"Value 2\")\n row = box.row()\n row.prop(obj.xplane,\"lightLevel_dataref\",text=\"Dataref\")\n row = box.row()\n row.operator('xplane.dataref_search',text=\"Search dataref\",emboss=True,icon=\"VIEWZOOM\")\n\n# Function: custom_layout\n# Draws the additional UI layout for custom attributes.\n#\n# Parameters:\n# UILayout self - Instance of current UILayout.\n# obj - Blender object.\n# string type - Type of object. 
(\"MESH\",\"MATERIAL\",\"LAMP\")\ndef custom_layout(self,obj,type):\n if type in (\"MESH\",\"ARMATURE\",\"OBJECT\"):\n oType = 'object'\n elif type==\"MATERIAL\":\n oType = 'material'\n elif type=='LAMP':\n oType = 'lamp'\n else:\n oType = None\n\n layout = self.layout\n layout.separator()\n\n if oType:\n # regular attributes\n row = layout.row()\n row.label(\"Custom Properties\")\n row.operator(\"object.add_xplane_\"+oType+\"_attribute\", text=\"Add Property\")\n box = layout.box()\n for i, attr in enumerate(obj.xplane.customAttributes):\n subbox = box.box()\n subrow = subbox.row()\n subrow.prop(attr,\"name\")\n subrow.operator(\"object.remove_xplane_\"+oType+\"_attribute\",text=\"\",emboss=False,icon=\"X\").index = i\n subrow = subbox.row()\n subrow.prop(attr,\"value\")\n if type in (\"MATERIAL\",\"MESH\",\"LAMP\",\"ARMATURE\"):\n subrow = subbox.row()\n subrow.prop(attr,\"reset\")\n subrow = subbox.row()\n subrow.prop(attr,\"weight\")\n\n # animation attributes\n if type in (\"MESH\",\"ARMATURE\",\"OBJECT\"):\n row = layout.row()\n row.label(\"Custom Animation Properties\")\n row.operator(\"object.add_xplane_object_anim_attribute\", text=\"Add Property\")\n box = layout.box()\n for i, attr in enumerate(obj.xplane.customAnimAttributes):\n subbox = box.box()\n subrow = subbox.row()\n subrow.prop(attr,\"name\")\n subrow.operator(\"object.remove_xplane_object_anim_attribute\",text=\"\",emboss=False,icon=\"X\").index = i\n subrow = subbox.row()\n subrow.prop(attr,\"value\")\n subrow = subbox.row()\n subrow.prop(attr,\"weight\")\n \n# Function: animation_layout\n# Draws the UI layout for animations. This includes Datarefs.\n#\n# Parameters:\n# UILayout self - Instance of current UILayout.\n# obj - Blender object.\n# bool bone - True if the object is a bone.\ndef animation_layout(self,obj,bone = False):\n layout = self.layout\n layout.separator()\n row = layout.row()\n row.label(\"Datarefs\")\n if bone:\n row.operator(\"bone.add_xplane_dataref\", text=\"Add Dataref\")\n else:\n row.operator(\"object.add_xplane_dataref\", text=\"Add Dataref\")\n box = layout.box()\n for i, attr in enumerate(obj.xplane.datarefs):\n subbox = box.box()\n subrow = subbox.row()\n # TODO: search is causing memory leak!\n# if len(bpy.data.scenes[0].xplane_datarefs)>0:\n# subrow.prop_search(attr,\"path\",bpy.data.scenes[0],\"xplane_datarefs\",text=\"\",icon=\"VIEWZOOM\")\n# else:\n# subrow.prop(attr,\"path\")\n subrow.prop(attr,\"path\")\n if bone:\n subrow.operator(\"bone.remove_xplane_dataref\",text=\"\",emboss=False,icon=\"X\").index = i\n else:\n subrow.operator(\"object.remove_xplane_dataref\",text=\"\",emboss=False,icon=\"X\").index = i\n subrow = subbox.row()\n subrow.operator('xplane.dataref_search',text=\"Search dataref\",emboss=True,icon=\"VIEWZOOM\")\n subrow = subbox.row()\n subrow.prop(attr,\"anim_type\",text=\"Animation\")\n subrow = subbox.row()\n\n if attr.anim_type in ('transform','translate','rotate'):\n if bpy.context.object.animation_data:\n if bone:\n subrow.operator(\"bone.add_xplane_dataref_keyframe\",text=\"\",icon=\"KEY_HLT\").index = i\n subrow.operator(\"bone.remove_xplane_dataref_keyframe\",text=\"\",icon=\"KEY_DEHLT\").index = i\n else:\n subrow.operator(\"object.add_xplane_dataref_keyframe\",text=\"\",icon=\"KEY_HLT\").index = i\n subrow.operator(\"object.remove_xplane_dataref_keyframe\",text=\"\",icon=\"KEY_DEHLT\").index = i\n subrow.prop(attr,\"value\")\n subrow = subbox.row()\n subrow.prop(attr,\"loop\",text=\"Loops\")\n else:\n subrow.label('Object not animated.')\n elif 
attr.anim_type in (\"show\",\"hide\"):\n subrow.prop(attr,\"show_hide_v1\")\n subrow = subbox.row()\n subrow.prop(attr,\"show_hide_v2\")\n\n# Function: cockpit_layout\n# Draws the UI layout for cockpit parameters. This includes panel.\n#\n# Parameters:\n# UILayout self - Instance of current UILayout.\n# obj - Blender object.\ndef cockpit_layout(self,obj):\n layout = self.layout\n row = layout.row()\n row.prop(obj.xplane,'panel',text='Part of Cockpit panel')\n\n if obj.xplane.panel:\n row = layout.row()\n row.prop(obj.xplane,'cockpit_region',text=\"Cockpit region\")\n\n# Function: manipulator_layout\n# Draws the UI layout for manipulator settings.\n#\n# Parameters:\n# UILayout self - Instance of current UILayout.\n# obj - Blender object.\ndef manipulator_layout(self,obj):\n layout = self.layout\n row = layout.row()\n row.prop(obj.xplane.manip,'enabled',text='Manipulator')\n\n if obj.xplane.manip.enabled:\n box = layout.box()\n box.prop(obj.xplane.manip,'type',text=\"Type\")\n\n type = obj.xplane.manip.type\n \n box.prop(obj.xplane.manip,'cursor',text=\"Cursor\")\n box.prop(obj.xplane.manip,'tooltip',text=\"Tooltip\")\n\n if type!='drag_xy':\n box.prop(obj.xplane.manip,'dataref1',text=\"Dataref\")\n box.operator('xplane.dataref_search',text=\"Search dataref\",emboss=True,icon=\"VIEWZOOM\")\n else:\n box.prop(obj.xplane.manip,'dataref1',text=\"Dataref 1\")\n box.prop(obj.xplane.manip,'dataref2',text=\"Dataref 2\")\n box.operator('xplane.dataref_search',text=\"Search dataref\",emboss=True,icon=\"VIEWZOOM\")\n\n # drag axis lenghts\n if type in ('drag_xy','drag_axis','command_axis'):\n box.prop(obj.xplane.manip,'dx',text=\"dx\")\n box.prop(obj.xplane.manip,'dy',text=\"dy\")\n\n if type in('drag_axis','command_axis'):\n box.prop(obj.xplane.manip,'dz',text=\"dz\")\n\n elif type == 'drag_axis_pix':\n box.prop(obj.xplane.manip,'dx',text=\"dx\")\n box.prop(obj.xplane.manip, 'step', text=\"Step\")\n box.prop(obj.xplane.manip, 'exp', text=\"Exp\")\n\n # values\n if type=='drag_xy':\n box.prop(obj.xplane.manip,'v1_min',text=\"v1 min\")\n box.prop(obj.xplane.manip,'v1_max',text=\"v1 max\")\n box.prop(obj.xplane.manip,'v2_min',text=\"v2 min\")\n box.prop(obj.xplane.manip,'v2_max',text=\"v2 max\")\n elif type=='drag_axis' or type == 'drag_axis_pix':\n box.prop(obj.xplane.manip,'v1',text=\"v1\")\n box.prop(obj.xplane.manip,'v2',text=\"v2\")\n elif type=='command':\n box.prop(obj.xplane.manip,'command',text=\"Command\")\n elif type=='command_axis':\n box.prop(obj.xplane.manip,'positive_command',text=\"Pos. command\")\n box.prop(obj.xplane.manip,'negative_command',text=\"Neg. 
command\")\n elif type=='push':\n box.prop(obj.xplane.manip,'v_down',text=\"v down\")\n box.prop(obj.xplane.manip,'v_up',text=\"v up\")\n elif type=='radio':\n box.prop(obj.xplane.manip,'v_down',text=\"v down\")\n elif type=='toggle':\n box.prop(obj.xplane.manip,'v_on',text=\"v On\")\n box.prop(obj.xplane.manip,'v_off',text=\"v Off\")\n elif type in ('delta','wrap'):\n box.prop(obj.xplane.manip,'v_down',text=\"v down\")\n box.prop(obj.xplane.manip,'v_hold',text=\"v hold\")\n box.prop(obj.xplane.manip,'v1_min',text=\"v min\")\n box.prop(obj.xplane.manip,'v1_max',text=\"v max\")\n\n# Function: conditions_layout\n# Draws the UI layout for conditions.\n#\n# Parameters:\n# UILayout self - Instance of current UILayout.\n# obj - Blender object.\n# type - object type\ndef conditions_layout(self, obj, type):\n layout = self.layout\n\n type = type.lower()\n\n # regular attributes\n row = layout.row()\n row.label(\"Conditions\")\n row.operator('object.add_xplane_' + type + '_condition', text=\"Add Condition\")\n box = layout.box()\n for i, attr in enumerate(obj.xplane.conditions):\n subbox = box.box()\n subrow = subbox.row()\n subrow.prop(attr,\"variable\")\n subrow.operator('object.remove_xplane_' + type + '_condition',text=\"\",emboss=False,icon=\"X\").index = i\n subrow = subbox.row()\n subrow.prop(attr,\"value\")\n\n# Function: lod_layout\n# Draws the UI for Levels of detail\n#\n# Parameters:\n# UILayout self - Instance of current UILayout.\n# obj - Blender object.\ndef lod_layout(self,obj):\n layout = self.layout\n row = layout.row()\n row.prop(obj.xplane,\"lod\",text=\"LOD\")\n\n# Function: weight_layout\n# Draws the UI for Object weight\n#\n# Parameters:\n# UILayout self - Instance of current UILayout.\n# obj - Blender object.\ndef weight_layout(self,obj):\n layout = self.layout\n row = layout.row()\n row.prop(obj.xplane,'override_weight')\n if obj.xplane.override_weight:\n row.prop(obj.xplane,'weight')\n\n# Function: parseDatarefs\n# Parses the DataRefs.txt file which is located within the io_xplane2blender addon directory and stores results in a list.\n# This list should later be used to help search for datarefs with an autocomplete field.\n#def parseDatarefs():\n# import os\n# search_data = []\n# filePath = os.path.dirname(__file__)+'/DataRefs.txt'\n# if os.path.exists(filePath):\n# try:\n# file = open(filePath,'r')\n# i = 0\n# for line in file:\n# if i>1:\n# parts = line.split('\\t')\n# if (len(parts)>1 and parts[1] in ('float','int')):\n# search_data.append(parts[0])\n# i+=1\n# except IOError:\n# print(IOError)\n# finally:\n# file.close()\n# return search_data\n\n\n# Function: showError\n# Draws a window displaying an error message.\n#\n# Parameters:\n# string message - The message to display.\n#\n# Todos:\n# - Not working at all.\ndef showError(message):\n bpy.ops.xplane.error(\n 'INVOKE_DEFAULT',\n msg_text=message\n )\n setErrors(True)\n\n# Function: showProgress\n# Draws a progress bar together with a message.\n#\n# Parameters:\n# float progress - value between 0 and 1 indicating the current progress.\n# string message - An aditional message to display.\n#\n# Todos:\n# - Not working at all.\ndef showProgress(progress,message):\n bpy.ops.xplane.msg(\n 'INVOKE_DEFAULT',\n msg_text='%s - %s' % (str(round(progress*100))+'%',message)\n )\n\nclass XPlaneMessage(bpy.types.Operator):\n bl_idname = 'xplane.msg'\n bl_label = 'XPlane2Blender message'\n msg_type = bpy.props.StringProperty(default='INFO')\n msg_text = bpy.props.StringProperty(default='')\n def execute(self, context):\n 
self.report(self.msg_type, self.msg_text)\n return {'FINISHED'}\n\n def invoke(self,context,event):\n wm = context.window_manager\n return wm.invoke_popup(self)\n\n def draw(self,context):\n layout = self.layout\n row = layout.row()\n row.label(text=self.msg_type+': '+self.msg_text)\n\n\nclass XPlaneError(bpy.types.Operator):\n bl_idname = 'xplane.error'\n bl_label = 'XPlane2Blender error'\n msg_type = bpy.props.StringProperty(default='ERROR')\n msg_text = bpy.props.StringProperty(default='')\n\n def execute(self, context):\n # self.report({self.msg_type}, self.msg_text)\n return {'FINISHED'}\n\n def invoke(self,context,event):\n wm = context.window_manager\n return wm.invoke_props_dialog(self)\n\n def draw(self,context):\n layout = self.layout\n row = layout.row()\n row.label(text=self.msg_type+': '+self.msg_text)\n\n\nclass XPlaneDatarefSearch(bpy.types.Operator):\n bl_label = 'XPlane dataref search'\n bl_description = 'Search for XPlane dataref'\n bl_idname = 'xplane.dataref_search'\n\n #datarefs = parseDatarefs()\n\n def execute(self,context):\n import webbrowser\n webbrowser.open('http://xplane.anzui.de/dataref-search/')\n return {'FINISHED'}\n\n# def invoke(self,context,event):\n# wm = context.window_manager\n# return wm.invoke_popup(operator=self)\n#\n# def draw(self,context):\n# layout = self.layout\n# row = layout.row()\n# row.label('Search Datarefs')\n# layout.separator()\n# box = layout.box()\n# datarefs = parseDatarefs()\n# for dataref in datarefs:\n# #subrow = box.row()\n# subrow.label(dataref)\n#\n## return {'FINISHED'}\n\n# Function: addXPlaneUI\n# Registers all UI Panels.\ndef addXPlaneUI():\n# datarefs = parseDatarefs()\n#\n# for dataref in datarefs:\n# prop = bpy.data.scenes[0].xplane_datarefs.add()\n# prop.name = dataref\n \n bpy.utils.register_class(BONE_PT_xplane)\n bpy.utils.register_class(LAMP_PT_xplane)\n bpy.utils.register_class(MATERIAL_PT_xplane)\n bpy.utils.register_class(OBJECT_PT_xplane)\n bpy.utils.register_class(SCENE_PT_xplane)\n bpy.utils.register_class(XPlaneMessage)\n bpy.utils.register_class(XPlaneError)\n bpy.utils.register_class(XPlaneDatarefSearch)\n\n# Function: removeXPlaneUI\n# Unregisters all UI Panels.\ndef removeXPlaneUI():\n bpy.utils.unregister_class(BONE_PT_xplane)\n bpy.utils.unregister_class(LAMP_PT_xplane)\n bpy.utils.unregister_class(MATERIAL_PT_xplane)\n bpy.utils.unregister_class(OBJECT_PT_xplane)\n bpy.utils.unregister_class(SCENE_PT_xplane)\n bpy.utils.unregister_class(XPlaneMessage)\n bpy.utils.unregister_class(XPlaneError)\n bpy.utils.unregister_class(XPlaneDatarefSearch)","sub_path":"blender_25/io_xplane2blender/xplane_ui.py","file_name":"xplane_ui.py","file_ext":"py","file_size_in_byte":30114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"394663178","text":"#!/usr/bin/env python\n\n#A driving pyroot program for some EventLoop Algorithms\n#author : Serhat Istin\n#contact : istin@cern.ch\n\n\nimport optparse\nimport os\nimport re #for regex\n\nimport ROOT\nfrom ROOT import gROOT\nROOT.gROOT.Macro('$ROOTCOREDIR/scripts/load_packages.C')\nfrom ROOT import * \n\n#comment this our if you wish to see root errors\n#To ignore INFO messages, 1001 is enough. To ignore WARNING it has to be above 2001. \n#To ignore ERROR it has to be above 3001. 
Check what level the printouts are and set it as appropriate.\ngROOT.ProcessLine( \"gErrorIgnoreLevel = 3001;\")\n\n\ndef xcn(_dsid_):\n SampleXsectionSvc.svc(\"XSection-MC15-13TeV.data\")\n return SampleXsectionSvc.svc().sampleXsection().getXsection(_dsid_)\ndef kfactor(_dsid_):\n SampleXsectionSvc.svc(\"XSection-MC15-13TeV.data\")\n return SampleXsectionSvc.svc().sampleXsection().getKfactor(_dsid_)\n\ndef Run(_algorithm_,_samplesdir_,_driver_,_nevents_,_nskip_,_override_,_submitdir_,_treename_):\n sh=SH.SampleHandler()\n scndir=SH.ScanDir()\n scndir.sampleDepth(0)\n #use below regex pattern to run on MC samples (as they have 6 digit DSIDs)\n #scndir.samplePattern(\"*[[:digit:]]{6}*\")#use such posix standart regex as used in boost lib\n scndir.scan(sh,_samplesdir_)\n sh.setMetaString(\"nc_tree\",_treename_)\n job=EL.Job()\n job.sampleHandler(sh)\n job.options().setInteger(\"nc_EventLoop_MaxEvents\",_nevents_)\n job.options().setInteger(\"nc_EventLoop_SkipEvents\",_nskip_)\n job.options().setInteger(\"nc_EventLoop_RemoveSubmitDir\",_override_)# be careful with this!\n \n #Loop over samples... print some info e.x cross sections ...\n print(\"***********LVLQRUNNER RUN INFO************\")\n for samp in sh:\n sampname=samp.getMetaString(\"sample_name\")\n pattern=re.findall('[0-9]{8}', sampname )\n if(len(pattern)<=0):\n pattern=re.findall('[0-9]{6}', sampname )\n dsid=pattern[0]\n print(\"Found MC DSID \"+dsid+\" with XCN=\"+str(xcn(int(dsid)))+\" (pb)\")\n sh.setMetaDouble(\"nc_xs\",xcn(int(dsid)))\n else:\n print(\"Running on Data\")\n print(\"*******************************************\") \n # You can also control these job parameters\n # \"nc_EventLoop_EventsPerWorker\" --> some number\n # \"nc_EventLoop_EventsPerWorker\" --> some number\n # \"nc_EventLoop_RemoveSubmitDir\" --> ifyou set it to a nonzero value, already existing submission directories will be removed once you submit again\n #alg1=E6AlgZqX0b()\n job.algsAdd(_algorithm_)\n dv=\"None\"\n if _driver_==\"Direct\":\n dv=EL.DirectDriver()\n elif _driver_==\"Proof\":\n dv=EL.ProofDriver()\n else:\n print(\"Driver not supported : \"+driver)\n return\n dv.submit(job, _submitdir_)\n return \n\ndef main():\n commparser = optparse.OptionParser()\n commparser.add_option('-i', '--inputsample',help=\"provided sample directory (Mandatory)\",dest=\"i\",metavar='DIR')\n commparser.add_option('-o', '--outdir',help=\"submission directory where results are written (Mandatory)\",dest=\"o\",metavar='DIR')\n commparser.add_option('-c', '--channel',help=\"Channel (EE | MM) (Mandatory)\",dest=\"c\",metavar='STRING')\n commparser.add_option('-y', '--year',help=\"Year (NUMBER/ Default is 2015+2016)\",dest=\"y\",metavar='NUMBER')\n\n commparser.add_option('-s', '--systematic',help=\"Name of the Object Systematic to run. 
Default is nominal (Optional)\",dest=\"s\",metavar='STRING')\n    commparser.add_option('-p', '--proof',help=\"use the Proof driver (optional)\",dest=\"p\",metavar='FLAG',action='store_true')\n    (options, args) = commparser.parse_args()\n    \n    if options.i is None or options.o is None or options.c is None:\n        commparser.print_help()\n        os.sys.exit(-1)\n    treename=\"nominal\"\n    if options.s is not None:\n        treename=options.s\n\n    Driver=\"Direct\"\n    if options.p:\n        Driver=\"Proof\"\n    \n\n    Alg=E6AlgZqX0b()\n    Alg.SetChannel(options.c)\n    outdir=options.o+\"_\"+options.c\n    if (options.y is not None):\n        outdir+=\"_\"+options.y\n        Alg.SetYear(int(options.y))\n    Run(Alg,options.i,Driver,-1,0,1,outdir,treename)\n    \n    #Run(E6AlgZqX0b(),options.i,Driver,-1,0,1,options.o+\"_1\",treename)\n    #Run(E6AlgZqX0b(),options.i,Driver,-1,0,1,options.o+\"_2\",treename)\n    \n    #you can define multiple runs here such as...\n    #Run(E6AlgZqX0b(),\"/home/istin/ATLAS/data/minitops/singletop/\",\"Direct\",-1,0,1,\"bokemon2\")\n    #Run(E6AlgZqX0b(),\"/home/istin/ATLAS/data/minitops/singletop/\",\"Direct\",-1,0,1,\"bokemon3\")\nif __name__ == \"__main__\":\n    main()\n","sub_path":"VLQLight/python/LVLQRunner.py","file_name":"LVLQRunner.py","file_ext":"py","file_size_in_byte":4561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"122147229","text":"# coding=UTF-8\nfrom util import ByteOrder,ByteTool\nimport re\n'''\nCannot handle packets that are too large\n'''\nINFORM = 0\nPATH = 0\nCOUNT = 1\nTEST = 0\n\n\ndef load_pcap(file_name):\n    print(\"Opening file \" + file_name)\n    f = open(file_name,'rb')\n    n = 0\n    bytes = []\n    s = f.read(1)\n    print(\"Loading......\")\n    while s:\n        bytes.append(ord(s))\n        n = n + 1\n        if n % 500 == 0:\n            print(\"%d bytes\" % n)\n        s = f.read(1)\n    print('total bytes: %d'%n)\n    f.close()\n    if(TEST == 1):\n        n = 0\n        for byte in bytes:\n            n = n + 1\n            print('0x%02x,' % (byte), end='')\n            if n % 16 == 0:\n                print('')\n            if n > 160:\n                break\n        print('')\n    return bytes\n\n\ndef build_pcap(file_name, num=1000):\n    pcap_file = PcapFile(load_pcap(file_name), num)\n    return pcap_file\n\n\nclass PcapFile(object):\n    _raw_data = None\n    _pcap_head = None\n    _packets = []\n    _packets_num = None\n\n    def __init__(self, bytes, num):\n        self._packets = []  # reset per instance so packets are not shared through the class attribute\n        if PATH == 1:\n            print(\"Pcap Initing\")\n        self._packets_num = num\n        packet_id = 0\n        self._raw_data = bytes\n        self._change_data = bytes\n        if self._pcap_head is None:\n            self.fill_pcap_head(self._change_data)\n        self._change_data = self._change_data[24:]\n        while len(self._change_data) > 0:\n            packet = Packet(self._change_data[0:1700], packet_id)\n            self._packets.append(packet)\n            self._change_data = self._change_data[packet.get_packet_head().get_real_len()+16:]\n            packet_id += 1\n            if packet_id >= self._packets_num:\n                print(\"Reached the specified number of packets\")\n                break\n        print(\"There are \" + str(len(self._packets)) + \" packets in total\")\n\n    def fill_pcap_head(self, data):\n        data = data[0:24]\n        self._pcap_head = PcapHead(data)\n\n    def get_packets(self):\n        return self._packets\n\n\nclass PcapHead(object):\n    _magic_4B = None\n    _major_2B = None\n    _minor_2B = None\n    _thisZone_4B = None\n    _sigFigs_4B = None\n    _snapLen_4B = None\n    _linkType_4B = None\n\n    def __init__(self, datas):\n        if PATH == 1:\n            print(\"  PcapHead Initing\")\n        self._magic_4B = datas[0:4]\n        if PcapHead.signature(self._magic_4B) is False:\n            raise Exception(\"Unsupported file format\")\n        self._major_2B = ByteOrder.bytes2int(datas[4:6])\n        self._minor_2B = ByteOrder.bytes2int(datas[6:8])\n        self._thisZone_4B = ByteOrder.bytes2int(datas[8:12])\n        self._sigFigs_4B = ByteOrder.bytes2int(datas[12:16])\n        
self._snapLen_4B = ByteOrder.bytes2int(datas[16:20])\n        self._linkType_4B = ByteOrder.bytes2int(datas[20:24])\n        if INFORM == 1:\n            print(self.__str__())\n\n    def __str__(self):\n        return \" order:%s major:%d minor:%d zone:%d sig:%d snap_len:%d type:%d\" % (\n            ByteOrder.order, self._major_2B, self._minor_2B, self._thisZone_4B, self._sigFigs_4B,self._snapLen_4B,\n            self._linkType_4B)\n\n    @staticmethod\n    def signature(data):\n        \"\"\"Verify the signature and determine the byte order at the same time, even though the endianness fields have not been read yet\"\"\"\n        sig = ByteOrder.bytes2int(data)\n        if sig == 0xa1b2c3d4:\n            ByteOrder.order = \"big\"\n            return True\n        elif sig == 0xd4c3b2a1:\n            ByteOrder.order = \"little\"\n            return True\n        return False\n\n\nclass Packet(object):\n    _packet_id = None\n    _packetHead_16B = None\n    _etherHead_14B = None\n    _packet_data = None\n    _IPHead = None\n    _transportHead = None\n    _application = None\n    _application_raw = None\n\n    def __init__(self, datas, id):\n        ICMP_SKIP_FLAG = 0\n        if PATH == 1:\n            print(\"  Packet Initing\")\n        self._packet_id = id\n        if COUNT == 1:\n            print(\"  Packet id \" + str(id+1))\n        self._packet_data = datas\n        self._packetHead_16B = PacketHead(self._packet_data[0:16])\n        self._packet_data = self._packet_data[16:self._packetHead_16B.get_real_len()+16]\n        self._etherHead_14B = EtherHead(self._packet_data[0:14])\n        self._packet_data = self._packet_data[14:]\n        self._IPHead = IPHead(self._packet_data)\n        self._packet_data = self._packet_data[self.get_ip_head().get_ip_len():]\n        app_len = 0\n        if self.get_ip_head().get_protocol() == 17:\n            self._transportHead = UDPHead(self._packet_data[0:8])\n            self._packet_data = self._packet_data[8:]\n            app_len = self._transportHead.get_total_len()-8\n        elif self.get_ip_head().get_protocol() == 6:\n            self._transportHead = TCPHead(self._packet_data)\n            self._packet_data = self._packet_data[self._transportHead.get_tcp_len():]\n            app_len = self._IPHead.get_total_len() - self._IPHead.get_ip_len() - self._transportHead.get_tcp_len()\n        elif self.get_ip_head().get_protocol() == 1:\n            #todo\n            ICMP_SKIP_FLAG = 1\n            # note: ICMP messages are not implemented yet\n        if (len(self._packet_data) != 0 and ICMP_SKIP_FLAG == 0):\n            self._application_raw = self._packet_data\n            matcher = ApplicationFinder(self._packet_data[0:app_len], self.get_ip_head().get_protocol(),\n                                        self._transportHead.get_ports())\n            self._application = matcher.get_application()\n\n    def get_packet_head(self):\n        return self._packetHead_16B\n\n    def get_ether_head(self):\n        return self._etherHead_14B\n\n    def get_ip_head(self):\n        return self._IPHead\n\n    def get_transport_head(self):\n        return self._transportHead\n\n    def get_application(self):\n        return self._application\n\n    def get_application_raw(self):\n        return self._application_raw\n\n\nclass PacketHead(object):\n    _secondTime_4B = None\n    _millsecondTime_4B = None\n    _captureLen_4B = None\n    _realLen_4B = None\n\n    def __init__(self,datas):\n        if PATH == 1:\n            print(\"  PacketHead Initing\")\n        self._secondTime_4B =ByteOrder.bytes2int(datas[0:4])\n        self._millsecondTime_4B = ByteOrder.bytes2int(datas[4:8])\n        self._captureLen_4B = ByteOrder.bytes2int(datas[8:12])\n        self._realLen_4B = ByteOrder.bytes2int(datas[12:16])\n        if INFORM == 1:\n            print(self.__str__())\n\n    def __str__(self):\n        return \"  second:%d millsecond:%d cap_len:%d real_len %d\" % (self._secondTime_4B, self._millsecondTime_4B,\n                                                                    self._captureLen_4B, self._realLen_4B)\n\n    def get_second_time(self):\n        return self._secondTime_4B\n\n    def get_millisecond_time(self):\n        return self._millsecondTime_4B\n\n    def get_real_len(self):\n        return self._realLen_4B\n\n\nclass EtherHead(object):\n    _dst_mac_6B = None\n    _src_mac_6B = None\n    
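# (added note) _type_2B will hold the EtherType of the frame; 0x0800 would mark an\n    # IPv4 payload, matching the commented-out check in __init__ below.\n    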
_type_2B = None\n\n    def __init__(self,datas):\n        if PATH == 1:\n            print(\"   Ether Initing\")\n        self._dst_mac_6B = int.from_bytes(datas[0:6], \"big\")\n        self._src_mac_6B = int.from_bytes(datas[6:12], \"big\")\n        self._type_2B = int.from_bytes(datas[12:14], \"big\")\n        #if hex(self._type_2B) == \"0x800\":\n        #    print(\"IPv4\")\n        if INFORM == 1:\n            print(self.__str__())\n\n    def __str__(self):\n        return\"   mac_src:%012x mac_dst:%012x type:%04x\" % (self._src_mac_6B, self._dst_mac_6B, self._type_2B)\n\n    def get_mac_src(self):\n        return self._src_mac_6B\n\n    def get_mac_dst(self):\n        return self._dst_mac_6B\n\n\nclass IPHead(object):\n    # b means bits, B means bytes; 4b is thus half a byte (a nibble)\n    _IP_version_4b = None\n    _IP_len_4b = None\n    _DS_1B = None\n    _total_len_2B = None\n    _identification_2B = None\n    _flag_3b = None\n    _deviation_13b = None\n    _TTL_1B = None\n    _protocol_1B = None\n    _check_sum_2B = None\n    _srcIp_4B = None\n    _dstIp_4B = None\n\n    def __init__(self,datas):\n        if PATH == 1:\n            print(\"   IPHead Initing\")\n        self._IP_version_4b = ByteTool.disassembleBytes(datas[0:1], 0, 3)\n        self._IP_len_4b = ByteTool.disassembleBytes(datas[0:1], 4, 7) * 4\n        self._DS_1B = int.from_bytes(datas[1:2], \"big\")\n        self._total_len_2B = int.from_bytes(datas[2:4], \"big\")\n        self._identification_2B = int.from_bytes(datas[4:6], \"big\")\n        self._flag_3b = ByteTool.disassembleBytes(datas[6:8], 0, 2)\n        self._deviation_13b = ByteTool.disassembleBytes(datas[6:8], 3, 15)\n        self._TTL_1B = int.from_bytes(datas[8:9], \"big\")\n        self._protocol_1B = int.from_bytes(datas[9:10], \"big\")\n        self._check_sum_2B = int.from_bytes(datas[10:12], \"big\")\n        self._srcIp_4B = int.from_bytes(datas[12:16], \"big\")\n        self._dstIp_4B = int.from_bytes(datas[16:20], \"big\")\n        if INFORM == 1:\n            print(self.__str__())\n\n    def __str__(self):\n        return \"   version:%d len:%d DS:%d total_len:%d identi:%d flag:%01x deviation:%d TTL:%d protocol:%d \" \\\n               \"check_sum:%04x srcIP:%08x dstIP:%08x\" % (self._IP_version_4b, self._IP_len_4b, self._DS_1B, self._total_len_2B,\n                                                         self._identification_2B, self._flag_3b, self._deviation_13b, self._TTL_1B,\n                                                         self._protocol_1B, self._check_sum_2B, self._srcIp_4B, self._dstIp_4B)\n\n    def get_ip_version(self):\n        return self._IP_version_4b\n\n    def get_ip_len(self):\n        return self._IP_len_4b\n\n    def get_total_len(self):\n        return self._total_len_2B\n\n    def get_ttl(self):\n        return self._TTL_1B\n\n    def get_protocol(self):\n        return self._protocol_1B\n\n    def get_srcIP(self):\n        return self._srcIp_4B\n\n    def get_dstIP(self):\n        return self._dstIp_4B\n\n\nclass UDPHead(object):\n    _src_port_2B = None\n    _dst_port_2B = None\n    _UDP_len_2B = None\n    _check_sum_2B = None\n    _identify = None\n\n    def __init__(self,datas):\n        if PATH == 1:\n            print(\"   UDP Initing\")\n        self._identify = \"UDP\"\n        self._src_port_2B = int.from_bytes(datas[0:2], \"big\")\n        self._dst_port_2B = int.from_bytes(datas[2:4], \"big\")\n        self._UDP_len_2B = int.from_bytes(datas[4:6], \"big\")\n        self._check_sum_2B = int.from_bytes(datas[6:8], \"big\")\n        if INFORM == 1:\n            print(self.__str__())\n\n    def __str__(self):\n        return\"   srcPort:%d dstPort:%d UDP_Len:%d checkSum:%04x\" % (self._src_port_2B, self._dst_port_2B,\n                                                                    self._UDP_len_2B, self._check_sum_2B)\n\n    def get_identify(self):\n        return self._identify\n\n    @staticmethod\n    def get_udp_len():\n        return 8\n\n    def get_total_len(self):\n        return self._UDP_len_2B\n\n    def get_ports(self):\n        ports = [self._src_port_2B, self._dst_port_2B]\n        return ports\n\n\nclass TCPHead(object):\n    _src_port_2B = None\n    _dst_port_2B = None\n    _seq_number_4B = None\n    _ack_number_4B = None\n    
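# (added note) _TCP_len_4b caches the header length in bytes: the 4-bit data-offset\n    # field counts 32-bit words, so __init__ multiplies it by 4.\n    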
_TCP_len_4b = None\n _flag_1B = None\n _window_2B = None\n _check_sum_2B = None\n _urgent_flag_2B = None\n _option = None\n _option_len = None\n _identify = None\n\n def __init__(self,datas):\n if PATH == 1:\n print(\" TCP Initing\")\n self._identify = \"TCP\"\n self._src_port_2B = int.from_bytes(datas[0:2], \"big\")\n self._dst_port_2B = int.from_bytes(datas[2:4], \"big\")\n self._seq_number_4B = int.from_bytes(datas[4:8], \"big\")\n self._ack_number_4B = int.from_bytes(datas[8:12], \"big\")\n self._TCP_len_4b = ByteTool.disassembleBytes(datas[12:13], 0, 3) * 4\n self._flag_1B = int.from_bytes(datas[13:14], \"big\")\n self._window_2B = int.from_bytes(datas[14:16], \"big\")\n self._check_sum_2B = int.from_bytes(datas[16:18], \"big\")\n self._urgent_flag_2B = int.from_bytes(datas[18:20], \"big\")\n self._option_len = self._TCP_len_4b - 20\n self._option = int.from_bytes(datas[20:20+self._option_len], \"big\")\n if INFORM == 1:\n print(self.__str__())\n\n def __str__(self):\n return(\" src_port:%d dst_port:%d seq_num:%d ack_num:%d Tcp_len:%d flag:%02x win:%d check_sum:%d \"\n \"urgent_flag:%d option_len:%d option:%x\") % (self._src_port_2B, self._dst_port_2B, self._seq_number_4B,\n self._ack_number_4B, self._TCP_len_4b, self._flag_1B,\n self._window_2B, self._check_sum_2B, self._urgent_flag_2B,\n self._option_len, self._option)\n\n def get_ports(self):\n ports = [self._src_port_2B, self._dst_port_2B]\n return ports\n\n def get_seq(self):\n return self._seq_number_4B\n\n def get_ack(self):\n return self._ack_number_4B\n\n def get_tcp_len(self):\n return self._TCP_len_4b\n\n def get_flag(self):\n return self._flag_1B\n\n def get_option_len(self):\n return self._option_len\n\n def get_option_content(self):\n return self._option\n\n def get_identify(self):\n return self._identify\n\n\nclass ApplicationFinder(object):\n _result_application = None\n _application_entity = None\n _application_detail = None\n _application_content = None\n _app_datas = None\n _transport_protocol = None\n _transport_ports = None\n complete_flag = False\n\n def __init__(self, datas, transport_protocol, transport_ports=None):\n self._app_datas = datas\n self._transport_protocol = transport_protocol\n self._transport_ports = transport_ports\n self._application_entity = self.matcher()\n\n def __str__(self):\n return \" Application:%s\" % (self._result_application)\n\n def matcher(self):\n flag = False\n application_entity = None\n if self._transport_protocol == 6:\n #http可以用80端口先一步过滤,不过滤也可以\n if self.find_port(443):\n application_entity = TLSData(self._app_datas)\n return application_entity\n flag, application_entity = self.HTTP_matcher()\n if flag is True:\n return application_entity\n if PATH == 1:\n print(\" TCP application\")\n elif self._transport_protocol == 17:\n if self.find_port(53):\n application_entity = DnsData(self._app_datas)\n self._result_application = \"DNS\"\n return application_entity\n print(\" UDP application\")\n\n def HTTP_matcher(self):\n data_string = \"\"\n flag = False\n for app_data in self._app_datas:\n data_string += chr(app_data)\n result_request = re.match('[a-zA-Z]{3,7} .* HTTP/1.[0,1]', data_string)\n result_response = re.match('HTTP/1.[0,1] [0-9]{0,3} *', data_string)\n if (result_request):\n self._result_application = \"HTTP\"\n self._application_detail = \"Request\"\n self._application_content = data_string\n flag = True\n elif(result_response):\n self._result_application = \"HTTP\"\n self._application_detail = \"Response\"\n self._application_content = data_string\n flag = True\n if 
flag is True:\n self._application_entity = HttpData(self._application_detail, self._application_content)\n return flag, self._application_entity\n\n def get_application(self):\n return self._application_entity\n\n def find_port(self, target_port):\n if self._transport_ports is not None:\n for port in self._transport_ports:\n if port == target_port:\n return True\n return False\n else:\n print(\"No port information\")\n return False\n\n\nclass HttpData(object):\n _type = None\n _method = None\n _uri = None\n _host = None\n _data = None\n _content_type = None\n _content_length = None\n _identify = None\n\n def __init__(self, type, content):\n if PATH == 1:\n print(\" HTTP Initing\")\n self._type = type\n self._data = content\n if self._type == \"Request\":\n i = 0\n j = i\n i = self.find_method_boundry(i)\n self._method = self._data[j:i]\n j = i\n i = self.find_method_boundry(i)\n self._uri = self._data[j:i]\n temp_index = self._data.find(\"Host\")\n if temp_index != -1:\n self._host = self.find_information(temp_index, 6)\n elif self._type == \"Response\":\n str_list = [\"Content-Type\", \"Content-Length\"]\n i = 0\n for meta in str_list:\n temp_index = self._data.find(meta)\n if temp_index != -1:\n if i == 0:\n self._content_type = self.find_information(temp_index, 14)\n elif i == 1:\n self._content_length = self.find_information(temp_index, 16)\n self._content_length = int(self._content_length)\n i += 1\n self._identify = \"HTTP\"\n if INFORM == 1:\n print(self.__str__())\n\n def __str__(self):\n return \" http_type:%s http_data:%s\" % (self._type, self._data)\n\n def find_method_boundry(self, i):\n for meta in self._data[i:]:\n if ord(meta) == 32:\n i += 1\n break\n i += 1\n return i\n\n def find_information(self, start_index, deviation):\n temp_str = \"\"\n for char in self._data[start_index + deviation:]:\n if char == \"\\r\":\n break\n else:\n temp_str += char\n return temp_str\n\n def get_type(self):\n return self._type\n\n def get_method(self):\n return self._method\n\n def get_uri(self):\n return self._uri\n\n def get_host(self):\n return self._host\n\n def get_content_type(self):\n return self._content_type\n\n def get_content_length(self):\n return self._content_length\n\n def get_identify(self):\n return self._identify\n\n\nclass DnsData(object):\n _raw_data = None\n _transaction_id_2B = None\n _flag_2B = None\n _question_num_2B = None\n _answer_num_2B = None\n _authority_num_2B = None\n _additional_num_2B = None\n _querys = []\n _answers = []\n _others = None\n _data = None\n _identify = None\n _direction = None\n\n def __init__(self, content):\n if PATH == 1:\n print(\" DNS Initing\")\n self._identify = \"DNS\"\n self._querys = []\n self._answers = []\n self._raw_data = content\n self._data = content\n self.fill_dns_head()\n if INFORM == 1:\n print(self.__str__())\n self._data = self._data[12:]\n for num in range(0, self._question_num_2B):\n query = self.Query(self._data)\n self._querys.append(query)\n self._data = self._data[query.get_size():]\n for num in range(0, self._answer_num_2B):\n answer = self.Answer(self._data, self)\n self._answers.append(answer)\n self._data = self._data[answer.get_size():]\n if self._flag_2B & 32768 != 0:\n self._direction = \"Response\"\n else:\n self._direction = \"Request\"\n\n self._others = self._data\n\n def fill_dns_head(self):\n self._transaction_id_2B = int.from_bytes(self._data[0:2], \"big\")\n self._flag_2B = int.from_bytes(self._data[2:4], \"big\")\n self._question_num_2B = int.from_bytes(self._data[4:6], \"big\")\n self._answer_num_2B 
= int.from_bytes(self._data[6:8], \"big\")\n self._authority_num_2B = int.from_bytes(self._data[8:10], \"big\")\n self._additional_num_2B = int.from_bytes(self._data[10:12], \"big\")\n\n def get_raw_data(self):\n return self._raw_data\n\n def get_transaction(self):\n return self._transaction_id_2B\n\n def get_direction(self):\n return self._direction\n\n def get_querys(self):\n return self._querys\n\n def get_answers(self):\n return self._answers\n\n def __str__(self):\n return(\" transaction_id:%d flag:%x question_num:%d answer_num:%d \" % (self._transaction_id_2B,\n self._flag_2B,\n self._question_num_2B,\n self._answer_num_2B))\n\n def get_identify(self):\n return self._identify\n\n class Query(object):\n _domain = None\n _type = None\n _class = None\n _size = None\n _query_content = None\n\n def __init__(self, datas):\n if PATH == 1:\n print(\" DNS_Query_Initing\")\n self._query_content = datas\n self._domain, self._size = self.domain_connect(self._query_content)\n self._type = int.from_bytes(self._query_content[self._size:self._size+2], \"big\")\n self._size += 2\n self._class = int.from_bytes(self._query_content[self._size:self._size+2], \"big\")\n self._size += 2\n if INFORM == 1:\n print(self.__str__())\n\n def domain_connect(self, datas):\n domain = \"\"\n content = datas\n size = 0\n while content[0] != 0:\n chr_num = int.from_bytes(content[0:1], \"big\")\n size += 1\n for data in content[1:1+chr_num]:\n domain += chr(data)\n domain += \".\"\n content = content[1+chr_num:]\n size += chr_num\n domain = domain[0:len(domain)-1]\n size += 1\n return domain, size\n\n def __str__(self):\n return \" domain:%s type:%x class:%x size:%d\" % (self._domain, self._type, self._class, self._size)\n\n def get_type(self):\n return self._type\n\n def get_size(self):\n return self._size\n\n def get_domain(self):\n return self._domain\n\n class Answer(object):\n _domain = None\n _type = None\n _class = None\n _TTL = None\n _data_length = None\n _datas = None\n _answer_content = None\n _size = None\n _raw = None\n _out_obj =None\n\n def __init__(self, datas, obj):\n self._size = 0\n if PATH == 1:\n print(\" DNS_Answer_Initing\")\n self._answer_content = datas\n self._out_obj = obj\n domain_type_flag = self.domain_type(self._answer_content[0:1])\n if domain_type_flag == 1:\n deviation = int.from_bytes(self._answer_content[1:2], \"big\")\n self._raw = obj.get_raw_data()\n self._domain, self._size = self.domain_connect(self._raw[deviation:])\n self._size = 2\n self._answer_content = self._answer_content[2:]\n self._type = int.from_bytes(self._answer_content[0:2], \"big\")\n self._class = int.from_bytes(self._answer_content[2:4], \"big\")\n self._TTL = int.from_bytes(self._answer_content[4:8], \"big\")\n self._data_length = int.from_bytes(self._answer_content[8:10], \"big\")\n self._datas = int.from_bytes(self._answer_content[10:10+self._data_length], \"big\")\n self._size += 10\n self._size += self._data_length\n if INFORM == 1:\n print(self.__str__())\n\n def domain_connect(self, content_out):\n domain = \"\"\n content = content_out\n size = 0\n while content[0] != 0:\n chr_num = int.from_bytes(content[0:1], \"big\")\n size += 1\n if chr_num != 192:\n for data in content[1:1+chr_num]:\n domain += chr(data)\n domain += \".\"\n content = content[1+chr_num:]\n size += chr_num\n else:\n content = self.get_raw()[content[1]:]\n domain = domain[0:len(domain) - 1]\n size += 1\n return domain, size\n\n def domain_type(self, first_byte):\n if(int.from_bytes(first_byte, \"big\") == 192):\n return 1\n else:\n 
return 0\n\n def get_size(self):\n return self._size\n\n def get_raw(self):\n return self._raw\n\n def get_type(self):\n return self._type\n\n def get_domain(self):\n return self._domain\n\n def get_datas(self):\n return self._datas\n\n def __str__(self):\n return(\" domain:%s type:%d class:%d TTL:%d data_length:%d datas:%x size:%d \" % (self._domain,\n self._type,\n self._class, self._TTL,\n self._data_length,\n self._datas, self._size))\n\n\nclass TLSData(object):\n _data = None\n _identify = None\n\n def __init__(self, content):\n if PATH == 1:\n print(\" TLS Initing\")\n self._data = content\n self._identify = \"TLS\"\n if INFORM == 1:\n print(self.__str__())\n\n def __str__(self):\n return \" TLS\"\n\n def get_identify(self):\n return self._identify\n\n\n\n","sub_path":"Virtual_Analysis_0.3.2/Pcap_class.py","file_name":"Pcap_class.py","file_ext":"py","file_size_in_byte":25738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"104491838","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Snapshot of Genre Detection \n\n# In[49]:\n\n\nimport pandas as pd\n\nfrom sklearn.externals import joblib\nimport sklearn as skl\nimport sklearn.utils, sklearn.preprocessing, sklearn.decomposition, sklearn.svm\nimport matplotlib.pyplot as plt\nget_ipython().run_line_magic('matplotlib', 'inline')\n\nimport numpy as np\nimport pandas as pd\n\nimport librosa\nimport librosa.display\nimport matplotlib.pyplot as plt\n\n\n# In[50]:\n\n\ndict_genres = {'Electronic':0, 'Experimental':1, 'Folk':2, 'Hip-Hop':3, \n 'Instrumental':4,'International':5, 'Pop' :6, 'Rock': 7 }\n\nreverse_map = {v: k for k, v in dict_genres.items()}\nprint(reverse_map)\n\n\n# # Load the test data Set\n\n# In[51]:\n\n\n\ndf_test = pd.read_csv(\"test_file\", header = [0,1,2], skip_blank_lines = True)\ndf_test.rename( columns={'Unnamed: 0_level_0':'', 'Unnamed: 0_level_1':'', 'Unnamed: 1_level_2':'', \n 'Unnamed: 2_level_2':'', 'Unnamed: 3_level_2':'','Unnamed: 4_level_1':'',\n 'Unnamed: 4_level_2':''}, inplace=True )\n\ndf_test.head()\n\n\n# # LOAD MFCC \n\n# In[52]:\n\n\nimport sklearn as skl\n\nnpzfile_test = np.load(\"mfcc_test_.npz\")\nprint(npzfile_test.files)\nX_test_mfcc = npzfile_test['x_test']\ny_test_mfcc = npzfile_test['y_test']\nprint(X_test_mfcc.shape, y_test_mfcc.shape)\ny_test_mfcc[171]\n\n\n# # Load and Prepare Mel-Spectrogram\n\n# In[53]:\n\n\n#preparing the dataset\nnpzfile = np.load('test_arr_.npz')\nX_test = npzfile['arr_0']\ny_test = npzfile['arr_1']\ny_test -= 1\n\n# Convert a dB-scale spectrogram to a power spectrogram.\nX_test_raw = librosa.core.db_to_power(X_test, ref=1.0)\n#converting it to log scaled \nX_test = np.log(X_test_raw)\n\n\n# # Print Spectrogram / Play Music \n\n# In[58]:\n\n\ndef maxim(lastlayer):\n ind_ = []\n for layer_wise in lastlayer: \n ind = np.argpartition(layer_wise, -2)[-2:]\n ind_.append(list(ind[np.argsort(layer_wise[ind])[::-1]]))\n \n return ind_\n\ndef maxim_(lastlayer):\n ind_ = []\n for layer_wise in lastlayer: \n ind = np.argpartition(layer_wise, -2)[-2:]\n ind_.append(list(ind[np.argsort(layer_wise[ind])[::-1]]))\n \n return ind_\n\ndef get_audio_path(audio_dir, track_id):\n tid_str = '{:06d}'.format(track_id)\n a = os.path.join(audio_dir, tid_str[:3], tid_str + '.mp3')\n a = a.replace(\"\\\\\", \"/\")\n return a\n\ndef play_music(track_id):\n filename = get_audio_path(AUDIO_DIR, track_id)\n display(ipd.Audio(filename))\n \n\n\n# In[60]:\n\n\nAUDIO_DIR = 'audio_files/fma_small'\na = get_audio_path(AUDIO_DIR, 
1)\nplay_music(3624)\n\n\n# In[61]:\n\n\nnum = 15\nspectogram = X_test[num]\ngenre = y_test[num]\nprint(reverse_map[genre])\nplt.figure(figsize=(10, 5))\nlibrosa.display.specshow(spectogram.T, y_axis='mel', x_axis='time')\nplt.colorbar(format='%+2.0f dB')\nplt.title('Test Melspectogram')\nplt.tight_layout()\n\n\n\n# # CNN RNN \n# \n# \n# \n\n# In[62]:\n\n\n#inputting the CNN-RNN Model\ndef predict_cnnrnn(music_file): \n \n weights_path = 'models/parallel/weights_.best.h5'\n model_cnnrnn = load_model(weights_path)\n music_file_expanded_ = np.expand_dims(music_file, axis=0)\n music_file_expanded = np.expand_dims(music_file_expanded_, axis = -1)\n return model_cnnrnn.predict(music_file_expanded)\n \n\n\n# In[63]:\n\n\npredict_pa = predict_cnnrnn(X_test[num])\npredict_pa\n\n\n# In[64]:\n\n\na = maxim(predict_pa)\nname_top_pa = [[reverse_map[b] for b in i] for i in a]\nname_top_pa\n\n\n# # CRNN\n\n# In[65]:\n\n\n#inputting CRNN Model\ndef predict_crnn(music_file):\n weights_path = 'models/crnn/weights.best.h5'\n model_crnn = load_model(weights_path)\n music_file_expanded_ = np.expand_dims(music_file, axis=0)\n return model_crnn.predict(music_file_expanded_)\n \n\n\n# In[66]:\n\n\npredict_crnn = predict_crnn(X_test[num])\npredict_crnn\n\n\n# In[67]:\n\n\na = maxim(predict_crnn)\nname_top_crnn = [[reverse_map[b] for b in i] for i in a]\nname_top_crnn\n\n\n# # MFCC \n\n# In[68]:\n\n\ndef predict_mfccsvc(music_file):\n filename = 'models/SVC.sav'\n loaded_model_mfcc = joblib.load(filename)\n music_file_expanded_ = np.expand_dims(music_file, axis=0)\n\n return loaded_model_mfcc.predict(music_file_expanded_)\n \n\n\n# In[69]:\n\n\npredict_mfcc = list(predict_mfccsvc(X_test_mfcc[num]))\npredict_mfcc\n\n\n# In[46]:\n\n\nname_top_mfcc = [reverse_map[b] for b in predict_mfcc]\nname_top_mfcc\n\n\n# # Combining all the models! 
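# --- Illustrative sketch, not one of the notebook cells above ---
# maxim() pulls the two highest-scoring classes per row with np.argpartition
# and then orders just those two by score, which is cheaper than a full sort.
# A standalone check of that pattern on toy data:
import numpy as np

scores = np.array([0.1, 0.5, 0.2, 0.9])
ind = np.argpartition(scores, -2)[-2:]        # indices of the top two, unordered
top2 = ind[np.argsort(scores[ind])[::-1]]     # reorder best-first
assert list(top2) == [3, 1]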
\n\n# In[47]:\n\n\ndef huristics(model_one, model_two, model_three): \n \n if model_one == model_two or model_one == model_three :\n return(model_one)\n elif model_two == model_three:\n return(model_two)\n \n elif model_one == \"Electronic\" or model_one == \"Experimental\" or model_one == \"Instrumental\": #parellel\n return model_one\n elif model_two == \"Folk\" or model_two == \"Rock\": #recurrent\n return model_two\n else:\n return model_three \n \n \n\n\n# In[48]:\n\n\nhuristics(name_top_pa[0][0], name_top_crnn[0][0], name_top_mfcc)\n\n","sub_path":"htmlfiles/combination_rock (File 7).py","file_name":"combination_rock (File 7).py","file_ext":"py","file_size_in_byte":4893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"442758218","text":"#!/usr/bin/env python\r\n# encoding: utf-8\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport logging\r\nimport functools\r\n\r\nimport torch\r\nimport torch.nn as nn\r\n\r\ndef load_parameters(model, src_state_dict):\r\n logging.info('Loading Parameters...')\r\n if isinstance(src_state_dict, str):\r\n src_state_dict = torch.load(\r\n src_state_dict, map_location=lambda storage, loc: storage)\r\n dst_state_dict = model.state_dict()\r\n for k in dst_state_dict:\r\n if k in src_state_dict:\r\n if src_state_dict[k].size() == dst_state_dict[k].size():\r\n # logging.info('{}: Loaded.'.format(k))\r\n dst_state_dict[k] = src_state_dict[k]\r\n else:\r\n logging.warning('{}: Ignored due to shapes.'.format(k))\r\n else:\r\n logging.warning('{}: Ignored due to missing.'.format(k))\r\n model.load_state_dict(dst_state_dict)\r\n\r\n\r\ndef get_num_parameters(net):\r\n parameters = net.state_dict()\r\n return functools.reduce(lambda x, y: x + y,\r\n [parameters[x].numel() for x in parameters])\r\n\r\n\r\ndef get_num_flops(net, x):\r\n def forward_hook(m, input, output):\r\n if type(m) == nn.Linear:\r\n output_size = torch.tensor(output[0].shape)\r\n flops = m.weight.numel()\r\n if m.bias is not None:\r\n flops += m.bias.numel()\r\n m.flops = flops\r\n return\r\n if type(m) in [\r\n nn.Conv1d, nn.Conv2d, nn.Conv3d, nn.ConvTranspose1d,\r\n nn.ConvTranspose2d, nn.ConvTranspose3d\r\n ]:\r\n output_size = torch.tensor(output[0].shape)\r\n flops = m.weight.numel() * output_size[1:].prod() / m.groups\r\n if m.bias is not None:\r\n flops += m.bias.numel() * output_size[1:].prod()\r\n m.flops = flops\r\n return\r\n if type(m) in [\r\n nn.MaxPool1d, nn.MaxPool2d, nn.MaxPool3d, nn.AvgPool1d,\r\n nn.AvgPool2d, nn.AvgPool3d\r\n ]:\r\n output_size = torch.tensor(output[0].shape)\r\n kernel_size = m.kernel_size\r\n if type(kernel_size) not in (tuple, list):\r\n kernel_size = [kernel_size] * (len(output_size) - 1)\r\n m.flops = torch.tensor(kernel_size).prod() * output_size[1:].prod()\r\n return\r\n if type(m) in [nn.ReLU, nn.ReLU6, nn.PReLU, nn.Sigmoid]:\r\n output_size = torch.tensor(output[0].shape)\r\n m.flops = output_size.prod()\r\n return\r\n if type(m) in [\r\n nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d,\r\n nn.InstanceNorm1d, nn.InstanceNorm2d, nn.InstanceNorm3d\r\n ]:\r\n output_size = torch.tensor(output[0].shape)\r\n m.flops = output_size.prod() * 4 if m.affine else 2\r\n return\r\n\r\n assert x.shape[0] == 1\r\n handles = list(\r\n map(lambda x: x.register_forward_hook(forward_hook), net.modules()))\r\n with torch.no_grad():\r\n net(x)\r\n list(map(lambda x: x.remove(), handles))\r\n return sum([x.flops for x in net.modules() 
if hasattr(x, 'flops')])\r\n","sub_path":"mmcv/utils/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":3215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"605266987","text":"\"\"\" EngineBase: The Core Interface and implementation independent code for the\n knowledgebase engines\n\"\"\"\nimport logging as root_logger\nfrom . import Rule\nfrom . import Actions\nfrom . import Transforms\nfrom . import utils as util\nlogging = root_logger.getLogger(__name__)\n\n\n\nclass EngineBase:\n \"\"\" The base class that wme and trie versions implement the interface of \"\"\"\n\n def __init__(self, kb_constructor, path=None, init=None):\n self._knowledge_base = kb_constructor(init)\n self._rules = {}\n self._proposed_actions = []\n #to be updated with printed representations of the trie state after each action\n self._prior_states = []\n #named recall states of past tries\n self._recall_states = []\n #Registered custom actions\n self._custom_actions = {}\n if path is not None:\n self.load_file(path)\n\n\n def load_file(self, filename):\n \"\"\" Load a file spec for the facts / rules for this engine \"\"\"\n #pylint: disable=unused-argument,no-self-use\n raise Exception(\"Base Engine Stub\")\n\n def _save_state(self, data):\n \"\"\" Copy the current string representation of the knowledge base,\n and any associated data \"\"\"\n self._prior_states.append((str(self._knowledge_base), data))\n\n def register_action(self, name, func):\n \"\"\" Register custom actions,\n of the form def name(engine, paramsList) \"\"\"\n assert(isinstance(name, str))\n assert(callable(func))\n if name in self._custom_actions:\n raise Exception(\"Duplicate action: {}\".format(name))\n self._custom_actions[name] = func\n\n def registerRules(self, s):\n \"\"\" Register passed in rule specifications \"\"\"\n #pylint: disable=unused-argument,no-self-use\n raise Exception(\"Base Engine Stub\")\n\n def add(self, s):\n \"\"\" Assert a new fact into the engine \"\"\"\n #pylint: disable=unused-argument,no-self-use\n raise Exception(\"Base Engine Stub\")\n\n def retract(self, s):\n \"\"\" Remove a fact from the engine \"\"\"\n #pylint: disable=unused-argument,no-self-use\n raise Exception(\"Base Engine Stub\")\n\n def clear_proposed_actions(self):\n \"\"\" Clear the list of actions proposed by rules, but which haven't been\n enacted \"\"\"\n self._proposed_actions = []\n\n def __len__(self):\n \"\"\" The number of rules in the engine \"\"\"\n return len(self._rules)\n\n def _run_rules(self, rule_locations=None, rule_tags=None, policy=None):\n \"\"\" Run all, or some, rulies of the engine, if provided a policy,\n propose actions and select from the proposals \"\"\"\n self._save_state((rule_locations, rule_tags, policy, self._proposed_actions))\n rules_to_run = []\n #Get the rules:\n if rule_locations is None and rule_tags is None:\n #run all rules\n rules_to_run = list(self._rules.values())\n #otherwise, get by trie location / tag and run those\n elif rule_tags is not None:\n assert(isinstance(rule_tags, list))\n rules_to_run = [x for x in self._rules.values() \\\n if bool(x._tags.intersection(rule_tags))]\n elif rule_locations is not None:\n raise Exception('Rule Location Running is not implemented yet')\n\n should_propose_rules = policy is not None\n\n for rule in rules_to_run:\n self._run_rule(rule, propose=should_propose_rules)\n\n if should_propose_rules:\n self._perform_action_by_policy(policy)\n\n\n def _run_rule(self, rule, propose=False):\n \"\"\" 
Run an individual rule. if propose, then don't enact the results,\n merely store them for later selection \"\"\"\n assert(isinstance(rule, Rule))\n assert(rule.is_coherent())\n logging.info(\"Running Rule: {}\".format(rule._name))\n result = self.query(rule._query)\n if not bool(result):\n logging.info(\"Rule {} Failed\".format(rule._name))\n return\n\n if rule._transform is None:\n selected = result.select()\n else:\n selected = result.select(rule._transform.getSelectionBounds())\n\n transformed = []\n for data in selected:\n transformed.append(self._run_transform(data, rule._transform))\n\n for data in transformed:\n self._run_actions(data, rule, propose)\n\n\n def _run_transform(self, ctx, transform):\n \"\"\" Run modifications on the bind results of a query \"\"\"\n assert(isinstance(ctx, dict))\n assert(transform is None or isinstance(transform, Transforms.Transform))\n chosen_ctx = ctx\n if transform is None:\n return chosen_ctx\n for x in transform.components:\n #lookup op\n opFunc = Transforms.TROP_LOOKUP[x.op]\n param_length = Transforms.TROP_PARAM_LENGTHS[x.op]\n #get source\n if isinstance(x.source, util.Bind):\n source = chosen_ctx[x.source.value]\n else:\n source = x.source\n if param_length == 1:\n newVal = opFunc(source, chosen_ctx)\n elif param_length == 2:\n #get second param:\n if x.val is not None:\n value = x.val\n else:\n value = chosen_ctx[x.bind.value]\n newVal = opFunc(source, value)\n elif param_length == 3:\n if isinstance(x.bind, util.Bind):\n bindVal = chosen_ctx[x.bind.value]\n else:\n bindVal = x.bind\n newVal = opFunc(source, x.val, bindVal)\n\n #rebind or reapply\n if x.rebind is None:\n chosen_ctx[x.source.value] = newVal\n else:\n chosen_ctx[x.rebind.value] = newVal\n\n return chosen_ctx\n\n def _run_actions(self, data, ruleOrActions, propose=False):\n \"\"\" Enact, or propose, the action list or actions in a rule provided \"\"\"\n assert(isinstance(data, dict))\n assert(isinstance(ruleOrActions, (Rule, list)))\n if propose:\n self._proposed_actions.append((data, ruleOrActions))\n else:\n if isinstance(ruleOrActions, Rule):\n self._perform_actions(data, ruleOrActions._actions)\n else:\n self._perform_actions(data, ruleOrActions)\n\n def _perform_actions(self, data, actions):\n \"\"\" Actual enaction of a set of actions \"\"\"\n assert(all([isinstance(x, Actions.Action) for x in actions]))\n for x in actions:\n #lookup op\n opFunc = Actions.ACTS_LOOKUP[x._op]\n #get values from data\n values = x.get_values(data)\n #perform action op with data\n opFunc(self, values)\n\n def _perform_action_by_policy(self, policy):\n \"\"\" utilize a policy to select from proposed actions,\n then perform those actions \"\"\"\n logging.debug(\"Performing action by policy\")\n assert(callable(policy))\n selected = policy(self._proposed_actions)\n assert(isinstance(selected, list))\n assert(all([isinstance(x, tuple) for x in selected]))\n for d, r in selected:\n assert(isinstance(d, dict))\n if isinstance(r, Rule):\n self._perform_actions(d, r._actions)\n else:\n self._perform_actions(d, r)\n","sub_path":"pyRule/EngineBase.py","file_name":"EngineBase.py","file_ext":"py","file_size_in_byte":7458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"440543110","text":"import asyncio\n\nfrom user import User\n\nasync def seed():\n models, errors = await User.add([\n {\n 'id': '1',\n 'name': {\n 'first': 'paul',\n 'last': 'severance'\n }\n },\n {\n 'id': '2',\n 'name': {\n 'first': 'alice',\n 'last': 'jane'\n }\n }\n ])\n\n if 
models:\n for model in models:\n print(model)\n\n if errors:\n for error in errors:\n print(error)\n\nasync def main(loop):\n\n await User.drop()\n\n await seed()\n\n async for user in User.find():\n await user.save()\n #print(user.to_jsonapi())\n\nloop = asyncio.new_event_loop()\nasyncio.set_event_loop(loop)\nloop.run_until_complete(main(loop))\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"552249134","text":"\n\n\n\n'''\n数据来源:东方财富网-行情中心\nhttp://quote.eastmoney.com/center\n'''\n#coding=utf-8\nimport requests\nimport re\nimport pymysql\n#import pandas as pd\nimport logging\nlogging.basicConfig(level=logging.DEBUG,filename='stock_day_trade1.log',filemode='w',\n format='%(asctime)s-%(levelname)5s: %(message)s')\nwith open('stock_day_trade1.log','r') as f:\n f.read()\n print(f)\ndb = pymysql.connect(\"localhost\",\"root\",\"Zzl08382020\",\"stockdb\" )\ncursor = db.cursor()\ncount=0\n#用get方法访问服务器并提取页面数据\ndef getHtml(cmd,page):\n url = \"http://nufm.dfcfw.com/EM_Finance2014NumericApplication/JS.aspx?cb=jQuery112406115645482397511_1542356447436&type=CT&token=4f1862fc3b5e77c150a2b985b12db0fd&sty=FCOIATC&js=(%7Bdata%3A%5B(x)%5D%2CrecordsFiltered%3A(tot)%7D)&cmd=\"+cmd+\"&st=(ChangePercent)&sr=-1&p=\"+str(page)+\"&ps=20\"\n #url= \"http://nufm.dfcfw.com/EM_Finance2014NumericApplication/JS.aspx?cb=jQuery112404462275420342996_1542343049719&type=CT&token=4f1862fc3b5e77c150a2b985b12db0fd&sty=FCOIATC&js=(%7Bdata%3A%5B(x)%5D%2CrecordsFiltered%3A(tot)%7D)&cmd=C.2&st=(ChangePercent)&sr=-1&p=1&ps=20&_=1542343050897\"\n #url=\"http://www.zhihu.com/explore\"\n #print(url)\n header={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36'}\n response = requests.get(url,headers=header)\n text=response.text\n print('text:',text)\n pat = \"data:\\[(.*?)\\]\"\n page_list = re.compile(pat,re.S).findall(text)\n## data_l=page_list[0].split('\",\"')\n## for data in data_l:\n## data=data.replace('\"',\"\")\n## #print('page_list\\n',page_list[0])\n## data_list=data.split(\",\")\n## print('data_list\\n',data_list)\n## if len(data_list)>1:\n## try:\n## sql=\"insert into stock_day_trade(trade_code,stock_name,stock_id,trade_date,close_price,increase,open_price,close_yesterday,turnover_rate,P_B,high_price,low_price,trade_amount,trade_money) \\\n## values('{0}','{1}','{2}','{3}','{4}','{5}','{6}','{7}','{8}','{9}','{10}','{11}','{12}','{13}')\\\n## \".format(data_list[1],data_list[2],data_list[1],data_list[24],data_list[3],data_list[8],data_list[11],data_list[12],data_list[15],data_list[17],data_list[9],data_list[10],data_list[6],data_list[7])\n## cursor.execute(sql)\n## db.commit()\n## print('存储完成')\n## logging.info('存储完成:cmd:{},page:{},id:{},name:{}'.format(cmd,page,data_list[1],data_list[2]))\n## except Exception as err:\n## db.rollback()\n## print('存储失败:',err)\n## logging.error('存储失败:cmd:{},page:{},id:{},name:{}\\n{}\\n{}'.format(cmd,page,data_list[1],data_list[2],data_list,err))\n #print('data:',data)\n return page_list\n\n#获取单个页面股票数据\ndef getOnePageStock(cmd,page,i):\n global count\n page_list = getHtml(cmd,page)\n \n## datas = data[0].split('\",\"')\n## stocks = []\n## for i in range(len(datas)):\n## stock = datas[i].replace('\"',\"\").split(\",\")\n## stocks.append(stock)\n## #print('stock:',stock)\n print('page_list:',page_list)\n data_l=page_list[0].split('\",\"')\n for data in data_l:\n 
data=data.replace('\"',\"\")\n #print('page_list\\n',page_list[0])\n data_list=data.split(\",\")\n #print('data_list\\n',data_list)\n data_str=re.sub('-','',data_list[24][0:10])\n trade_code=data_str+data_list[1]\n print(trade_code)\n if len(data_list)>1 and data_list[3] != '-':\n try:\n sql=\"insert into stock_day_trade1(trade_code,stock_name,stock_id,trade_date,close_price,increase,open_price,close_yesterday,turnover_rate,P_E,P_B,high_price,low_price,trade_amount,trade_money,exchange) \\\n values('{0}','{1}','{2}','{3}','{4}','{5}','{6}','{7}','{8}','{9}','{10}','{11}','{12}','{13}','{14}','{15}')\\\n \".format(trade_code,data_list[2],data_list[1],data_list[24],data_list[3],data_list[8],data_list[11],data_list[12],data_list[15],data_list[16],data_list[17],data_list[9],data_list[10],data_list[6],data_list[7],i)\n cursor.execute(sql)\n db.commit()\n print('存储完成')\n print('存储完成:cmd:{},page:{},id:{},name:{}'.format(cmd,page,data_list[1],data_list[2]))\n logging.info('存储完成:cmd:{},page:{},id:{},name:{}'.format(cmd,page,data_list[1],data_list[2]))\n count += 1\n print('count:',count)\n except Exception as err:\n db.rollback()\n print('存储失败:',err)\n logging.error('存储失败:cmd:{},page:{},id:{},name:{}\\n{}\\n{}'.format(cmd,page,data_list[1],data_list[2],data_list,err))\n print('存储失败:cmd:{},page:{},id:{},name:{}\\n{}\\n{}'.format(cmd,page,data_list[1],data_list[2],data_list,err))\n## try:\n## h_table=count//400+1\n## print('h_table:',h_table,count)\n## sql=\"insert into stock_informations(stock_name,stock_id,exchange,h_table) \\\n## values('{0}','{1}','{2}','{3}')\\\n## \".format(data_list[2],data_list[1],i,h_table)\n## cursor.execute(sql)\n## db.commit()\n## print('存储完成')\n## count+=1\n## except Exception as err:\n## db.rollback()\n## print('存储失败:',err)\n return data_list\ndef to_history():\n for num in range(1,11):\n try:\n sql=\"REPLACE into stock_history_trade{0}(trade_code,stock_name,\\\n stock_id,trade_date,close_price,increase,open_price,turnover_rate,P_E,P_B,high_price,low_price,trade_amount,trade_money)\\\n select trade_code,stock_name,\\\n stock_id,trade_date,close_price,increase,open_price,turnover_rate,P_E,P_B,high_price,low_price,trade_amount,trade_money \\\n from stock_day_trade1 \\\n where stock_id in (select stock_id from stock_history_trade{0})\".format(str(num))\n cursor.execute(sql)\n db.commit()\n print('转存成功!',num)\n except Exception as err:\n db.rollback()\n print('转存失败:',err)\n logging.info('转存失败:{}'.format(err))\n return 0\ndef to_info():\n try:\n sql=\"UPDATE stock_informations I left join stock_day_trade1 D ON I.stock_id = D.stock_id \\\n set I.发行量 = (D.trade_amount / (D.turnover_rate/100))\"\n cursor.execute(sql)\n db.commit()\n print('转存fxl成功!')\n except Exception as err:\n db.rollback()\n print('转存fxl失败:',err)\n logging.info('转存fxl失败:{}'.format(err))\n return 0 \ndef main():\n try:\n sql=\"delete from stock_day_trade1\"\n cursor.execute(sql)\n db.commit()\n except Exception as err:\n db.rollback()\n print('删除失败:',err)\n return 0\n \n## cmd = {\n## \"上证A股\":\"C.2\",\n## \"上证指数\":\"C.1\",\n## \"深圳指数\":\"C.5\",\n## \"沪深A股\":\"C._A\",\n## \"深圳A股\":\"C._SZAME\",\n## \"新股\":\"C.BK05011\",\n## \"中小板\":\"C.13\",\n## \"创业板\":\"C.80\"\n## }\n cmd = {\n \"上证A股\":\"C.2\",\n \"深圳A股\":\"C._SZAME\"\n }\n \n for i in cmd.keys():\n page = 1\n stocks = getOnePageStock(cmd[i],page,i)\n #自动爬取多页,并在结束时停止\n while True: \n page +=1\n try:\n if getHtml(cmd[i],page)!= getHtml(cmd[i],page-1):\n #stocks.extend(getOnePageStock(cmd[i],page))\n getOnePageStock(cmd[i],page,i)\n 
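# --- Illustrative sketch, separate from the scraper around it ---
# getHtml() above extracts the quoted records from a JSONP-style response with
# the non-greedy pattern data:\[(.*?)\], then splits on '","' and strips the
# remaining quotes. A self-contained check of that extraction on a dummy
# payload (the payload text below is made up, not real exchange data):
import re

dummy = 'jQuery123({data:["1,600000,PingAn","2,600036,CMB"],recordsFiltered:2})'
inner = re.compile(r"data:\[(.*?)\]", re.S).findall(dummy)[0]
rows = [r.replace('"', '').split(',') for r in inner.split('","')]
assert rows[0][1] == '600000' and rows[1][2] == 'CMB'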
#print('stocks:',stocks)\n print(i+\"已加载第\"+str(page)+\"页\")\n else:\n break\n except Exception as err:\n print('get page ERROR:{}\\n{}\\n{}',format(cmd[i],page,err))\n logging.error('get page ERROR:{}\\n{}\\n{}',format(cmd[i],page,err))\n #df = pd.DataFrame(stocks)\n #提取主要数据/提取全部数据\n #df.drop([0,14,15,16,17,18,19,20,21,22,23,25],axis=1,inplace=True)\n #columns = {1:\"代码\",2:\"名称\",3:\"最新价格\",4:\"涨跌额\",5:\"涨跌幅\",6:\"成交量\",7:\"成交额\",8:\"振幅\",9:\"最高\",10:\"最低\",11:\"今开\",12:\"昨收\",13:\"量比\",24:\"时间\"}\n #df.rename(columns = columns,inplace=True)\n #df.to_excel(\"股票{}.xls\".format(i))\n #print(\"已保存\"+i+\".xls\")\n to_history()\n to_info()\nmain()\n#to_info()\n#to_history()\n","sub_path":"day_trade.py","file_name":"day_trade.py","file_ext":"py","file_size_in_byte":8779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"509795983","text":"import numpy as np\nimport pandas\nimport os\nimport shutil\nimport SimpleITK as sitk\nimport time\n\nfrom CNNModel.SuccessfulModel.ProstateSegment import ProstateSegmentationTrumpetNet\nfrom MeDIT.UsualUse import *\nfrom pretreatment.observe import show_array\nfrom pretreatment.observe import normalize\n\n\ndef get_seg(segment, image_path, save_dir):\n t2_image, _, t2 = LoadNiiData(image_path)\n\n preds, mask, mask_image = segment.Run(t2_image,store_folder=save_dir)\n\n show_data = np.concatenate((Normalize01(t2), np.clip(preds, a_min=0.0, a_max=1.0)), axis=1)\n show_roi = np.concatenate((mask, mask), axis=1)\n\n return show_data,show_roi\n\n\ndef copy_file(features,file_list,from_dir,save_dir):\n names = [x for x in file_list if (features[0] in x) and (x.endswith(features[1]))]\n for name in names:\n shutil.copy(os.path.join(from_dir, name), save_dir)\n\n\ndef get_all_seg(dir_path,save_dir_path):\n model_path = r'W:\\SuccessfulModel\\ProstateSegmentTrumpetNet'\n segment = ProstateSegmentationTrumpetNet()\n if not segment.LoadConfigAndModel(model_path):\n print('Load Failed')\n\n for casename in os.listdir(dir_path):\n print(casename)\n root = os.path.join(dir_path,casename)\n if not os.path.isdir(root):\n continue\n file_list = os.listdir(root)\n\n save_case_path = os.path.join(save_dir_path,casename)\n if not os.path.exists(save_case_path):\n os.mkdir(save_case_path)\n\n\n shutil.copy(os.path.join(root, 't2_Resize.nii'), save_case_path)\n copy_file(['dwi_','Reg_Resize.nii'],file_list,root,save_case_path)\n copy_file(['roi','_Resize.nii'],file_list,root,save_case_path)\n copy_file(['roi','.csv'],file_list,root,save_case_path)\n\n if 'prostate_roi_Resize.nii.gz' in file_list:\n shutil.copy(os.path.join(root, 'prostate_roi_Resize.nii.gz'), os.path.join(save_case_path, 'ProstateROI_TrumpetNet.nii.gz'))\n elif 'ProstateROI_Trumpet_Resize.nii.gz' in file_list:\n shutil.copy(os.path.join(root, 'ProstateROI_Trumpet_Resize.nii.gz'), os.path.join(save_case_path, 'ProstateROI_TrumpetNet.nii.gz'))\n else:\n print('no seg')\n get_seg(segment,os.path.join(root, 't2_Resize.nii'),save_case_path)\n\n\n\nif __name__ == '__main__':\n model_path = r'W:\\SuccessfulModel\\ProstateSegmentTrumpetNet'\n segment = ProstateSegmentationTrumpetNet()\n if not segment.LoadConfigAndModel(model_path):\n print('Load Failed')\n get_seg(segment,r'W:\\PrcoessedData\\PI-RADS\\2017-2018-CA_formal_GYX^gao yi xin ^^5132-+6\\t2_Resize.nii',r'W:\\PrcoessedData\\PI-RADS\\2017-2018-CA_formal_GYX^gao yi xin ^^5132-+6')\n\n # get_all_seg(r'W:\\PrcoessedData\\PCaLNI\\OneRoiFiles', 'W:\\PrcoessedData\\PI-RADS')\n # 
get_all_seg(r'W:\\PrcoessedData\\JSPH_PCa\\DWI1500', 'W:\\PrcoessedData\\PI-RADS')\n\n","sub_path":"pretreatment/prostate_segmentation.py","file_name":"prostate_segmentation.py","file_ext":"py","file_size_in_byte":2838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"435371936","text":"# a sampled GO-based goldstandard\nimport pandas as pd\ndef read_gold(gd):\n '''\n reads goldstandard file, returns dataframe\n input: gd = csv file containing goldstandard data, preferably sampled\n output: df, header = ['gene_one, 'gene_two', 'goldstandard']\n '''\n gold = pd.read_csv(gd, header = 0)\n gold = gold.rename(columns={\"gene1\": \"gene_one\", \"gene2\": \"gene_two\"}) # replace header; should be changed in generating tf_intersect\n\n return(gold)\n\n# join network (one) into that sampled goldstandard\ndef read_net_by_chunk(net):\n '''\n read network file chunk by chunk, 100000 lines for a time\n input: file path\n '''\n all_chunks = pd.read_csv(net, header = 0, chunksize = 100000) # header not the same\n return(all_chunks)\n\ndef merge_net(gold, net_chunk):\n d1 = gold.merge(net_chunk, how = 'inner', left_on = ['gene_one', 'gene_two'], right_on = ['gene_one', 'gene_two'], sort = True)\n d2 = gold.merge(net_chunk, how = 'inner', left_on = ['gene_two', 'gene_one'], right_on = ['gene_one', 'gene_two'], sort = True) # in the future sort will not be needed due to change in pandas\n total = pd.concat([d1, d2])\n total = total.drop(['gene_one_x','gene_one_y', 'gene_two_x', 'gene_two_y'], axis = 1)\n\n return(total) # save only score and goldstandard\n\ndef merge_net_with_all_chunks(gold, all_chunks):\n '''\n returns merged goldstardard and score\n '''\n n = 0\n for chunk in all_chunks:\n total = merge_net(gold, chunk)\n if n == 0: # first chunk\n all_df = total\n else:\n all_df = pd.concat([all_df, total])\n n += 1\n return(all_df)\n\n\n# discretize network score, infer LLS for each discretized network score\ndef lls_for_domain(all_df, score):\n '''\n lls calculator for domain-weighted net, using qcut, bin = 250 (optimized)\n input:\n all_df: joined dataframe with goldstandard and \"score\"\n score: the name of \"score\"; in this case: \"weighted_mutual\"\n\n '''\n #drop = ['gene_one', 'gene_two', 'goldstandard']\n #score = list(set(all_df.columns) - set(drop))[0] # context has two kinds of scores\n\n\n all_df['cut_mutual'] = pd.qcut(all_df[score], 250, duplicates = 'raise')\n\n\n\n grouped = all_df.groupby(by = ['goldstandard', 'cut_mutual']).count()['gene_one']\n grouped = grouped.fillna(0)\n lls_score = grouped[1]/grouped[0]/(grouped[1].sum()/grouped[0].sum())\n #lls.plot()\n\n return(lls_score)\n\ndef lls_for_other(all_df, score):\n \"\"\"\n lls_for_other, lls calculator for other nets that cannot be discretized using qcut\n input:\n all_df: joined dataframe with goldstandard \"score\"\n score: for refseqNet and eskapeNet, either \"nrm_mutual\" or \"mutual_info\"; for STRING:\n \"\"\"\n #drop = ['gene_one', 'gene_two', 'goldstandard']\n #score = list(set(all_df.columns) - set(drop))[0] # context has two kinds of scores\n\n\n all_df['cut_mutual'] = pd.cut(all_df[score], 300)\n\n\n\n grouped = all_df.groupby(by = ['goldstandard', 'cut_mutual']).count()['gene_one']\n grouped = grouped.fillna(0)\n lls_score = grouped[1]/grouped[0]/(grouped[1].sum()/grouped[0].sum())\n #lls.plot()\n\n return(lls_score)\n\n\nfrom scipy.stats import linregress\nimport numpy as np\n# find the smallest LLS > 3, set as score threshold\ndef 
lls_regress_thres(lls_score):\n \"\"\"\n perform linear regression to assign LLS to interactions without benchmarking data\n input: lls_score\n\n procedure: drop np.inf, -np.inf and LLS = 0\n keep only LLS > 3 as the theshold of inclusion\n regress on the remaining\n\n output:\n - true_thres: score threshold where LLS > 3; below this threshold LLS will be assigned to 0\n - slope\n - intercept\n \"\"\"\n # drop inf and 0s\n lls_score = lls_score.replace([np.inf, -np.inf, 0], np.nan)\n lls_score.dropna(inplace = True)\n\n # find the smallest interval that is > 3\n thres = next(x[0] for x in enumerate(lls_score) if x[1] > 3)\n true_thres = lls_score.index[thres-1]\n\n # drop the LLS<3 and do regression\n lls_greater_three = lls_score[thres:]\n y = lls_greater_three.values\n x = np.array([interval.mid for interval in lls_greater_three.index])\n\n slope, intercept, r_value, p_value, std_err = linregress(x,y)\n\n\n return(true_thres, slope, intercept)\n\ndef map_lls(lls_score, true_thres, slope, intercept):\n '''\n making a lls mapper based on the regression and threshold\n '''\n x = np.array([interval.mid for interval in lls_score.index])\n y = slope * x + intercept\n\n new_lls = pd.Series(index = lls_score.index, data = y)\n new_lls[:true_thres] = 0\n\n # add infintie to the right side\n\n return(new_lls)\n\ndef map_score_to_lls(all_df, new_lls, score):\n '''\n mapping the lls score (regressed) to a dataframe containing \"score\"\n - all_df: dataframe containing score\n - new_lls: score -> lls mapper series\n - score: specify the score name \"weighted_mutual\", \"nrm_mutual\", \"mutual\"\n '''\n #drop = ['gene_one', 'gene_two', 'goldstandard']\n #score = list(set(all_df.columns) - set(drop))[0]\n\n all_df['lls'] = all_df[score].map(new_lls)\n\n return(all_df)\n\n# PPV, NPV, coverage\ndef PPV_coverage(lls_thres, all_df):\n '''\n calculate PPV, coverage using different threshold of LLS cutoff\n input: lls_thres: the LLS cutoff to consider interaction as True\n all_df: dataframe containing goldstandard and lls\n '''\n all_df['ans'] = all_df['lls'].map(lambda x: True if x > lls_thres else False)\n\n # calculate coverage\n total_nodes = set(all_df['gene_one']).union(set(all_df['gene_two']))\n net = all_df.loc[all_df['ans'] == True]\n covered_nodes = set(net['gene_one']).union(set(net['gene_two']))\n coverage = len(covered_nodes)/len(total_nodes)\n\n # calculate PPV\n grouped = all_df.groupby(by = ['goldstandard', 'ans']).count()['gene_one']\n\n try:\n tp = grouped[1, True]\n fp = grouped[0, True]\n PPV = tp/(tp+fp)\n except KeyError:\n PPV = 0\n\n return(coverage, PPV)\ndef try_diff_lls_thres(all_df):\n '''\n try different LLS threshold to see the tradeoffs between coverage and PPV\n LLS threshold as df['lls'].unique values\n all_df: dataframe containing goldstandard and lls\n '''\n tradeoff = pd.DataFrame(columns = ['thres', 'coverage', 'PPV'])\n\n try_list = np.sort(all_df['lls'].unique())[1:-1]\n for t in try_list:\n cov, PPV = PPV_coverage(t, all_df)\n tradeoff.loc[t] = [t, cov, PPV]\n return(tradeoff)\n\n# map LLS_reg back to each \"whole\" network\n\ndef map_lls_to_whole_data(net, new_lls, score, true_thres, output_file, net_name):\n '''\n map LLS to whole data and save to file\n drop LLS < 3: their LLS will be zero so no information :(\n '''\n\n from networkx.convert_matrix import from_pandas_edgelist\n from networkx import write_edgelist\n\n n = 0\n\n # initiate file\n with open(output_file, 'w') as f:\n f.write('#generating from '+net + '\\n')\n\n all_chunks = read_net_by_chunk(net)\n for 
chunk in all_chunks:\n\n # drop LLS < 3 by excluding thres:\n dropped = chunk.loc[chunk[score] > true_thres.right]\n\n # map new_lls to df score\n mapped = map_score_to_lls(dropped, new_lls, score)\n print(mapped.shape)\n\n mapped.fillna(value = 2*new_lls[-1]-new_lls[-2], inplace = True) # the right bound lls may not be captured by new_lls due to sampling\n print(2*new_lls[-1]-new_lls[-2]) # the right bound lls may not be captured by new_lls due to sampling\n\n mapped = mapped.loc[mapped['lls'] >= 3]\n print(mapped.shape)\n\n mapped.rename(axis = 'columns', mapper = {'lls': net_name + '_lls'}, inplace = True)\n\n # convert to edgelist\n G = from_pandas_edgelist(mapped, source = 'gene_one', target = 'gene_two', edge_attr = net_name + '_lls')\n\n # write to file\n with open(output_file, 'ab') as f:\n write_edgelist(G, f, data = True)\n\n\n n += 1\n print('done with '+net_name)\n","sub_path":"Genome/goldstandard_pair/lls.py","file_name":"lls.py","file_ext":"py","file_size_in_byte":7959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"346382704","text":"import os\r\nimport path\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom tensorflow.python import pywrap_tensorflow\r\n\r\n\r\nprint(\" [*] Reading checkpoints...\")\r\nmodel_dir = \"%s_%s\" % (\"dncnn\", \"sigma25\")\r\ncheckpoint_dir = os.path.join('checkpoint_dncnn', model_dir)\r\n\r\nckpt = tf.train.get_checkpoint_state(checkpoint_dir)\r\nckpt_name = ckpt.model_checkpoint_path\r\nprint('ckptpath: %s' % ckpt_name)\r\nreader=pywrap_tensorflow.NewCheckpointReader(ckpt_name)\r\nvar_to_shape_map=reader.get_variable_to_shape_map()\r\n\r\ndata_print=np.array([])\r\nfor key in var_to_shape_map:\r\n print('############################################################')\r\n print('tensor_name',key)\r\n ckpt_data=np.float64(np.array(reader.get_tensor(key)))#cast list to np arrary\r\n print(ckpt_data)\r\n\r\n\r\n","sub_path":"read_checkpoint.py","file_name":"read_checkpoint.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"456723873","text":"from PyQt5 import QtCore, QtGui, QtWidgets\n\nclass Ui_Dialog(object):\n\n name = ''\n group = ''\n \n def setupUi(self, Dialog):\n Dialog.setObjectName(\"Dialog\")\n Dialog.resize(210, 131)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(Dialog.sizePolicy().hasHeightForWidth())\n Dialog.setSizePolicy(sizePolicy)\n Dialog.setSizeGripEnabled(False)\n Dialog.setModal(True)\n self.verticalLayout = QtWidgets.QVBoxLayout(Dialog)\n self.verticalLayout.setObjectName(\"verticalLayout\")\n self.label = QtWidgets.QLabel(Dialog)\n self.label.setObjectName(\"label\")\n self.verticalLayout.addWidget(self.label)\n self.lineEdit = QtWidgets.QLineEdit(Dialog)\n self.lineEdit.setObjectName(\"lineEdit\")\n self.verticalLayout.addWidget(self.lineEdit)\n self.label_2 = QtWidgets.QLabel(Dialog)\n self.label_2.setObjectName(\"label_2\")\n self.verticalLayout.addWidget(self.label_2)\n self.lineEdit_2 = QtWidgets.QLineEdit(Dialog)\n self.lineEdit_2.setObjectName(\"lineEdit_2\")\n self.verticalLayout.addWidget(self.lineEdit_2)\n self.buttonBox = QtWidgets.QDialogButtonBox(Dialog)\n self.buttonBox.setOrientation(QtCore.Qt.Horizontal)\n 
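# --- Hypothetical usage sketch, not part of the generated class above ---
# Ui_Dialog only lays out widgets; a typical driver wraps it in a QDialog,
# runs it modally, and copies the line edits into the class-level name/group
# fields after OK. Note that below only accepted is connected (rejected is
# commented out), so Cancel will not close the dialog unless it is wired the
# same way. Assumes PyQt5 is installed; ask_user is a made-up function name:
import sys
from PyQt5 import QtWidgets

def ask_user():
    app = QtWidgets.QApplication(sys.argv)  # keep a reference so the event loop exists
    dlg = QtWidgets.QDialog()
    ui = Ui_Dialog()
    ui.setupUi(dlg)
    if dlg.exec_() == QtWidgets.QDialog.Accepted:
        ui.name = ui.lineEdit.text()
        ui.group = ui.lineEdit_2.text()
    return ui.name, ui.group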
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)\n self.buttonBox.setObjectName(\"buttonBox\")\n self.verticalLayout.addWidget(self.buttonBox)\n\n self.retranslateUi(Dialog)\n self.buttonBox.accepted.connect(Dialog.accept)\n #self.buttonBox.rejected.connect(Dialog.reject)\n QtCore.QMetaObject.connectSlotsByName(Dialog)\n\n def retranslateUi(self, Dialog):\n _translate = QtCore.QCoreApplication.translate\n Dialog.setWindowTitle(_translate(\"Dialog\", \"Data\"))\n self.label.setText(_translate(\"Dialog\", \"Введите Ваше имя:\"))\n self.label_2.setText(_translate(\"Dialog\", \"Введите Вашу учебную группу:\"))\n","sub_path":"PyQt5_exercises/chapter8/diatwo.py","file_name":"diatwo.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"233232183","text":"import tensorflow as tf\nimport numpy as np\nimport time\n\n# some configs\ninput_size = 3\naction_size = 2\nmin_delta, max_delta = -1, 1\nlearning_rate_op = 0.001\nw = {} # weights\n\n# training network\nwith tf.variable_scope('prediction'):\n state_tensor = tf.placeholder('float32', [None, input_size], name='state_tensor')\n w['q_w'] = tf.get_variable('Matrix', [state_tensor.get_shape().as_list()[1], action_size], tf.float32, tf.contrib.layers.xavier_initializer(uniform=False))\n w['q_b'] = tf.get_variable('bias', [action_size], initializer=tf.contrib.layers.xavier_initializer(uniform=False))\n q = tf.nn.bias_add(tf.matmul(state_tensor, w['q_w']), w['q_b'])\n\n# weight optimizer\nwith tf.variable_scope('optimizer'):\n # tensor to hold target Q-value\n # eg, target_q_tensor=[10;11]\n target_q_tensor = tf.placeholder('float32', [None], name='target_q_tensor')\n\n # tensors for action_tensor, for action_tensor matrix and for Q-value deltas\n # eg, action_tensor=[0;1], action_one_hot=[[1,0];[0,1]], q_acted=[Q_0,Q_1]\n action_tensor = tf.placeholder('int64', [None], name='action_tensor')\n action_one_hot = tf.one_hot(action_tensor, action_size, 1.0, 0.0, name='action_one_hot')\n q_acted = tf.reduce_sum(q * action_one_hot, reduction_indices=1, name='q_acted')\n\n # delta\n delta = target_q_tensor - q_acted\n #clipped_delta = tf.clip_by_value(delta, min_delta, max_delta, name='clipped_delta')\n\n # error function\n #loss = tf.reduce_mean(tf.square(clipped_delta), name='loss')\n #loss = tf.reduce_mean(tf.select(tf.logical_and(tf.less(delta, 1),tf.greater(delta, -1)),tf.square(delta), tf.abs(delta)), name='loss')\n loss = tf.reduce_mean(tf.square(delta), name='loss')\n\n # optimizer\n # optim = tf.train.AdamOptimizer(learning_rate_op).minimize(loss)\n opt = tf.train.AdamOptimizer(learning_rate_op)\n gradients = opt.compute_gradients(loss)\n\n # clip gradient tensors\n clipped_grad = []\n for gv in gradients:\n if gv[0] is not None:\n clipped_grad.append((tf.clip_by_value(gv[0], min_delta, max_delta), gv[1]))\n\n #self.clipped_grad = tf.clip_by_value(self.gradients, self.min_delta, self.max_delta, name='clipped_grad')\n optim = opt.apply_gradients(clipped_grad)\n\n #optim = tf.train.GradientDescentOptimizer(learning_rate_op).minimize(loss)\n\nwith tf.Session() as sess:\n tf.initialize_all_variables().run()\n\n s_t = np.array([[0,0,0],[0,0,0],\\\n [1,0,0], [1,0,0],\\\n [1,0,1], [1,0,1],\\\n [1,1,0], [1,1,0]])\n action = np.array([0, 1,\\\n 0, 1,\\\n 0, 1,\\\n 0, 1])\n target_q = np.array([0, 15,\\\n 10, 25,\\\n 20, 35,\\\n -100, -85])\n\n # weights\n # 1 1\n # -11 -11\n # 1 1\n # bias\n # 0 1\n\n counter = 0\n 
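# --- Illustrative sketch, separate from the training session around it ---
# q_acted above picks Q(s, a) row-wise by multiplying the Q matrix with a
# one-hot action mask and summing over the action axis. The same selection in
# plain numpy (mirrors tf.one_hot followed by reduce_sum):
q_demo = np.array([[1.0, 5.0],
                   [3.0, 7.0]])
acts = np.array([0, 1])
mask = np.eye(q_demo.shape[1])[acts]       # [[1, 0], [0, 1]]
q_sel = (q_demo * mask).sum(axis=1)
assert list(q_sel) == [1.0, 7.0]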
while True:\n if counter % 10000 == 0:\n q_values = q.eval({state_tensor: s_t})\n #for i in range(len(s_t)):\n # print(\"q\", q_values[i])\n print(\"w\", sess.run(w['q_w']), '\\nb', sess.run(w['q_b']))\n #print(\"action_one_hot\", sess.run(action_one_hot, {target_q_tensor: target_q, action_tensor: action, state_tensor: s_t}))\n #print(\"q\", sess.run(q, {target_q_tensor: target_q, action_tensor: action, state_tensor: s_t}))\n print(\"q_acted\", sess.run(q_acted, {target_q_tensor: target_q, action_tensor: action, state_tensor: s_t}))\n #print(\"clipped_delta\", sess.run(clipped_delta, {target_q_tensor: target_q, action_tensor: action, state_tensor: s_t}))\n print(\"loss\", sess.run(loss, {target_q_tensor: target_q, action_tensor: action, state_tensor: s_t}))\n\n sess.run(optim, {target_q_tensor: target_q, action_tensor: action, state_tensor: s_t})\n\n #time.sleep(1)\n counter += 1\n","sub_path":"src/test/python/testNN.py","file_name":"testNN.py","file_ext":"py","file_size_in_byte":3911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"312455775","text":"# Copyright (c) 2013, TeamPRO and contributors\n# For license information, please see license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nfrom frappe import _\nfrom frappe.utils import flt\n\ndef execute(filters=None):\n\tif not filters: filters = {}\n\n\tcolumns = get_columns()\n\tdata = get_employees(filters)\n\treturn columns, data\n\ndef get_columns():\n\treturn [\n\t\t_(\"Employee\") + \":Link/Employee:120\", _(\"Name\") + \":Data:200\", _(\"Gender\") + \"::60\", _(\"Date of Joining\")+ \":Date:100\",\n\t\t_(\"Branch\") + \":Link/Branch:120\", _(\"Department\") + \":Link/Department:120\",\n\t\t_(\"Designation\") + \":Link/Designation:120\", _(\"Company\") + \":Link/Company:120\"\n\t]\n\ndef get_employees(filters):\n\temployees = frappe.db.sql(\"\"\"select name, employee_name, gender, date_of_joining,\n\tbranch, department, designation, company\n\tfrom `tabEmployee` \n\twhere status = 'Active' and date_of_joining between %s and %s and company = %s\"\"\",(filters[\"from_date\"],filters[\"to_date\"],filters[\"company\"]))\n\treturn employees","sub_path":"hrpro/hrpro/report/new_joinees_report/new_joinees_report.py","file_name":"new_joinees_report.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"453024651","text":"from board import *\nfrom tkinter import *\nfrom engine import *\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ntk = Tk()\ntk.title(\"Chess\")\ntk.resizable(width=False,height=False)\ntk.maxsize(2000,1000)\n# auto_play = False\n\ncanvas = Canvas(tk, width=720, height=720,bd=0, highlightthickness=0)\ntxt = StringVar()\ntabl = Label(tk,textvariable=txt)\n\nreverse_mode = False\nblack_mode = False\nB = Board() #création échéquier\nE = Engine() #creation engine\n\nE.add_nulle(B) #ajoute la premiere position à la liste pour les nulles\n\ntxt.set(str(B.pos_id))\n\nimglist = [] #liste des images à afficher (pièces)\n\nimgfile2 = \"pieces/z_case_indic.png\"\nimgitem2 = PhotoImage(file=imgfile2)\n\n\ndef affiche_position(l=[]):\n canvas.delete(\"all\") # NECESSAIRE POUR L'OPTIMISATION ! 
(sinon les images s'enpiles au fur et à mesure)\n global imglist #besoin d'etre global sinon disparition des images\n imglist = []\n\n mrgx = 40\n mrgy = 40\n cell = 80\n\n folderName=\"pieces\"\n liste=os.listdir(folderName) # =>recupere le nom de tous les fichiers d'un dossier\n for j in range(8):\n for i in range(8):\n if (i+j)%2 == 0: col = '#f6f6f6'\n else: col = '#5d8daa'\n\n if (reverse_mode and B.side2move == \"noir\") or black_mode:\n nb_id = j+1\n ltr_id = 72-i\n case_id = 63-(i+8*j)\n else:\n nb_id = 8-j\n ltr_id = 65+i\n case_id = i+8*j\n\n canvas.create_text(mrgx//2,cell*(j+1),text=str(nb_id))\n canvas.create_text(cell*(i+1),mrgy//2,text=chr(ltr_id))\n canvas.create_rectangle(mrgx+i*cell,mrgy+j*cell,mrgx+(i+1)*cell,mrgy+(j+1)*cell,fill=col)\n\n if B.history != []:\n if B.history[-1][0] == case_id or B.history[-1][1] == case_id:\n canvas.create_rectangle(mrgx+i*cell,mrgy+j*cell,mrgx+(i+1)*cell,mrgy+(j+1)*cell,fill='orange',stipple=\"gray50\")\n\n ma_piece = B.cases[case_id]\n if ma_piece.nom != ma_piece.nomPiece[0]:\n pos = ma_piece.nomPiece.index(ma_piece.nom)-1\n if ma_piece.couleur == \"noir\":\n pos += 6\n imgfile = folderName +'/'+liste[pos] ## strchemin:str, chemin d'accès à l'image\n imglist += [PhotoImage(file=imgfile)]\n canvas.create_image(mrgx+(i+0.5)*cell, mrgy+(j+0.5)*cell, image=imglist[i+8*j])\n if ma_piece.nom == \"ROI\":\n if ma_piece.couleur == \"blanc\" and B.in_check(\"blanc\"):\n canvas.create_rectangle(mrgx+i*cell,mrgy+j*cell,mrgx+(i+1)*cell,mrgy+(j+1)*cell,fill='#ff0000',stipple=\"gray50\")\n elif ma_piece.couleur == \"noir\" and B.in_check(\"noir\"):\n canvas.create_rectangle(mrgx+i*cell,mrgy+j*cell,mrgx+(i+1)*cell,mrgy+(j+1)*cell,fill='#ff0000',stipple=\"gray50\")\n else:\n imglist += [\"\"]\n if l != []: #gestion affichage coups possibles\n for pos in l:\n if (reverse_mode and B.side2move == \"noir\") or black_mode:\n posx = 7-B.COL(pos)\n posy = 7-B.ROW(pos)\n else:\n posx = B.COL(pos)\n posy = B.ROW(pos)\n canvas.create_image(mrgx+(posx+0.5)*cell, mrgy+(posy+0.5)*cell, image=imgitem2)\n\naffiche_position()\n\ndef execute_cmd():\n cmd= cmd_bar.get()\n\n global reverse_mode\n global black_mode\n\n\n if cmd == \"new\":\n E.newgame(B)\n E.add_nulle(B)\n elif cmd == \"quit\":\n tk.quit()\n elif cmd == \"undo\":\n E.undomove(B)\n elif cmd == \"eps\":\n print(E.epsilon)\n for i in range(E.epsilon):\n d = 1\n # elif cmd == \"auto\":\n # auto_play = not auto_play\n # print(\"coups automatiques après le joueur : %s\"%auto_play)\n elif cmd == \"go\":\n E.play_bot(B)\n # elif \"gog\" in cmd:\n # E.play_bot(int(cmd.split()[1]),B)\n elif cmd == \"droite\" :\n E.compteur(1)\n E.lecture(B,E.val_compteur)\n elif cmd == \"gauche\" :\n E.compteur(-1)\n E.undomove(B)\n elif \"setboard\" in cmd:\n E.setboard(B,cmd)\n elif cmd == \"getboard\":\n print(E.getboard(B))\n elif cmd == \"nulle_rep\":\n print(E.listfen)\n elif cmd == \"la_proba\":\n show_proba(E)\n elif cmd == \"eval\" :\n print(\"evaluation (pour blancs) : \" + str(B.evaluer(\"blanc\")/100))\n elif cmd == \"op\" :\n print(E.ouverture(B))\n elif cmd == \"histo\" :\n print(B.history)\n elif cmd == 'save_op':\n E.create_op(B)\n elif 'save' in cmd :\n E.save(B,cmd)\n elif 'read' in cmd :\n E.lire(B,cmd)\n elif cmd == 'transpo':\n E.use_table = not E.use_table\n print(\"table de transposition : %s\"%E.use_table)\n elif cmd == \"black\":\n reverse_mode = False\n black_mode = not black_mode\n print(\"Black mode : %s\"%black_mode)\n elif cmd == \"reverse\":\n black_mode = False\n reverse_mode = not reverse_mode\n 
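# --- Illustrative sketch, separate from the command handler around it ---
# The black/reverse display modes above rotate the board purely by index
# reflection: square k on a 0..63 board maps to 63 - k, which flips both the
# rank (k // 8) and the file (k % 8) at once. Standalone check:
def flip_square(k):
    return 63 - k

assert flip_square(0) == 63                                      # one corner to the opposite one
assert all(flip_square(flip_square(k)) == k for k in range(64))  # the flip is an involution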
print(\"Reverse mode : %s\"%reverse_mode)\n elif cmd == \"dist_edge\":\n print(\"distance du roi blanc au bord : %s\"%B.dist_roi_bord(\"blanc\"))\n elif \"dist\" in cmd:\n l = cmd.split()\n x = B.caseStr2Int(l[1])\n y = B.caseStr2Int(l[2])\n print(\"distance entre les cases {} et {} : {}\".format(l[1],l[2],B.DIST(x,y)))\n elif 'sd' in cmd:\n E.setDepth(cmd)\n elif 'perft' in cmd:\n E.perft(cmd,B)\n elif cmd == \"d_rpos\":\n print(\"ROI blanc : \"+B.caseInt2Str(B.pos_roi_b))\n print(\"ROI noir : \"+B.caseInt2Str(B.pos_roi_n))\n elif len(cmd) >= 4:\n E.usermove(B,cmd)\n\n\n affiche_position()\n #txt.set(\"Eval (côté blanc) : %s\"%(B.evaluer(\"blanc\")/100))\n txt.set(str(B.pos_id))\n cmd_bar.delete(0,\"end\")\n\n\ndef show_proba(E):\n L1 = E.la_proba(B,1000,100,7)\n y_list1 = []\n #L2 = E.la_proba(B,1000,30)\n #y_list2 = []\n L3 = E.la_proba(B,1000,50,7)\n y_list3 = []\n for i in range(100):\n y_list1 += [(L1.count(i))/(1000*100)]\n #for i in range(100):\n #y_list2 += [(L2.count(i))/(1000*30)]\n for i in range(100):\n y_list3 += [(L3.count(i))/(1000*50)]\n plt.plot(np.array(list(range(100))), np.array(y_list1),label=\"1000 parties, 100 coups\")\n plt.plot(np.array(list(range(100))), np.array(y_list3),label=\"1000 parties, 50 coups\")\n #plt.plot(np.array(list(range(100))), np.array(y_list2),label=\"1000 parties, 30 coups\")\n plt.title(\"Distribution de probabilité\")\n plt.xlabel(\"Nombre de coups possibles\")\n plt.ylabel(\"Probabilité\")\n plt.legend()\n plt.show()\n\n\n\n# gestion des touches ----------------------------------------------------------\n\ndef button_push(evt=\"\"): #se déclanche lors de l'appui sur bouton\n execute_cmd()\n\ndef on_click(evt):\n casex = (evt.x-40)//80\n casey = (evt.y-40)//80\n if -1= 4:\n # cmd_bar.delete(0,\"end\")\n if (reverse_mode and B.side2move == \"noir\") or black_mode:\n coord2case = 63-(casex+8*casey)\n else:\n coord2case = casex+8*casey\n\n c = B.coord[coord2case]\n cmd_bar.insert(\"end\",c)\n taille_texte = len(cmd_bar.get())\n if taille_texte == 2:\n liste = B.gen_moves_list()\n l2=[]\n for i in range(len(liste)):\n l = liste[i]\n if l[0] == B.caseStr2Int(cmd_bar.get()):\n if not B.domove(l[0],l[1],l[2]):\n continue\n B.undomove()\n l2 += [l[1]]\n if l2 != []:\n affiche_position(l2)\n else:\n cmd_bar.delete(0,\"end\")\n elif taille_texte >= 4:\n execute_cmd()\ndef on_click2(evt):\n if cmd_bar.get() == \"\":\n cmd_bar.insert(\"end\",\"undo\")\n execute_cmd()\ndef bot_play(evt):\n cmd_bar.delete(0,\"end\")\n cmd_bar.insert(\"end\",\"go\")\n execute_cmd()\n\ndef droite(evt):\n cmd_bar.delete(0,\"end\")\n cmd_bar.insert(\"end\",\"droite\")\n execute_cmd()\ndef gauche(evt):\n cmd_bar.delete(0,\"end\")\n cmd_bar.insert(\"end\",\"gauche\")\n execute_cmd()\n\n\n# gestion des touches ----------------------------------------------------------\ntk.bind_all('', button_push)\ntk.bind_all('<1>', on_click)\ntk.bind_all('<3>',on_click2)\n# tk.bind_all('', bot_play)\ntk.bind_all('', bot_play)\ntk.bind_all('', droite)\ntk.bind_all('', gauche)\n\nbox = Frame(tk)\ncmd_bar = Entry(box)\nbtn = Button(box, text='ENTRER', command=button_push)\n\n\n\n#Pack()\nbox.pack(expand=YES)\ncmd_bar.grid(row=0, column=0, sticky=W)\nbtn.grid(row=0, column=1, sticky=W)\ncanvas.pack()\ntabl.pack()\n#Pack()\n\n\n\n\ntk.mainloop()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"364534006","text":"import numpy as np\n\n\nclass WISGTD:\n 
\"\"\"Implements WIS-GTD(lambda) with linear function approximation.\n\n See https://armahmood.github.io/files/MS-WIS-O(n)-UAI-2015.pdf for more\n details.\n\n Args:\n num_features (int): Length of weight vectors.\n u (float): Initial value for the usage vector. Can be interpreted as\n inverse initial step size.\n eta (float): Recency-weighting factor. Can be interpreted as desired\n final step size.\n beta (float): Secondary learning rate.\n lmbda (float): Trace decay rate.\n\n Attributes:\n theta: Primary weight vector.\n w: Secondary weight vector.\n e: Eligibility trace vector.\n u: Usage vector.\n v: Usage helper vector.\n beta: Secondary learning rate.\n lmbda: Trace decay rate.\n old_gamma: Discounting parameter from the previous timestep.\n delta: TD-error of previous timestep.\n tderr_elig: delta * e for RUPEE calculations.\n \"\"\"\n\n def __init__(self,\n num_features,\n u,\n eta,\n beta,\n lmbda,\n **kwargs):\n self.e = np.zeros(num_features)\n self.theta = np.zeros(num_features)\n self.u = np.ones(num_features) * u\n self.v = np.zeros(num_features)\n self.w = np.zeros(num_features)\n\n assert beta > 0 and eta > 0 and u > 0\n\n self.beta = beta\n self.eta = eta\n self.old_lmbda = lmbda\n self.old_gamma = 0\n self.delta = 0\n self.tderr_elig = np.zeros(num_features)\n\n def update(self, phi, phi_prime, cumulant, gamma, rho, **kwargs):\n\n lmbda = self.old_lmbda # replace this when lambda changes by state\n gam_lam = self.old_lmbda * self.old_gamma\n\n phi_sq = phi * phi\n k = np.ones(phi.size) - self.eta * phi_sq\n self.u *= k\n self.u += rho * phi_sq + (rho - 1) * gam_lam * k * self.v\n\n self.v *= gam_lam * rho * k\n self.v += rho * phi_sq\n\n non_zero = self.u != 0\n alpha = np.ones(self.u.size)[non_zero]/self.u[non_zero]\n alpha[~non_zero] = 0\n\n self.delta = (cumulant + gamma * np.dot(phi_prime, self.theta) -\n np.dot(phi, self.theta))\n self.e = rho * (gam_lam * self.e + phi)\n self.tderr_elig = self.delta * self.e\n\n self.theta += alpha * (self.tderr_elig - (gamma * (1 - lmbda) *\n np.dot(self.e, self.w) *\n phi_prime))\n self.w += self.beta * (self.tderr_elig - np.dot(self.w, phi) * phi)\n\n self.old_gamma = gamma\n self.old_lmbda = lmbda\n\n # for compatibility with calculating RUPEE for control gvfs\n return phi\n\n def predict(self, phi):\n return np.dot(phi, self.theta)\n","sub_path":"src/wis_gtd.py","file_name":"wis_gtd.py","file_ext":"py","file_size_in_byte":2883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"38527643","text":"import random\ntimes = 3\nsecret = random.randint(1,10)\nprint('------------------ddddd------------------')\nguess = 0\nprint(\"werwer£º\")\nwhile (guess != secret) and (times > 0):\n temp = input()\n guess = int(temp)\n times = times - 1\n if guess == secret:\n print(\"wwwwwwwwwwwwww\")\n print(\"llllllllllllllll\")\n else:\n if guess > secret:\n print(\"dddddddddddddd\")\n else:\n print(\"xxxxxxxxxxxxxxxxxxxxxx\")\n if times > 0:\n print(\"again\", end=\" \")\n else:\n print(\"sssssssss\")\nprint(\"game over\")","sub_path":"python file/python不方便Eclipse在运行的脚本/学习代码/dd.py","file_name":"dd.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"646426275","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\nfrom sklearn.metrics import classification_report, confusion_matrix, f1_score, roc_auc_score\nfrom sklearn import svm\nfrom sklearn.svm 
import LinearSVC\n\n\nland_cover = pd.read_excel(\"readings_normalized_landcover.xlsx\", usecols = \"IS:JH\")\n\nmarsh_wren = pd.read_excel(\"readings_normalized_landcover.xlsx\", usecols = ['Marsh Wren'])\nyellow_throat = pd.read_excel(\"readings_normalized_landcover.xlsx\", usecols = ['Common Yellowthroat'])\nred_winged = pd.read_excel(\"readings_normalized_landcover.xlsx\", usecols = ['Red-winged Blackbird'])\nwestern_meadowlark = pd.read_excel(\"readings_normalized_landcover.xlsx\", usecols = ['Western Meadowlark'])\n\n# Convert values less than threshold to 0 and greater than threshold to 1 for classification\ndef convert_to_classes ( data, threshold = 0.1 ):\n data[ data > threshold ] = 1\n data[ data <= threshold ] = 0\n return np.array(data)\n\nthreshold = 0\nmarsh_wren = convert_to_classes( marsh_wren, threshold )\nyellow_throat = convert_to_classes( yellow_throat, threshold )\nred_winged = convert_to_classes( red_winged, threshold )\nwestern_meadowlark = convert_to_classes( western_meadowlark, threshold )\n\ndata = np.array( land_cover )\n\n#print (\"Marsh Wren\", np.sum(marsh_wren) )\n#print ( \"Yellow Throat\", np.sum(yellow_throat))\n#print ( \"Red Winged \", np.sum(red_winged))\nlabel = western_meadowlark\n\n# Delete columns where all readings are zero\n#idx = np.argwhere(np.all(data[..., :] == 0, axis=0))\n#data = np.delete(data, idx, axis=1)\n\ntrain_data, test_data, train_label, test_label = train_test_split( data, label, train_size = 0.5, test_size = 0.5, shuffle = True, stratify = label)\n\nn = len(test_data)\n\nvalidation_data = test_data[:n//2]\nvalidation_label = test_label[:n//2]\n\ntest_data = test_data[n//2:]\ntest_label = test_label[n//2:]\n\npositive_train = []\nnegative_train = []\n# Split train data into positive and negative to solve the problem of class imbalances\nfor i in range( len( train_data ) ):\n if train_label[i] == 1:\n positive_train.append( train_data[i] )\n else:\n negative_train.append( train_data[i] )\n\npositive_train = np.array( positive_train )\nnegative_train = np.array( negative_train )\n","sub_path":"load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"69787954","text":"# Li, Kaite\n# 1001645704\n# 2019-04-16\n# ---------#---------#---------#---------#---------#--------#\nimport sys\nimport traceback\n\nimport ply\nimport ply.yacc\nimport ply.lex\n\nfrom pathlib import Path\nfrom time import time\n\nfrom Exceptions import *\nfrom ParseTree import *\n\n# ---------#---------#---------#---------#---------#--------#\nlexer = None\nparser = None\n\n# ---------#---------#---------#---------#---------#--------#\n# Lexical analysis section\nreserved = {\n 'int': 'INT',\n 'if' : 'IF',\n 'else': 'ELSE',\n 'while': 'WHILE',\n 'write' : 'WRITE',\n 'read' : 'READ'\n}\n\ntokens = [\n 'ID', 'INT_LITERAL', 'STRING_LITERAL',\n 'PLUS', 'EQUALS', 'MINUS', 'TIMES', 'DIVIDE', 'MODULUS', 'EXPONENT',\n 'LPAREN', 'RPAREN', 'SEMICOLON', 'COLON', 'LBRACE', 'RBRACE', 'LESS', 'LESS_EQ', 'GREATER', 'GREATER_EQ',\n 'IS_EQ', 'NOT_EQ', 'AND', 'OR', 'NOT', 'COMMA',\n] + list(reserved.values())\n\n\ndef t_ID(t):\n r'[a-zA-Z_][a-zA-Z0-9_]*'\n t.type = reserved.get(t.value, 'ID')\n return t\n\n\n\n# Tokens\nt_EQUALS = r'='\nt_LPAREN = r'\\('\nt_PLUS = r'\\+'\nt_MINUS = r'-'\nt_TIMES = r'\\*'\nt_DIVIDE = r'/'\nt_MODULUS = r'%'\nt_RPAREN = r'\\)'\nt_SEMICOLON = r';'\nt_COLON = r':'\nt_EXPONENT = r'\\^'\nt_LBRACE = r'\\{'\nt_RBRACE = 
r'\\}'\nt_LESS = r'<'\nt_LESS_EQ = r'<='\nt_GREATER = r'>'\nt_GREATER_EQ = r'>='\nt_IS_EQ = r'=='\nt_NOT_EQ = r'!='\nt_AND = r'&&'\nt_OR = r'\\|\\|'\nt_NOT = r'!'\nt_COMMA = r','\n\n\ndef t_INT_LITERAL(t):\n r'\\d+'\n t.value = int(t.value)\n return t\n\ndef t_STRING_LITERAL(t):\n r'\"[^\"]*\"'\n t.value = str(t.value)\n return t\n\n# -------------------\n# Ignored characters\n# Space, form feed, carriage return, tab, vertical tab\nt_ignore = ' \\f\\r\\t\\v'\n\n\n# Eats characters from the // marker to the end of the line.\ndef t_comment(_):\n r'//[^\\n]*'\n\n\n# Keep track of what line we're on.\ndef t_newline(t):\n r'\\n+'\n t.lexer.lineno += t.value.count('\\n')\n\n\n# -------------------\ndef t_error(t):\n # Go through elaborate shennanigans to determine the column\n # at which the lexical error occurred.\n lineStart = t.lexer.lexdata.rfind('\\n', 0, t.lexer.lexpos) + 1\n column = t.lexer.lexpos - lineStart + 1\n\n msg = f'Illegal character \"{t.value[0]}\" at line {t.lexer.lineno}, column {column}.'\n\n # t.lexer.skip( 1 ) -- We used to just skip the character.\n # -- Now we throw an exception.\n\n raise LexicalError(msg)\n\n\n# ---------#---------#---------#---------#---------#--------#\n# Syntactic analysis section\n\n# -------------------\n# The start symbol.\nstart = 'program'\n\n# -------------------\n# Precedence rules for the operators\nprecedence = (\n ('right', 'EQUALS'),\n ('left', 'OR'),\n ('left', 'AND'),\n ('left', 'IS_EQ', 'NOT_EQ'),\n ('left', 'LESS', 'LESS_EQ', 'GREATER', 'GREATER_EQ'),\n ('left', 'PLUS', 'MINUS'),\n ('left', 'TIMES', 'DIVIDE', 'MODULUS'),\n ('right', 'EXPONENT'),\n ('left', 'UMINUS', 'NOT')\n)\n\n\n# -------------------\n# PROGRAM ...\n\ndef p_program(p):\n 'program : block_statement'\n p[0] = Program(p.lineno(1), p[1])\n\n\ndef p_semicolon_opt(p):\n '''semicolon_opt : epsilon\n | SEMICOLON'''\n\n\n\n# -------------------\n# STATEMENTS ...\ndef p_block_statement(p):\n '''block_statement : LBRACE statement_decl_list semicolon_opt RBRACE'''\n p[0] = Statement_Block(p.lineno(1), p[2])\n\n# Expression statement\ndef p_statement_expr(p):\n 'statement : expression'\n p[0] = Statement_Expression(p.lineno(1), p[1])\n\ndef p_statement_A(p):\n '''statement : block_statement\n | if_then_statement\n | while_statement\n | read_statement\n | write_statement'''\n p[0] = p[1]\n\ndef p_read_statement(p):\n '''read_statement : READ LPAREN lvalue lvalue_list RPAREN'''\n p[4].append(p[3])\n p[0] = read_statement(p.lineno(1), p[4])\n\ndef p_lvalue_list_A(p):\n '''lvalue_list : lvalue_list COMMA lvalue'''\n p[1].append(p[3])\n p[0] = p[1]\n\ndef p_lvalue_list_B(p):\n '''lvalue_list : epsilon'''\n p[0] = []\n\ndef p_write_statement(p):\n '''write_statement : WRITE LPAREN expr_string expr_string_list RPAREN'''\n p[4].insert(0, p[3])\n p[0] = write_statement(p.lineno(1), p[4]);\n\n\ndef p_expr_string_list_A(p):\n '''expr_string_list : expr_string_list COMMA expr_string'''\n p[1].append(p[3])\n p[0] = p[1]\n\ndef p_expr_string_list_B(p):\n '''expr_string_list : epsilon'''\n p[0] = []\n\ndef p_expr_string_B(p):\n '''expr_string : expression'''\n p[0] = p[1]\n\n\n\n# List of statements separated by semicolons\ndef p_statement_decl_list_A(p):\n '''statement_decl_list : statement_decl_list SEMICOLON statement_decl'''\n p[1].append(p[3])\n p[0] = p[1]\n\ndef p_statement_decl_list_B(p):\n 'statement_decl_list : statement_decl'\n p[0] = [p[1]]\n\ndef p_statement_decl_list_C(p):\n 'statement_decl_list : epsilon'\n p[0] = [p[1]]\n\n\ndef p_statement_decl(p) :\n '''statement_decl : 
statement\n | decl'''\n p[0] = p[1]\n\n # Declaration\ndef p_decl(p):\n 'decl : ID COLON type Initiation'\n if(p[4] == None):\n p[0] = decl_no_init(p.lineno(1), p[1], p[3], \"(VARIABLE-NO-INIT)\")\n else:\n p[0] = decl(p.lineno(1), p[1], p[3], \"(VARIABLE)\", p[4])\n\ndef p_with_initiation(p):\n 'Initiation : EQUALS expression'\n p[0] = p[2]\n\ndef p_no_initiation(p):\n 'Initiation : epsilon'\n p[0] = None\n\ndef p_if_then_statement_A(p):\n '''if_then_statement : IF expression block_statement'''\n p[0] = if_then_statement(p.lineno(1), p[2], p[3])\n\ndef p_if_then_statement_B(p):\n '''if_then_statement : IF expression block_statement ELSE block_statement'''\n p[0] = if_then_else_statement(p.lineno(1), p[2], p[3], p[5])\n\ndef p_while_statement(p):\n '''while_statement : WHILE expression block_statement'''\n p[0] = while_statement(p.lineno(1),p[2],p[3])\n\n# -------------------\n# IDENTIFIER ...\n\ndef p_identifier(p):\n 'identifier : ID'\n p[0] = Identifier(p.lineno(1), p[1])\n\ndef p_type(p):\n 'type : INT'\n p[0] = type(p.lineno(1), p[1])\n\n\n# -------------------\n# EXPRESSIONS ...\n\n# Uniary operator expression\ndef p_expression_uniop(p):\n '''expression : PLUS expression %prec UMINUS\n | MINUS expression %prec UMINUS\n | NOT expression'''\n p[0] = UnaryOp(p.lineno(2), p[1], p[2])\n\n\n# Binary operator expression\ndef p_expression_binop(p):\n '''expression : expression PLUS expression\n | expression MINUS expression\n | expression EXPONENT expression\n | expression TIMES expression\n | expression DIVIDE expression\n | expression MODULUS expression\n | expression LESS expression\n | expression LESS_EQ expression\n | expression GREATER expression\n | expression GREATER_EQ expression\n | expression IS_EQ expression\n | expression NOT_EQ expression\n | expression AND expression\n | expression OR expression\n | lvalue EQUALS expression'''\n p[0] = BinaryOp(p.lineno(1), p[2], p[1], p[3])\n\n\n# Parenthesized expression\ndef p_expression_group(p):\n 'expression : LPAREN expression RPAREN'\n p[0] = p[2]\n\n\n# Integer literal\ndef p_expression_int_literal(p):\n 'expression : INT_LITERAL'\n p[0] = Literal(p.lineno(1), 'int', p[1])\n\n# String literal\ndef p_expression_string_literal(p):\n '''expr_string : STRING_LITERAL'''\n p[0] = Literal(p.lineno(1), 'String', p[1][1:-1])\n\n# Lvlue\ndef p_lvalue(p):\n '''lvalue : identifier'''\n p[0] = lvalue(p.lineno(1), p[1])\n\n# Name\ndef p_expression_lvalue(p):\n 'expression : lvalue'\n p[0] = p[1]\n\n\n\n\n# -------------------\n# The 'empty' value. It's possible to just have an empty RHS\n# in a production, but having the non-terminal 'epsilon' makes\n# it much more obvious that the empty string is being parsed.\ndef p_epsilon(p):\n 'epsilon :'\n p[0] = None\n\n\n# -------------------\n# Gets called if an unexpected token (or the EOF) is seen during\n# a parse. 
We throw an exception\ndef p_error(p):\n msg = 'Syntax error at '\n if p is None:\n msg += 'EOF.'\n\n else:\n # Go through elaborate shennanigans to determine the column\n # at which the parse error occurred.\n lineStart = lexer.lexdata.rfind('\\n', 0, p.lexpos) + 1\n column = p.lexpos - lineStart + 1\n\n msg += f'token \"{p.value}\", line {p.lineno}, column {column}'\n\n raise SyntacticError(msg)\n\n\n# ---------#---------#---------#---------#---------#--------#\ndef _main(inputFileName):\n global lexer\n global parser\n\n begin = time()\n\n fileName = str(Path(inputFileName).name)\n parseFile = str(Path(inputFileName).with_suffix('.parse'))\n\n print(f'* Reading source file {inputFileName!r} ...')\n\n strt = time()\n with open(inputFileName, 'r') as fp:\n data = fp.read()\n\n print(f' Read succeeded. ({time() - strt:.3f}s)\\n* Beginning parse ...')\n\n try:\n strt = time()\n lexer = ply.lex.lex()\n parser = ply.yacc.yacc()\n program = parser.parse(data, tracking=True)\n\n print(f' Parse succeeded. ({time() - strt:.3f}s)\\n* Beginning parse dump to {parseFile!r} ...')\n\n strt = time()\n with open(parseFile, 'w') as fp:\n program.dump(fp=fp)\n\n print(f' Parse dumped. ({time() - strt:.3f}s)')\n\n total = time() - begin\n print(f'# Total time {total:.3f}s.\\n#----------')\n\n except LexicalError as e:\n print('Exception detected during lexical analysis.')\n print(e)\n # traceback.print_exc()\n sys.exit(1)\n\n except SyntacticError as e:\n print('Exception detected during syntactic analysis.')\n print(e)\n # traceback.print_exc()\n sys.exit(1)\n\n except:\n print('*** (Unknown) exception detected during parse/result dump.')\n traceback.print_exc()\n sys.exit(1)\n\n\n# ---------#---------#---------#\nif __name__ == '__main__':\n if len(sys.argv) > 1:\n _main(sys.argv[1])\n\n else:\n print('Input file name required.')\n\n# ---------#---------#---------#---------#---------#--------#\n","sub_path":"miniFrontEnd.py","file_name":"miniFrontEnd.py","file_ext":"py","file_size_in_byte":10142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"340369085","text":"import numpy as np\nfrom scipy.linalg import sqrtm\n\nfrom bbpipe import PipelineStage\nfrom .types import NpzFile\nfrom .foreground_loading import FGModel, FGParameters, normed_plaw\nfrom fgbuster.component_model import CMB \nfrom sacc.sacc import SACC\n\nimport emcee\n\nclass BBCompSep(PipelineStage):\n \"\"\"\n Component separation stage\n This stage does harmonic domain foreground cleaning (e.g. BICEP).\n The foreground model parameters are defined in the config.yml file. 
\n \"\"\"\n name = \"BBCompSep\"\n inputs = [('cells_coadded', SACC),('cells_noise', SACC),('cells_fiducial', SACC)]\n outputs = [('param_chains', NpzFile)]\n config_options={'likelihood_type':'h&l', 'n_iters':32, 'nwalkers':16, 'r_init':1.e-3}\n\n def setup_compsep(self):\n \"\"\"\n Pre-load the data, CMB BB power spectrum, and foreground models.\n \"\"\"\n self.parse_sacc_file()\n self.load_cmb()\n self.fg_model = FGModel(self.config)\n self.parameters = FGParameters(self.config)\n if self.use_handl:\n self.prepare_h_and_l()\n return\n\n def matrix_to_vector(self, mat):\n return mat[..., self.index_ut[0], self.index_ut[1]]\n\n def vector_to_matrix(self, vec):\n if vec.ndim == 1:\n mat = np.zeros([self.nmaps, self.nmaps])\n mat[self.index_ut] = vec\n mat = mat + mat.T - np.diag(mat.diagonal())\n elif vec.ndim==2:\n mat = np.zeros([len(vec), self.nmaps, self.nmaps])\n mat[..., self.index_ut[0], self.index_ut[1]] = vec[...,:]\n for i,m in enumerate(mat):\n mat[i] = m + m.T - np.diag(m.diagonal())\n else:\n raise ValueError(\"Input vector can only be 1- or 2-D\")\n return mat\n\n def parse_sacc_file(self):\n \"\"\"\n Reads the data in the sacc file included the power spectra, bandpasses, and window functions. \n \"\"\"\n #Decide if you're using H&L\n self.use_handl = self.config['likelihood_type'] == 'h&l'\n\n #Read data\n self.s = SACC.loadFromHDF(self.get_input('cells_coadded'))\n if self.use_handl:\n s_fid = SACC.loadFromHDF(self.get_input('cells_fiducial'), \\\n precision_filename=self.get_input('cells_coadded'))\n s_noi = SACC.loadFromHDF(self.get_input('cells_noise'), \\\n precision_filename=self.get_input('cells_coadded'))\n\n #Keep only BB measurements\n self.s.cullType(b'BB') # TODO: Modify if we want to use E\n if self.use_handl:\n s_fid.cullType(b'BB')\n s_noi.cullType(b'BB')\n self.nfreqs = len(self.s.tracers)\n self.nmaps = self.nfreqs # TODO: Modify if we want to use E\n self.index_ut = np.triu_indices(self.nmaps)\n self.ncross = (self.nmaps * (self.nmaps + 1)) // 2\n self.order = self.s.sortTracers()\n\n #Collect bandpasses\n self.bpasses = []\n self.meannu = []\n for t in self.s.tracers:\n nu = t.z\n dnu = np.zeros_like(nu);\n dnu[1:-1] = 0.5 * (nu[2:] - nu[:-2])\n dnu[0] = nu[1] - nu[0]\n dnu[-1] = nu[-1] - nu[-2]\n bnu = t.Nz\n self.bpasses.append([nu, dnu, bnu])\n self.meannu.append(np.sum(dnu*nu*bnu) / np.sum(dnu*bnu))\n\n #Get ell sampling\n self.bpw_l = self.s.binning.windows[0].ls\n _,_,_,self.ell_b,_ = self.order[0]\n self.n_bpws = len(self.ell_b)\n self.windows = np.zeros([self.ncross, self.n_bpws, len(self.bpw_l)])\n\n #Get power spectra and covariances\n v = self.s.mean.vector\n if len(v) != self.n_bpws * self.ncross:\n raise ValueError(\"C_ell vector's size is wrong\")\n cv = self.s.precision.getCovarianceMatrix()\n\n #Parse into the right ordering\n v2d = np.zeros([self.n_bpws, self.ncross])\n if self.use_handl:\n v2d_noi = np.zeros([self.n_bpws, self.ncross])\n v2d_fid = np.zeros([self.n_bpws, self.ncross])\n cv2d = np.zeros([self.n_bpws, self.ncross, self.n_bpws, self.ncross])\n self.vector_indices = self.vector_to_matrix(np.arange(self.ncross, dtype=int)).astype(int)\n self.indx = []\n for t1,t2,typ,ells,ndx in self.order:\n for b,i in enumerate(ndx):\n self.windows[self.vector_indices[t1, t2], b, :] = self.s.binning.windows[i].w\n v2d[:,self.vector_indices[t1, t2]] = v[ndx]\n if self.use_handl:\n v2d_noi[:, self.vector_indices[t1, t2]] = s_noi.mean.vector[ndx]\n v2d_fid[:, self.vector_indices[t1, t2]] = s_fid.mean.vector[ndx]\n if len(ells) != self.n_bpws:\n 
raise ValueError(\"All power spectra need to be sampled at the same ells\")\n for t1b, t2b, typb, ellsb, ndxb in self.order:\n cv2d[:, self.vector_indices[t1, t2], :, self.vector_indices[t1b, t2b]] = cv[ndx, :][:, ndxb]\n\n #Store data\n self.bbdata = self.vector_to_matrix(v2d)\n if self.use_handl:\n self.bbnoise = self.vector_to_matrix(v2d_noi)\n self.bbfiducial = self.vector_to_matrix(v2d_fid)\n self.bbcovar = cv2d.reshape([self.n_bpws * self.ncross, self.n_bpws * self.ncross])\n self.invcov = np.linalg.solve(self.bbcovar, np.identity(len(self.bbcovar)))\n return\n\n def load_cmb(self):\n \"\"\"\n Loads the CMB BB spectrum as defined in the config file. \n \"\"\"\n cmb_lensingfile = np.loadtxt(self.config['cmb_files'][0])\n cmb_bbfile = np.loadtxt(self.config['cmb_files'][1])\n \n self.cmb_ells = cmb_bbfile[:, 0]\n mask = self.cmb_ells <= self.bpw_l.max()\n self.cmb_ells = self.cmb_ells[mask] \n self.cmb_bbr = cmb_bbfile[:, 3][mask]\n self.cmb_bblensing = cmb_lensingfile[:, 3][mask]\n self.cmb_bbr -= self.cmb_bblensing\n self.get_cmb_norms()\n return\n\n def get_cmb_norms(self):\n \"\"\"\n Evaulates the CMB unit conversion over the bandpasses. \n \"\"\"\n cmb_norms = [] \n for tn in range(self.nfreqs):\n nus = self.bpasses[tn][0]\n bpass = self.bpasses[tn][1]\n dnu = self.bpasses[tn][2]\n bpass_integration = bpass * dnu\n\n cmb_thermo_units = CMB('K_RJ').eval(nus) * nus**2 \n cmb_norms.append(np.dot(bpass_integration, cmb_thermo_units))\n self.cmb_norm = np.asarray(cmb_norms)\n return\n \n def integrate_seds(self, params):\n fg_scaling = {}\n for key in self.fg_model.components:\n fg_scaling[key] = []\n\n for tn in range(self.nfreqs):\n nus = self.bpasses[tn][0]\n bpass = self.bpasses[tn][1]\n dnu = self.bpasses[tn][2]\n bpass_integration = bpass * dnu\n\n for key, component in self.fg_model.components.items(): \n conv_rj = (nus / component['nu0'])**2\n\n sed_params = [] \n for param in component['sed'].params:\n pindx = self.parameters.param_index[param]\n sed_params.append(params[pindx])\n \n fg_units = component['cmb_n0_norm'] / self.cmb_norm[tn]\n fg_sed_eval = component['sed'].eval(nus, *sed_params) * conv_rj\n fg_sed_int = np.dot(fg_sed_eval, bpass_integration) * fg_units\n fg_scaling[key].append(fg_sed_int)\n return fg_scaling\n\n def evaluate_power_spectra(self, params):\n fg_pspectra = {}\n for key, component in self.fg_model.components.items():\n pspec_params = []\n # TODO: generalize for different power spectrum models\n # should look like:\n # for param in power_spectrum_model: get param index (same as the SEDs)\n for param in component['spectrum_params']:\n pindx = self.parameters.param_index[param]\n pspec_params.append(params[pindx])\n fg_pspectra[key] = normed_plaw(self.bpw_l, *pspec_params)\n return fg_pspectra\n \n def model(self, params):\n \"\"\"\n Defines the total model and integrates over the bandpasses and windows. 
\n \"\"\"\n cmb_bmodes = params[0] * self.cmb_bbr + self.cmb_bblensing\n fg_scaling = self.integrate_seds(params)\n fg_p_spectra = self.evaluate_power_spectra(params)\n \n cls_array_list = np.zeros([self.n_bpws,self.nmaps,self.nmaps])\n for t1 in range(self.nfreqs) :\n for t2 in range(t1, self.nfreqs) :\n windows = self.windows[self.vector_indices[t1, t2]]\n\n model = cmb_bmodes.copy()\n for component in self.fg_model.components:\n sed_power_scaling = fg_scaling[component][t1] * fg_scaling[component][t2]\n p_amp = params[self.parameters.amp_index[component]]\n model += p_amp * sed_power_scaling * fg_p_spectra[component]\n \n config_component = self.config['fg_model'][component]\n if 'cross' in config_component.keys():\n epsilon = config_component['cross']['param']\n epsilon_index = self.parameters.param_index[epsilon]\n cross_name = config_component['cross']['name']\n\n cross_scaling = fg_scaling[component][t1] * fg_scaling[cross_name][t2] + \\\n fg_scaling[cross_name][t1] * fg_scaling[component][t2]\n cross_spectrum = np.sqrt(fg_p_spectra[component] * fg_p_spectra[cross_name])\n cross_amp = np.sqrt(p_amp * params[self.parameters.amp_index[cross_name]])\n\n model += params[epsilon_index] * cross_amp * cross_scaling * cross_spectrum\n \n model = np.dot(windows, model)\n cls_array_list[:, t1, t2] = model\n\n if t1 != t2:\n cls_array_list[:, t2, t1] = model\n return cls_array_list\n\n def lnpriors(self, params):\n \"\"\"\n Assign priors for emcee. \n \"\"\"\n total_prior = 0\n if params[0] < 0:\n return -np.inf\n \n for key, prior in self.parameters.priors.items():\n pval = params[self.parameters.param_index[key]]\n\n if prior[0].lower() == 'gaussian':\n mu = prior[1][0]\n sigma = prior[1][1]\n total_prior += -0.5 * (pval - mu)**2 / sigma**2\n elif prior[0].lower() == 'tophat': \n if pval < float(prior[1][0]) or pval > float(prior[1][2]):\n return -np.inf \n return total_prior\n\n def chi_sq_dx(self, params):\n \"\"\"\n Chi^2 likelihood. \n \"\"\"\n model_cls = self.model(params)\n return self.matrix_to_vector(self.bbdata - model_cls).flatten()\n\n def prepare_h_and_l(self):\n fiducial_noise = self.bbfiducial + self.bbnoise\n self.Cfl_sqrt = np.array([sqrtm(f) for f in fiducial_noise])\n self.observed_cls = self.bbdata + self.bbnoise\n return \n\n def h_and_l_dx(self, params):\n \"\"\"\n Hamimeche and Lewis likelihood. \n Taken from Cobaya written by H, L and Torrado\n See: https://github.com/CobayaSampler/cobaya/blob/master/cobaya/likelihoods/_cmblikes_prototype/_cmblikes_prototype.py\n \"\"\"\n model_cls = self.model(params)\n dx_vec = []\n for k in range(model_cls.shape[0]):\n C = model_cls[k] + self.bbnoise[k]\n X = self.h_and_l(C, self.observed_cls[k], self.Cfl_sqrt[k])\n dx = self.matrix_to_vector(X).flatten()\n dx_vec = np.concatenate([dx_vec, dx])\n return dx_vec\n\n def h_and_l(self, C, Chat, Cfl_sqrt):\n diag, U = np.linalg.eigh(C)\n rot = U.T.dot(Chat).dot(U)\n roots = np.sqrt(diag)\n for i, root in enumerate(roots):\n rot[i, :] /= root\n rot[:, i] /= root\n U.dot(rot.dot(U.T), rot)\n diag, rot = np.linalg.eigh(rot)\n diag = np.sign(diag - 1) * np.sqrt(2 * np.maximum(0, diag - np.log(diag) - 1))\n Cfl_sqrt.dot(rot, U)\n for i, d in enumerate(diag):\n rot[:, i] = U[:, i] * d\n return rot.dot(U.T)\n\n def lnprob(self, params):\n \"\"\"\n Likelihood with priors. 
\n \"\"\"\n prior = self.lnpriors(params)\n if not np.isfinite(prior):\n return -np.inf\n if self.use_handl:\n dx = self.h_and_l_dx(params)\n else:\n dx = self.chi_sq_dx(params)\n like = -0.5 * np.einsum('i, ij, j',dx,self.invcov,dx)\n return prior + like\n\n def emcee_sampler(self):\n \"\"\"\n Sample the model with MCMC. \n \"\"\"\n zmask = self.parameters.param_init == 0\n self.parameters.param_init[zmask] += 1.e-3 * np.ones_like(zmask)\n \n nwalkers = self.config['nwalkers']\n n_iters = self.config['n_iters']\n ndim = len(self.parameters.param_init)\n pos = [self.parameters.param_init * (1. + 1.e-3*np.random.randn(ndim)) for i in range(nwalkers)]\n \n sampler = emcee.EnsembleSampler(nwalkers, ndim, self.lnprob)\n sampler.run_mcmc(pos, n_iters);\n\n return sampler\n\n def make_output_dir(self):\n from datetime import datetime\n import os, errno\n from shutil import copyfile\n fmt='%Y-%m-%d-%H-%M'\n date = datetime.now().strftime(fmt)\n output_dir = self.config['save_prefix']+'_'+date\n try:\n os.makedirs(output_dir)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n copyfile(self.get_input('config'), output_dir+'/config.yml') \n return output_dir + '/'\n\n def run(self):\n self.setup_compsep()\n sampler = self.emcee_sampler()\n\n # TODO: save things correctly\n output_dir = self.make_output_dir()\n np.save(output_dir + 'chains', sampler.chain)\n np.savez(self.get_output('param_chains'), sampler.chain)\n return\n\nif __name__ == '__main__':\n cls = PipelineStage.main()\n","sub_path":"bbpower/compsep.py","file_name":"compsep.py","file_ext":"py","file_size_in_byte":14207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"163050438","text":"from django.conf import settings\nfrom django.db import models as django\nfrom django.db.models.base import ModelBase\nfrom django.db.models.manager import Manager\nfrom django.utils.timezone import now\n\nfrom .index import get_spec_key, get_stored_class_name, check_dynamic, get_dynamic_class_name, get_facade_class_name\nfrom .facade import ModelFacade\n\nimport sys\nimport importlib\nimport re\nimport copy\nimport yaml\nimport logging\n\n\nlogger = logging.getLogger(__name__)\n\n\ndjango.options.DEFAULT_NAMES += (\n 'data_name',\n 'meta_info',\n 'scope',\n 'scope_process',\n 'relation',\n 'dynamic_fields',\n 'search_fields',\n 'ordering_fields',\n 'provider_name',\n 'provider_relation',\n 'command_base'\n)\n\n\n_base_model_new = ModelBase.__new__\n\ndef _override_model_new(cls, name, bases, attrs, **kwargs):\n orig_attrs = copy.copy(attrs)\n try:\n return _base_model_new(cls, name, bases, attrs, **kwargs)\n except RuntimeError as e:\n for key, value in orig_attrs.items():\n attrs[key] = value\n\n if 'Meta' in attrs:\n attrs['Meta'].abstract = True\n else:\n attrs['Meta'] = type('Meta', (object,), {\n 'abstract': True\n })\n logger.info(\"Converting model {} to an abstract object because it is not in INSTALLED_APPS\".format(name))\n return _base_model_new(cls, name, bases, attrs, **kwargs)\n\nModelBase.__new__ = _override_model_new\n\n\ndef format_field_choices(choices):\n choice_list = []\n if isinstance(choices, (list, tuple)):\n for choice in choices:\n if isinstance(choice, (list, tuple)):\n choice_list.append(tuple(choice))\n else:\n choice_list.append((choice, choice))\n else:\n for value, label in choices.items():\n choice_list.append((value, label))\n return choice_list\n\n\ndef model_index():\n return settings.MANAGER.index\n\ndef classify_parents(parent_classes):\n map = {}\n for 
parent in parent_classes:\n try:\n key = get_spec_key(parent.__module__)\n except Exception as e:\n key = 'base'\n\n map.setdefault(key, [])\n map[key].append(parent)\n return map\n\ndef classify_model(model_class_name):\n module_name = model_index().model_class_path.get(model_class_name, None)\n if module_name:\n return get_spec_key(module_name)\n return 'unknown'\n\n\nclass DatabaseAccessError(Exception):\n pass\n\nclass FacadeNotExistsError(Exception):\n pass\n\n\nclass BaseModelMixin(django.Model):\n\n created = django.DateTimeField(null = True, editable = False)\n updated = django.DateTimeField(null = True, editable = False)\n\n class Meta:\n abstract = True\n\n\n def initialize(self, command):\n return True\n\n def save(self, *args, **kwargs):\n if self.created is None:\n self.created = now()\n self.updated = now()\n\n with self.facade.thread_lock:\n super().save(*args, **kwargs)\n\n def save_related(self, provider, relation_values = None):\n if not relation_values:\n relation_values = {}\n\n relations = self.facade.get_relations()\n relation_values = {\n **provider.command.get_relations(self.facade),\n **relation_values\n }\n for field, value in relation_values.items():\n if value is not None:\n facade = provider.command.facade(\n relations[field]['model'].facade.name\n )\n if relations[field]['multiple']:\n provider.update_related(self, field, facade, value)\n else:\n value = None if not value else value\n provider.set_related(self, field, facade, value)\n\n @property\n def facade(self):\n return copy.deepcopy(self.__class__.facade)\n\n\nclass BaseMetaModel(ModelBase):\n\n def __new__(cls, name, bases, attrs, **kwargs):\n spec_key = classify_model(name)\n parent_map = classify_parents(bases)\n meta_info = attrs.get('_meta_info', {})\n meta_bases = []\n\n logger.info(\"++++ Creating new model: {} <{}> {}\".format(name, spec_key, bases))\n for field, value in meta_info.items():\n logger.debug(\" init meta > {} - {}\".format(field, value))\n\n for key in ('data', 'data_mixins', 'data_base', 'base'):\n for parent in parent_map.get(key, []):\n if key in ('base', 'data_base'):\n if getattr(parent, 'Meta', None):\n meta_bases.append(parent.Meta)\n\n for field, value in getattr(parent, '_meta_info', {}).items():\n if field[0] != '_' and field not in ('abstract', 'db_table'):\n meta_info.setdefault(field, value)\n\n if spec_key == 'data' and not check_dynamic(name):\n meta_info['abstract'] = False\n else:\n meta_info['abstract'] = True\n\n if not meta_info['abstract']:\n spec = model_index().spec['data'][meta_info['data_name']]\n app_name = spec.get('app', meta_info['data_name'])\n data_info = model_index().module_map['data'][app_name]\n meta_info['db_table'] = \"{}_{}\".format(data_info.module.replace('-', '_'), meta_info['data_name'])\n\n attrs['Meta'] = type('Meta', tuple(meta_bases), meta_info)\n\n for field in dir(attrs['Meta']):\n if field[0] != '_':\n logger.debug(\" final meta > {} - {}\".format(field, getattr(attrs['Meta'], field)))\n\n return super().__new__(cls, name, bases, attrs, **kwargs)\n\n\n @property\n def facade_class(cls):\n class_name = get_stored_class_name(cls.__name__)\n facade_class_name = get_facade_class_name(class_name)\n dynamic_facade_class_name = get_dynamic_class_name(facade_class_name)\n\n module_name = model_index().model_class_path.get(class_name, None)\n module = importlib.import_module(module_name)\n\n if getattr(module, facade_class_name, None):\n facade_class = getattr(module, facade_class_name)\n elif getattr(module, dynamic_facade_class_name, 
None):\n facade_class = getattr(module, dynamic_facade_class_name)\n else:\n raise FacadeNotExistsError(\"Neither dynamic or coded facades exist for model {}\".format(class_name))\n return facade_class\n\n @property\n def facade(cls):\n facade = None\n if not cls._meta.abstract:\n facade = model_index().model_class_facades.get(cls.__name__, None)\n if not facade:\n facade = cls.facade_class(cls)\n model_index().model_class_facades[cls.__name__] = facade\n return facade\n\n\nclass BaseMixin(\n django.Model,\n metaclass = BaseMetaModel\n):\n class Meta:\n abstract = True\n\nclass BaseModel(\n BaseModelMixin,\n metaclass = BaseMetaModel\n):\n class Meta:\n abstract = True\n facade_class = ModelFacade\n","sub_path":"app/systems/models/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":6997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"93791599","text":"import os\n\nimport transmissionrpc\n\nfrom . import acd\nfrom . import settings\nfrom .log import DEBUG, INFO, WARNING, EXCEPTION\n\n\nasync def process_torrent(torrent_id):\n torrent_client = connect_transmission()\n torrent = torrent_client.get_torrent(torrent_id)\n if not torrent:\n WARNING('tmacd') << 'no such torrent id {0}'.format(torrent_id)\n return\n torrent_name = torrent.name\n INFO('tmacd') << '{0}: processing'.format(torrent_name)\n\n root_items = get_root_items(torrent)\n if not root_items:\n WARNING('tmacd') << '{0}: no item to upload?'.format(torrent_name)\n return\n DEBUG('tmacd') << '{0}: {1}'.format(torrent_name, root_items)\n\n INFO('tmacd') << '{0}: begin uploading'.format(torrent_name)\n torrent_root = torrent.downloadDir\n # upload files to Amazon Cloud Drive\n try:\n await acd.upload(torrent_root, root_items)\n except Exception as e:\n EXCEPTION('tmacd') << '{0}: upload failed'.format(torrent_name)\n INFO('tmacd') << 'retry url: /torrents/{0}'.format(torrent_id)\n return\n\n INFO('tmacd') << '{0}: remove torrent'.format(torrent_name)\n # remove the task from Transmission first\n remove_torrent(torrent_client, torrent_id)\n\n\ndef get_completed():\n torrent_client = connect_transmission()\n completed = filter(lambda _: _.leftUntilDone == 0, torrent_client.get_torrents())\n return list(completed)\n\n\ndef get_root_items(torrent):\n files = torrent.files()\n common = set()\n\n # find common path\n for fid, item in files.items():\n if not item['selected']:\n continue\n parts = split_all(item['name'])\n common.add(parts[0])\n\n common = list(common)\n return common\n\n\ndef remove_torrent(client, torrent_id):\n client.remove_torrent(torrent_id, delete_data=True)\n\n\ndef split_all(path):\n '''\n Returns path parts by directories.\n '''\n allparts = []\n while True:\n parts = os.path.split(path)\n if parts[0] == path: # sentinel for absolute paths\n allparts.insert(0, parts[0])\n break\n elif parts[1] == path: # sentinel for relative paths\n allparts.insert(0, parts[1])\n break\n else:\n path = parts[0]\n allparts.insert(0, parts[1])\n return allparts\n\n\ndef connect_transmission():\n opt = settings['transmission']\n client = transmissionrpc.Client(opt['host'], port=opt['port'],\n user=opt['username'],\n password=opt['password'])\n return client\n","sub_path":"tmacd/torrent.py","file_name":"torrent.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"48177679","text":"sections = {\r\n 'cs-basics': {\r\n 'heading': 'CS Basics',\r\n 'subsections': {\r\n 
'oops': 'OOPS',\r\n            'web_dev': 'Web Development',\r\n            'compilers': 'Interpreter vs Compilers',\r\n            'quick_rems': 'Quick Remembers',\r\n            'data_structures': 'Data Structures'\r\n        }\r\n    },\r\n    'cs-course': {\r\n        'heading': 'CS Course',\r\n        'subsections': {}\r\n    },\r\n    'php': {\r\n        'heading': 'PHP',\r\n        'subsections': {\r\n            'common': 'Common',\r\n            'slim': 'SLIM',\r\n            'yii': 'YII2'\r\n        }\r\n    },\r\n    'node': {\r\n        'heading': 'Node',\r\n        'subsections': {\r\n            'basic': 'BASICS',\r\n            'event_loop': 'Event Loop',\r\n            'blocking_unblocking': 'Blocking vs Non-Blocking',\r\n        }\r\n    },\r\n    'angular': {\r\n        'heading': 'ANGULAR',\r\n        'subsections': {\r\n            'basic': 'BASICS'\r\n        }\r\n    },\r\n    'sde': {\r\n        'heading': 'Software Development',\r\n        'subsections': {}\r\n    },\r\n    'networking': {\r\n        'heading': 'Network Basics',\r\n        'subsections': {}\r\n    },\r\n    'coding': {\r\n        'heading': 'Coding Questions',\r\n        'subsections': {\r\n            'algos': 'Algorithms & Implementations',\r\n            'arrays': 'Arrays',\r\n            'algo_analysis': 'Analysis of Algorithms'\r\n        }\r\n    },\r\n    'interview-exp': {\r\n        'heading': 'Interview Experiences',\r\n        'subsections': {}\r\n    },\r\n    'databases': {\r\n        'heading': 'Databases',\r\n        'subsections': {}\r\n    },\r\n    'gk': {\r\n        'heading': 'GK',\r\n        'subsections': {}\r\n    },\r\n    'python': {\r\n        'heading': 'Python',\r\n        'subsections': {\r\n            'flask': 'Flask'\r\n        }\r\n    },\r\n    'docker': {\r\n        'heading': 'Docker',\r\n        'subsections': {\r\n            'basics': 'Basics'\r\n        }\r\n    }\r\n\r\n}\r\n\r\n\r\ndef getSecHeading(sec):\r\n    # iterating over the `sections` dict yields its keys, so look the heading up by key\r\n    return sections[sec]['heading']\r\n\r\n\r\nsectionNames = map(getSecHeading, sections)\r\n","sub_path":"app/helpers/variables.py","file_name":"variables.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"469316303","text":"def check(l):\r\n    a=0\r\n    b=1\r\n    start_indx=None # 0 is a valid index, so use None as the 'not found yet' sentinel\r\n    stop_indx=0\r\n    while b!=len(l):\r\n        if l[b] < l[a]:\r\n            if start_indx is None:\r\n                start_indx=a\r\n        if start_indx is not None and l[b] > l[a]:\r\n            stop_indx=a\r\n            return start_indx,stop_indx # returning when a subarray is found\r\n        a+=1\r\n        b+=1\r\n    if start_indx is not None: # performing check if subarry is found unsorted, but array is completed.\r\n        return start_indx, len(l)-1\r\n    else:\r\n        return -1,-1 # returning when array is already sorted\r\n\r\nl=[1,2,5,4,3,9]\r\na,b = check(l)\r\nif a == -1 and b == -1:\r\n    print(False)\r\nelse:\r\n    print(\"Yes\")\r\n    print(l[a:b+1])\r\n","sub_path":"rev_subarry_to_sort.py","file_name":"rev_subarry_to_sort.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"336477662","text":"#! 
/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n# File name: aa.py\n# First Edit: 2020-04-08\n# Last Change: 08-Apr-2020.\n\"\"\"\nThis scrip is for test\n\n\"\"\"\n\n\ndef split_insuarance_or_not(raw_s):\n return raw_s.split(\"\\r\")\n\n\ndef all_number(raw_value):\n if isinstance(raw_value, int):\n return raw_value\n elif raw_value.startswith(\"*\"):\n if isinstance(raw_value[1:], int):\n return all_number(int(raw_value[1:]))\n else:\n return all_number(raw_value[1:])\n else:\n try:\n return split_insuarance_or_not(raw_value)[0]\n except:\n print(raw_value)\n\n\ndef insure_only_number(raw_value, flag=False):\n try:\n return split_insuarance_or_not(raw_value)[1]\n except IndexError:\n if raw_value.startswith(\"*\"):\n return insure_only_number(raw_value, flag=True)\n elif flag:\n return int(raw_value)\n else:\n return None\n\n\nb = insure_only_number(\"50\\r40\")\nprint(b)\n","sub_path":"playground/aa.py","file_name":"aa.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"376227580","text":"with open(\"topics.401-450.trec8\",\"r\") as myfile:\r\n data = myfile.read().replace('\\n',' ')\r\n\r\n\r\n\r\ndata = data.split()\r\n\r\nnumber = 0\r\nfor i in range(len(data)):\r\n if (data[i] == 'Number:'):\r\n number = data[i+1]\r\n number = int(number)\r\n break\r\n\r\n\r\nb=[]\r\nfor i in data:\r\n\ti =i.replace(',',' ')\r\n\ti =i.replace('?',' ')\r\n\ti =i.replace(\"'\",' ')\r\n\ti =i.replace('(',' ')\r\n\ti =i.replace(')',' ')\r\n\ti =i.replace('.',' ')\r\n\ti =i.replace(':',' ')\r\n\ti =i.replace('-',' ')\r\n\ti =i.replace('/',' ')\r\n\ti =i.replace('&',' ')\r\n\ti =i.replace('Description',' ')\r\n\tb.append(i)\r\n\r\ndata=[]\r\ndata=b\r\nt= []\r\nflag = False\r\ntemp =[]\r\n\r\n\r\n\r\n\r\nfor i in range(len(data)-2):\r\n\r\n if ( data[i] == ''):\r\n flag = True\r\n\r\n\r\n if ( flag ):\r\n if (data[i+1]!='<desc>' and data[i+1]!=\"<narr>\" and data[i+1]!='Narrative '):\r\n temp.append(data[i+1])\r\n #else :\r\n #temp.append(data[i+2])\r\n\r\n\r\n if ( data[i] == '< top>'):\r\n flag = False\r\n temp.remove(data[i])\r\n temp.remove(data[i+1])\r\n #temp.remove('Description:')\r\n t.append(temp)\r\n temp = []\r\n\r\n\r\n\r\nresult = []\r\nf = open(\"narr_topics_451-450.txt\",\"w\")\r\n\r\nf.write('<parameters>')\r\nf.write('<index>/home/miltiadis/Desktop/Omada7</index>')\r\nf.write('<rule>method:dirichlet,mu:1000</rule>')\r\nf.write('<count>1000</count>')\r\nf.write('<trecFormat>true</trecFormat>')\r\nfor i in t:\r\n result.append(' '.join(i))\r\n a = '<query>' + '<type>' + 'indri' + '</type>' +'<number>'+str(number)+ '</number>' +'<text>' + ' '.join(i)+ '</text>' +'</query>' + '\\n'\r\n f.write(a)\r\n number +=1\r\n\r\nf.write('</parameters>')\r\nf.close\r\n","sub_path":"quer.py","file_name":"quer.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"347021482","text":"# Scraper for the Superior Court of Delaware\n# CourtID: desup\n# Court Short Name: De.\n# Author: Andrei Chelaru\n# Reviewer: mlr\n# Date created: 31 July 2014\n\nfrom juriscraper.opinions.united_states.state import delaware\n\n\nclass Site(delaware.Site):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.url = (\n \"http://courts.delaware.gov/opinions/List.aspx?ag=Superior%20Court\"\n )\n self.court_id = 
self.__module__\n","sub_path":"juriscraper/opinions/united_states/state/delsuperct.py","file_name":"delsuperct.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"5785025","text":"# -*- coding: utf-8 -*-\n\"\"\"\nMain script to pre-train the autoencoders\nCreated on Mon Nov 16 23:05:38 2020\n\n@author: Alycia\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport torch\nfrom torch import nn\nfrom torch.backends import cudnn\nfrom torch.autograd import Variable\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torch.cuda\nimport torchvision.utils as v_utils\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nfrom torch.utils.data import Subset\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as py\nfrom autoenc import ConvAutoencoder\n\nfrom timeit import default_timer as timer\n\n''' Parameters '''\ninput_w = 256\ninput_h = 256\ninput_nc = 3\ntrain_split = 0.7\nval_split = 0.3\nbatch_size = 32\nlearning_rate = 0.002\nchest_train_dataset_dir = './data/chest_xray/train'\nchest_val_dataset_dir = './data/chest_xray/val'\nchest_test_dataset_dir = './data/chest_xray/test'\nciphertext_train_dataset_dir = './data/ciphertext/train'\nciphertext_val_dataset_dir = './data/ciphertext/val'\nciphertext_test_dataset_dir = './data/ciphertext/test'\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n\n# Set up the dataloaders\ntrans = transforms.Compose([\n transforms.Grayscale(num_output_channels=1),\n transforms.Resize((input_h, input_w)),\n transforms.ToTensor()\n])\n\nchest_train_set = datasets.ImageFolder(root=chest_train_dataset_dir, transform=trans)\nchest_val_set = datasets.ImageFolder(root=chest_val_dataset_dir, transform=trans)\nchest_test_set = datasets.ImageFolder(root=chest_test_dataset_dir, transform=trans)\n\nciphertext_train_set = datasets.ImageFolder(root=ciphertext_train_dataset_dir, transform=trans)\nciphertext_val_set = datasets.ImageFolder(root=ciphertext_val_dataset_dir, transform=trans)\nciphertext_test_set = datasets.ImageFolder(root=ciphertext_test_dataset_dir, transform=trans)\n\nchest_train_loader = torch.utils.data.DataLoader(chest_train_set, batch_size, shuffle=True)\nchest_val_loader = torch.utils.data.DataLoader(chest_val_set, batch_size, shuffle=True)\n\ncipher_train_loader = torch.utils.data.DataLoader(ciphertext_train_set, batch_size, shuffle=True)\ncipher_val_loader = torch.utils.data.DataLoader(ciphertext_val_set, batch_size, shuffle=True)\n\n#pre-training\n\nautoencoder = ConvAutoencoder(\n in_channels = 1, \n out_channels = 1, \n kernel_size = 3,\n device = device,\n downsampling = 1\n )\n\nautoencoder.train_and_validate(chest_train_loader, chest_val_loader, batch_size=batch_size, lr=learning_rate, device=device, epochs=5, which_model = 'Chest')\nchest_auto_path = './pre_trained_networks/chest_autoencoder.pt'\nautoencoder.save_model(chest_auto_path)\n\nautoencoder.train_and_validate(cipher_train_loader, cipher_val_loader, batch_size=batch_size, lr=learning_rate, device=device, epochs=5, which_model = 'Ciphertext')\ncipher_auto_path = './pre_trained_networks/cipher_autoencoder.pt'\nautoencoder.save_model(cipher_auto_path)\n\n\n\n","sub_path":"autoencoder_pretraining.py","file_name":"autoencoder_pretraining.py","file_ext":"py","file_size_in_byte":2988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} 
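Note on the pre-training record above: it reuses a single ConvAutoencoder instance for both runs, so the ciphertext checkpoint is fine-tuned from the already-trained chest weights rather than trained from scratch; re-instantiating the model between the two calls would avoid that. Below is a minimal sketch of reloading the two saved checkpoints for downstream use. It assumes ConvAutoencoder is an nn.Module subclass and that save_model() wrapped torch.save() around either a state_dict or the whole module (neither is shown in the original); the load_autoencoder helper is hypothetical.

import torch
from autoenc import ConvAutoencoder  # same module the pre-training script imports

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

def load_autoencoder(path):
    # Hypothetical helper: rebuild the model with the same arguments used in the script.
    model = ConvAutoencoder(in_channels=1, out_channels=1, kernel_size=3,
                            device=device, downsampling=1)
    checkpoint = torch.load(path, map_location=device)
    if isinstance(checkpoint, dict):
        # Assumed state_dict-style checkpoint, i.e. torch.save(model.state_dict(), path).
        model.load_state_dict(checkpoint)
    else:
        # Assumed whole-module checkpoint, i.e. torch.save(model, path).
        model = checkpoint
    model.eval()  # switch dropout/batch-norm layers to inference behaviour
    return model

chest_ae = load_autoencoder('./pre_trained_networks/chest_autoencoder.pt')
cipher_ae = load_autoencoder('./pre_trained_networks/cipher_autoencoder.pt')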
+{"seq_id":"565564694","text":"\"\"\"fix/ cascade appointments\n\nRevision ID: ffe875017cea\nRevises: fafc3dde8627\nCreate Date: 2021-04-22 21:49:15.871132\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'ffe875017cea'\ndown_revision = 'fafc3dde8627'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint('appointments_barber_shop_id_fkey', 'appointments', type_='foreignkey')\n op.create_foreign_key(None, 'appointments', 'barber_shop', ['barber_shop_id'], ['id'], onupdate='CASCADE', ondelete='CASCADE')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(None, 'appointments', type_='foreignkey')\n op.create_foreign_key('appointments_barber_shop_id_fkey', 'appointments', 'barber_shop', ['barber_shop_id'], ['id'])\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/ffe875017cea_fix_cascade_appointments.py","file_name":"ffe875017cea_fix_cascade_appointments.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"622384446","text":"import tables as tb\nimport pathlib2 as pl\n\nclass Database(object):\n \"\"\"docstring for Database\"\"\"\n def __init__(self, path, mode='a'):\n super(Database, self).__init__()\n self.path = path\n self.mode = mode\n self._handle = None\n self.open()\n \n def __repr__(self):\n return repr(self._handle)\n\n def open(self, path=None, mode=None):\n # use local parameters if not set\n if not path:\n path = self.path\n if not mode:\n mode = self.mode\n\n # Make sure path is not pathlib\n if isinstance(path, pl.PosixPath):\n path = path.as_posix()\n f = tb.open_file(path, mode)\n self._handle = f\n\n def is_open(self):\n if self._handle.isopen == 0:\n return False\n else:\n return True\n\n def close(self):\n self._handle.close()\n\n def _save_table(self, data, group, table_name):\n table = self._handle.create_table(group, table_name , data.dtype,\n expectedrows=data.shape[0])\n for d in data:\n for name, item in zip(data.dtype.names, d):\n table.row[name] = item\n table.row.append()\n table.flush()\n\n def _save_array(self, data, group, table_name, compress=False):\n if compress:\n compressor = tb.Filters(complevel=5, complib='blosc')\n self._handle.create_carray(group, table_name , obj=data,\n filters=compressor)\n else:\n self._handle.create_carray(group, table_name , obj=data)\n \n def _create_group(self, group_name, location='/'):\n self._handle.create_group(location, group_name)\n \n def _load_table(self, group, table_name, as_iterator=False):\n if as_iterator:\n return self._handle.get_node(group, table_name).iterrows()\n else:\n return self._handle.get_node(group, table_name)[:]\n\n def _load_ctable(self, group, table_name, col=None, as_iterator=False):\n \"\"\"read carray from the database and return the data as numpy array \n or iterator. 
If the data is returned as an iterator, col is ignored.\n\n Arguments:\n group -- location of table [string]\n table_name -- name of the table [string]\n col -- column of the array [integer]\n as_iterator -- switch between returning array or iterator [bool]\n\n Return:\n numpy array or iterator\n \"\"\"\n if as_iterator:\n return self._handle.get_node(group, table_name).iterrows()\n if col is not None:\n return self._handle.get_node(group, table_name)[:,col]\n else:\n return self._handle.get_node(group, table_name)[:]\n","sub_path":"Database/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"287024585","text":"arr = [11,25,36,78,69,63,45,85,13,22]\n\nprint(\"Your list is : \",arr)\ntoFind = int(input(\"ENter number to search : \"))\n\ndef linearSearch(lst,num):\n\tfor elem in lst:\n\t\tif elem == num:\n\t\t\treturn 1\n\telse:\n\t\treturn 0\n\nif linearSearch(arr,toFind):\n\tprint(\"Present\")\nelse:\n\tprint(\"Not Present\")\n","sub_path":"collegeAssignment/linearSearch.py","file_name":"linearSearch.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"263188129","text":"import time\n\nimport RPi.GPIO as GPIO\n\nfrom util.gpio import IR\n\n\ndef get_key():\n if GPIO.input(IR) == 0:\n count = 0\n while GPIO.input(IR) == 0 and count < 200: # 9ms\n count += 1\n time.sleep(0.00006)\n if count < 10:\n return\n count = 0\n while GPIO.input(IR) == 1 and count < 80: # 4.5ms\n count += 1\n time.sleep(0.00006)\n\n idx = 0\n cnt = 0\n data = [0, 0, 0, 0]\n for i in range(0, 32):\n count = 0\n while GPIO.input(IR) == 0 and count < 15: # 0.56ms\n count += 1\n time.sleep(0.00006)\n\n count = 0\n while GPIO.input(IR) == 1 and count < 40: # 0: 0.56mx\n count += 1 # 1: 1.69ms\n time.sleep(0.00006)\n\n if count > 7:\n data[idx] |= 1 << cnt\n if cnt == 7:\n cnt = 0\n idx += 1\n else:\n cnt += 1\n # print data\n if data[0] + data[1] == 0xFF and data[2] + data[3] == 0xFF: # check\n return data[2]\n else:\n return None\n","sub_path":"lib/infrared.py","file_name":"infrared.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"409860564","text":"import numpy as np\n\n\ndef get_offset_locations(offset_distances, angle):\n \"\"\"\n\n :param offset_distances: list of scalar distances of offsets\n :param angle: angle in the xy-plane. 
0 points in x-direction and turns clockwise\n :return: list of 3d-coordinates of points in offset_distances at angle\n \"\"\"\n loc = []\n for dist in offset_distances:\n loc += [[0, int(round(1*dist*np.sin(angle))), int(round(dist*np.cos(angle)))]]\n return loc\n","sub_path":"quantizedVDT/utils/affinitiy_utils.py","file_name":"affinitiy_utils.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"164174882","text":"def any_lowercase1(s):\n for c in s:\n if c.islower():\n return True\n else:\n return False\n\ndef any_lowercase2(s):\n for c in s:\n if 'c'.islower():\n return 'True'\n else:\n return 'False'\n\ndef any_lowercase3(s):\n for c in s:\n flag = c.islower()\n return flag\n\ndef any_lowercase4(s):\n flag = False\n for c in s:\n flag = flag or c.islower()\n return flag\n\n\n\n\n\nif __name__ == '__main__':\n print(any_lowercase1('LeeJHo'))\n print(any_lowercase2('LEEJHO'))\n print(any_lowercase3('LeeJHO'))\n print(any_lowercase4('LeeJHO'))\n\n\n","sub_path":"ch8/ex8-4.py","file_name":"ex8-4.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"535961431","text":"import logging\nfrom marshmallow import ValidationError\n\n\ndef format_errors(errors):\n for top_level_name, top_level_dict in errors.items():\n logging.error(\"Errors in Top-Level Key: {}\".format(top_level_name))\n for num, element_errors in top_level_dict.items():\n logging.error(\"Errors in element# {}\".format(num))\n for badkey, problems in element_errors.items():\n logging.error(\" key in error: {}\".format(badkey))\n for problem in problems:\n logging.error(\" {}\".format(problem))\n\n\nclass InvalidSWAGDataException(ValidationError):\n def __init__(self, errors):\n format_errors(errors)\n\n","sub_path":"swag_client/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"208206648","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: jmw418\n\"\"\"\n###Packages###\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.linalg\nimport scipy.special\nfrom numpy import linalg as LA\nimport scipy.fftpack as ft\ndef main():\n #initialisation of constants\n nx = 2001;nt = 101;a = 1.9;tstart = 0;tend = 1;xmin=0;xmax = 1;\n \n alpha = 0.5;\n \n # derived parameters \n dx = (xmax-xmin)/(nx); dt = (tend-tstart)/(nt)\n c = dt/dx*a\n print(\"clf number=\", c)\n \n #initialisation of structure\n x = np.linspace(xmin,xmax,nx)\n Phi = np.zeros([nx])\n print(np.size(x),np.size(Phi))\n Phinew = np.zeros([nx])\n \n #initial condition\n def IC(x):\n square = np.zeros_like(x)\n square[np.where(x <= 0.1)] = 1.0\n \n chair = np.zeros_like(x)\n for i in range(0,len(x)):\n if (x[i]<0.3):\n if (x[i]>0.2):\n chair[i] = 1\n \n return square +chair+ np.cosh(20*(x-0.5))**(-2)\n Phi = IC(x)\n \n \n # creation of the matrix structure\n A = np.zeros([nx,nx])\n A = Up_A(nx,c,A)\n beta = np.zeros([nx])\n fx = np.zeros([nx]) \n do = np.zeros([nx])\n ## this could be put in the function but this would create them alot\n v = np.zeros([nx])\n tv = np.zeros(nt) \n for i in range(0,nt):###...time \n \n \n #Phi = mom_map(nx,Phi) ## compute the momentum optionally times\n \n Phinew[:] = space_upwind(nx,Phi,c,A,beta,Phinew) ## compute the update Phi mapsto Phinew\n \n \n Phi = Phinew ### restart the Phi\n #v = Phi ## 
momentum storage\n #Phi = mom_map_inverse(nx,Phi) ## compute the auxilary velocity\n \n for j in range(0,nx):\n tv[i] += abs(Phi[j] - Phi[(j-1)%nx])\n \n \n Phi = frequency_filter(Phi)\n \n plt.axis([0, 1, -0.1, 1.3])\n plt.plot(x,Phi,'b')\n plt.plot(x,v,'m')\n plt.plot(x,IC((x-i*a*dt)%1 ),'r')\n plt.draw()\n plt.pause(0.001)\n plt.clf()\n \n plt.show()\n plt.plot(tv)\n plt.show()\n \ndef F(U):\n return U\n\ndef Up_A(nx,c,A):\n \n alpha = 0.5\n ## theta scheme ##\n for j in range(0,nx):\n A[j,j] = 1 + alpha*c;\n A[j,(j-1)%nx] = -alpha*c;\n\n return A\n \ndef Cen_A(nx,c,A):\n \n alpha = 0.5\n ## theta scheme ##\n for j in range(0,nx):\n A[j,j] = 1 ;\n A[j,(j-1)%nx] = -0.5*alpha*c;\n A[j,(j+1)%nx] = 0.5*alpha*c;\n return A\n\n \ndef space_upwind(nx,Phi,c,A,beta,Phinew):\n ##this is third order flux differencing naively they need to be done differently from the flux function.\n alpha = 0.5\n \n \n ### the correction ###\n for j in range(0,nx):\n beta[j] = Phi[j] - (1-alpha)*c*( Phi[(j)%nx] - Phi[(j-1)%nx] )\n Phinew[:] = np.linalg.solve(A,beta)\n return Phinew[:]\n \ndef space_centered(nx,Phi,c,A,beta,Phinew):\n ##this is third order flux differencing naively they need to be done differently from the flux function.\n alpha = 0.5\n \n \n ### the correction ###\n for j in range(0,nx):\n beta[j] = Phi[j] - 0.5*(1-alpha)*c*( Phi[(j+1)%nx] - Phi[(j-1)%nx] )\n ### the correction ###\n Phinew[:] = np.linalg.solve(A,beta) ## this is the \\Phi^'\n \n return Phinew[:]\n \n \ndef frequency_filter(v):\n nx = np.size(v)\n sig_fft = ft.fft(v)\n power = np.abs(sig_fft)\n \n # plt.plot(x,power)\n # plt.draw()\n # plt.pause(0.001)\n sample_freq = ft.fftfreq(v.size, d=nx)\n high_freq_fft = sig_fft.copy()\n peak_freq = 0.02/nx\n high_freq_fft[np.abs(sample_freq) > peak_freq] = 0.0\n filtered_sig = ft.ifft(high_freq_fft)\n v = np.real(filtered_sig)\n v = v\n return v\n \n \nif __name__ == \"__main__\":\n # execute only if run as a script\n main()\n\n\n","sub_path":"PHD/Python/Parrellism/Parrallel_coding.py","file_name":"Parrallel_coding.py","file_ext":"py","file_size_in_byte":3881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"299717882","text":"import os\nimport scipy.sparse\nimport anndata as ad\nimport numpy as np\nimport tables\n\nfrom sfaira.data import DatasetBase\n\n\nclass Dataset(DatasetBase):\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.download_url_data = \\\n \"http://cf.10xgenomics.com/samples/cell-exp/3.0.0/pbmc_10k_v3/pbmc_10k_v3_filtered_feature_bc_matrix.h5\"\n self.download_url_meta = None\n\n self.assay_sc = \"10x 3' v3\"\n self.author = \"10x Genomics\"\n self.disease = \"healthy\"\n self.doi_journal = \"no_doi_10x_genomics\"\n self.normalization = \"raw\"\n self.organ = \"blood\"\n self.organism = \"human\"\n self.sample_source = \"primary_tissue\"\n self.year = 2019\n\n self.gene_id_symbols_var_key = \"index\"\n self.gene_id_ensembl_var_key = \"gene_ids\"\n\n self.set_dataset_id(idx=1)\n\n\ndef load(data_dir, **kwargs):\n fn = os.path.join(data_dir, \"pbmc_10k_v3_filtered_feature_bc_matrix.h5\")\n with tables.open_file(str(fn), 'r') as f:\n dsets = {}\n for node in f.walk_nodes('/matrix', 'Array'):\n dsets[node.name] = node.read()\n\n M, N = dsets['shape']\n data = dsets['data']\n if dsets['data'].dtype == np.dtype('int32'):\n data = dsets['data'].view('float32')\n data[:] = dsets['data']\n matrix = scipy.sparse.csr_matrix(\n (data, dsets['indices'], dsets['indptr']),\n shape=(N, M),\n )\n adata = 
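The advection solver above prints the Courant number but labels it "clf" (evidently a typo for CFL), and it buries the update inside a matrix solve for the implicit part. As a point of reference, here is the fully explicit first-order upwind step on a periodic grid that the alpha = 0 limit of the record's theta scheme reduces to; the grid numbers are illustrative:

import numpy as np

def explicit_upwind_step(phi, c):
    # phi_j^{n+1} = phi_j^n - c*(phi_j^n - phi_{j-1}^n), with c = a*dt/dx;
    # np.roll supplies the periodic phi_{j-1} neighbour. Stable for 0 <= c <= 1.
    return phi - c * (phi - np.roll(phi, 1))

x = np.linspace(0.0, 1.0, 200, endpoint=False)
phi = np.cosh(20 * (x - 0.5)) ** (-2)   # one bump from the record's IC
phi = explicit_upwind_step(phi, 0.5)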
ad.AnnData(\n matrix,\n dict(obs_names=dsets['barcodes'].astype(str)),\n dict(\n var_names=dsets['name'].astype(str),\n gene_ids=dsets['id'].astype(str),\n feature_types=dsets['feature_type'].astype(str),\n genome=dsets['genome'].astype(str),\n ),\n )\n\n return adata\n","sub_path":"sfaira/data/dataloaders/loaders/dno_doi_10x_genomics/human_blood_2019_10xsequencing_10xgenomics_001.py","file_name":"human_blood_2019_10xsequencing_10xgenomics_001.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"393356665","text":"from django.shortcuts import render,redirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import get_user\nfrom .models import Contact ,Article,Comment,Category,Reply, Section\nfrom .forms import CommentForm , ReplyForm \nfrom services.models import Project\n\n\n\n# Create your views here.\n\n#article category page\ndef article_category(request, slug):\n articles = Article.objects.filter(category__name = slug)\n categories = Category.objects.all()\n return render(request,'articles/article_category.htm',{'articles':articles,'categories':categories})\n\n#article list page\ndef article_list(request):\n articles = Article.objects.all().order_by('-date')\n return render(request,'articles/article_list.htm',{'articles':articles})\n\n#article details\ndef article_details(request,slug):\n article = Article.objects.get(slug=slug)\n stories = Article.objects.all().order_by('date')[:5]\n comments = Comment.objects.filter(parent_article=article)\n sections = Section.objects.filter(root_article=article)\n categories = Category.objects.all()\n projects = Project.objects.all().order_by('date')[:5]\n\n #updating the number of views\n article.views = article.views + 1\n article.save()\n \n #forms\n form = CommentForm()\n form_2 = ReplyForm()\n\n\n #number of comments\n numberOfComments = Comment.objects.filter(parent_article=article).count()\n return render(request,'articles/article_details.htm',{\n 'article':article,\n 'comments':comments,\n 'form':form,\n 'form_2':form_2,\n 'numberOfComments':numberOfComments,\n 'stories':stories,\n 'sections':sections,\n 'categories':categories,\n 'projects':projects\n })\n\n#article comments\n@login_required(login_url= \"/accounts/login\")\ndef article_comment(request,slug):\n article = Article.objects.get(slug=slug)\n if request.method == 'POST':\n User = get_user(request)\n form = CommentForm(request.POST)\n if form.is_valid():\n\n #getting values of the form field\n comment_body = form.cleaned_data['comment']\n\n newComment = Comment(user = User,parent_article = article,comment_body = comment_body)\n newComment.save()\n return redirect('/articles/'+article.slug+ '/')\n\n#article reply\ndef article_reply(request,slug):\n article = Article.objects.get(slug=slug)\n comment = Comment.objects.get(parent_comment=parent_comment)\n if request.method == 'POST':\n User = get_user(request)\n form = ReplyForm(request.POST)\n if form.is_valid():\n\n #getting values of the form field\n reply_body = form.cleaned_data['reply']\n\n newReply = Reply(user = User,parent_comment = comment,reply_body = reply_body)\n newReply.save()\n return redirect('/articles/'+article.slug+ '/')\n\n\n","sub_path":"articles/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"216039731","text":"# Write a function that tests whether a 
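In the Django views record above, article_reply calls Comment.objects.get(parent_comment=parent_comment) with a name that is never defined, so the view raises NameError, and it returns nothing on GET requests. A plausible corrected sketch, assuming the record's imports and that the URLconf supplies the comment's primary key (the comment_id parameter is hypothetical):

def article_reply(request, slug, comment_id):
    article = Article.objects.get(slug=slug)
    comment = Comment.objects.get(pk=comment_id)  # was: parent_comment=parent_comment
    if request.method == 'POST':
        user = get_user(request)
        form = ReplyForm(request.POST)
        if form.is_valid():
            reply_body = form.cleaned_data['reply']
            Reply(user=user, parent_comment=comment, reply_body=reply_body).save()
    # redirect unconditionally so GET requests do not fall through to None
    return redirect('/articles/' + article.slug + '/')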
string is a palindrome.\n\ndef isPalindrome(a):\n# Reverse string and store in variable.\n b = a[::-1]\n# Check if variables have the same value and output accordingly.\n if a == b:\n print('It is a palindrome.')\n else:\n print('It is not a palindrome.')\n\n\n\n\n","sub_path":"ispalindrome.py","file_name":"ispalindrome.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"138953320","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 17 11:15:01 2019\n@author: t1\n\"\"\"\n\nfrom __future__ import division,print_function\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom conv_block import ConvLayer,BatchNormLayer,ConvBlock \nfrom identity_block import IdentityBlock\n\nimport keras\nfrom keras.applications.resnet50 import ResNet50\n\n## comparing the output first few layers of \n## manual resnet with keras resnet \n\nclass MaxPoolLayer(object):\n def __init__(self,dim):\n self.dim = dim\n \n def forward(self,X):\n return tf.nn.max_pool(X,ksize = [1,self.dim,self.dim,1],strides = [1,2,2,1]\n ,padding='VALID')\n \n def get_params(self):\n return []\n \nclass ReluLayer(object):\n def forward(self,X):\n return tf.nn.relu(X)\n \n def get_params(self):\n return []\n\n\n## X -> CL -> BN -> relu -> MP -> CB -> output\nclass PartialResNet(object):\n def __init__(self):\n \n self.conv_layer1 = ConvLayer(7,3,64,stride = 2,padding = 'SAME')\n self.batch_norm1 = BatchNormLayer(64)\n self.relu_layer1 = ReluLayer()\n self.max_pool1 = MaxPoolLayer(3)\n self.conv_block1 = ConvBlock(64,mo=[64,64,256],stride=1)\n \n self.layers = [self.conv_layer1,\n self.batch_norm1,\n self.relu_layer1,\n self.max_pool1,\n self.conv_block1 \n ]\n \n def forward(self,X):\n FX = self.conv_layer1.forward(X)\n FX = self.batch_norm1.forward(FX)\n FX = self.relu_layer1.forward(FX)\n FX = self.max_pool1.forward(FX)\n FX = self.conv_block1.forward(FX)\n return FX\n \n def get_params(self):\n all_params = []\n all_params += self.conv_layer1.get_params()\n all_params += self.batch_norm1.get_params()\n all_params += self.conv_block1.get_params()\n return all_params\n \n def set_session(self,session):\n self.session = session\n self.conv_layer1.session = session\n self.batch_norm1.session = session\n self.conv_block1.set_session(session)\n \n def copyFromKerasLayers(self,layers):\n self.conv_layer1.copyFromKerasLayers(layers[1])\n self.batch_norm1.copyFromKerasLayers(layers[2])\n self.conv_block1.copyFromKerasLayers(layers[5:])\n \n \n \n \n ","sub_path":"resnet/main/.ipynb_checkpoints/first_layers_test-checkpoint.py","file_name":"first_layers_test-checkpoint.py","file_ext":"py","file_size_in_byte":2471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"102389383","text":"# -*- coding: utf-8 -*-\n\n# PyCantonese\n#\n# Copyright (C) 2014-2016 PyCantonese Project\n# Author: Jackson Lee <jsllee.phon@gmail.com>\n# URL: <http://pycantonese.org/>\n# For license information, see LICENSE.TXT\n\n\"\"\"\nPyCantonese: Cantonese Linguistics in Python\n\nDeveloper: Jackson Lee\n\nhttp://pycantonese.org\n\n\"\"\"\n\nimport os\n\nfrom pycantonese.util import ENCODING\nfrom pycantonese.corpus import CantoneseCHATReader\nfrom pycantonese.jyutping import (parse_jyutping, jyutping2tipa, jyutping2yale)\n\n# ------------------------------------------------------------------------------\n# METADATA\n# 
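The isPalindrome record above prints its verdict and treats 'Level' as a non-palindrome because it compares raw strings. A variant (not the record's code) that returns a bool and ignores case:

def is_palindrome(s):
    s = s.lower()          # case-insensitive comparison
    return s == s[::-1]    # same reverse-slice trick as the record

assert is_palindrome('Level')
assert not is_palindrome('Python')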
------------------------------------------------------------------------------\n\n# Version\nversion_filename = os.path.join(os.path.dirname(__file__), 'VERSION')\ntry:\n with open(version_filename) as f:\n __version__ = f.read().strip()\nexcept FileNotFoundError:\n __version__ = 'unknown version; VERSION file not found'\n\n# Copyright notice and license\n__copyright__ = \"\"\"\\\nCopyright (C) 2014-2016 PyCantonese Project.\n\nDistributed and Licensed under the Apache License, Version 2.0,\nwhich is included by reference.\n\"\"\"\n__license__ = 'Apache License, Version 2.0'\n\n# Description\n__description__ = 'PyCantonese'\n\n# Long description\n__long_description__ = 'PyCantonese: Cantonese Linguistics in Python'\n\n# keywords\n__keywords__ = ['computational linguistics', 'natural language processing',\n 'NLP', 'Cantonese', 'linguistics', 'corpora', 'speech',\n 'language', 'Chinese', 'Jyutping', 'tagging']\n\n# URL\n__url__ = \"http://pycantonese.org/\"\n\n# maintainer\n__maintainer__ = \"Jackson Lee\"\n__maintainer_email__ = \"jsllee.phon@gmail.com\"\n__author__ = __maintainer__\n__author_email__ = __maintainer_email__\n\n# trove classifiers for Python Package Index\n__classifiers__ = [\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Natural Language :: Chinese (Traditional)',\n 'Natural Language :: Cantonese',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Human Machine Interfaces',\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: Text Processing',\n 'Topic :: Text Processing :: Filters',\n 'Topic :: Text Processing :: General',\n 'Topic :: Text Processing :: Indexing',\n 'Topic :: Text Processing :: Linguistic'\n]\n\n__install_requires__ = ['pylangacq']\n\n# ------------------------------------------------------------------------------\n# CORPUS OBJECTS\n# ------------------------------------------------------------------------------\n\n\ndef hkcancor():\n \"\"\"\n Create the corpus object for the Hong Kong Cantonese Corpus.\n \"\"\"\n data_path = os.path.join(os.path.dirname(__file__),\n 'data', 'hkcancor', '*.cha')\n return CantoneseCHATReader(data_path, encoding='utf8')\n\n\ndef read_chat(*filenames, encoding=ENCODING):\n \"\"\"\n Create a corpus object based on *filenames*.\n\n :param filenames: one or multiple filenames (absolute-path or relative to\n the current directory; with or without glob matching patterns)\n\n :param encoding: file encoding; defaults to 'utf8'.\n \"\"\"\n return CantoneseCHATReader(*filenames, encoding=encoding)\n","sub_path":"pycantonese/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"202827474","text":"import pytest\nfrom hoerapi.util import parse_bool, parse_date\nfrom datetime import datetime\n\n\ndef test_parse_date():\n assert parse_date('2012-12-02 13:00:00') == datetime(2012, 12, 2, 13, 0, 0)\n\n\n@pytest.mark.parametrize(\"str,bool\", [\n ('', False),\n ('0', False),\n ('1', True),\n 
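The pycantonese record above only defines the package surface; a hypothetical usage sketch of the two corpus constructors it exports (the data path is made up, and the package must be installed):

import pycantonese

corpus = pycantonese.hkcancor()                 # bundled HK Cantonese Corpus
custom = pycantonese.read_chat('mydata/*.cha')  # custom CHAT files, utf8 by default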
('invalid22', False),\n])\ndef test_parse_bool(str, bool):\n assert parse_bool(str) == bool","sub_path":"test/test_util.py","file_name":"test_util.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"598552121","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n# Generated file, DO NOT EDIT\n# Changes may cause incorrect behavior and will be lost if the code is regenerated.\n# --------------------------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass TestPointsQuery(Model):\n \"\"\"TestPointsQuery.\n\n :param order_by:\n :type order_by: str\n :param points:\n :type points: list of :class:`TestPoint <test.v4_0.models.TestPoint>`\n :param points_filter:\n :type points_filter: :class:`PointsFilter <test.v4_0.models.PointsFilter>`\n :param wit_fields:\n :type wit_fields: list of str\n \"\"\"\n\n _attribute_map = {\n 'order_by': {'key': 'orderBy', 'type': 'str'},\n 'points': {'key': 'points', 'type': '[TestPoint]'},\n 'points_filter': {'key': 'pointsFilter', 'type': 'PointsFilter'},\n 'wit_fields': {'key': 'witFields', 'type': '[str]'}\n }\n\n def __init__(self, order_by=None, points=None, points_filter=None, wit_fields=None):\n super(TestPointsQuery, self).__init__()\n self.order_by = order_by\n self.points = points\n self.points_filter = points_filter\n self.wit_fields = wit_fields\n","sub_path":"vsts/vsts/test/v4_0/models/test_points_query.py","file_name":"test_points_query.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"258250673","text":"# iteration.py\n\ndef newton_i(f=lambda x:math.sin (math.pi*x) - x*x,\n Df=lambda x:math.pi * math.cos (math.pi*x) - 2 * x,\n x0=1,\n tolf=1E-14,\n tolx=1E-14,\n n=26,i=0):\n while n>0:\n n-=1\n i+=1\n x_old=x0 #Save old value\n x0-=f(x0)/Df(x0) #Take Newton step\n error=abs(f(x0)/Df(x0)) #Update error\n print('it=%d x=%e err=%e res=%e' % (i,x0,error,abs(f(x_old))))\n if error < tolx and abs(f(x_old)) < tolf:\n return x_old,error,abs(f(x_old)) #return as is\n print('No convergence!')\n return x_old,error,abs(f(x_old)) #return as is\n \nimport math\n\nprint( newton_i( ))\n","sub_path":"Assignment_1/iteration.py","file_name":"iteration.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"266931817","text":"\"\"\"Create maps of ids to capacity factor timeseries of renewables.\"\"\"\nimport math\n\nimport click\nimport numpy as np\nimport geopandas as gpd\nimport shapely\nimport rasterio\nfrom rasterio.transform import from_origin\nimport xarray as xr\n\nDTYPE = np.uint16\nNO_DATA_VALUE = 64001\nINDEX_EPSILON = 10e-3\n\nEPSG_3035_PROJ4 = \"+proj=laea +lat_0=52 +lon_0=10 +x_0=4321000 +y_0=3210000 +ellps=GRS80 +units=m +no_defs \"\nEPSG_3035 = \"EPSG:3035\"\nWGS84_PROJ4 = \"+proj=longlat +datum=WGS84 +no_defs \"\nWGS84 = \"EPSG:4326\"\n\n\n@click.command()\n@click.argument(\"path_to_timeseries\")\n@click.argument(\"path_to_map\")\n@click.argument(\"resolution_km\", type=int)\ndef 
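The newton_i record above mixes the loop counter, the convergence test and the reporting together, and on success it returns the previous iterate along with its residual. A tidier sketch of the same Newton iteration on the record's f(x) = sin(pi*x) - x^2; the tolerance and iteration budget are illustrative:

import math

def newton(f, df, x0, tol=1e-12, max_iter=50):
    for i in range(1, max_iter + 1):
        step = f(x0) / df(x0)
        x0 -= step
        # stop when both the step and the residual are tiny
        if abs(step) < tol and abs(f(x0)) < tol:
            return x0, i
    raise RuntimeError('No convergence!')

root, its = newton(lambda x: math.sin(math.pi * x) - x * x,
                   lambda x: math.pi * math.cos(math.pi * x) - 2 * x, 1.0)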
id_map(path_to_timeseries, path_to_map, resolution_km):\n \"\"\"Create maps of ids to capacity factor timeseries of renewables.\n\n Each point on the map links to a timeseries of capacity factors of renewables. Together with the\n timeseries, both files form the spatio-temporal data format used in this study.\n \"\"\"\n ds = xr.open_dataset(path_to_timeseries)\n pv_config = ds[[\"lat\", \"lon\"]].to_dataframe()\n points = gpd.GeoDataFrame(\n geometry=[shapely.geometry.Point(row.lon, row.lat) for _, row in pv_config.iterrows()],\n data={\n \"site_id\": pv_config.index\n },\n crs=WGS84_PROJ4\n ).to_crs(EPSG_3035_PROJ4)\n x_min = min([point.x for point in points.geometry])\n x_max = max([point.x for point in points.geometry])\n y_min = min([point.y for point in points.geometry])\n y_max = max([point.y for point in points.geometry])\n resolution_m = resolution_km * 1000\n width = (x_max - x_min) / resolution_m + 1\n height = (y_max - y_min) / resolution_m + 1\n assert isclose(round(width), width) # diff is purely numerics\n assert isclose(round(height), height) # diff is purely numerics\n width = round(width)\n height = round(height)\n raster = np.ones(shape=(height, width), dtype=DTYPE) * NO_DATA_VALUE\n for _, point in points.iterrows():\n index_x = (point.geometry.x - x_min) / resolution_m\n index_y = (y_max - point.geometry.y) / resolution_m\n assert isclose(round(index_x), index_x) # diff is purely numerics\n assert isclose(round(index_y), index_y) # diff is purely numerics\n int_index_x = round(index_x)\n int_index_y = round(index_y)\n raster[int_index_y, int_index_x] = point.site_id\n transform = from_origin(\n west=x_min - resolution_m / 2,\n north=y_max + resolution_m / 2,\n xsize=resolution_m,\n ysize=resolution_m\n )\n with rasterio.open(path_to_map, 'w', driver='GTiff', height=height, width=width,\n count=1, dtype=DTYPE, crs=EPSG_3035, transform=transform,\n nodata=NO_DATA_VALUE) as f_map:\n f_map.write(raster, 1)\n\n\ndef isclose(a, b):\n return math.isclose(a, b, abs_tol=INDEX_EPSILON, rel_tol=0)\n\n\nif __name__ == \"__main__\":\n id_map()\n","sub_path":"src/capacityfactors/id_map.py","file_name":"id_map.py","file_ext":"py","file_size_in_byte":2904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"466511944","text":"\"\"\"\nGiven a linked list, remove the nth node from the end of list and return its head.\n\nFor example,\n\n Given linked list: 1->2->3->4->5, and n = 2.\n\n After removing the second node from the end, the linked list becomes 1->2->3->5.\n\"\"\"\nclass Solution(object):\n def removeNthFromEnd(self, head, n):\n \"\"\"\n :type head: ListNode\n :type n: int\n :rtype: ListNode\n \"\"\"\n start = ListNode(0)\n start.next = head\n cur, preT = start, start\n while n >= 0:\n cur = cur.next\n n -= 1\n while cur != None:\n cur = cur.next\n preT = preT.next\n # print preT.val\n preT.next = preT.next.next\n return start.next","sub_path":"interview/facebook/mid/LC19. Remove Nth Node From End of List.py","file_name":"LC19. 
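The id_map record above turns each site's projected coordinates into raster indices and then asserts that any rounding error is pure numerics. The index arithmetic in isolation, with made-up EPSG:3035 coordinates: columns count eastward from x_min, rows count southward from y_max.

resolution_m = 50 * 1000                 # 50 km cells; the CLI passes km
x_min, y_max = 2_500_000.0, 5_500_000.0  # hypothetical raster corner
x, y = 2_650_000.0, 5_400_000.0          # hypothetical site

col = round((x - x_min) / resolution_m)  # 3 cells east of the west edge
row = round((y_max - y) / resolution_m)  # 2 cells south of the north edge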
Remove Nth Node From End of List.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"208180936","text":"__author__ = \"Gonzalez Jimenez Alvaro, Laurendeau Matthieu\"\n__date__ = \"2020 January\"\n__copyright__ = \"Copyright 2020, Advanced Learning Models\"\n__license__ = \"GPL\"\n__version__ = \"0.3\"\n__maintainer__ = \"Gonzalez Jimenez Alvaro, Laurendeau Matthieu\"\n__email__ = \"alvaro.gonzalez-jimenez@grenoble-inp.org, laurendeau.matthieu@gmail.com\"\n__status__ = \"Submitted\"\n__brief_svm__ = \"Class that contains the implementation of the \\\n Support Vector Machine\"\n\n\n############ Imports ############\n\"\"\"\nLibraries necessary to run this file alone.\n\"\"\"\nimport numpy as np # for arrays operations\nfrom cvxopt import solvers, matrix, spmatrix, sparse # convex optimization\n\nsolvers.options[\"show_progress\"] = False\n\n\nclass SVM:\n \"\"\"\n Class Support Vector Machine\n \"\"\"\n\n def __init__(self, l):\n \"\"\"\n Construct SVM model\n @param: lmda: float - Hyperparameter lambda\n \"\"\"\n self.l = l\n\n def fit(self, gram_matrix, labels):\n \"\"\"\n Solve: min_x 1/2 xPx +qx\n s.t Gx <= h\n Ax = b\n @param: gram_matrix: numpy array - Gram Matrix\n labels: numpy array - Contains all the labels (-1, 1) to fit \n the SVM\n \"\"\"\n\n # Components for quadratic program problem\n n = len(labels)\n P = matrix(gram_matrix)\n q = -matrix(labels, (n, 1), tc=\"d\")\n\n # Constraints\n G = spmatrix(labels, range(n), range(n)) # diagonal matrix\n G = sparse([G, -G])\n h = np.concatenate([np.ones(n) / (2 * self.l * n), np.zeros(n)])\n h = matrix(h.reshape((2 * n, 1)))\n\n # Solving quadratic progam problem\n self.alpha = solvers.qp(P, q, G, h)[\"x\"]\n\n # Return the solution\n return self.alpha\n\n def predict(self, K):\n \"\"\"\n Predict class from SVM model\n @param: K: numpy array - kernel\n \"\"\"\n predictions = []\n # assert False, np.shape(K)[1]\n for i in range(np.shape(K)[0]):\n # print(\"i\", i)\n pred = 0\n for k, j in enumerate(range(np.shape(K)[1])):\n # print(\"k\", k, \"j\", j)\n # print(K[i, j])\n pred += self.alpha[k] * K[i, j]\n # assert(False)\n predictions.append(np.sign(pred))\n return predictions\n\n\ndef score(predict, label):\n \"\"\"\n Evaluate performances of the model. 
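The removeNthFromEnd record above assumes a ListNode class that is not part of the snippet. A minimal definition plus a harness to exercise the two-pointer solution (the helper names are hypothetical):

class ListNode(object):
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

def to_pylist(head):
    out = []
    while head:
        out.append(head.val)
        head = head.next
    return out

head = ListNode(1, ListNode(2, ListNode(3, ListNode(4, ListNode(5)))))
head = Solution().removeNthFromEnd(head, 2)
print(to_pylist(head))  # [1, 2, 3, 5], per the docstring's example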
Compare predictions and true label\n @param: predict: numpy array - predictions\n label: numpy array - real labels of the sequences\n \"\"\"\n res = 0\n for i in range(len(label)):\n res += int(predict[i] == label[i]) / len(label)\n return res\n","sub_path":"version_0.5/svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":2652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"88831830","text":"from __future__ import print_function\n\nimport sys\nimport os\nimport time\nimport timeit\nimport numpy as np\nimport tensorflow.keras as keras\nimport tensorflow as tf\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics.cluster import normalized_mutual_info_score, adjusted_rand_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.decomposition import PCA\nfrom sklearn.utils import linear_assignment_\nfrom sklearn.metrics import accuracy_score\nimport tensorflow.keras.backend as K\ntry:\n import cPickle as pickle\nexcept:\n import pickle\nimport h5py\nfrom sklearn.metrics.pairwise import euclidean_distances\nfrom sklearn.metrics import mean_squared_error\ntry:\n from six.moves import xrange\nexcept:\n pass\n\nimport scipy\nfrom numpy.matlib import repmat\nfrom scipy.spatial.distance import cdist\nfrom scipy import sparse\n\n\ndef gacPathCondEntropy(IminuszW, cluster_i, cluster_j):\n # Compute conditional complexity from the subpart of the weighted adjacency matrix\n # Inputs:\n # - IminuszW: the matrix (I - z*P)\n #\t- cluster_i: index vector of cluster i\n #\t- cluster_j: index vector of cluster j\n # Output:\n #\t- L_ij - the sum of conditional complexities of cluster i and j after merging.\n # by Wei Zhang (wzhang009 at gmail.com), June, 8, 2011\n\n num_i = np.size(cluster_i)\n num_j = np.size(cluster_j)\n\n # detecting cross elements (this check costs much and is unnecessary)\n\n ijGroupIndex = np.append(cluster_i, cluster_j)\n\n y_ij = np.zeros((num_i + num_j, 2)) # [y_i, y_j]\n y_ij[:num_i, 0] = 1\n y_ij[num_i:, 1] = 1\n idx = np.ix_(ijGroupIndex, ijGroupIndex)\n L_ij = scipy.linalg.inv(IminuszW[idx]).dot(y_ij)\n L_ij = sum(L_ij[:num_i, 0]) / (num_i * num_i) + sum(L_ij[num_i:, 1]) / (num_j * num_j)\n\n return L_ij\n\n\ndef gacPathEntropy(subIminuszW):\n # Compute structural complexity from the subpart of the weighted adjacency matrix\n # Input:\n # - subIminuszW: the subpart of (I - z*P)\n # Output:\n #\t- clusterComp - strucutral complexity of a cluster.\n # by Wei Zhang (wzhang009 at gmail.com), June, 8, 2011\n\n N = subIminuszW.shape[0]\n clusterComp = scipy.linalg.inv(subIminuszW).dot(np.ones((N, 1)))\n clusterComp = sum(clusterComp) / (N * N)\n\n return clusterComp\n\n\ndef gacMerging(graphW, initClusters, groupNumber, strDescr, z):\n # Cluster merging for Graph Agglomerative Clustering\n # Implements an agglomerative clustering algorithm based on maiximum graph\n # strcutural affinity of two groups\n # Inputs:\n #\t- graphW: asymmetric weighted adjacency matrix\n # - initClusters: a cell array of clustered vertices\n # - groupNumber: the final number of clusters\n # - strDescr: structural descriptor, 'zeta' or 'path'\n # - z: (I - z*P), default: 0.01\n # Outputs:\n # - clusterLabels: 1 x m list whose i-th entry is the group assignment of\n # the i-th data vector w_i. 
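A hypothetical end-to-end run of the SVM record above on a toy linearly separable set; the Gram matrix is a plain linear kernel (with a tiny ridge so the QP's P matrix is positive definite), and cvxopt must be installed:

import numpy as np

X = np.array([[0.0, 1.0], [0.2, 0.9], [1.0, 0.0], [0.9, 0.1]])
y = np.array([1.0, 1.0, -1.0, -1.0])
K = X @ X.T + 1e-6 * np.eye(4)  # linear-kernel Gram matrix, regularized

model = SVM(l=0.1)              # the record's class; l is lambda
model.fit(K, y)
preds = model.predict(K)        # rows: test points, columns: training points
print(score(preds, y))          # fraction of correctly signed predictions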
Groups are indexed\n # sequentially, starting from 1.\n # by Wei Zhang (wzhang009 at gmail.com), June, 8, 2011\n\n numSample = graphW.shape[0]\n IminuszW = np.eye(numSample) - z * graphW\n myInf = 1e10\n\n # initialization\n VERBOSE = True\n\n numClusters = len(initClusters)\n if numClusters <= groupNumber:\n print('GAC: too few initial clusters. Do not need merging!');\n\n # compute the structural complexity of each initial cluster\n clusterComp = np.zeros((numClusters, 1))\n for i in xrange(numClusters):\n clusterComp[i] = gacPathEntropy(IminuszW[np.ix_(initClusters[i], initClusters[i])])\n\n # compute initial(negative) affinity table(upper trianglar matrix), very slow\n if VERBOSE:\n print(' Computing initial table.')\n\n affinityTab = np.full(shape=(numClusters, numClusters), fill_value=np.inf)\n for j in xrange(numClusters):\n for i in xrange(j):\n affinityTab[i, j] = -1 * gacPathCondEntropy(IminuszW, initClusters[i], initClusters[j])\n\n affinityTab = (clusterComp + clusterComp.T) + affinityTab\n\n if VERBOSE:\n print(' Starting merging process')\n\n curGroupNum = numClusters\n while True:\n if np.mod(curGroupNum, 20) == 0 and VERBOSE:\n print(' Group count: ', str(curGroupNum))\n\n # Find two clusters with the best affinity\n minAff = np.min(affinityTab[:curGroupNum, :curGroupNum], axis=0)\n minIndex1 = np.argmin(affinityTab[:curGroupNum, :curGroupNum], axis=0)\n minIndex2 = np.argmin(minAff)\n minIndex1 = minIndex1[minIndex2]\n if minIndex2 < minIndex1:\n minIndex1, minIndex2 = minIndex2, minIndex1\n\n # merge the two clusters\n\n new_cluster = np.unique(np.append(initClusters[minIndex1], initClusters[minIndex2]))\n\n # move the second cluster to be merged to the end of the cluster array\n # note that we only need to copy the end cluster's information to\n # the second cluster 's position\n if minIndex2 != curGroupNum:\n initClusters[minIndex2] = initClusters[-1]\n clusterComp[minIndex2] = clusterComp[curGroupNum - 1]\n # affinityTab is an upper triangular matrix\n affinityTab[: minIndex2, minIndex2] = affinityTab[:minIndex2, curGroupNum - 1]\n affinityTab[minIndex2, minIndex2 + 1: curGroupNum - 1] = affinityTab[minIndex2 + 1:curGroupNum - 1,\n curGroupNum - 1]\n\n # update the first cluster and remove the second cluster\n initClusters[minIndex1] = new_cluster\n initClusters.pop()\n clusterComp[minIndex1] = gacPathEntropy(IminuszW[np.ix_(new_cluster, new_cluster)])\n clusterComp[curGroupNum - 1] = myInf\n affinityTab[:, curGroupNum - 1] = myInf\n affinityTab[curGroupNum - 1, :] = myInf\n curGroupNum = curGroupNum - 1\n if curGroupNum <= groupNumber:\n break\n\n # update the affinity table for the merged cluster\n for groupIndex1 in xrange(minIndex1):\n affinityTab[groupIndex1, minIndex1] = -1 * gacPathCondEntropy(IminuszW, initClusters[groupIndex1],\n new_cluster)\n for groupIndex1 in xrange(minIndex1 + 1, curGroupNum):\n affinityTab[minIndex1, groupIndex1] = -1 * gacPathCondEntropy(IminuszW, initClusters[groupIndex1],\n new_cluster)\n affinityTab[:minIndex1, minIndex1] = clusterComp[:minIndex1].reshape(-1) + clusterComp[minIndex1] + affinityTab[\n :minIndex1,\n minIndex1]\n affinityTab[minIndex1, minIndex1 + 1: curGroupNum] = clusterComp[minIndex1 + 1: curGroupNum].T + clusterComp[\n minIndex1] + affinityTab[minIndex1, minIndex1 + 1:curGroupNum]\n\n # generate sample labels\n clusterLabels = np.ones((numSample, 1))\n for i in xrange(len(initClusters)):\n clusterLabels[initClusters[i]] = i\n if VERBOSE:\n print(' Final group count: ', str(curGroupNum))\n\n return 
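gacPathEntropy above is just inv(I - z*W) applied to a ones vector, averaged over the squared cluster size. A three-vertex toy check of that formula, using the same scipy call as the record:

import numpy as np
import scipy.linalg

W = np.array([[0.0, 0.5, 0.5],
              [0.5, 0.0, 0.5],
              [0.5, 0.5, 0.0]])   # toy transition weights
IminuszW = np.eye(3) - 0.01 * W
comp = scipy.linalg.inv(IminuszW).dot(np.ones((3, 1)))
print(comp.sum() / (3 * 3))       # equals gacPathEntropy(IminuszW)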
clusterLabels\n\n\ndef gacNNMerge(distance_matrix, NNIndex):\n # merge each vertex with its nearest neighbor\n # by Wei Zhang (wzhang009 at gmail.com), June, 8, 2011\n #\n\n # NN indices\n sampleNum = distance_matrix.shape[0]\n clusterLabels = np.zeros((sampleNum, 1))\n counter = 1\n for i in xrange(sampleNum):\n idx = NNIndex[i, :2]\n assignedCluster = clusterLabels[idx]\n assignedCluster = np.unique(assignedCluster[np.where(assignedCluster > 0)])\n if len(assignedCluster) == 0:\n clusterLabels[idx] = counter\n counter = counter + 1\n elif len(assignedCluster) == 1:\n clusterLabels[idx] = assignedCluster\n else:\n clusterLabels[idx] = assignedCluster[0]\n for j in xrange(1, len(assignedCluster)):\n clusterLabels[np.where(clusterLabels == assignedCluster[j])] = assignedCluster[0]\n\n uniqueLabels = np.unique(clusterLabels)\n clusterNumber = len(uniqueLabels)\n\n initialClusters = []\n for i in xrange(clusterNumber):\n initialClusters.append(np.where(clusterLabels[:].flatten() == uniqueLabels[i])[0])\n\n return initialClusters\n\n\ndef gacBuildDigraph(distance_matrix, K, a):\n # Build directed graph\n # Input:\n # - distance_matrix: pairwise distances, d_{i -> j}\n # - K: the number of nearest neighbors for KNN graph\n # - a: for covariance estimation\n # sigma^2 = (\\sum_{i=1}^n \\sum_{j \\in N_i^K} d_{ij}^2) * a\n # - graphW: asymmetric weighted adjacency matrix,\n # w_{ij} = exp(- d_{ij}^2 / sig2), if j \\in N_i^K\n #\t- NNIndex: (2K) nearest neighbors, N x (2K+1) matrix\n # by Wei Zhang (wzhang009 at gmail.com), June, 8, 2011\n\n # NN indices\n N = distance_matrix.shape[0]\n # find 2*K NNs in the sense of given distances\n sortedDist = np.sort(distance_matrix, axis=1)\n NNIndex = np.argsort(distance_matrix, axis=1)\n NNIndex = NNIndex[:, :K + 1]\n\n # estimate derivation\n sig2 = np.mean(np.mean(sortedDist[:, 1:max(K + 1, 4)])) * a\n #########\n tmpNNDist = np.min(sortedDist[:, 1:], axis=1)\n while any(np.exp(- tmpNNDist / sig2) < 1e-5): # check sig2 and magnify it if it is too small\n sig2 = 2 * sig2\n\n #########\n print(' sigma = ', str(np.sqrt(sig2)))\n\n # build graph\n ND = sortedDist[:, 1:K + 1]\n NI = NNIndex[:, 1:K + 2]\n XI = repmat(np.arange(0, N).reshape(-1, 1), 1, K)\n sig2 = np.double(sig2)\n ND = np.double(ND)\n graphW = sparse.csc_matrix((np.exp(-ND[:] * (1 / sig2)).flatten(), (XI[:].flatten(), NI[:].flatten())),\n shape=(N, N)).todense()\n graphW += np.eye(N)\n\n return graphW, NNIndex\n\n\ndef gacCluster(distance_matrix, groupNumber, strDescr, K, a, z):\n # Graph Agglomerative Clustering toolbox\n # Input:\n # - distance_matrix: pairwise distances, d_{i -> j}\n # - groupNumber: the final number of clusters\n # - strDescr: structural descriptor. The choice can be\n # - 'zeta': zeta function based descriptor\n # - 'path': path integral based descriptor\n # - K: the number of nearest neighbors for KNN graph, default: 20\n # - p: merging (p+1)-links in l-links algorithm, default: 1\n # - a: for covariance estimation, default: 1\n # sigma^2 = (\\sum_{i=1}^n \\sum_{j \\in N_i^K} d_{ij}^2) * a\n # - z: (I - z*P), default: 0.01\n # Output:\n # - clusteredLabels: clustering results\n # by Wei Zhang (wzhang009 at gmail.com), June, 8, 2011\n #\n # Please cite the following papers, if you find the code is helpful\n #\n # W. Zhang, D. Zhao, and X. Wang.\n # Agglomerative clustering via maximum incremental path integral.\n # Pattern Recognition, 46 (11): 3056-3065, 2013.\n #\n # W. Zhang, X. Wang, D. Zhao, and X. 
Tang.\n # Graph Degree Linkage: Agglomerative Clustering on a Directed Graph.\n # in Proceedings of European Conference on Computer Vision (ECCV), 2012.\n\n print('--------------- Graph Structural Agglomerative Clustering ---------------------');\n\n # initialization\n\n print('---------- Building graph and forming initial clusters with l-links ---------');\n [graphW, NNIndex] = gacBuildDigraph(distance_matrix, K, a);\n # from adjacency matrix to probability transition matrix\n graphW = np.array((1. / np.sum(graphW, axis=1))) * np.array(graphW) # row sum is 1\n initialClusters = gacNNMerge(distance_matrix, NNIndex)\n\n print('-------------------------- Zeta merging --------------------------');\n clusteredLabels = gacMerging(graphW, initialClusters, groupNumber, strDescr, z);\n\n return clusteredLabels\n\n\ndef predict_ac_mpi(feat, nClass, nSamples, nfeatures):\n K = 20\n a = 1\n z = 0.01\n\n distance_matrix = cdist(feat, feat) ** 2\n # path intergral\n label_pre = gacCluster(distance_matrix, nClass, 'path', K, a, z)\n\n return label_pre[:, 0]\n\n\ndef bestMap(L1, L2):\n if L1.__len__() != L2.__len__():\n print('size(L1) must == size(L2)')\n\n Label1 = np.unique(L1)\n nClass1 = Label1.__len__()\n Label2 = np.unique(L2)\n nClass2 = Label2.__len__()\n\n nClass = max(nClass1, nClass2)\n G = np.zeros((nClass, nClass))\n for i in range(nClass1):\n for j in range(nClass2):\n G[i][j] = np.nonzero((L1 == Label1[i]) * (L2 == Label2[j]))[0].__len__()\n\n c = linear_assignment_.linear_assignment(-G.T)[:, 1]\n newL2 = np.zeros(L2.__len__())\n for i in range(nClass2):\n for j in np.nonzero(L2 == Label2[i])[0]:\n if len(Label1) > c[i]:\n newL2[j] = Label1[c[i]]\n\n return accuracy_score(L1, newL2)\n\n\ndef dataset_settings(dataset):\n if (dataset == 'MNIST-full') or (dataset == 'MNIST-test'):\n kernel_sizes = [4, 5]\n strides = [2, 2]\n paddings = ['same', 'same']\n test_batch_size = 100\n elif dataset == 'USPS':\n kernel_sizes = [4, 5]\n strides = [2, 2]\n paddings = ['same', 'same']\n test_batch_size = 100\n elif dataset == 'FRGC':\n kernel_sizes = [4, 5]\n strides = [2, 2]\n paddings = ['same', 'same']\n test_batch_size = 1231\n elif dataset == 'CMU-PIE':\n kernel_sizes = [4, 5]\n strides = [2, 2]\n paddings = ['same', 'same']\n test_batch_size = 8\n elif dataset == 'YTF':\n kernel_sizes = [5, 4]\n strides = ['same', 'same']\n paddings = [2, 'same']\n test_batch_size = 100\n elif dataset == 'JD':\n kernel_sizes = [4, 5]\n strides = [2, 2]\n paddings = ['same', 'same']\n test_batch_size = 100\n dropouts = [0.1, 0.1, 0.0]\n feature_map_sizes = [50, 50, 10]\n return kernel_sizes, strides, paddings, test_batch_size,dropouts,feature_map_sizes\n\n\ndef create_result_dirs(output_path, file_name):\n if not os.path.exists(output_path):\n print('creating log folder')\n os.makedirs(output_path)\n try:\n os.makedirs(os.path.join(output_path, '../params'))\n except:\n pass\n func_file_name = os.path.basename(__file__)\n if func_file_name.split('.')[1] == 'pyc':\n func_file_name = func_file_name[:-1]\n functions_full_path = os.path.join(output_path, func_file_name)\n cmd = 'cp ' + func_file_name + ' \"' + functions_full_path + '\"'\n os.popen(cmd)\n run_file_full_path = os.path.join(output_path, file_name)\n cmd = 'cp ' + file_name + ' \"' + run_file_full_path + '\"'\n os.popen(cmd)\n\n\nclass Logger(object):\n def __init__(self, output_path):\n self.terminal = sys.stdout\n self.log = open(output_path + \"log.txt\", \"w+\")\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n 
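bestMap above relies on sklearn.utils.linear_assignment_, which modern scikit-learn releases no longer ship. An equivalent sketch with SciPy's Hungarian solver, assuming integer labels in 0..k-1:

import numpy as np
from scipy.optimize import linear_sum_assignment

def clustering_accuracy(y_true, y_pred):
    y_true = np.asarray(y_true, dtype=int)
    y_pred = np.asarray(y_pred, dtype=int)
    k = max(y_true.max(), y_pred.max()) + 1
    cont = np.zeros((k, k), dtype=np.int64)    # contingency table
    for t, p in zip(y_true, y_pred):
        cont[t, p] += 1
    # maximizing matched counts == minimizing the negated table
    rows, cols = linear_sum_assignment(-cont)
    return cont[rows, cols].sum() / y_true.size

print(clustering_accuracy([0, 0, 1, 1], [1, 1, 0, 0]))  # 1.0 after relabelling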
def flush(self):\n        # this flush method is needed for python 3 compatibility.\n        # this handles the flush command by doing nothing.\n        # you might want to specify some extra behavior here.\n        pass\n\n\ndef kmeans(encoder_val_clean, y, nClusters, y_pred_prev=None, weight_initilization='k-means++', seed=42, n_init=40,\n           max_iter=300):\n    # weight_initilization = { 'kmeans-pca', 'kmean++', 'random', None }\n\n    if weight_initilization == 'kmeans-pca':\n\n        start_time = timeit.default_timer()\n        pca = PCA(n_components=nClusters).fit(encoder_val_clean)\n        kmeans_model = KMeans(init=pca.components_, n_clusters=nClusters, n_init=1, max_iter=300, random_state=seed)\n        y_pred = kmeans_model.fit_predict(encoder_val_clean)\n\n        centroids = kmeans_model.cluster_centers_.T\n        centroids = centroids / np.sqrt(np.diag(np.matmul(centroids.T, centroids)))\n\n        end_time = timeit.default_timer()\n\n    elif weight_initilization == 'k-means++':\n\n        start_time = timeit.default_timer()\n        kmeans_model = KMeans(init='k-means++', n_clusters=nClusters, n_init=n_init, max_iter=max_iter, n_jobs=15,\n                              random_state=seed)\n        y_pred = kmeans_model.fit_predict(encoder_val_clean)\n\n        centroids = kmeans_model.cluster_centers_.T\n        centroids = centroids / np.sqrt(np.diag(np.matmul(centroids.T, centroids)))\n\n        end_time = timeit.default_timer()\n    if y[0] >= 0:\n        print('k-means: \\t nmi =', normalized_mutual_info_score(y, y_pred), '\\t arc =', adjusted_rand_score(y, y_pred),\n              '\\t acc = {:.4f} '.format(bestMap(y, y_pred)),\n              'K-means objective = {:.1f} '.format(kmeans_model.inertia_), '\\t runtime =', end_time - start_time)\n\n    if y_pred_prev is not None:\n        print('Different Assignments: ', sum(y_pred == y_pred_prev), '\\tbestMap: ', bestMap(y_pred, y_pred_prev),\n              '\\tdatapoints-bestMap*datapoints: ',\n              encoder_val_clean.shape[0] - bestMap(y_pred, y_pred_prev) * encoder_val_clean.shape[0])\n\n    return centroids, kmeans_model.inertia_, y_pred\n\n\ndef load_dataset(dataset_path):\n    hf = h5py.File(dataset_path + '/data.h5', 'r')\n    X = np.asarray(hf.get('data'), dtype='float32')\n    X_train = (X - np.float32(127.5)) / np.float32(127.5)\n    y_train = np.asarray(hf.get('labels'), dtype='int32')\n    return X_train, y_train\n\ndef parse_function(filename,size):\n    image_data = tf.read_file(filename)\n    img = tf.image.decode_jpeg(image_data)\n    img = tf.cast(img,tf.float32)\n    img = tf.image.resize_images(img,[size,size])\n    img = img / 127.5 - 1\n    return img\n\ndef iterate_minibatches(inputs, targets, batchsize, shuffle=False):\n    assert len(inputs) == len(targets)\n    if shuffle:\n        indices = np.arange(len(inputs))\n        np.random.shuffle(indices)\n    # anything beyond the last full slice is dropped automatically\n    for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):\n        if shuffle:\n            excerpt = indices[start_idx:start_idx + batchsize]\n        else:\n            excerpt = slice(start_idx, start_idx + batchsize)\n        yield inputs[excerpt], targets[excerpt], excerpt\n\n\nclass Downsample(tf.keras.Model):\n\n    def __init__(self, filters, size,strides,padding,dropout=0.5):\n        super(Downsample, self).__init__()\n        initializer = tf.random_normal_initializer(0., 0.02)\n        self.conv = tf.keras.layers.Conv2D(filters,\n                                           (size, size),\n                                           strides=2,\n                                           padding=padding,\n                                           kernel_initializer=initializer,\n                                           use_bias=True)\n        self.dropout = tf.keras.layers.Dropout(dropout)\n\n    def call(self, x, training):\n        x = self.conv(x)\n        x = self.dropout(x, training=training)\n        x = tf.nn.leaky_relu(x,alpha=0.01)\n        return x\n\nclass Encoder(tf.keras.Model):\n\n    def __init__(self, feature_map_sizes,\n                 dropouts, kernel_sizes, strides,\n                 paddings):\n        super(Encoder, 
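iterate_minibatches above silently drops a trailing partial batch, because its range stops at len(inputs) - batchsize + 1 (the translated comment notes the same). A quick stand-alone check using the function as defined:

import numpy as np

X = np.arange(10).reshape(10, 1)
y = np.arange(10)
for xb, yb, idx in iterate_minibatches(X, y, batchsize=4):
    print(yb)   # [0 1 2 3] then [4 5 6 7]; samples 8 and 9 are skipped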
self).__init__()\n initializer = tf.random_normal_initializer(0., 0.02)\n self.bottom_layers = []\n self.first_layer = tf.keras.layers.Dropout(rate=dropouts[0])\n self.bottom_layers.append(self.first_layer)\n self.middle_layers_num = len(kernel_sizes)\n for i in range(self.middle_layers_num):\n l_ei = Downsample(feature_map_sizes[i],kernel_sizes[i],strides[i],padding=paddings[i],dropout=dropouts[i+1])\n self.bottom_layers.append(l_ei)\n self.flatten_layer = tf.keras.layers.Flatten()\n self.last_layer = tf.keras.layers.Dense(units=feature_map_sizes[self.middle_layers_num],activation=keras.activations.tanh)\n #self.bottom_layers.append(self.last_layer)\n def call(self, x, training):\n # x shape == (bs, 256, 256, 3)\n outputs = []\n outputs.append(x)\n for i,layer in enumerate(self.bottom_layers):\n x = layer(x,training=training)\n if i > 0:\n outputs.append(x)\n x = self.flatten_layer(x)\n x = self.last_layer(x)\n outputs.append(x)\n return outputs\n\nclass Decoder(tf.keras.Model):\n\n def __init__(self,sec_shape, feature_map_sizes,\n kernel_sizes, strides,\n paddings):\n super(Decoder, self).__init__()\n initializer = tf.random_normal_initializer(0., 0.02)\n self.top_layers = []\n self.first_layer = tf.keras.layers.Dense(units=sec_shape[1] * sec_shape[2] *sec_shape[3])\n #self.top_layers.append(self.first_layer)\n self.reshape_layer = keras.layers.Reshape(sec_shape[1:])\n\n self.middle_layers_num = len(kernel_sizes)\n for i in range(self.middle_layers_num):\n if i < self.middle_layers_num - 1:\n activation = keras.layers.LeakyReLU(alpha=0.01)\n else:\n activation = keras.activations.tanh\n l_di = keras.layers.Conv2DTranspose(filters=feature_map_sizes[-i-1],kernel_size=kernel_sizes[-i-1],strides=strides[-i-1],padding=paddings[-i-1],activation=activation)\n self.top_layers.append(l_di)\n def call(self, x,training=False):\n # x shape == (bs, 256, 256, 3)\n outputs = []\n x = self.first_layer(x)\n x = self.reshape_layer(x)\n outputs.append(x)\n for layer in self.top_layers:\n x = layer(x)\n outputs.append(x)\n return outputs\n\nclass AE(keras.models.Model):\n def __init__(self,encoder,decoder):\n super(AE, self).__init__()\n self.encoder = encoder\n self.decoder = decoder\n def call(self, inputs, training=False):\n encoder_outs = self.encoder(inputs,training=training)\n decoder_outs = self.decoder(encoder_outs[-1])\n return encoder_outs,decoder_outs\n\ndef AE_loss(encoder_outs,decoder_outs):\n loss_recons = []\n for i in range(len(decoder_outs)):\n loss_recons.append(\n tf.losses.mean_squared_error(decoder_outs[i], encoder_outs[-i - 2])\n )\n loss_recon = sum(loss_recons)\n return loss_recon\n\ndef build_depict(input_var, feature_map_sizes=[50, 50],\n dropouts=[0.1, 0.1, 0.1], kernel_sizes=[5, 5], strides=[2, 2],\n paddings=[2, 2], hlayer_loss_param=0.1):\n # ENCODER\n input_layer = keras.layers.Input(tensor=input_var)\n encoder = Encoder(feature_map_sizes,\n dropouts, kernel_sizes, strides,\n paddings)\n encoder_outs = encoder(input_layer,training=True)\n #encoder_clean_outs = encoder(input_layer,training=False)\n # DECODER\n decoder_feature_map_sizes = [input_var.shape[-1].value] + feature_map_sizes[:-2]\n decoder = Decoder(encoder_outs[-2].shape, decoder_feature_map_sizes,\n kernel_sizes, strides,\n paddings)\n decoder_outs = decoder(encoder_outs[-1])\n # decoder_clean_outs = decoder(encoder_clean_outs[-1])\n ae = AE(encoder,decoder)\n #ae = keras.models.Model(inputs=input_layer,outputs=decoder.outputs[-1])\n # loss_recons = []\n # loss_clean_recons = []\n # for i in 
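AE_loss above matches decoder outputs against encoder outputs taken in reverse (decoder_outs[i] vs encoder_outs[-i-2]), i.e. every decoder stage is pulled toward the corresponding clean encoder feature map, not just the final reconstruction. The pairing in isolation, with illustrative names:

# Encoder.call returns: input, each conv map, then the dense code.
enc = ['x', 'h1', 'h2', 'code']
# Decoder.call returns: the reshaped code first, the reconstruction last.
dec = ['code_as_map', 'up1', 'recon']
pairs = [(dec[i], enc[-i - 2]) for i in range(len(dec))]
print(pairs)  # [('code_as_map', 'h2'), ('up1', 'h1'), ('recon', 'x')]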
range(len(decoder_outs)):\n # loss_recons.append(\n # tf.losses.mean_squared_error(decoder_outs[i], encoder_clean_outs[-i-2])\n # )\n # loss_clean_recons.append(tf.losses.mean_squared_error(decoder_clean_outs[i], encoder_clean_outs[-i-2]))\n # loss_recon = sum(loss_recons)\n # loss_clean_recons = sum(loss_clean_recons)\n return ae #, loss_recon, loss_clean_recons\n\ndef train_depict_ae(dataset_name,dataset_full,y, ae,input_var, num_clusters, output_path,val_size,\n batch_size=100, test_batch_size=100, num_epochs=1000, learning_rate=1e-4, verbose=1, seed=42,\n continue_training=False):\n split = len(ae.encoder.outputs)\n ae_outs = ae(input_var, training=True)\n encoder_outs = ae_outs[:split]\n decoder_outs = ae_outs[split:]\n ae_outs_clean = ae(input_var,training=False)\n encoder_clean_outs = ae_outs_clean[:split]\n decoder_clean_outs = ae_outs_clean[split:]\n loss_recon = AE_loss(encoder_clean_outs,decoder_outs)\n loss_clean_recon = AE_loss(encoder_clean_outs,decoder_clean_outs)\n best_weight_save_path = os.path.join(output_path, '../params/params_' + dataset_name + '_values_best.h5')\n best_val = np.inf\n last_update = 0\n # Load if pretrained weights are available.\n if os.path.exists(best_weight_save_path) and continue_training:\n #ae.load_weights(best_weight_save_path)\n pass\n else:\n # TRAIN MODEL\n if verbose > 1:\n encoder_clean = encoder_clean_outs[-1]\n\n dataset_train = dataset_full.skip(val_size).shuffle(2000).batch(batch_size)\n iterator = dataset_train.make_initializable_iterator()\n next_batch = iterator.get_next()\n\n dataset_val = dataset_full.take(val_size).batch(test_batch_size)\n val_iterator = dataset_val.make_initializable_iterator()\n val_batch = val_iterator.get_next()\n\n dataset_pred = dataset_full.batch(test_batch_size)\n pred_iterator = dataset_pred.make_initializable_iterator()\n pred_batch = pred_iterator.get_next()\n tf.summary.scalar(\"Loss_recon\",loss_recon)\n tf.summary.image(\"Reconstructed images\",decoder_clean_outs[-1])\n summery_op = tf.summary.merge_all()\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n train_op = optimizer.minimize(loss_recon)\n with tf.Session() as sess:\n if os.path.exists(best_weight_save_path):\n ae.load_weights(best_weight_save_path)\n else:\n sess.run(tf.global_variables_initializer())\n summery_writer = tf.summary.FileWriter(logdir=os.path.join(output_path,\"log\"),graph=sess.graph)\n for epoch in range(num_epochs + 1):\n start = time.time()\n sess.run(iterator.initializer)\n sess.run(val_iterator.initializer)\n num_batches = 0\n train_err = 0\n while True:\n try:\n inputs = sess.run(next_batch)\n if num_batches == 0:\n summery = sess.run(summery_op,{input_var:inputs})\n summery_writer.add_summary(summery,global_step=epoch)\n # summery = sess.run([summery_op,gen_output],feed_dict={input_image_holder:input_image,target_placeholder:target})\n train_err += sess.run([loss_recon,train_op],feed_dict={input_var:inputs})[0]\n # if step % 10 == 0:\n # summery_writer.add_summary(summery, global_step=step)\n num_batches += 1\n except tf.errors.OutOfRangeError:\n break\n vaildation_error = 0\n num_batches_val = 0\n while True:\n try:\n inputs = sess.run(val_batch)\n # summery = sess.run([summery_op,gen_output],feed_dict={input_image_holder:input_image,target_placeholder:target})\n vaildation_error += sess.run(loss_clean_recon,feed_dict={input_var:inputs})\n # if step % 10 == 0:\n # summery_writer.add_summary(summery, global_step=step)\n num_batches_val += 1\n except tf.errors.OutOfRangeError:\n break\n print(\"Epoch 
{} of {}\".format(epoch + 1, num_epochs),\n \"\\t training loss:{:.6f}\".format(train_err / num_batches),\n \"\\t valation loss:{:.6f}\".format(vaildation_error/ num_batches_val),\n \"\\t time: {:.2f} sec\".format(time.time()-start))\n # if epoch % 10 == 0:\n last_update += 1\n if vaildation_error < best_val:\n last_update = 0\n print(\"new best error: \", vaildation_error)\n best_val = vaildation_error\n ae.save_weights(best_weight_save_path)\n if last_update > 100:\n break\n\n if (verbose > 1) and (epoch % 50 == 0) and y[0] != -1:\n # Extract MdA features\n encoder_val_clean = []\n sess.run(pred_iterator.initializer)\n while True:\n try:\n inputs = sess.run(pred_batch)\n minibatch_x = sess.run(encoder_clean,feed_dict={input_var:inputs})\n encoder_val_clean.append(minibatch_x)\n\n except tf.errors.OutOfRangeError:\n break\n encoder_val_clean = np.concatenate(encoder_val_clean, axis=0)\n kmeans(encoder_val_clean, y, num_clusters, seed=seed)\n last_weight_path = os.path.join(output_path, '../params/params_' + dataset_name + '_values_last.h5')\n ae.save_weights(last_weight_path)\n #ae.load_weights(best_weight_save_path)\n #lasagne.layers.set_all_param_values(decoder, best_params_values)\n return best_weight_save_path\ndef clustering(dataset_name,dataset_full,y,input_var, ae, num_clusters,best_weight_save_path, output_path, test_batch_size=100, seed=42,\n continue_training=False):\n encoder_clean = ae.encoder(input_var,training=False)[-1]\n encoder_val_clean = []\n iterator_training = dataset_full.batch(test_batch_size).make_one_shot_iterator()\n next_batch_training = iterator_training.get_next()\n dataset_train_size = 0\n print(\"\\nStart preclustering\")\n sess = K.get_session()\n ae.load_weights(best_weight_save_path)\n while True:\n try:\n inputs = sess.run(next_batch_training)\n minibatch_x = sess.run(encoder_clean,feed_dict={input_var:inputs})\n encoder_val_clean.append(minibatch_x)\n except tf.errors.OutOfRangeError:\n break\n encoder_val_clean = np.concatenate(encoder_val_clean, axis=0)\n # Extract MdA features\n # Check kmeans results\n # kmeans(encoder_val_clean, y, num_clusters, seed=seed)\n initial_time = timeit.default_timer()\n if (dataset_name == 'MNIST-full') or (dataset_name == 'MNIST-test')or (dataset_name == 'FRGC') or (dataset_name == 'YTF') or (dataset_name == 'CMU-PIE') or (dataset_name == 'JD'):\n # K-means on MdA Features\n centroids, inertia, y_pred = kmeans(encoder_val_clean, y, num_clusters, seed=seed)\n y_pred = (np.array(y_pred)).reshape(np.array(y_pred).shape[0], )\n else:\n # AC-PIC on MdA Features\n if os.path.exists(os.path.join(output_path, '../params/pred' + dataset_name + '.pickle')) and continue_training:\n with open(os.path.join(output_path, '../params/pred' + dataset_name + '.pickle'), \"rb\") as input_file:\n y_pred = pickle.load(input_file, encoding='latin1')\n else:\n try:\n import matlab.engine\n eng = matlab.engine.start_matlab()\n eng.addpath(eng.genpath('matlab'))\n targets_init = eng.predict_ac_mpi(\n matlab.double(\n encoder_val_clean.reshape(encoder_val_clean.shape[0] * encoder_val_clean.shape[1]).tolist()),\n num_clusters, encoder_val_clean.shape[0], encoder_val_clean.shape[1])\n y_pred = (np.array(targets_init)).reshape(np.array(targets_init).shape[0], )\n eng.quit()\n y_pred = y_pred - 1\n except:\n y_pred = predict_ac_mpi(encoder_val_clean, num_clusters, encoder_val_clean.shape[0],\n encoder_val_clean.shape[1])\n with open(os.path.join(output_path, '../params/pred' + dataset_name + '.pickle'), \"wb\") as output_file:\n pickle.dump(y_pred, 
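train_depict_ae above early-stops with a patience counter: last_update resets whenever validation improves and training halts once it exceeds 100 epochs without improvement. The pattern stripped to its core, with a toy error sequence and a smaller budget:

best_val, last_update, patience = float('inf'), 0, 3   # the record uses 100
for epoch, val_err in enumerate([5.0, 4.0, 4.2, 4.1, 4.3, 4.4, 4.5]):
    last_update += 1
    if val_err < best_val:
        best_val, last_update = val_err, 0   # also the point to save weights
    if last_update > patience:
        break
print(epoch, best_val)   # stops at epoch 5 with best error 4.0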
output_file)\n\n    final_time = timeit.default_timer()\n    print('AC-PIC: \\t nmi = ', normalized_mutual_info_score(y, y_pred),\n          '\\t arc = ', adjusted_rand_score(y, y_pred),\n          '\\t acc = {:.4f} '.format(bestMap(y, y_pred)),\n          '\\t time taken = {:.4f}'.format(final_time - initial_time))\n    centroids_acpic = np.zeros(shape=(num_clusters, encoder_val_clean.shape[1]))\n    for i in range(num_clusters):\n        centroids_acpic[i] = encoder_val_clean[y_pred == i].mean(axis=0)\n\n    centroids = centroids_acpic.T\n    centroids = centroids_acpic / np.sqrt(np.diag(np.matmul(centroids.T, centroids)))\n\n    return np.int32(y_pred), np.float32(centroids)\n\n\n\ndef build_eml(n_out, W_initial=None):\n    if W_initial is None:\n        l_out = keras.layers.Dense(\n            n_out,activation=keras.activations.softmax,bias_initializer='ones')\n    else:\n        l_out = keras.layers.Dense(\n            n_out,activation=keras.activations.softmax,kernel_initializer=lambda shape,dtype,partition_info:W_initial)\n    return l_out\n    #return keras.Model(inputs=encoder.inputs,outputs=l_out(encoder.outputs[-1]))\n\ndef train_depict(dataset_name, dataset_full,y,y_pred, input_var,val_size, ae, num_clusters,ae_best_weight_save_path, output_path,\n                 batch_size=100, test_batch_size=100, num_epochs=1000, learning_rate=1e-4, prediction_status='soft',\n                 rec_mult=1, clus_mult=1, centroids=None, init_flag=1, continue_training=False):\n    ######################\n\n    # ADD RLC TO MdA #\n    ######################\n\n    # initial_time = timeit.default_timer()\n    rec_lambda = rec_mult\n    clus_lambda = clus_mult\n    pred_normalizition_flag = 1\n    target_init = tf.placeholder(dtype=tf.int32,shape=[None])\n    target_var = tf.placeholder(dtype=tf.float32,shape=[None,num_clusters])\n    split = len(ae.encoder.outputs)\n    ae_outs = ae(input_var, training=True)\n    encoder_outs = ae_outs[:split]\n    decoder_outs = ae_outs[split:]\n    encoder_clean_outs = ae.encoder(input_var, training=False)\n\n    classifier = build_eml(n_out=num_clusters, W_initial=centroids)\n    network_prediction_noisy = classifier(encoder_outs[-1])\n    network_prediction_clean = classifier(encoder_clean_outs[-1])\n    whole_model = keras.Model(inputs=ae.inputs,outputs=[ae.decoder.outputs[-1],classifier(ae.encoder.outputs[-1])])\n\n    loss_clus_init = tf.reduce_mean(keras.losses.sparse_categorical_crossentropy(target_init,network_prediction_noisy))\n    #params_init = lasagne.layers.get_all_params([decoder, network2], trainable=True)\n    # `soft` targets give every class a probability; `hard` puts probability 1 on a single class\n    loss_clus = tf.reduce_mean(keras.losses.categorical_crossentropy(target_var,\n                                                                     network_prediction_noisy))\n    loss_recons = AE_loss(encoder_clean_outs,decoder_outs)\n    loss = rec_lambda* loss_recons + clus_lambda * loss_clus\n    adam = tf.train.AdamOptimizer(learning_rate=learning_rate)\n    train_op = adam.minimize(loss)\n    loss_init = rec_lambda * loss_recons + clus_lambda * loss_clus_init\n    adam_init = tf.train.AdamOptimizer(learning_rate=learning_rate)\n    train_op_init = adam_init.minimize(loss_init)\n    weight_path = os.path.join(output_path, '../params/weights_' + dataset_name + '.h5')\n    print(\"\\n...Start DEPICT initialization\")\n    if init_flag:\n        if os.path.exists(weight_path) and continue_training:\n            pass\n        else:\n            y_targ_val = y_pred[:val_size]\n            y_targ_train = y_pred[val_size:]\n            dataset_train_init = tf.data.Dataset.zip((dataset_full,tf.data.Dataset.from_tensor_slices(y_pred))).skip(val_size).shuffle(2000).batch(batch_size)\n            dataset_val_init = dataset_full.take(val_size).batch(test_batch_size)\n            iter_train = dataset_train_init.make_initializable_iterator()\n            iter_val = 
dataset_val_init.make_initializable_iterator()\n iter_full = dataset_full.batch(test_batch_size).make_initializable_iterator()\n next_batch_val = iter_val.get_next()\n next_batch_training = iter_train.get_next()\n next_batch_full = iter_full.get_next()\n sess = K.get_session()\n\n sess.run(tf.global_variables_initializer())\n ae.load_weights(ae_best_weight_save_path)\n\n last_update = 0\n best_val = 0\n for epoch in range(1000):\n sess.run(iter_train.initializer)\n sess.run(iter_val.initializer)\n train_err, val_err = 0, 0\n lossre_train, lossre_val = 0, 0\n losspre_train, losspre_val = 0, 0\n num_batches_train = 0\n epoch_start = time.time()\n while True:\n try:\n inputs,tar = sess.run(next_batch_training)\n minibatch_error, lossrec, losspred = sess.run([loss_init, loss_recons, loss_clus_init, train_op_init], feed_dict={input_var: inputs,target_init:tar})[:-1]\n # if step % 10 == 0:\n # summery_writer.add_summary(summery, global_step=step)\n train_err += minibatch_error\n lossre_train += lossrec\n losspre_train += losspred\n num_batches_train += 1\n except tf.errors.OutOfRangeError:\n break\n y_val_prob = []\n while True:\n try:\n inputs = sess.run(next_batch_val)\n y_val_prob.append(sess.run(network_prediction_clean, feed_dict={input_var: inputs}))\n except tf.errors.OutOfRangeError:\n break\n y_val_prob = np.concatenate(y_val_prob)\n y_val_pred = np.argmax(y_val_prob, axis=1)\n val_nmi = normalized_mutual_info_score(y_targ_val, y_val_pred)\n print('epoch:', epoch + 1,\n '\\t loss= {:.10f}'.format(train_err / num_batches_train),\n '\\t loss_reconstruction= {:.10f}'.format(lossre_train / num_batches_train),\n '\\t loss_prediction= {:.10f}'.format(losspre_train / num_batches_train),\n '\\t val nmi = {:.4f} '.format(val_nmi),\n '\\t time = {:.2f} sec'.format(time.time()-epoch_start),\n )\n if (epoch % 50 == 0) and y[0] != -1:\n sess.run(iter_full.initializer)\n y_pred_train = []\n while True:\n try:\n inputs = sess.run(next_batch_full)\n minibatch_prob = sess.run(network_prediction_clean, feed_dict={input_var: inputs})\n minibatch_pred = np.argmax(minibatch_prob,axis=1)\n y_pred_train.append(minibatch_pred)\n except tf.errors.OutOfRangeError:\n break\n y_pred_train = np.concatenate(y_pred_train)\n print('epoch:', epoch + 1, '\\t nmi = {:.4f} '.format(normalized_mutual_info_score(y, y_pred_train)),\n '\\t arc = {:.4f} '.format(adjusted_rand_score(y, y_pred_train)),\n '\\t acc = {:.4f} '.format(bestMap(y, y_pred_train)))\n last_update += 1\n if val_nmi > best_val:\n last_update = 0\n print(\"new best val nmi: \", val_nmi)\n best_val = val_nmi\n whole_model.save_weights(weight_path)\n if last_update > 5:\n break\n # Epoch 0\n print(\"\\n...Start DEPICT training\")\n dataset_train = tf.data.Dataset.zip((dataset_full,tf.data.Dataset.range(y.shape[0]))).shuffle(2000).batch(batch_size)\n iter_train = dataset_train.make_initializable_iterator()\n next_batch_training = iter_train.get_next()\n\n rlc_weight_path = os.path.join(output_path, '../params/rlc_' + dataset_name + '.h5')\n if os.path.isfile(rlc_weight_path) and continue_training:\n whole_model.load_weights(rlc_weight_path)\n else:\n sess = K.get_session()\n sess.run(tf.global_variables_initializer())\n whole_model.load_weights(weight_path)\n y_prob = np.zeros((y.shape[0], num_clusters))\n for epoch in range(num_epochs):\n epoch_start = time.time()\n y_prob_prev = np.copy(y_prob)\n sess.run(iter_train.initializer)\n while True:\n try:\n inputs,idx = sess.run(next_batch_training)\n y_prob[idx] = 
sess.run(network_prediction_clean,feed_dict={input_var:inputs})\n except tf.errors.OutOfRangeError:\n break\n # y_prob_max = np.max(y_prob, axis=1)\n if pred_normalization_flag:\n # DEPICT-style auxiliary target: square the soft assignments,\n # divide by the soft cluster frequencies, then renormalize each\n # row to sum to 1; this sharpens confident assignments and\n # penalizes oversized clusters.\n cluster_frequency = np.sum(y_prob, axis=0)\n y_prob = y_prob ** 2 / cluster_frequency\n y_prob = np.transpose(y_prob.T / np.sum(y_prob, axis=1))\n y_pred = np.argmax(y_prob, axis=1)\n\n # In each epoch, we do a full pass over the training data:\n train_err = 0\n lossre = 0\n losspre = 0\n num_batches = 0\n sess.run(iter_train.initializer)\n while True:\n try:\n inputs, idx = sess.run(next_batch_training)\n minibatch_err, lossrec, losspred = sess.run([loss, loss_recons, loss_clus, train_op], feed_dict={input_var: inputs,target_var:y_prob[idx]})[:-1]\n train_err += minibatch_err\n lossre += lossrec\n losspre += losspred\n num_batches += 1\n except tf.errors.OutOfRangeError:\n break\n\n print('mse: ', mean_squared_error(y_prob, y_prob_prev))\n if epoch > 0 and mean_squared_error(y_prob, y_prob_prev) < 1e-7:\n whole_model.save_weights(rlc_weight_path)\n break\n print('epoch:', epoch + 1, '\t loss= {:.10f}'.format(train_err / num_batches),\n '\t loss_recons= {:.10f}'.format(lossre / num_batches),\n '\t loss_pred= {:.10f}'.format(losspre / num_batches))\n if y[0] >= 0:\n print('\t nmi = {:.4f} '.format(normalized_mutual_info_score(y, y_pred)),\n '\t arc = {:.4f} '.format(adjusted_rand_score(y, y_pred)),\n '\t acc = {:.4f} '.format(bestMap(y, y_pred)))\n print('\t time= {:.2f} sec'.format(time.time()-epoch_start))\n # test\n print(\"\n...Test DEPICT\")\n y_pred = np.zeros(y.shape[0])\n with K.get_session() as sess:\n sess.run(iter_train.initializer)\n while True:\n try:\n inputs, idx = sess.run(next_batch_training)\n minibatch_prob = sess.run(network_prediction_clean, feed_dict={input_var: inputs})\n y_pred[idx] = np.argmax(minibatch_prob, axis=1)\n except tf.errors.OutOfRangeError:\n break\n predict_result_path = os.path.join(output_path,'labels.npy')\n np.save(predict_result_path,y_pred)\n if y[0]>=0:\n print('final: ', '\t nmi = {:.4f} '.format(normalized_mutual_info_score(y, y_pred)),\n '\t arc = {:.4f} '.format(adjusted_rand_score(y, y_pred)),\n '\t acc = {:.4f} '.format(bestMap(y, y_pred)))\n","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":43741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"195486767","text":"from collections import defaultdict\r\nimport datetime\r\nimport itertools\r\nimport logging\r\n\r\n\r\nLOGGER = logging.getLogger(\"hotlist\")\r\nLOGGER.setLevel(logging.DEBUG)\r\nch = logging.StreamHandler()\r\nch.setLevel(logging.DEBUG)\r\n# create formatter and add it to the handlers\r\nformatter = logging.Formatter('[%(name)s]%(levelname)s: %(message)s')\r\nch.setFormatter(formatter)\r\n# add the handlers to the logger\r\nLOGGER.addHandler(ch)\r\n\r\nIMMUTABLE_TYPES = set([\r\n int, float, str,\r\n datetime.datetime, datetime.date, datetime.time, datetime.timedelta,\r\n])\r\n\r\ntry:\r\n IMMUTABLE_TYPES.add(long)\r\n IMMUTABLE_TYPES.add(unicode)\r\nexcept NameError:\r\n # 'long' and 'unicode' only exist on Python 2\r\n pass\r\n\r\n\r\n\r\ndef add_immutable_type(type_name):\r\n \"\"\"\r\n Immutable types can be added to the set of immutable types, which\r\n are allowed for hot properties.\r\n \"\"\"\r\n IMMUTABLE_TYPES.add(type_name)\r\n\r\n\r\nclass HotContainer(object):\r\n \"\"\"\r\n HotContainer can maintain listeners and fire events.\r\n\r\n Expected use:\r\n class MyContainer(HotContainer):\r\n property1 = HotProperty()\r\n property2 = 
HotProperty()\r\n These properties can be assigned only immutable values and whenever\r\n the property is assigned into, an event is fired.\r\n \"\"\"\r\n def __init__(self):\r\n self._listeners = []\r\n\r\n def add_listener(self, listener):\r\n \"\"\"\r\n Appends a listener to the listener list. The listeners are called\r\n in the order in which they were added.\r\n \"\"\"\r\n self._listeners.append(listener)\r\n\r\n @property\r\n def listeners(self):\r\n return self._listeners\r\n\r\n def _fire(self, model, fqname, event_name, key):\r\n \"\"\"\r\n Fire an event.\r\n \"\"\"\r\n LOGGER.debug(\r\n \"FIRE: from=%s event=%s key=%s\",\r\n fqname, event_name, key,\r\n )\r\n for listener in self.listeners:\r\n try:\r\n listener(model, fqname, event_name, key)\r\n except Exception:\r\n LOGGER.exception(\r\n \"Error firing %s to %s\",\r\n event_name, listener,\r\n )\r\n\r\n\r\nclass HotContainee(object):\r\n \"\"\"\r\n A base class for the data structures assignable to HotProperty.\r\n A Containee knows its container and uses it to fire events.\r\n \"\"\"\r\n def __init__(self, name=None, container=None):\r\n self.set_rel(name, container)\r\n\r\n def _fire(self, event_name, key):\r\n self._container._fire(self, self._name, event_name, key)\r\n\r\n def set_rel(self, name, container):\r\n self._name = name\r\n self._container = container\r\n\r\n\r\nclass HotProperty(object):\r\n \"\"\"\r\n A descriptor class for controlling a property, which fires an event\r\n when changed. See :HotContainer for details.\r\n\r\n Inserts the values into the containing object's dictionary under key\r\n \"__hot_%s\" % id(self). Apart from that, caches the property's name\r\n in the containing object's dictionary under key self.key + \"_rel\".\r\n \"\"\"\r\n def __init__(self, **kw):\r\n \"\"\"\r\n Initialize the HotProperty.\r\n \"\"\"\r\n super(HotProperty, self).__init__(**kw)\r\n self.key = \"__hot_%s\" % id(self)\r\n\r\n def __get__(self, obj, objtype):\r\n \"\"\"\r\n Returns the value of the property within the object.\r\n Cannot be called on a class.\r\n \"\"\"\r\n if obj is None:\r\n return self\r\n return getattr(obj, self.key, None)\r\n\r\n def __set__(self, obj, val):\r\n \"\"\"\r\n Checks that the new value for the property is immutable.\r\n Checks that we are called on an object, not a class.\r\n \"\"\"\r\n if type(val) in IMMUTABLE_TYPES:\r\n pass\r\n elif isinstance(val, HotContainee):\r\n pass\r\n else:\r\n raise TypeError(\r\n \"Can only assign immutable types or Containees here.\",\r\n )\r\n\r\n name = self._get_name_within_parent(obj)\r\n if isinstance(val, HotContainee):\r\n val.set_rel(name, obj)\r\n\r\n setattr(obj, self.key, val)\r\n obj._fire(val, name, \"reset\", None)\r\n\r\n def _get_name_within_parent(self, obj):\r\n \"\"\"\r\n Returns the name under which this property is stored in the\r\n parent. Looks up self in type(obj) if not yet cached.\r\n \"\"\"\r\n if (self.key + \"_rel\") in obj.__dict__:\r\n return obj.__dict__[self.key + \"_rel\"]\r\n\r\n for (k, v) in type(obj).__dict__.items():\r\n if v == self:\r\n setattr(obj, self.key + \"_rel\", k)\r\n return k\r\n raise Exception(\"Could not find parent\")\r\n\r\n
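# Example usage (an illustrative sketch; Thermostat and log_event are\r\n# hypothetical names, not part of this module):\r\n#\r\n# class Thermostat(HotContainer):\r\n#     temperature = HotProperty()\r\n#\r\n# def log_event(model, fqname, event_name, key):\r\n#     print(model, fqname, event_name, key)\r\n#\r\n# thermostat = Thermostat()\r\n# thermostat.add_listener(log_event)\r\n# thermostat.temperature = 21.5   # fires a \"reset\" event for \"temperature\"\r\n\r\n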
class HotTypedProperty(HotProperty):\r\n \"\"\"\r\n A hot property that limits its content to a pre-specified type, which\r\n must be a HotContainee subclass.\r\n \"\"\"\r\n def __init__(self, target_type, **kw):\r\n assert issubclass(target_type, HotContainee)\r\n self.target_type = target_type\r\n super(HotTypedProperty, self).__init__(**kw)\r\n\r\n def __set__(self, obj, val):\r\n \"\"\"\r\n Checks that the object being assigned is an instance of the right\r\n type.\r\n \"\"\"\r\n name = self._get_name_within_parent(obj)\r\n if not isinstance(val, self.target_type):\r\n val = self.target_type(val, name=name, container=obj)\r\n setattr(obj, self.key, val)\r\n obj._fire(val, name, \"reset\", None)\r\n\r\n\r\nclass HotList(HotContainee):\r\n \"\"\"\r\n A list that fires when changed.\r\n \"\"\"\r\n def __init__(self, init_iterable=None, name=None, container=None):\r\n super(HotList, self).__init__(name=name, container=container)\r\n if init_iterable is None:\r\n init_iterable = []\r\n self.data = []\r\n if init_iterable:\r\n self.data = [self._validate_value(i) for i in init_iterable]\r\n\r\n def __len__(self):\r\n return len(self.data)\r\n\r\n def __iter__(self):\r\n return self.data.__iter__()\r\n\r\n def __getitem__(self, key):\r\n return self.data[key]\r\n\r\n def __delitem__(self, key):\r\n if type(key) is slice:\r\n del self.data[key]\r\n self._fire(\"reset\", key)\r\n else:\r\n key = self._natural_index(key)\r\n del self.data[key]\r\n self._fire(\"delete\", key)\r\n\r\n def __setitem__(self, key, value):\r\n if type(key) is slice:\r\n for i in value:\r\n self._validate_value(i)\r\n self.data[key] = value\r\n self._fire(\"reset\", key)\r\n else:\r\n self.data[key] = self._validate_value(value)\r\n self._fire(\"update\", self._natural_index(key))\r\n\r\n def insert(self, key, value):\r\n self.data.insert(key, self._validate_value(value))\r\n self._fire(\"insert\", self._natural_index(key))\r\n\r\n def append(self, value):\r\n self.data.append(self._validate_value(value))\r\n self._fire(\"insert\", len(self.data) - 1)\r\n\r\n def extend(self, iterable):\r\n for i in iterable:\r\n self.append(i)\r\n\r\n def _validate_value(self, val):\r\n \"\"\"\r\n The members may only be \"primitive\" types (int, str and such),\r\n or tuples of primitive types.\r\n \"\"\"\r\n if type(val) in IMMUTABLE_TYPES:\r\n return val\r\n if isinstance(val, tuple) or isinstance(val, frozenset):\r\n for i in val:\r\n self._validate_value(i)\r\n return val\r\n raise TypeError(\r\n \"Only number/strings and tuples/frozensets allowed here.\",\r\n )\r\n\r\n def _natural_index(self, index):\r\n \"\"\"\r\n If we get a negative index, we must convert it to the \"natural\"\r\n 0-based one.\r\n \"\"\"\r\n if index < 0:\r\n return len(self.data) + index\r\n return index\r\n\r\n def __str__(self):\r\n return str(self.data)\r\n\r\n def __unicode__(self):\r\n return unicode(self.data)\r\n\r\nclass TypedHotList(HotList):\r\n \"\"\"\r\n TypedHotList is a HotList variant that can restrict its items to\r\n the provided type.\r\n \"\"\"\r\n def __init__(self, type_constraint, init_iterable=None,\r\n name=None, container=None):\r\n \"\"\"\r\n Initializes the structure and sets the type that all items in\r\n the list must have.\r\n \"\"\"\r\n assert type_constraint in IMMUTABLE_TYPES \\\r\n or \\\r\n issubclass(type_constraint, tuple) \\\r\n or \\\r\n issubclass(type_constraint, frozenset) \\\r\n or \\\r\n issubclass(type_constraint, HotProperty)\r\n self.type_constraint = type_constraint\r\n\r\n super(TypedHotList, self).__init__(init_iterable, name, container)\r\n\r\n def _validate_value(self, val):\r\n \"\"\"\r\n The members may only be self.type_constraint. 
If the\r\n type_constraint is a tuple (or set) then it is also checked\r\n that the member's members are immutable.\r\n \"\"\"\r\n if not isinstance(val, self.type_constraint):\r\n raise TypeError(\r\n \"Only %s allowed here.\" % self.type_constraint,\r\n )\r\n if isinstance(val, tuple) or isinstance(val, frozenset):\r\n for i in val:\r\n self._validate_sub_value(i)\r\n return val\r\n\r\n def _validate_sub_value(self, val):\r\n \"\"\"\r\n Called from _validate_value, checks that the supplied value\r\n is immutable.\r\n \"\"\"\r\n if type(val) in IMMUTABLE_TYPES:\r\n return val\r\n if isinstance(val, tuple) or isinstance(val, frozenset):\r\n for i in val:\r\n self._validate_sub_value(i)\r\n return val\r\n raise TypeError(\r\n \"Only number/strings and tuples/frozensets allowed here.\",\r\n )\r\n\r\nclass HotDict(HotContainee):\r\n \"\"\"\r\n A dict that fires when changed.\r\n \"\"\"\r\n def __init__(self, init_iterable=None, name=None, container=None):\r\n super(HotDict, self).__init__(name=name, container=container)\r\n if init_iterable is None:\r\n init_iterable = []\r\n self.data = {}\r\n if init_iterable:\r\n self.data = dict([\r\n (k, self._validate_value(v))\r\n for (k, v) in init_iterable\r\n ])\r\n\r\n def __len__(self):\r\n return len(self.data)\r\n\r\n def __iter__(self):\r\n return self.data.__iter__()\r\n\r\n def __getitem__(self, key):\r\n return self.data[key]\r\n\r\n def __delitem__(self, key):\r\n del self.data[key]\r\n self._fire(\"delete\", key)\r\n\r\n def items(self):\r\n return self.data.items()\r\n\r\n def keys(self):\r\n return self.data.keys()\r\n\r\n def values(self):\r\n return self.data.values()\r\n\r\n def __setitem__(self, key, value):\r\n event = \"update\" if key in self.data else \"insert\"\r\n self.data[key] = self._validate_value(value)\r\n self._fire(event, key)\r\n\r\n def clear(self):\r\n self.data.clear()\r\n self._fire(\"reset\", \"\")\r\n\r\n def update(self, other):\r\n for (k, v) in other.items():\r\n self[k] = v\r\n\r\n def _validate_value(self, val):\r\n \"\"\"\r\n The members may only be \"primitive\" types (int, str and such),\r\n or tuples of primitive types.\r\n \"\"\"\r\n if type(val) in IMMUTABLE_TYPES:\r\n return val\r\n if isinstance(val, tuple) or isinstance(val, frozenset):\r\n for i in val:\r\n self._validate_value(i)\r\n return val\r\n raise TypeError(\r\n \"Only number/strings and tuples/frozensets allowed here.\",\r\n )\r\n\r\n def __str__(self):\r\n return str(self.data)\r\n\r\n def __unicode__(self):\r\n return unicode(self.data)\r\n\r\n\r\n\r\nclass Mapper(object):\r\n \"\"\"\r\n Mapper holds and resolves the mapping of the hot object's fqname\r\n and event_name to a callable.\r\n\r\n 
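An illustrative sketch (handle_update and model are hypothetical\r\n names, not defined in this module):\r\n\r\n mapper = Mapper()\r\n mapper.add_route(\"prop1\", \"update\", handle_update)\r\n mapper(model, \"prop1\", \"update\", 3)  # calls handle_update\r\n\r\n Empty strings act as wildcards for the fqname and/or the event\r\n name when routes are resolved.\r\n\r\n 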
When the mapper is included into the\r\n view object it lets the user easily map events by their paths\r\n (fqnames) and event names to given callables.\r\n \"\"\"\r\n def __init__(self):\r\n self._routes = defaultdict(list)\r\n\r\n def __call__(self, model, fqname, event_name, key):\r\n \"\"\"\r\n Finds the callables registered for (fqname, event_name) and\r\n calls them.\r\n \"\"\"\r\n for callable_ in itertools.chain(\r\n self._routes[(fqname, event_name)],\r\n self._routes[(fqname, \"\")],\r\n self._routes[(\"\", event_name)],\r\n self._routes[(\"\", \"\")],\r\n ):\r\n try:\r\n callable_(model, fqname, event_name, key)\r\n except Exception:\r\n logging.exception(\"Error calling %s\", callable_)\r\n\r\n def listener(self, model, fqname, event_name, key):\r\n self(model, fqname, event_name, key)\r\n\r\n def add_route(self, fqname, event_name, callable_):\r\n \"\"\"\r\n Maps a (fully qualified name, event name) pair to a callable.\r\n An empty fqname or event name acts as a wildcard.\r\n \"\"\"\r\n self._routes[(fqname, event_name)].append(callable_)\r\n","sub_path":"step07/hotmodel.py","file_name":"hotmodel.py","file_ext":"py","file_size_in_byte":13089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"345965457","text":"import torch\n\nfrom falkon.cuda.initialization import cublas_handle\nfrom falkon.cuda.cublas_gpu import cublasStrsm, cublasDtrsm, cublas_stream\nfrom falkon.utils.helpers import choose_fn, check_same_device\n# noinspection PyUnresolvedReferences\nfrom falkon.la_helpers.cuda_la_helpers import cuda_transpose\nfrom falkon.utils.tensor_helpers import is_f_contig, create_fortran, create_C\n\n\ndef cuda_trsm(A: torch.Tensor, v: torch.Tensor, alpha: float, lower: int, transpose: int) -> torch.Tensor:\n if not is_f_contig(A, strict=False):\n raise ValueError(\"A must be f-contiguous for CUDA TRSM to work.\")\n if not check_same_device(A, v):\n raise ValueError(\"A and v must be on the same CUDA device.\")\n if not A.is_cuda:\n raise ValueError(\"A and v must be CUDA tensors!\")\n\n s = torch.cuda.Stream(device=A.device)\n cublas_hdl = cublas_handle(A.device.index)\n trsm_fn = choose_fn(A.dtype, cublasDtrsm, cublasStrsm, \"TRSM\")\n\n with torch.cuda.device(A.device), torch.cuda.stream(s), cublas_stream(cublas_hdl, s._as_parameter_):\n # Deal with copying v, which may not be F-contiguous.\n vF = create_fortran(v.size(), v.dtype, v.device)\n if is_f_contig(v, strict=False):\n # We can just make a copy of v\n vF.copy_(v)\n s.synchronize() # sync is necessary here for correctness. 
Not sure why!\n else:\n vF = cuda_transpose(input=v, output=vF.T).T\n\n uplo = 'L' if lower else 'U'\n trans = 'T' if transpose else 'N'\n trsm_fn(cublas_hdl, side='L', uplo=uplo, trans=trans, diag='N', m=vF.shape[0], n=vF.shape[1],\n alpha=alpha, A=A.data_ptr(), lda=A.stride(1), B=vF.data_ptr(), ldb=vF.stride(1))\n if is_f_contig(v, strict=False):\n vout = vF\n else:\n vout = create_C(v.size(), v.dtype, v.device)\n vout = cuda_transpose(input=vF, output=vout.T).T\n s.synchronize()\n return vout\n","sub_path":"falkon/la_helpers/cuda_trsm.py","file_name":"cuda_trsm.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"406978033","text":"# -*- mode: python -*-\n\nblock_cipher = None\n\n\na = Analysis(['tetris_run.py'],\n pathex=['C:\\\\Users\\\\Riccoveigh\\\\PycharmProjects\\\\tetris'],\n binaries=[],\n datas=[],\n hiddenimports=[],\n hookspath=[],\n runtime_hooks=[],\n excludes=[],\n win_no_prefer_redirects=False,\n win_private_assemblies=False,\n cipher=block_cipher)\npyz = PYZ(a.pure, a.zipped_data,\n cipher=block_cipher)\nexe = EXE(pyz,\n a.scripts,\n exclude_binaries=True,\n name='tetris_run',\n debug=False,\n strip=False,\n upx=True,\n console=True )\ncoll = COLLECT(exe,\n a.binaries,\n a.zipfiles,\n a.datas,\n strip=False,\n upx=True,\n name='tetris_run')\n","sub_path":"tetris_run.spec","file_name":"tetris_run.spec","file_ext":"spec","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"152827567","text":"# youtube video thumbnail downloader and resizer and also cropper (all at once)\r\n# created by Hritwik Singhal on 28-05-2018: 03:20\r\n\r\n# i am a newbie to python (and programming), so you can assume that this piece is\r\n# poorly written, poorly commented etc etc\r\n# Also, you can modify functions as per your need.\r\n\r\nimport urllib.request\r\nfrom PIL import Image\r\n\r\n\r\n# you should have a list of YouTube video URLs, one per line, in a text file;\r\n# the slice below assumes the standard 'https://www.youtube.com/watch?v='\r\n# prefix (32 characters) followed by the 11-character video ID\r\ndef download_image(ids):\r\n ID_list = []\r\n for x in open(ids).read().split():\r\n y = x.strip()\r\n ID_list.append(y[32:43])\r\n\r\n # take ID of video\r\n for one_ID in ID_list:\r\n ID = one_ID\r\n\r\n # insert ID in url of image\r\n url = 'https://img.youtube.com/vi/' + ID + '/maxresdefault.jpg'\r\n\r\n # Naming of images\r\n name = ID + '.jpg'\r\n\r\n # downloading the image\r\n urllib.request.urlretrieve(url, name)\r\n\r\n img_open = Image.open(name)\r\n\r\n # the maxresdefault thumbnail is 1280x720\r\n # resizing the image\r\n resize_img = img_open.resize((900, 600))\r\n\r\n # Cropping image\r\n # this crops a 500x520 region from the middle of the resized image\r\n crop_area = (200, 50, 700, 570)\r\n cropped_img = resize_img.crop(crop_area)\r\n cropped_img.save(name)\r\n\r\n\r\ndownload_image('ID.txt')\r\n\r\n","sub_path":"Thumbnail_download.py","file_name":"Thumbnail_download.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"108888278","text":"# Copyright 2017-2020 TensorHub, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT 
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport logging\nimport os\nimport sys\n\nimport six\n\nfrom guild import batch_util\nfrom guild import cli\nfrom guild import click_util\nfrom guild import cmd_impl_support\nfrom guild import config\nfrom guild import flag_util\nfrom guild import guildfile\nfrom guild import help as helplib\nfrom guild import op as oplib\nfrom guild import op_cmd as op_cmd_lib\nfrom guild import op_dep\nfrom guild import op_util\nfrom guild import remote\nfrom guild import resolver as resolverlib\nfrom guild import run as runlib\nfrom guild import run_util\nfrom guild import summary\nfrom guild import util\nfrom guild import var\n\nfrom . import remote_impl_support\n\nlog = logging.getLogger(\"guild\")\n\n# Use Bayesian with gaussian process as default optimizer when opdef\n# does not contain any optimizers.\n#\nDEFAULT_OPTIMIZER = \"gp\"\nDEFAULT_OBJECTIVE = \"loss\"\n\nFLAG_TEST_ATTRS = [\n \"default\",\n \"type\",\n \"required\",\n \"arg_name\",\n \"arg_skip\",\n \"arg_switch\",\n \"env_name\",\n \"choices\",\n \"allow_other\",\n \"distribution\",\n \"max\",\n \"min\",\n \"null_label\",\n]\n\n\n###################################################################\n# State\n###################################################################\n\n\nclass State(object):\n def __init__(self, args):\n self.args = args\n self.restart_run = None\n self.proto_run = None\n self.user_op = Operation()\n self.batch_op = None\n\n\nclass Operation(oplib.Operation):\n def __init__(self):\n super(Operation, self).__init__()\n self._run = None\n self._run_is_proto = False\n self._force_sourcecode = False\n self._opdef = None\n self._resource_flagdefs = []\n self._user_flag_vals = {}\n self._batch_trials = None\n self._op_flag_vals = {}\n self._flag_null_labels = {}\n self._op_cmd = None\n self._op_cmd_run_attrs = {}\n self._python_requires = None\n self._random_seed = None\n self._max_trials = None\n self._objective = None\n self._label_template = None\n self._label = None\n self._output_scalars = None\n self._sourcecode_root = None\n\n\ndef _state_for_args(args):\n S = State(args)\n if S.args.help_op:\n _op_init_opdef(S.args.opspec, S.user_op, S.args)\n else:\n _state_init_restart_or_proto_run(S)\n _state_init_user_op(S)\n _state_init_batch_op(S)\n return S\n\n\ndef _op_config_data(op):\n return {\n \"flag-null-labels\": op._flag_null_labels,\n \"op-cmd\": op_cmd_lib.as_data(op._op_cmd),\n \"python-requires\": op._python_requires,\n \"label-template\": op._label_template,\n \"output-scalars\": op._output_scalars,\n \"deps\": op_util.op_deps_as_data(op.deps),\n \"sourcecode-root\": op._sourcecode_root,\n }\n\n\ndef _apply_op_config_data(data, op):\n op._flag_null_labels = data.get(\"flag-null-labels\")\n op._op_cmd = op_cmd_lib.for_data(data.get(\"op-cmd\"))\n op._python_requires = data.get(\"python-requires\")\n op._label_template = data.get(\"label-template\")\n op._output_scalars = data.get(\"output-scalars\")\n op._sourcecode_root = data.get(\"sourcecode-root\")\n op.deps = op_util.op_deps_for_data(data.get(\"deps\"))\n\n\n# =================================================================\n# State - restart / proto run\n# =================================================================\n\n\ndef _state_init_restart_or_proto_run(S):\n assert not (S.args.restart and 
S.args.proto)\n if S.args.restart:\n _state_init_restart_run(S)\n elif S.args.proto:\n _state_init_proto_run(S)\n\n\ndef _state_init_restart_run(S):\n if S.args.remote:\n S.restart_run = _remote_run_for_spec(S.args.restart, S.args)\n else:\n S.restart_run = _local_run_for_spec(S.args.restart)\n\n\ndef _state_init_proto_run(S):\n if S.args.remote:\n S.proto_run = _remote_run_for_spec(S.args.proto, S.args)\n else:\n S.proto_run = _local_run_for_spec(S.args.proto)\n\n\ndef _remote_run_for_spec(spec, args):\n return remote_impl_support.one_run(spec, args)\n\n\ndef _local_run_for_spec(spec):\n return util.find_apply(\n [run_util.run_for_run_dir, run_util.marked_or_latest_run_for_opspec, one_run,],\n spec,\n )\n\n\n# =================================================================\n# State - user op\n# =================================================================\n\n\ndef _state_init_user_op(S):\n _user_op_init_run(S)\n _op_init_force_sourcecode(S.args.force_sourcecode, S.user_op)\n _op_init_opdef(S.args.opspec, S.user_op, S.args)\n _op_init_user_flags(S.args.flags, S.user_op)\n _op_init_op_cmd(S.user_op)\n _op_init_op_flags(S.args, S.user_op)\n _op_init_config(S.args.label, S.args.tag, S.user_op)\n _op_init_core(S.args, S.user_op)\n\n\ndef _user_op_init_run(S):\n assert not (S.restart_run and S.proto_run)\n if S.restart_run:\n _user_op_init_run_(S, S.restart_run)\n elif S.proto_run:\n _user_op_init_run_(S, S.proto_run)\n S.user_op._run_is_proto = True\n\n\ndef _user_op_init_run_(S, run):\n if run.batch_proto:\n S.user_op._run = run.batch_proto\n else:\n S.user_op._run = run\n\n\ndef _op_init_force_sourcecode(force_sourcecode_arg, op):\n op._force_sourcecode = force_sourcecode_arg\n\n\n# =================================================================\n# Op - user flags\n# =================================================================\n\n\ndef _op_init_user_flags(flag_args, op):\n op._user_flag_vals, batch_files = split_flag_args(flag_args, op._opdef)\n if batch_files:\n trials = _trials_for_batch_files(batch_files)\n if len(trials) == 1:\n _apply_single_trial_user_flags(trials[0], op)\n else:\n op._batch_trials = trials\n\n\ndef split_flag_args(flag_args, opdef):\n batch_files, rest_args = op_util.split_batch_files(flag_args)\n assigns = _parse_assigns(rest_args, opdef)\n return assigns, batch_files\n\n\ndef _parse_assigns(assign_args, opdef):\n try:\n return op_util.parse_flag_assigns(assign_args, opdef)\n except op_util.ArgValueError as e:\n _invalid_flag_arg_error(e.arg)\n\n\ndef _trials_for_batch_files(batch_files):\n batch_files = [_resolve_batch_file(path) for path in batch_files]\n try:\n return op_util.trials_for_batch_files(batch_files)\n except op_util.BatchFileError as e:\n _batch_file_error(e)\n\n\ndef _resolve_batch_file(path):\n resolved = os.path.join(config.cwd(), os.path.expanduser(path))\n if not os.path.exists(resolved):\n _no_such_batch_file_error(resolved)\n return resolved\n\n\ndef _apply_single_trial_user_flags(trial, op):\n for name, val in trial.items():\n if name not in op._user_flag_vals:\n op._user_flag_vals[name] = val\n\n\n# =================================================================\n# Op - opdef\n# =================================================================\n\n\ndef _op_init_opdef(opspec, op, args):\n if opspec:\n op._opdef = opdef_for_opspec(opspec)\n elif op._run:\n if args.flags or args.force_sourcecode:\n # We need opdef for restart/run-with-proto when user specifies\n # flag values or when force-sourcecode is specified.\n op._opdef = 
_opdef_for_run(op._run)\n else:\n op._opdef = _default_opdef()\n\n\ndef _opdef_for_run(run):\n if isinstance(run, remote.RunProxy):\n return _opdef_for_remote_run(run)\n opspec = run.opref.to_opspec()\n return opdef_for_opspec(opspec, run)\n\n\ndef _opdef_for_remote_run(run):\n if _cwd_remote_run(run):\n return opdef_for_opspec(_cwd_opspec(run.opref))\n return opdef_for_opspec(run.opref.to_opspec(), run)\n\n\ndef _cwd_remote_run(run):\n try:\n gf = guildfile.for_dir(config.cwd())\n except Exception:\n return False\n else:\n return gf.package and gf.package.name == run.opref.pkg_name\n\n\ndef _cwd_opspec(opref):\n return \"%s:%s\" % (opref.model_name, opref.op_name)\n\n\ndef opdef_for_opspec(opspec, for_run=None):\n try:\n return op_util.opdef_for_opspec(opspec)\n except op_util.InvalidOpSpec:\n _invalid_opspec_error(opspec)\n except op_util.CwdGuildfileError as e:\n _guildfile_error(e.path, str(e))\n except op_util.NoSuchModel as e:\n if for_run:\n _missing_run_opdef_error(opspec, for_run)\n else:\n _no_such_model_op_error(opspec)\n except op_util.MultipleMatchingModels as e:\n _multiple_models_error(e.model_ref, e.matches)\n except op_util.NoSuchOperation as e:\n _no_such_opdef_error(e.model, e.op_name)\n except op_util.ModelOpProxyError as e:\n _model_op_proxy_error(e)\n\n\ndef _default_opdef():\n return opdef_for_opspec(None)\n\n\n# =================================================================\n# Op - op cmd\n# =================================================================\n\n\ndef _op_init_op_cmd(op):\n if op._opdef:\n op._op_cmd, run_attrs = _op_cmd_for_opdef(op._opdef)\n if run_attrs:\n op._op_cmd_run_attrs.update(run_attrs)\n\n\ndef _op_cmd_for_opdef(opdef):\n try:\n return op_util.op_cmd_for_opdef(opdef)\n except op_util.InvalidOpDef as e:\n _invalid_opdef_error(opdef, e.msg)\n\n\n# =================================================================\n# Op - op flags\n# =================================================================\n\n\ndef _op_init_op_flags(args, op):\n if op._run:\n _apply_run_flags(op._run, op._op_flag_vals)\n if op._opdef:\n _apply_op_flags_vals_for_opdef(\n op._opdef,\n op._user_flag_vals,\n args.force_flags or op._batch_trials,\n op._op_cmd,\n args,\n op._resource_flagdefs,\n op._op_flag_vals,\n )\n if args.edit_flags:\n _edit_op_flags(op)\n\n\ndef _apply_run_flags(run, flag_vals):\n flag_vals.update(run.get(\"flags\") or {})\n\n\ndef _apply_op_flags_vals_for_opdef(\n opdef, user_flag_vals, force_flags, op_cmd, args, resource_flagdefs, op_flag_vals\n):\n \"\"\"Applies opdef and user-provided flags to `op_flag_vals`.\n\n Also appends any resolved resource flag defs to\n `resource_flagdefs`.\n\n Attempts to resolve operation runs and use the resolved run IDs\n as applicable flag values.\n\n Opdef is used to provide missing default values, coerce flag vals,\n and validate vals. Opdef-provided flag vals are added to op flag\n vals only if they are not already in op flags, or if they are in\n user-provided flags. This maintains existing values (e.g. from a\n restart) unless a user explicitly provides a flag value.\n\n op_cmd is modified to include CmdFlag with arg-skip=yes for\n resolved run IDs provided a flag isn't defined for the resolved\n resource name. 
These flag values are used by Guild to resolve\n resources and should not be included in flag args unless a\n flag def is explicitly provided.\n \"\"\"\n flag_vals, resolved_resource_flagdefs = _flag_vals_for_opdef(\n opdef, user_flag_vals, force_flags\n )\n resource_flagdefs.extend(resolved_resource_flagdefs)\n _apply_default_dep_runs(opdef, op_cmd, args, flag_vals)\n for name, val in flag_vals.items():\n if name in user_flag_vals or name not in op_flag_vals:\n op_flag_vals[name] = val\n\n\ndef _flag_vals_for_opdef(opdef, user_flag_vals, force_flags):\n \"\"\"Returns flag vals for opdef.\n\n The result includes defaults for opdef overridden by user flag\n vals where specified.\n \"\"\"\n try:\n return op_util.flag_vals_for_opdef(opdef, user_flag_vals, force_flags)\n except op_util.MissingRequiredFlags as e:\n _missing_required_flags_error(e)\n except op_util.InvalidFlagChoice as e:\n _invalid_flag_choice_error(e)\n except op_util.InvalidFlagValue as e:\n _invalid_flag_value_error(e)\n except op_util.NoSuchFlagError as e:\n _no_such_flag_error(e.flag_name, opdef)\n\n\ndef _apply_default_dep_runs(opdef, op_cmd, args, flag_vals):\n \"\"\"Applies default run IDs to flag_vals for dependencies.\"\"\"\n resolver_factory = _resolver_factory(args)\n for run, dep in op_dep.resolved_op_runs_for_opdef(\n opdef, flag_vals, resolver_factory\n ):\n dep_flag_name = _dep_flag_name(dep)\n _ensure_dep_flag_op_cmd_arg_skip(dep_flag_name, opdef, op_cmd)\n _apply_dep_run_id(run.id, dep_flag_name, flag_vals)\n\n\ndef _resolver_factory(args):\n if args.remote:\n return _remote_resolver_for_source_f(args.remote)\n else:\n # Use default.\n return None\n\n\ndef _dep_flag_name(dep):\n return dep.resdef.flag_name or dep.resdef.name\n\n\ndef _ensure_dep_flag_op_cmd_arg_skip(flag_name, opdef, op_cmd):\n \"\"\"Ensures that a dep flag does not appear as an op cmd arg.\n\n An exception is made if the flag is defined in the opdef, in\n which case the opdef flag is used to control op cmd args.\n \"\"\"\n if opdef.get_flagdef(flag_name):\n return\n cmd_flag = op_cmd.cmd_flags.setdefault(flag_name, op_cmd_lib.CmdFlag())\n cmd_flag.flag_name = flag_name\n cmd_flag.arg_skip = True\n\n\ndef _apply_dep_run_id(run_id, dep_flag_name, flag_vals):\n \"\"\"Applies a full run ID to a flag value.\n\n If the current flag value is unset or None, run ID is set without\n further checks.\n\n If the current flag value is a string and is a prefix of run_id,\n the value is replaced with the full run ID.\n\n If the current flag value is a list, the first item in the list\n that is a prefix of the run ID is updated with the full run ID.\n\n If the current flag value is neither a string nor a list, the\n function raises an assertion error.\n \"\"\"\n val = flag_vals.get(dep_flag_name)\n if val is None:\n flag_vals[dep_flag_name] = run_id\n elif isinstance(val, six.string_types):\n if run_id.startswith(val):\n flag_vals[dep_flag_name] = run_id\n elif isinstance(val, list):\n _apply_dep_run_id_to_list(run_id, val)\n flag_vals[dep_flag_name] = val\n else:\n assert False, (type(val), dep_flag_name, flag_vals)\n\n\ndef _apply_dep_run_id_to_list(run_id, l):\n for i, x in enumerate(l):\n if run_id.startswith(x):\n l[i] = run_id\n break\n
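\n# An illustration of _apply_dep_run_id with hypothetical values: given\n# flag_vals = {\"model\": \"ab12\"} and a resolved run ID \"ab12cd34ef56\",\n# the prefix \"ab12\" is replaced in place, leaving\n# flag_vals == {\"model\": \"ab12cd34ef56\"}.\n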
\n\ndef _edit_op_flags(op):\n encoded_flags = util.encode_yaml(op._op_flag_vals)\n while True:\n # Loop to let user re-edit on error.\n edited = util.editor(encoded_flags)\n if edited is None or not edited.strip():\n break\n try:\n flag_vals = util.decode_yaml(edited)\n except ValueError as e:\n cli.out(\"Error reading flags: %s\" % e, err=True)\n if not cli.confirm(\"Would you like to re-edit these flags?\", default=True):\n cli.error()\n else:\n op._op_flag_vals = flag_vals\n break\n\n\n# =================================================================\n# Remote run resolver lookup support\n# =================================================================\n\n\ndef _remote_resolver_for_source_f(remote):\n \"\"\"Returns a function used to resolve a source.\n\n We install a hook to handle remote cases. The base\n OperationResolver doesn't handle remote lookups. We implement a\n remote version that uses a customized callback for returning a\n remote 'latest or marked' run matching the op requirements.\n \"\"\"\n\n def f(source, dep):\n scheme = source.parsed_uri.scheme\n assert scheme == \"operation\", source\n resource = op_dep.ResourceProxy(dep.res_location, dep.config)\n modeldef = source.resdef.modeldef\n return _RemoteOperationResolver(remote, source, resource, modeldef)\n\n return f\n\n\nclass _RemoteOperationResolver(resolverlib.OperationResolver):\n \"\"\"Customized operation resolver that handles remote cases.\n\n Overrides `resolve_op_run` to look up remote runs instead of the\n default resolver's lookup of local runs.\n \"\"\"\n\n def __init__(self, remote, source, resource, modeldef):\n super(_RemoteOperationResolver, self).__init__(source, resource, modeldef)\n self.remote = remote\n\n def resolve_op_run(self, run_id_prefix=None, include_staged=False):\n \"\"\"Remote version of default `resolve_op_run`.\n\n Uses a remote-enabled callback for resolving a candidate run\n for the op dependency.\n \"\"\"\n return self._resolve_op_run(\n run_id_prefix, include_staged, _remote_marked_or_latest_run_f(self.remote)\n )\n\n\ndef _remote_marked_or_latest_run_f(remote):\n \"\"\"Returns a remote-enabled lookup function for 'marked or latest run'.\"\"\"\n\n def f(oprefs, run_id_prefix=None, status=None):\n runs = _remote_runs_for_marked_or_latest(remote, oprefs, run_id_prefix, status)\n log.debug(\"remote runs for %s: %s\", oprefs, runs)\n if not runs:\n return None\n for run in runs:\n if run.get(\"marked\"):\n return run\n return runs[0]\n\n return f\n\n\ndef _remote_runs_for_marked_or_latest(remote, oprefs, run_id_prefix, status):\n \"\"\"Returns a list of candidate runs for 'marked or latest' consideration.\n\n Uses `remote_impl_support.filtered_runs` to get remote runs\n matching the specified opdef list, run ID prefix, and status list.\n \"\"\"\n from .runs_list import list_runs\n\n args = click_util.Args(**list_runs.make_context(\"\", []).params)\n args.remote = remote\n args.ops = [op.to_opspec() for op in oprefs]\n args.completed = \"completed\" in status\n args.running = \"running\" in status\n args.terminated = \"terminated\" in status\n args.staged = \"staged\" in status\n log.debug(\"filtered runs params for remote list: %r\", args.as_kw())\n return _filter_by_run_id_prefix(\n remote_impl_support.filtered_runs(args), run_id_prefix\n )\n\n\ndef _filter_by_run_id_prefix(runs, run_id_prefix):\n if not run_id_prefix:\n return runs\n return [run for run in runs if run.id.startswith(run_id_prefix)]\n\n\n# =================================================================\n# Op - config\n# =================================================================\n\n\ndef _op_init_config(label_arg, tag_arg, op):\n if tag_arg:\n assert not label_arg, label_arg\n label_arg = \"%s ${default_label}\" % tag_arg\n if op._run:\n _op_init_config_for_run(op._run, label_arg, op)\n else:\n assert op._opdef\n 
_op_init_config_for_opdef(op._opdef, label_arg, op)\n\n\ndef _op_init_config_for_run(run, label_arg, op):\n config = run.get(\"op\")\n if not config:\n _missing_op_config_for_restart_error(run)\n if not config.get(\"op-cmd\"):\n _invalid_op_config_for_restart_error(run)\n _apply_op_config_data(config, op)\n if label_arg:\n op._label_template = label_arg\n\n\ndef _op_init_config_for_opdef(opdef, label_arg, op):\n op._flag_null_labels = _flag_null_labels_for_opdef(opdef, op._resource_flagdefs)\n op._python_requires = _python_requires_for_opdef(opdef)\n op._label_template = label_arg or opdef.label\n op._output_scalars = opdef.output_scalars\n op._sourcecode_root = _opdef_sourcecode_dest(opdef)\n\n\ndef _flag_null_labels_for_opdef(opdef, resource_flagdefs):\n return {\n f.name: f.null_label\n for f in opdef.flags + resource_flagdefs\n if f.null_label is not None\n }\n\n\ndef _python_requires_for_opdef(opdef):\n return opdef.python_requires or opdef.modeldef.python_requires\n\n\ndef _opdef_sourcecode_dest(opdef):\n return _opdef_explicit_sourcecode_dest(opdef) or _opdef_default_sourcecode_dest(\n opdef\n )\n\n\ndef _opdef_explicit_sourcecode_dest(opdef):\n return opdef.sourcecode.dest or opdef.modeldef.sourcecode.dest\n\n\ndef _opdef_default_sourcecode_dest(opdef):\n if _sourcecode_empty(opdef):\n return None\n return _default_sourcecode_path()\n\n\ndef _sourcecode_empty(opdef):\n return opdef.sourcecode.disabled or (\n opdef.sourcecode.empty_def and opdef.modeldef.sourcecode.disabled\n )\n\n\ndef _default_sourcecode_path():\n return os.path.join(\".guild\", \"sourcecode\")\n\n\n# =================================================================\n# Op - core\n# =================================================================\n\n\ndef _op_init_core(args, op):\n _op_init_opref(op)\n _op_init_cmd(args, op)\n _op_init_private_env(op)\n _op_init_sourcecode_paths(args, op)\n _op_init_run_dir(args, op)\n _op_init_label(op)\n _op_init_random_seed(args.random_seed, op)\n _op_init_deps(op)\n _op_init_run_attrs(args, op)\n _op_init_callbacks(op)\n\n\n# =================================================================\n# Op - opref\n# =================================================================\n\n\ndef _op_init_opref(op):\n if op._opdef:\n op.opref = op._opdef.opref\n else:\n assert op._run\n op.opref = op._run.opref\n\n\n# =================================================================\n# Op - cmd args / env\n# =================================================================\n\n\ndef _op_init_cmd(args, op):\n assert op._op_cmd\n op.cmd_args, op.cmd_env = _generate_op_cmd(\n op._op_cmd, op._op_flag_vals, op._python_requires\n )\n _apply_gpu_arg_env(args, op.cmd_env)\n\n\ndef _generate_op_cmd(op_cmd, flag_vals, python_requires):\n resolve_params = _op_cmd_resolve_params(flag_vals, python_requires)\n try:\n return op_cmd_lib.generate(op_cmd, flag_vals, resolve_params)\n except util.UndefinedReferenceError as e:\n _op_cmd_error(\n \"invalid setting for operation: command contains \"\n \"invalid reference '%s'\" % e.args[0]\n )\n\n\ndef _op_cmd_resolve_params(flag_vals, python_requires):\n params = dict(flag_vals)\n params[\"python_exe\"] = _proc_python_exe(python_requires)\n return params\n\n\ndef _proc_python_exe(python_requires):\n if not python_requires:\n return config.python_exe()\n matching = util.find_python_interpreter(python_requires)\n if not matching:\n _op_cmd_error(\n \"cannot find a python interpreter for \" \"requirement %r\" % python_requires\n )\n path, _ver = matching\n return 
path\n\n\ndef _apply_gpu_arg_env(args, env):\n if args.no_gpus:\n log.info(\"Masking available GPUs (CUDA_VISIBLE_DEVICES='')\")\n env[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n elif args.gpus is not None:\n log.info(\"Masking available GPUs (CUDA_VISIBLE_DEVICES='%s')\", args.gpus)\n env[\"CUDA_VISIBLE_DEVICES\"] = args.gpus\n\n\ndef _op_init_private_env(op):\n if op._opdef:\n op.private_env = op._opdef.env_secrets or []\n\n\n# =================================================================\n# Op - sourcecode paths\n# =================================================================\n\n\ndef _op_init_sourcecode_paths(args, op):\n op.sourcecode_paths = _sourcecode_paths(op, args)\n\n\ndef _sourcecode_paths(op, args):\n if args.debug_sourcecode:\n return _resolve_sourcecode_paths(args.debug_sourcecode)\n return _op_sourcecode_paths(op)\n\n\ndef _resolve_sourcecode_paths(s):\n cwd = config.cwd()\n return [\n os.path.abspath(os.path.join(cwd, part)) for part in s.split(os.path.pathsep)\n ]\n\n\ndef _op_sourcecode_paths(op):\n if op._sourcecode_root is None:\n return []\n return [op._sourcecode_root]\n\n\n# =================================================================\n# Op - run dir\n# =================================================================\n\n\ndef _op_init_run_dir(args, op):\n if op._run and not op._run_is_proto:\n op.run_dir = op._run.dir\n else:\n op.run_dir = _op_run_dir_for_args(args)\n\n\ndef _op_run_dir_for_args(args):\n if not args.run_dir:\n return None\n run_dir = os.path.abspath(args.run_dir)\n if not args.stage and os.getenv(\"NO_WARN_RUNDIR\") != \"1\":\n cli.note(\n \"Run directory is '%s' (results will not be \" \"visible to Guild)\" % run_dir\n )\n return run_dir\n\n\n# =================================================================\n# Op - run label\n# =================================================================\n\n\ndef _op_init_label(op):\n op._label = op_util.run_label(op._label_template, op._op_flag_vals)\n\n\n# =================================================================\n# Op - random seed\n# =================================================================\n\n\ndef _op_init_random_seed(random_seed_arg, op):\n if random_seed_arg:\n op._random_seed = random_seed_arg\n elif op._run:\n op._random_seed = _random_seed_for_run(op._run)\n else:\n op._random_seed = runlib.random_seed()\n\n\ndef _random_seed_for_run(run):\n return run.get(\"random_seed\") or runlib.random_seed()\n\n\n# =================================================================\n# Op - run deps\n# =================================================================\n\n\ndef _op_init_deps(op):\n if op._run:\n _check_flags_for_resolved_deps(op._user_flag_vals, op._run)\n if op._opdef:\n op.deps = _op_deps_for_opdef(op._opdef, op._op_flag_vals)\n\n\ndef _check_flags_for_resolved_deps(flag_vals, run):\n \"\"\"Generate an error if flags contain vals for resolved deps in run.\n\n Used to prevent redefinition of dependencies for a run.\n \"\"\"\n resolved_deps = run.get(\"deps\") or {}\n for name in flag_vals:\n if name in resolved_deps:\n _flag_for_resolved_dep_error(name, run)\n\n\ndef _op_deps_for_opdef(opdef, flag_vals):\n try:\n return op_dep.deps_for_opdef(opdef, flag_vals)\n except op_dep.OpDependencyError as e:\n _invalid_opdef_error(opdef, e)\n\n\n# =================================================================\n# Op - run attrs\n# =================================================================\n\n\ndef _op_init_run_attrs(args, op):\n attrs = op.run_attrs\n if op._label:\n 
attrs[\"label\"] = op._label\n if op._batch_trials:\n attrs[\"trials\"] = op._batch_trials\n attrs[\"flags\"] = op._op_flag_vals\n attrs[\"user_flags\"] = op._user_flag_vals\n attrs[\"run_params\"] = args.as_kw()\n attrs[\"random_seed\"] = op._random_seed\n if op._max_trials:\n attrs[\"max_trials\"] = op._max_trials\n if op._objective:\n attrs[\"objective\"] = op._objective\n attrs[\"op\"] = _op_config_data(op)\n _apply_system_attrs(op, attrs)\n attrs.update(op._op_cmd_run_attrs)\n\n\ndef _apply_system_attrs(op, attrs):\n # Don't reapply system attrs to existing runs.\n if op._run:\n return\n assert op._opdef\n attrs[\"host\"] = util.hostname()\n attrs[\"user\"] = util.user()\n attrs[\"platform\"] = util.platform_info()\n if _pip_freeze_required(op):\n attrs[\"pip_freeze\"] = _pip_freeze()\n\n\ndef _pip_freeze_required(op):\n return op._opdef.pip_freeze is not False and _is_python_op(op)\n\n\ndef _is_python_op(op):\n return \"python\" in \" \".join(op.cmd_args)\n\n\ndef _pip_freeze():\n from guild import pip_util\n\n return pip_util.freeze()\n\n\n# =================================================================\n# Op - run callbacks\n# =================================================================\n\n\ndef _op_init_callbacks(op):\n if op._run:\n if op._run_is_proto:\n _op_init_callbacks_for_run_with_proto(op)\n else:\n _op_init_callbacks_for_restart(op)\n else:\n assert op._opdef\n _op_init_callbacks_for_opdef(op._opdef, op)\n\n\ndef _op_init_callbacks_for_restart(op):\n op.callbacks = oplib.OperationCallbacks(init_output_summary=_init_output_summary)\n\n\ndef _init_output_summary(op, run):\n if _output_scalars_disabled(op):\n return None\n return _output_scalars_summary(op._output_scalars, op._op_flag_vals, run)\n\n\ndef _output_scalars_disabled(op):\n return op._output_scalars is not None and not op._output_scalars\n\n\ndef _output_scalars_summary(output_scalars, flag_vals, run):\n if output_scalars is None:\n output_scalars = summary.DEFAULT_OUTPUT_SCALARS\n ignore = flag_vals.keys()\n else:\n ignore = None\n summary_path = run.guild_path()\n return summary.OutputScalars(output_scalars, summary_path, ignore)\n\n\ndef _op_init_callbacks_for_opdef(opdef, op):\n op.callbacks = oplib.OperationCallbacks(\n init_output_summary=_init_output_summary,\n run_initialized=_run_init_cb_for_opdef(opdef),\n )\n\n\ndef _run_init_cb_for_opdef(opdef):\n def f(op, run):\n _copy_opdef_sourcecode(opdef, op, run)\n _write_run_sourcecode_digest(op, run)\n _write_run_vcs_commit(opdef, run)\n\n return f\n\n\ndef _copy_opdef_sourcecode(opdef, op, run):\n if os.getenv(\"NO_SOURCECODE\") == \"1\":\n log.debug(\"NO_SOURCECODE=1, skipping sourcecode copy\")\n return\n sourcecode_src = opdef.guildfile.dir\n if not sourcecode_src:\n log.debug(\"no sourcecode source, skipping sourcecode copy\")\n return\n sourcecode_select = op_util.sourcecode_select_for_opdef(opdef)\n if not sourcecode_select:\n log.debug(\"no sourcecode rules, skipping sourcecode copy\")\n return\n dest = _sourcecode_dest(run, op)\n log.debug(\n \"copying source code files for run %s from %s to %s\",\n run.id,\n sourcecode_src,\n dest,\n )\n op_util.copy_sourcecode(sourcecode_src, sourcecode_select, dest)\n\n\ndef _sourcecode_dest(run, op):\n return os.path.join(run.dir, op._sourcecode_root or _default_sourcecode_path())\n\n\ndef _write_run_sourcecode_digest(op, run):\n if op._sourcecode_root:\n op_util.write_sourcecode_digest(run, op._sourcecode_root)\n\n\ndef _write_run_vcs_commit(opdef, run):\n if os.getenv(\"NO_VCS_COMMIT\") == \"1\":\n 
log.debug(\"NO_VCS_COMMIT=1, skipping VCS commit\")\n return\n\n op_util.write_vcs_commit(opdef, run)\n\n\ndef _op_init_callbacks_for_run_with_proto(op):\n if op._force_sourcecode:\n assert op._opdef\n _op_init_callbacks_for_opdef(op._opdef, op)\n else:\n op.callbacks = oplib.OperationCallbacks(\n init_output_summary=_init_output_summary,\n run_initialized=_run_init_cb_for_run_with_proto(op),\n )\n\n\ndef _run_init_cb_for_run_with_proto(op):\n def f(_op, run):\n _copy_run_proto_sourcecode(op._run, op, run)\n _copy_run_proto_attrs(op._run, run)\n\n return f\n\n\ndef _copy_run_proto_sourcecode(proto_run, proto_op, dest_run):\n if os.getenv(\"NO_SOURCECODE\") == \"1\":\n log.debug(\"NO_SOURCECODE=1, skipping sourcecode copy\")\n return\n src = os.path.join(proto_run.dir, proto_op._sourcecode_root)\n if not os.path.exists(src):\n log.debug(\"no sourcecode source (%s), skipping sourcecode copy\", src)\n return\n dest = os.path.join(dest_run.dir, proto_op._sourcecode_root)\n log.debug(\n \"copying source code files for run %s from %s to %s\", dest_run.id, src, dest,\n )\n util.copytree(src, dest)\n\n\ndef _copy_run_proto_attrs(proto_run, dest_run):\n run_proto_attrs = [\n \"sourcecode_digest\",\n \"vcs_commit\",\n \"host\",\n \"user\",\n \"platform\",\n \"pip_freeze\",\n ]\n for attr in run_proto_attrs:\n if not proto_run.has_attr(attr):\n continue\n dest_run.write_attr(attr, proto_run.get(attr))\n\n\n# =================================================================\n# State - batch op\n# =================================================================\n\n\ndef _state_init_batch_op(S):\n _batch_op_init_run(S)\n _batch_op_init_opdef(S)\n _check_opt_flags_for_missing_batch_opdef(S)\n _check_batch_args_for_missing_batch_op(S)\n if S.batch_op:\n _op_init_op_cmd(S.batch_op)\n _op_init_user_flags(S.args.opt_flags, S.batch_op)\n _op_init_op_flags(S.args, S.batch_op)\n _op_init_config(S.args.batch_label, S.args.batch_tag, S.batch_op)\n _op_init_batch_config(S.args, S.user_op, S.batch_op)\n _apply_batch_flag_encoder(S.batch_op, S.user_op)\n _op_init_core(S.args, S.batch_op)\n\n\ndef _batch_op_init_run(S):\n assert not (S.restart_run and S.proto_run)\n if S.restart_run and S.restart_run.batch_proto:\n _batch_op_init_run_(S, S.restart_run)\n elif S.proto_run and S.proto_run.batch_proto:\n _batch_op_init_run_(S, S.proto_run)\n S.batch_op._run_is_proto = True\n\n\ndef _batch_op_init_run_(S, run):\n if S.batch_op is None:\n S.batch_op = Operation()\n S.batch_op._run = run\n\n\ndef _batch_op_init_opdef(S):\n if S.batch_op and S.batch_op._run:\n assert not S.args.optimizer and not S.args.optimize\n # As with user op, we need opdef for restart/run-with-proto\n # when user specifies flag values or when force-sourcecode is\n # specified. 
We check args here rather than S.batch_op because\n # we can't process batch user flags until we know we have a\n # batch op, which is determined in part by this function.\n if S.args.opt_flags or S.args.force_sourcecode:\n S.batch_op._opdef = _opdef_for_run(S.batch_op._run)\n elif S.user_op._opdef:\n _batch_op_init_for_opdef(S.user_op._opdef, S)\n\n\ndef _batch_op_init_for_opdef(opdef, S):\n if S.args.optimizer:\n _batch_op_init_for_named_optimizer(S.args.optimizer, opdef, S)\n elif S.args.optimize:\n _batch_op_init_for_opdef_default_optimizer(opdef, S)\n else:\n _try_implied_batch_op_init(S.user_op, S)\n\n\ndef _batch_op_init_for_named_optimizer(name, opdef, S):\n assert not S.batch_op\n optdef = opdef.get_optimizer(name)\n S.batch_op = Operation()\n if optdef:\n _op_init_for_optimizer(optdef, S.batch_op)\n else:\n _op_init_for_optimizer_opspec(name, S.batch_op)\n\n\ndef _op_init_for_optimizer(optdef, op):\n op._opdef = opdef_for_opspec(optdef.opspec)\n if optdef.flags:\n op._op_flag_vals.update(optdef.flags)\n\n\ndef _op_init_for_optimizer_opspec(opspec, op):\n op._opdef = opdef_for_opspec(opspec)\n\n\ndef _batch_op_init_for_opdef_default_optimizer(opdef, S):\n assert not S.batch_op\n S.batch_op = Operation()\n optdef = util.find_apply(\n [lambda: opdef.default_optimizer, lambda: _default_optimizer(opdef),]\n )\n _op_init_for_optimizer(optdef, S.batch_op)\n\n\ndef _default_optimizer(opdef):\n return guildfile.OptimizerDef.for_name(DEFAULT_OPTIMIZER, opdef)\n\n\ndef _try_implied_batch_op_init(user_op, S):\n batch_opspec = util.find_apply(\n [\n lambda: _batch_opspec_for_flags(user_op._op_flag_vals),\n lambda: _batch_opspec_for_trials(user_op._batch_trials),\n ]\n )\n if batch_opspec:\n assert not S.batch_op\n S.batch_op = Operation()\n S.batch_op._opdef = opdef_for_opspec(batch_opspec)\n\n\ndef _batch_opspec_for_flags(flag_vals):\n has_list = False\n for val in flag_vals.values():\n if flag_util.is_flag_function(val):\n return \"random\"\n has_list = has_list or isinstance(val, list)\n if has_list:\n return \"+\"\n return None\n\n\ndef _batch_opspec_for_trials(trials):\n return \"+\" if trials else None\n\n\ndef _check_opt_flags_for_missing_batch_opdef(S):\n if S.args.opt_flags and not (S.batch_op and S.batch_op._opdef):\n _opt_flags_for_missing_batch_opdef_error(S.args.opt_flags)\n\n\ndef _check_batch_args_for_missing_batch_op(S):\n if S.batch_op:\n return\n if S.args.max_trials:\n log.warning(\"not a batch run - ignoring --max-trials\")\n\n\ndef _op_init_batch_config(args, user_op, batch_op):\n _op_init_max_trials(args, batch_op)\n _op_init_objective(args, user_op, batch_op)\n _op_init_batch_cmd_run_attrs(args, batch_op)\n\n\ndef _op_init_max_trials(args, op):\n if op._run:\n op._max_trials = args.max_trials or op._run.get(\"max_trials\")\n else:\n op._max_trials = args.max_trials or _default_max_trials_for_op(op)\n\n\ndef _default_max_trials_for_op(op):\n if not op._opdef:\n return None\n return op._opdef.default_max_trials\n\n\ndef _op_init_objective(args, user_op, batch_op):\n assert not (args.minimize and args.maximize)\n if args.minimize:\n batch_op._objective = args.minimize\n elif args.maximize:\n batch_op._objective = \"-\" + args.maximize\n elif user_op._opdef:\n batch_op._objective = _objective_for_opdef(user_op._opdef)\n\n\ndef _objective_for_opdef(opdef):\n obj = opdef.objective\n if isinstance(obj, six.string_types):\n return obj\n elif isinstance(obj, dict):\n if \"maximize\" in obj:\n return \"-\" + obj[\"maximize\"]\n elif \"minimize\" in obj:\n return 
obj[\"minimize\"]\n return DEFAULT_OBJECTIVE\n\n\ndef _op_init_batch_cmd_run_attrs(args, op):\n if op._run:\n _apply_stage_trials(\n args.stage_trials or op._run.get(\"stage_trials\"), op._op_cmd_run_attrs\n )\n op._op_cmd_run_attrs[\"stage_trials\"] = args.stage_trials or op._run.get(\n \"stage_trials\"\n )\n else:\n _apply_stage_trials(args.stage_trials, op._op_cmd_run_attrs)\n\n\ndef _apply_stage_trials(flag, attrs):\n if flag:\n attrs[\"stage_trials\"] = flag\n\n\ndef _apply_batch_flag_encoder(batch_op, user_op):\n \"\"\"Allow a batch op to encode child op flag vals.\n\n Applies only when starting a new run (i.e. is not a restart or\n run-with-proto) and opdefs are available for the batch and user\n ops.\n\n Encoded values are applied when a batch wants to represent a flag\n value using additional configuration. For example, an optimizer\n will encode search parameters into a value so that the search\n spec can be decoded and used downstream by the optimizer.\n\n If a flag is specified in `user_flags` it is always accepted as\n is - it is never encoded.\n \"\"\"\n if (\n batch_op._run\n or not batch_op._opdef\n or not batch_op._opdef.flag_encoder\n or not user_op._opdef\n ):\n return\n encode_flag_val = op_util.op_flag_encoder(batch_op._opdef.flag_encoder)\n if not encode_flag_val:\n return\n for flag_name, flag_val in user_op._op_flag_vals.items():\n if flag_name in user_op._user_flag_vals:\n continue\n flagdef = user_op._opdef.get_flagdef(flag_name)\n if not flagdef:\n continue\n encoded_val = encode_flag_val(flag_val, flagdef)\n user_op._op_flag_vals[flag_name] = encoded_val\n\n\n###################################################################\n# Main\n###################################################################\n\n\ndef main(args):\n _init_env(args)\n S = _init_state(args)\n _dispatch_op(S)\n\n\ndef _init_env(args):\n if args.test_flags:\n os.environ[\"FLAGS_TEST\"] = \"1\"\n os.environ[\"NO_IMPORT_FLAGS_CACHE\"] = \"1\"\n\n\ndef _init_state(args):\n _maybe_shift_opspec(args)\n _validate_args(args)\n return _state_for_args(args)\n\n\ndef _maybe_shift_opspec(args):\n # Moves opspec to flags if it looks like a flag assignment\n if args.opspec and \"=\" in args.opspec:\n args.flags = (args.opspec,) + args.flags\n args.opspec = None\n\n\ndef _validate_args(args):\n _check_incompatible_options(args)\n _check_incompatible_with_restart(args)\n\n\ndef _check_incompatible_options(args):\n incompatible = [\n (\"minimize\", \"maximize\"),\n (\"no_gpus\", \"gpus\"),\n (\"optimize\", \"optimizer\"),\n (\"print_cmd\", \"print_env\"),\n (\"print_trials\", \"stage_trials\"),\n (\"stage\", \"background\"),\n (\"stage\", \"pidfile\"),\n (\"remote\", \"background\"),\n (\"remote\", \"pidfile\"),\n (\"tag\", \"label\"),\n (\"batch_tag\", \"batch_label\"),\n ]\n for a, b in incompatible:\n if getattr(args, a) and getattr(args, b):\n _incompatible_options_error(a, b)\n\n\ndef _check_incompatible_with_restart(args):\n if not args.restart:\n return\n incompatible = [\n (\"help_model\", \"--help-model\"),\n (\"help_op\", \"--help-op\"),\n (\"opspec\", \"OPERATION\"),\n (\"optimize\", \"--optimize\"),\n (\"optimizer\", \"--optimizer\"),\n (\"proto\", \"--proto\"),\n (\"run_dir\", \"--run-dir\"),\n (\"test_output_scalars\", \"--test-output-scalars\"),\n (\"test_sourcecode\", \"--test-sourcecode\"),\n (\"test_flags\", \"--test-flags\"),\n ]\n for name, desc in incompatible:\n if getattr(args, name):\n restart_option = \"restart\" if args.restart else \"start\"\n 
_incompatible_with_restart_error(desc, restart_option)\n\n\n###################################################################\n# Dispatch op\n###################################################################\n\n\ndef _dispatch_op(S):\n if S.args.help_model:\n _print_model_help(S)\n elif S.args.help_op:\n _print_op_help(S)\n elif S.args.test_output_scalars:\n _test_output_scalars(S)\n elif S.args.test_sourcecode:\n _test_sourcecode(S)\n elif S.args.test_flags:\n _test_flags(S)\n else:\n _dispatch_op_cmd(S)\n\n\n###################################################################\n# Model / op help\n###################################################################\n\n\ndef _print_model_help(S):\n assert S.user_op._opdef\n helplib.print_model_help(S.user_op._opdef.modeldef)\n\n\ndef _print_op_help(S):\n assert S.user_op._opdef\n helplib.print_op_help(S.user_op._opdef)\n\n\n###################################################################\n# Test output scalars\n###################################################################\n\n\nclass TestOutputLogger(summary.TestOutputLogger):\n @staticmethod\n def line(line):\n cli.out(line)\n\n def pattern_no_matches(self, pattern):\n msg = self._format_pattern_no_matches(pattern)\n cli.out(cli.style(msg, dim=True))\n\n def pattern_matches(self, pattern, matches, vals):\n msg = self._format_pattern_matches(pattern, matches, vals)\n cli.out(cli.style(msg, fg=\"yellow\"))\n\n\ndef _test_output_scalars(S):\n if _output_scalars_disabled(S.user_op):\n cli.out(\"Output scalars disabled, nothing to test\", err=True)\n return\n output_scalars = S.user_op._output_scalars or summary.DEFAULT_OUTPUT_SCALARS\n input_path = S.args.test_output_scalars\n logger = TestOutputLogger()\n if input_path == \"-\" and sys.stdin.isatty():\n cli.note(\n \"Type patterns and press Enter to test. 
\"\n \"Use Ctrl-c or empty line to exit.\"\n )\n with _open_output(input_path) as f:\n summary.test_output(f, output_scalars, logger)\n\n\ndef _open_output(path):\n if path == \"-\":\n return util.StdinReader(stop_on_blank_line=sys.stdin.isatty())\n try:\n return open(path, \"rb\")\n except (IOError, OSError) as e:\n if e.errno == 2:\n cli.error(\"%s does not exist\" % path)\n else:\n cli.error(\"error opening %s: %s\" % (path, e))\n\n\n###################################################################\n# Test source code\n###################################################################\n\n\ndef _test_sourcecode(S):\n opdef = S.user_op._opdef\n assert opdef\n logger = _CopyLogger()\n sourcecode_src = opdef.guildfile.dir\n sourcecode_select = op_util.sourcecode_select_for_opdef(opdef)\n op_util.copy_sourcecode(\n sourcecode_src, sourcecode_select, None, handler_cls=logger.handler_cls\n )\n cli.out(\"Copying from %s\" % cmd_impl_support.cwd_desc(logger.root))\n cli.out(\"Rules:\")\n for rule in logger.select.rules:\n cli.out(\" %s\" % _format_file_select_rule(rule))\n if logger.select.disabled:\n assert not logger.selected, logger.selected\n assert not logger.skipped, logger.skipped\n cli.out(\"Source code copy disabled\")\n else:\n cli.out(\"Selected for copy:\")\n for path in logger.selected:\n cli.out(cli.style(\" %s\" % path, fg=\"yellow\"))\n cli.out(\"Skipped:\")\n for path in logger.skipped:\n cli.out(cli.style(\" %s\" % path, dim=True))\n\n\nclass _CopyLogger(object):\n\n root = None\n select = None\n\n def __init__(self):\n self.selected = []\n self.skipped = []\n\n def handler_cls(self, src_root, dest_root, select):\n assert dest_root is None, dest_root\n self.root = os.path.relpath(src_root)\n self.select = select\n return self\n\n def copy(self, path, _results):\n self.selected.append(os.path.join(self.root, path))\n\n def ignore(self, path, _results):\n self.skipped.append(os.path.join(self.root, path))\n\n\ndef _format_file_select_rule(rule):\n parts = [\"include\" if rule.result else \"exclude\"]\n if rule.type:\n parts.append(rule.type)\n parts.append(\", \".join([repr(p) for p in rule.patterns]))\n extras = _format_file_select_rule_extras(rule)\n if extras:\n parts.append(extras)\n return \" \".join(parts)\n\n\ndef _format_file_select_rule_extras(rule):\n parts = []\n if rule.regex:\n parts.append(\"regex\")\n if rule.sentinel:\n parts.append(\"with %r\" % rule.sentinel)\n if rule.size_gt:\n parts.append(\"size > %s\" % rule.size_gt)\n if rule.size_lt:\n parts.append(\"size < %s\" % rule.size_lt)\n if rule.max_matches:\n parts.append(\"max match %s\" % rule.max_matches)\n return \", \".join(parts)\n\n\n###################################################################\n# Test flags\n###################################################################\n\n\ndef _test_flags(S):\n opdef = S.user_op._opdef\n assert opdef\n\n def out(parent, attr, indent=0):\n val = getattr(parent, attr)\n prefix = \"%s%s:\" % (\" \" * indent, attr.replace(\"_\", \"-\"))\n if val is None:\n cli.out(prefix)\n else:\n if attr == \"choices\":\n val = [flag_util.encode_flag_val(c.value) for c in val]\n cli.out(\"%s %s\" % (prefix, flag_util.encode_flag_val(val)))\n\n out(opdef, \"flags_dest\")\n out(opdef, \"flags_import\")\n cli.out(\"flags:\")\n for f in opdef.flags:\n cli.out(\" %s:\" % f.name)\n for attr in FLAG_TEST_ATTRS:\n out(f, attr, 4)\n\n\n###################################################################\n# Dispatch op 
command\n###################################################################\n\n\ndef _dispatch_op_cmd(S):\n if S.args.print_cmd:\n _print_cmd(S)\n elif S.args.print_env:\n _print_env(S)\n elif S.args.print_trials:\n _print_trials(S)\n elif S.args.save_trials:\n _save_trials(S)\n else:\n _confirm_and_run(S)\n\n\n###################################################################\n# Print op info / save trials\n###################################################################\n\n\ndef _print_cmd(S):\n if S.batch_op:\n _print_op_cmd_args(S.batch_op.cmd_args)\n _print_batch_trials_cmd_args(S)\n else:\n _print_op_cmd_args(S.user_op.cmd_args)\n\n\ndef _print_op_cmd_args(args):\n cli.out(\" \".join([util.shlex_quote(arg) for arg in args]))\n\n\ndef _print_batch_trials_cmd_args(S):\n _run_tmp_batch(S, {\"PRINT_TRIALS_CMD\": \"1\"})\n\n\ndef _run_tmp_batch(S, extra_env):\n assert S.batch_op\n with util.TempDir() as tmp:\n _init_batch_run(S, tmp.path)\n _run_op(S.batch_op, S.args, extra_env)\n\n\ndef _print_env(S):\n _print_op_cmd_env(S.user_op.cmd_env)\n\n\ndef _print_op_cmd_env(env):\n for name, val in sorted(env.items()):\n cli.out(\"%s=%s\" % (name, util.env_var_quote(val)))\n\n\ndef _print_trials(S):\n if not S.batch_op:\n _print_trials_for_non_batch_error()\n _run_tmp_batch(S, {\"PRINT_TRIALS\": \"1\"})\n\n\ndef _save_trials(S):\n if not S.batch_op:\n _save_trials_for_non_batch_error()\n path = _save_trials_path(S.args.save_trials)\n cli.out(\"Saving trials to %s\" % util.format_dir(path))\n _run_tmp_batch(S, {\"SAVE_TRIALS\": os.path.abspath(os.path.expanduser(path))})\n\n\ndef _save_trials_path(save_trials_arg):\n cwd = config.cwd()\n return (\n os.path.join(cwd, save_trials_arg) if cwd not in (\".\", \"\") else save_trials_arg\n )\n\n\n###################################################################\n# Run\n###################################################################\n\n\ndef _confirm_and_run(S):\n if S.args.yes or _confirm_run(S):\n _run(S)\n\n\n# =================================================================\n# Confirm op\n# =================================================================\n\n\ndef _confirm_run(S):\n prompt = (\n \"You are about to {action} {subject}\"\n \"{batch_suffix}{remote_suffix}{flags_note}\\n\"\n \"{user_flags}\"\n \"{optimizer_flags}\"\n \"Continue?\".format(\n action=_preview_op_action(S),\n subject=_preview_op_subject(S),\n batch_suffix=_preview_batch_suffix(S),\n remote_suffix=_preview_remote_suffix(S),\n flags_note=_preview_flags_note(S),\n user_flags=_preview_user_flags(S),\n optimizer_flags=_preview_optimizer_flags(S),\n )\n )\n return cli.confirm(prompt, default=True)\n\n\ndef _preview_op_action(S):\n if S.args.stage:\n return \"stage\"\n elif S.args.restart:\n return \"start\"\n else:\n return \"run\"\n\n\ndef _preview_op_subject(S):\n op_desc = _fmt_opref(S.user_op.opref)\n if S.restart_run:\n return \"%s (%s)\" % (S.restart_run.id, op_desc)\n else:\n return op_desc\n\n\ndef _fmt_opref(opref):\n return opref.to_opspec(config.cwd())\n\n\ndef _preview_batch_suffix(S):\n if not S.batch_op:\n return \"\"\n return \"\".join(\n [_batch_desc_preview_part(S.batch_op), _batch_qualifier_preview_part(S),]\n )\n\n\ndef _batch_desc_preview_part(op):\n opt_name = op.opref.to_opspec(config.cwd())\n if opt_name == \"+\":\n return \" as a batch\"\n elif opt_name in (\"random\", \"skopt:random\"):\n return \" with random search\"\n else:\n return \" with '%s' optimizer\" % opt_name\n\n\ndef _batch_qualifier_preview_part(S):\n batch_op = S.batch_op\n parts 
= []\n if batch_op.opref.op_name == \"+\":\n parts.append(_preview_trials_count(S))\n elif batch_op._max_trials:\n parts.append(\"max %i trials\" % batch_op._max_trials)\n if _is_likely_optimizer(batch_op) and batch_op._objective:\n parts.append(_objective_preview_part(batch_op._objective))\n if not parts:\n return \"\"\n return \" (%s)\" % \", \".join(parts)\n\n\ndef _preview_trials_count(S):\n trials_count = _trials_count(S)\n if trials_count == 1:\n return \"1 trial\"\n else:\n return \"%i trials\" % trials_count\n\n\ndef _trials_count(S):\n count = len(_op_trials(S.user_op))\n if S.batch_op._max_trials is not None:\n count = min(count, S.batch_op._max_trials)\n return count\n\n\ndef _op_trials(op):\n if op._batch_trials:\n return batch_util.expand_trial_flags(\n op._batch_trials, op._op_flag_vals, op._user_flag_vals, op._random_seed\n )\n else:\n return batch_util.expand_flags(op._op_flag_vals, op._random_seed)\n\n\ndef _is_likely_optimizer(op):\n \"\"\"Return True if op is likely an optimizer.\n\n All operations are considered likely except those known to NOT be\n optimizers. These are '+' (the general batch op) and 'random'.\n\n Ideally the operation would indicate if it is an optimizer but\n Guild doesn't support an interface for this.\n \"\"\"\n return op.opref.op_name not in (\"+\", \"random\")\n\n\ndef _objective_preview_part(obj):\n if obj[:1] == \"-\":\n return \"maximize %s\" % obj[1:]\n else:\n return \"minimize %s\" % obj\n\n\ndef _preview_remote_suffix(S):\n if S.args.remote:\n return \" on %s\" % S.args.remote\n return \"\"\n\n\ndef _preview_flags_note(S):\n if S.user_op._op_flag_vals and S.user_op._batch_trials:\n return \" (flags below used unless specified in batch trial)\"\n return \"\"\n\n\ndef _preview_user_flags(S):\n return _preview_flags(S.user_op._op_flag_vals, S.user_op._flag_null_labels)\n\n\ndef _preview_flags(flag_vals, null_labels):\n if not flag_vals:\n return \"\"\n return (\n \"\\n\".join(\n [\n \" %s\" % _format_flag(name, val, null_labels)\n for name, val in sorted(flag_vals.items())\n ]\n )\n + \"\\n\"\n )\n\n\ndef _format_flag(name, val, null_labels):\n if val is None:\n formatted = _null_label(name, null_labels)\n else:\n formatted = util.find_apply(\n [_try_format_function, flag_util.encode_flag_val], val\n )\n return \"%s: %s\" % (name, formatted)\n\n\ndef _try_format_function(val):\n if not isinstance(val, six.string_types):\n return None\n try:\n flag_util.decode_flag_function(val)\n except ValueError:\n return None\n else:\n return val\n\n\ndef _null_label(name, null_labels):\n null_label = null_labels.get(name, \"default\")\n return flag_util.encode_flag_val(null_label)\n\n\ndef _preview_optimizer_flags(S):\n if not S.batch_op or not S.batch_op._op_flag_vals:\n return \"\"\n flags_preview = _preview_flags(\n S.batch_op._op_flag_vals, S.batch_op._flag_null_labels\n )\n preview = \"Optimizer flags:\\n%s\" % flags_preview\n return cli.style(preview, dim=True)\n\n\n# =================================================================\n# Run / stage\n# =================================================================\n\n\ndef _run(S):\n if S.args.remote:\n _run_remote(S)\n else:\n _run_local(S)\n\n\ndef _run_remote(S):\n _check_remote_script(S.user_op.opref)\n remote_impl_support.run(_remote_args(S))\n\n\ndef _check_remote_script(opref):\n if opref.pkg_type == \"script\":\n cli.error(\n \"cannot run scripts remotely\\n\"\n \"Define an operation in guild.yml that uses %s as the main \"\n \"module and run that operation instead.\" % 
opref.to_opspec(config.cwd())\n )\n\n\ndef _remote_args(S):\n params = S.args.as_kw()\n params[\"opspec\"] = S.user_op.opref.to_opspec()\n if S.restart_run:\n params[\"restart\"] = S.restart_run.id\n return click_util.Args(**params)\n\n\ndef _run_local(S):\n _check_run_needed(S)\n op = _init_op_for_run(S)\n if S.args.stage:\n _stage_op(op, S.args)\n else:\n _run_op(op, S.args)\n\n\ndef _check_run_needed(S):\n if not S.args.needed:\n return\n matching = _remove_failed_runs(_find_matching_runs(S))\n if matching:\n if _restarting_match(matching, S):\n _skip_needed_unchanged_flags_info()\n else:\n _skip_needed_matches_info(matching)\n raise SystemExit(0)\n\n\ndef _find_matching_runs(S):\n if S.batch_op:\n matching = op_util.find_matching_runs(\n S.batch_op.opref, S.batch_op._op_flag_vals\n )\n return _filter_matching_batch_runs(matching, S.user_op)\n else:\n return op_util.find_matching_runs(S.user_op.opref, S.user_op._op_flag_vals)\n\n\ndef _filter_matching_batch_runs(batch_runs, user_op):\n return [\n run\n for run in batch_runs\n if (\n run.batch_proto\n and op_util.is_matching_run(\n run.batch_proto,\n user_op.opref,\n user_op._op_flag_vals,\n include_pending=True,\n )\n )\n ]\n\n\ndef _remove_failed_runs(runs):\n return [run for run in runs if run.status != \"error\"]\n\n\ndef _restarting_match(matches, S):\n restart_run = S.batch_op._run if S.batch_op else S.user_op._run\n return restart_run and restart_run.id in (run.id for run in matches)\n\n\ndef _init_op_for_run(S):\n if S.batch_op:\n _init_batch_run(S)\n return S.batch_op\n return S.user_op\n\n\ndef _init_batch_run(S, run_dir=None):\n batch_run = oplib.init_run(S.batch_op, run_dir)\n S.batch_op.run_dir = batch_run.dir\n oplib.init_run(S.user_op, batch_run.guild_path(\"proto\"))\n\n\ndef _stage_op(op, args):\n try:\n run = oplib.stage(op)\n except op_dep.OpDependencyError as e:\n _op_dependency_error(e)\n else:\n if not args.quiet:\n _print_staged_info(run)\n\n\ndef _print_staged_info(run):\n if _staged_outside_guild_home(run):\n _print_staged_dir_instructions(run)\n else:\n _print_stage_pending_instructions(run)\n\n\ndef _staged_outside_guild_home(run):\n return not util.compare_paths(os.path.dirname(run.dir), var.runs_dir())\n\n\ndef _print_staged_dir_instructions(run):\n cmd_args = run.get(\"cmd\") or []\n cmd = \" \".join([util.shlex_quote(arg) for arg in cmd_args])\n cli.out(\n \"{op} staged in '{dir}'\\n\"\n \"To start the operation, use \"\n \"\\\"(cd '{dir}' && source .guild/ENV && {cmd})\\\"\".format(\n op=run_util.format_operation(run), dir=run.dir, cmd=cmd\n )\n )\n\n\ndef _print_stage_pending_instructions(run):\n cli.out(\n \"{op} staged as {run_id}\\n\"\n \"To start the operation, use 'guild run --start {run_id}'\".format(\n op=run_util.format_operation(run), run_id=run.id\n )\n )\n\n\ndef _run_op(op, args, extra_env=None):\n try:\n _, exit_status = oplib.run(\n op,\n quiet=args.quiet,\n pidfile=_op_pidfile(args),\n stop_after=args.stop_after,\n extra_env=extra_env,\n )\n except op_dep.OpDependencyError as e:\n _op_dependency_error(e)\n except oplib.ProcessError as e:\n _op_process_error(op, e)\n else:\n _handle_run_exit(exit_status)\n\n\ndef _op_pidfile(args):\n if args.pidfile:\n return args.pidfile\n elif args.background:\n return util.TempFile(\"guild-pid-\").path\n else:\n return None\n\n\ndef _handle_run_exit(exit_status):\n sys.stdout.flush()\n if exit_status != 0:\n cli.error(exit_status=exit_status)\n\n\n###################################################################\n# Error handlers / user 
messages\n###################################################################\n\n\ndef _incompatible_options_error(a, b):\n cli.error(\n \"--%s and --%s cannot both be used\\n\"\n \"Try 'guild run --help' for more information.\"\n % (a.replace(\"_\", \"-\"), b.replace(\"_\", \"-\"))\n )\n\n\ndef _incompatible_with_restart_error(option, restart_option):\n cli.error(\n \"%s cannot be used with --%s\\n\"\n \"Try 'guild run --help' for more information.\" % (option, restart_option)\n )\n\n\ndef _invalid_opspec_error(opspec):\n cli.error(\n \"invalid operation '%s'\\n\"\n \"Try 'guild operations' for a list of available operations.\" % opspec\n )\n\n\ndef _guildfile_error(gf_path, msg):\n log.error(msg)\n if os.path.basename(gf_path) == \"guild.yml\":\n gf_path = os.path.dirname(gf_path)\n cli.error(\n \"guildfile in %s contains an error (see above for details)\"\n % cmd_impl_support.cwd_desc(gf_path)\n )\n\n\ndef _missing_run_opdef_error(opspec, run):\n cli.error(\n \"cannot find definition for operation '%s' in run %s\\n\"\n \"The definition is required when setting flags for start or restart.\"\n \"\" % (opspec, run.id)\n )\n\n\ndef _no_such_model_op_error(opspec):\n if opspec:\n if \":\" in opspec:\n cli.error(\n \"cannot find operation %s\\n\"\n \"Try 'guild operations' for a list of available operations.\" % opspec\n )\n else:\n cli.error(\n \"cannot find operation %s\\n\"\n \"You may need to include a model in the form MODEL:OPERATION. \"\n \"Try 'guild operations' for a list of available operations.\" % opspec\n )\n else:\n cli.error(\n \"cannot find a default operation\\n\" \"Try 'guild operations' for a list.\"\n )\n\n\ndef _multiple_models_error(model_ref, models):\n models_list = \"\\n\".join(\n [\" %s\" % name for name in sorted([m.fullname for m in models])]\n )\n cli.error(\n \"there are multiple models that match '%s'\\n\"\n \"Try specifying one of the following:\\n\"\n \"%s\" % (model_ref, models_list)\n )\n\n\ndef _no_such_opdef_error(model, op_name):\n op = \"operation '{0}'\".format(op_name) if op_name else \"a default operation\"\n if model.name:\n cli.error(\n \"{op} is not defined for model '{model}'\\n\"\n \"Try 'guild operations {model}' for a list of available \"\n \"operations.\".format(op=op, model=model.name)\n )\n else:\n cli.error(\n \"{op} is not defined for this project\\n\"\n \"Try 'guild operations' for a list of available operations.\".format(op=op)\n )\n\n\ndef _invalid_flag_arg_error(arg):\n cli.error(\"invalid argument '%s' - expected NAME=VAL\" % arg)\n\n\ndef _no_such_flag_error(name, opdef):\n cli.error(\n \"unsupported flag '%s'\\n\"\n \"Try 'guild run %s --help-op' for a list of \"\n \"flags or use --force-flags to skip this check.\" % (name, opdef.fullname)\n )\n\n\ndef _coerce_flag_val_error(e):\n cli.error(\"cannot apply %r to flag '%s': %s\" % (e.value, e.flag_name, e.error))\n\n\ndef _missing_required_flags_error(e):\n cli.out(\"Operation requires the following missing flags:\\n\", err=True)\n line1 = lambda s: s.split(\"\\n\")[0]\n cli.table(\n [{\"name\": flag.name, \"desc\": line1(flag.description)} for flag in e.missing],\n [\"name\", \"desc\"],\n indent=2,\n err=True,\n )\n cli.out(\n \"\\nRun the command again with these flags specified \" \"as NAME=VAL.\", err=True\n )\n cli.error()\n\n\ndef _invalid_flag_choice_error(e):\n cli.out(\n \"Unsupported value for '%s' - supported values are:\\n\" % e.flag.name, err=True\n )\n cli.table(\n [\n {\"val\": choice.value, \"desc\": choice.description}\n for choice in e.flag.choices\n ],\n [\"val\", 
\"desc\"],\n indent=2,\n err=True,\n )\n cli.out(\"\\nRun the command again using one of these options.\", err=True)\n cli.error()\n\n\ndef _invalid_flag_value_error(e):\n cli.error(\"invalid value %s for %s: %s\" % (e.value, e.flag.name, e.msg))\n\n\ndef _invalid_opdef_error(opdef, msg):\n cli.error(\"invalid definition for operation '%s': %s\" % (opdef.fullname, msg))\n\n\ndef _model_op_proxy_error(e):\n cli.error(\"cannot run '%s': %s\" % (e.opspec, e.msg))\n\n\ndef _op_cmd_error(msg):\n cli.error(msg)\n\n\ndef _op_dependency_error(e):\n cli.error(\"run failed because a dependency was not met: %s\" % e)\n\n\ndef _op_process_error(op, e):\n cli.error(\"error running %s: %s\" % (_fmt_opref(op.opref), e))\n\n\ndef _opt_flags_for_missing_batch_opdef_error(args):\n assert args\n cli.error(\"invalid optimizer flag %s: no optimizer specified\" % args[0])\n\n\ndef _missing_op_config_for_restart_error(run):\n cli.error(\n \"cannot restart run in %s: missing op configuration\\n\"\n \"The run may not have been initialized correctly. Try starting \"\n \"the operation without the --start/--restart flag.\" % run.dir\n )\n\n\ndef _invalid_op_config_for_restart_error(run):\n cli.error(\n \"cannot restart run in %s: invalid op configuration\\n\"\n \"This may be an internal error. Please open an issue \"\n \"https://github.com/guildai/guildai/issues.\" % run.dir\n )\n\n\ndef _no_such_batch_file_error(path):\n cli.error(\"batch file %s does not exist\" % path)\n\n\ndef _batch_file_error(e):\n cli.error(e)\n\n\ndef _flag_for_resolved_dep_error(flag_name, run):\n cli.error(\n \"cannot specify a value for '%s' when restarting %s - \"\n \"resource has already been resolved\" % (flag_name, run.short_id)\n )\n\n\ndef _print_trials_for_non_batch_error():\n cli.error(\"cannot print trials for a non-batch operation\")\n\n\ndef _save_trials_for_non_batch_error():\n cli.error(\"cannot save trials for a non-batch operation\")\n\n\ndef _skip_needed_unchanged_flags_info():\n cli.out(\"Skipping run because flags have not changed \" \"(--needed specified)\")\n\n\ndef _skip_needed_matches_info(matching_runs):\n cli.out(\n \"Skipping because the following runs match \"\n \"this operation (--needed specified):\"\n )\n formatted = [run_util.format_run(run) for run in matching_runs]\n cols = [\"index\", \"operation\", \"started\", \"status_with_remote\", \"label\"]\n cli.table(formatted, cols=cols, indent=2)\n\n\n###################################################################\n# Cmd impl API\n###################################################################\n\n\ndef run(start=None, **kw):\n from .run import run as run_cmd\n\n if start is not None:\n raise ValueError(\"start kw not supported, use restart instead\")\n ctx = run_cmd.make_context(\"\", [])\n ctx.params.update(kw)\n ctx.params[\"yes\"] = True\n args = click_util.Args(**ctx.params)\n main(args)\n\n\ndef one_run(run_id_prefix):\n runs = [runlib.Run(id, path) for id, path in var.find_runs(run_id_prefix)]\n return cmd_impl_support.one_run(runs, run_id_prefix)\n","sub_path":"guild/commands/run_impl.py","file_name":"run_impl.py","file_ext":"py","file_size_in_byte":64872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"472719081","text":"import numpy as np\nimport signal,os\nfrom subprocess import Popen\nimport subprocess\nfrom time import sleep\nimport os.path\n\n\nconfig_path = \"/home/kuro/Projects/ComputationalIntelligence/torcs-server/example_torcs_config.xml\"\npopulation_folder = 
\"/home/kuro/Projects/ComputationalIntelligence/torcs-client/JesusTakeTheWheel/evolution/population\"\n\nsensors_size = 23\nhidden_size = 50\noutput_size = 3\n\ndef create_weights(dev = 1.0):\n w1 = np.random.normal(size=(sensors_size, hidden_size),scale=dev)\n b1 = np.random.normal(size=(hidden_size) , scale=dev)\n w2 = np.random.normal(size=(hidden_size, output_size) ,scale=dev)\n b2 = np.random.normal(size=(output_size), scale=dev)\n return w1,b1,w2,b2\n\ndef weights_to_vector(w1,b1,w2,b2):\n result = w1.flatten()\n result = np.append(result,b1.flatten())\n result = np.append(result,w2.flatten())\n result = np.append(result,b2.flatten())\n return result\n\ndef vector_to_weights(vec):\n end_pointer = sensors_size*hidden_size\n w1 = vec[:end_pointer].reshape((sensors_size,hidden_size))\n b1 = vec[end_pointer:end_pointer+hidden_size]\n end_pointer = end_pointer + hidden_size\n\n w2 = vec[end_pointer:end_pointer + hidden_size*output_size].reshape((hidden_size,output_size))\n end_pointer = end_pointer + hidden_size*output_size\n\n b2 = vec[end_pointer:]\n return w1,b1,w2,b2\n\ndef createPopulations(creator_function, size):\n for i in range(size):\n individual = creator_function()\n\n\nif __name__ == '__main__':\n population = []\n\n\n\n\n for i in range(1000):\n subprocess.call(\"/home/kuro/Projects/ComputationalIntelligence/torcs-client/JesusTakeTheWheel/train.sh\")","sub_path":"evolution/evolution.py","file_name":"evolution.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"336604466","text":"import logging\n\nimport pytz\nfrom collections import OrderedDict\nfrom datetime import datetime\nfrom enum import Enum\nfrom html.parser import HTMLParser\nfrom os.path import join, lexists, isfile\nimport os\nfrom typing import List, Dict, Any, Optional, Union\nfrom urllib.parse import unquote\nfrom zipfile import ZipFile\nimport re\nimport json\n\nfrom dateutil import parser\n\nfrom wereyouhere.common import Entry, History, Visit, get_logger\n\n\n# TODO wonder if that old format used to be UTC...\n# Mar 8, 2018, 5:14:40 PM\n_TIME_FORMAT = \"%b %d, %Y, %I:%M:%S %p %Z\"\n\n# ugh. something is seriously wrong with datetime, it wouldn't parse timezone aware UTC timestamp :(\ndef parse_dt(s: str) -> datetime:\n dt = parser.parse(s)\n if dt.tzinfo is None:\n # TODO log?\n # hopefully it was utc? Legacy, so no that much of an issue anymore..\n dt = dt.replace(tzinfo=pytz.utc)\n return dt\n\nclass State(Enum):\n OUTSIDE = 0\n INSIDE = 1\n PARSING_LINK = 2\n PARSING_DATE = 3\n\n# would be easier to use beautiful soup, but ends up in a big memory footprint..\nclass TakeoutHTMLParser(HTMLParser):\n state: State\n current: Dict[str, str]\n urls: History\n\n def __init__(self, tag: str) -> None:\n super().__init__()\n self.state = State.OUTSIDE\n self.urls = History()\n self.current = {}\n self.tag = tag\n\n def _reg(self, name, value):\n assert name not in self.current\n self.current[name] = value\n\n def _astate(self, s): assert self.state == s\n\n def _trans(self, f, t):\n self._astate(f)\n self.state = t\n\n # enter content cell -> scan link -> scan date -> finish till next content cell\n def handle_starttag(self, tag, attrs):\n if self.state == State.INSIDE and tag == 'a':\n self.state = State.PARSING_LINK\n attrs = OrderedDict(attrs)\n hr = attrs['href']\n\n # sometimes it's starts with this prefix, it's apparently clicks from google search? or visits from chrome address line? 
who knows...\n # TODO handle http?\n prefix = r'https://www.google.com/url?q='\n if hr.startswith(prefix + \"http\"):\n hr = hr[len(prefix):]\n hr = unquote(hr)\n self._reg('url', hr)\n\n def handle_endtag(self, tag):\n if tag == 'html':\n pass # ??\n\n def handle_data(self, data):\n if self.state == State.OUTSIDE:\n if data[:-1].strip() == \"Visited\":\n self.state = State.INSIDE\n return\n\n if self.state == State.PARSING_LINK:\n # self._reg(Entry.link, data)\n self.state = State.PARSING_DATE\n return\n\n if self.state == State.PARSING_DATE:\n # TODO regex?\n years = [str(i) + \",\" for i in range(2000, 2030)]\n for y in years:\n if y in data:\n self._reg('time', data.strip())\n\n url = self.current['url']\n times = self.current['time']\n time = parse_dt(times)\n assert time.tzinfo is not None\n visit = Visit(\n dt=time,\n tag=self.tag,\n )\n self.urls.register(url, visit)\n\n self.current = {}\n self.state = State.OUTSIDE\n return\n\ndef _read_google_activity(myactivity_html_fo, tag: str):\n data: str = myactivity_html_fo.read().decode('utf-8')\n parser = TakeoutHTMLParser(tag)\n parser.feed(data)\n return parser.urls\n\ndef _exists(thing, path):\n if isinstance(thing, ZipFile):\n return path in thing.namelist()\n else:\n return lexists(join(thing, path))\n\n\ndef _open(thing, path):\n if isinstance(thing, ZipFile):\n return thing.open(path, 'r')\n else:\n return open(join(thing, path), 'rb')\n\n\ndef read_google_activity(takeout) -> Optional[History]:\n logger = get_logger()\n spath = join(\"Takeout\", \"My Activity\", \"Chrome\", \"MyActivity.html\")\n if not _exists(takeout, spath):\n logger.warning(f\"{spath} is not present... skipping\")\n return None\n with _open(takeout, spath) as fo:\n return _read_google_activity(fo, 'activity-chrome')\n\ndef read_search_activity(takeout) -> Optional[History]:\n logger = get_logger()\n spath = join(\"Takeout\", \"My Activity\", \"Search\", \"MyActivity.html\")\n if not _exists(takeout, spath):\n logger.warning(f\"{spath} is not present... skipping\")\n return None\n with _open(takeout, spath) as fo:\n return _read_google_activity(fo, 'activity-search')\n\n# TODO add this to tests?\ndef read_browser_history_json(takeout) -> Optional[History]:\n logger = get_logger()\n spath = join(\"Takeout\", \"Chrome\", \"BrowserHistory.json\")\n\n if not _exists(takeout, spath):\n logger.warning(f\"{spath} is not present... 
skipping\")\n return None\n\n j = None\n with _open(takeout, spath) as fo:\n j = json.load(fo)\n\n urls = History()\n hist = j['Browser History']\n for item in hist:\n url = item['url']\n time = datetime.utcfromtimestamp(item['time_usec'] / 10 ** 6).replace(tzinfo=pytz.utc)\n visit = Visit(\n dt=time,\n tag=\"history_json\",\n )\n urls.register(url, visit)\n return urls\n\ndef get_takeout_histories(takeout_path: str) -> List[History]:\n # first, figure out what is takeout_path...\n takeout: Union[ZipFile, str]\n if isfile(takeout_path):\n # must be a takeout zip\n # TODO support other formats too\n takeout = ZipFile(takeout_path)\n elif lexists(join(takeout_path, 'Takeout', 'My Activity')):\n # unpacked dir, just process it\n takeout = takeout_path\n else:\n # directory with many takeout archives\n TAKEOUT_REGEX = re.compile(r'takeout-\\d{8}T\\d{6}Z')\n takeout_name = max([ff for ff in os.listdir(takeout_path) if TAKEOUT_REGEX.match(ff)]) # lastest chronologically\n takeout = ZipFile(join(takeout_path, takeout_name))\n # TODO multipart archives?\n\n chrome_myactivity = read_google_activity(takeout)\n search_myactivity = read_search_activity(takeout)\n browser_history_json = read_browser_history_json(takeout)\n return [h for h in (\n chrome_myactivity,\n search_myactivity,\n browser_history_json,\n ) if h is not None]\n","sub_path":"wereyouhere/generator/takeout.py","file_name":"takeout.py","file_ext":"py","file_size_in_byte":6430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"522561817","text":"import time\nimport json\nimport requests\nimport dateutil.parser #need to install\n\ndef get_api_key(): #To retrieve key\n return \"happyhappyjoyjoy\"\n\ndef av_update(hashcode):\n key = get_api_key()\n new_url = '/other/avscans_update/'\n url = '/other/avscans/'\n js = requests.get('http://localhost:8000/' + new_url + key + '/' + hashcode +'/').content\n time.sleep(5)\n js = requests.get('http://localhost:8000/' + url + key + '/' + hashcode +'/').content\n jsondata = json.loads(js) # String -> Dictionary\n return jsondata\n\ndef showDate (date, fmt=None):\n date = dateutil.parser.parse(date)\n native = date.replace(tzinfo=None)\n format='%b %d, %Y'\n return native.strftime(format) \n\ndef glyph2(field, val):\n if field == val:\n field = \"<span class=\\\"glyphicon glyphicon-ok\\\" style=\\\"color:green\\\"></span>\"\n else:\n field = \"<span class=\\\"glyphicon glyphicon-remove\\\" style=\\\"color:red\\\"></span>\"\n return field\n","sub_path":"vbUtils/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"230741463","text":"\"\"\"Cisco Spark Webhooks-API wrapper classes.\n\nClasses:\n Webhook: Models a Spark 'webhook' JSON object as a native Python object.\n WebhooksAPI: Wrappers the Cisco Spark Webhooks-API and exposes the API\n calls as Python method calls that return native Python objects.\n\n\"\"\"\n\n\nfrom ciscosparkapi.exceptions import ciscosparkapiException\nfrom ciscosparkapi.helper import utf8, generator_container\nfrom ciscosparkapi.restsession import RestSession\nfrom ciscosparkapi.sparkdata import SparkData\n\n\nclass Webhook(SparkData):\n \"\"\"Model a Spark 'webhook' JSON object as a native Python object.\"\"\"\n\n def __init__(self, json):\n \"\"\"Init a new Webhook data object from a JSON dictionary or string.\n\n Args:\n json(dict, unicode, str): Input JSON object.\n\n Raises:\n TypeError: If the input 
object is not a dictionary or string.\n\n \"\"\"\n super(Webhook, self).__init__(json)\n\n @property\n def id(self):\n \"\"\"Webhook ID.\"\"\"\n return self._json.get(u'id')\n\n @property\n def name(self):\n \"\"\"A user-friendly name for this webhook.\"\"\"\n return self._json.get(u'name')\n\n @property\n def targetUrl(self):\n \"\"\"The URL that receives POST requests for each event.\"\"\"\n return self._json.get(u'targetUrl')\n\n @property\n def resource(self):\n \"\"\"The resource type for the webhook.\"\"\"\n return self._json.get(u'resource')\n\n @property\n def event(self):\n \"\"\"The event type for the webhook.\"\"\"\n return self._json.get(u'event')\n\n @property\n def filter(self):\n \"\"\"The filter that defines the webhook scope.\"\"\"\n return self._json.get(u'filter')\n\n @property\n def secret(self):\n \"\"\"Secret used to generate payload signature.\"\"\"\n return self._json.get(u'secret')\n\n @property\n def created(self):\n \"\"\"Creation date and time in ISO8601 format.\"\"\"\n return self._json.get(u'created')\n\n @property\n def data(self):\n \"\"\"The object representation of the resource triggering the webhook.\n\n The data property contains the object representation of the resource\n that triggered the webhook. For example, if you registered a webhook\n that triggers when messages are created (i.e. posted into a room) then\n the data property will contain the representation for a message\n resource, as specified in the Messages API documentation.\n\n \"\"\"\n object_data = self._json.get(u'data', None)\n if object_data:\n return SparkData(object_data)\n else:\n return None\n\n\nclass WebhooksAPI(object):\n \"\"\"Cisco Spark Webhooks-API wrapper class.\n\n Wrappers the Cisco Spark Webhooks-API and exposes the API calls as Python\n method calls that return native Python objects.\n\n Attributes:\n session(RestSession): The RESTful session object to be used for API\n calls to the Cisco Spark service.\n\n \"\"\"\n\n def __init__(self, session):\n \"\"\"Init a new WebhooksAPI object with the provided RestSession.\n\n Args:\n session(RestSession): The RESTful session object to be used for\n API calls to the Cisco Spark service.\n\n Raises:\n AssertionError: If the parameter types are incorrect.\n\n \"\"\"\n assert isinstance(session, RestSession)\n super(WebhooksAPI, self).__init__()\n self.session = session\n\n @generator_container\n def list(self, max=None):\n \"\"\"List all of the authenticated user's webhooks.\n\n This method supports Cisco Spark's implementation of RFC5988 Web\n Linking to provide pagination support. It returns a generator\n container that incrementally yields all webhooks returned by the\n query. The generator will automatically request additional 'pages' of\n responses from Spark as needed until all responses have been returned.\n The container makes the generator safe for reuse. 
A new API call will\n be made, using the same parameters that were specified when the\n generator was created, every time a new iterator is requested from the\n container.\n\n Args:\n max(int): Limits the maximum number of webhooks returned from the\n Spark service per request.\n\n Yields:\n Webhook: The the next webhook from the Cisco Spark query.\n\n Raises:\n AssertionError: If the parameter types are incorrect.\n SparkApiError: If the Cisco Spark cloud returns an error.\n\n \"\"\"\n # Process args\n assert max is None or isinstance(max, int)\n params = {}\n if max:\n params[u'max'] = max\n # API request - get items\n items = self.session.get_items('webhooks', params=params)\n # Yield Webhook objects created from the returned items JSON objects\n for item in items:\n yield Webhook(item)\n\n def create(self, name, targetUrl, resource, event,\n filter=None, secret=None):\n \"\"\"Create a webhook.\n\n Args:\n name(unicode, str): A user-friendly name for this webhook.\n targetUrl(unicode, str): The URL that receives POST requests for\n each event.\n resource(unicode, str): The resource type for the webhook.\n event(unicode, str): The event type for the webhook.\n filter(unicode, str): The filter that defines the webhook scope.\n secret(unicode, str): secret used to generate payload signature.\n\n Returns:\n Webhook: With the details of the created webhook.\n\n Raises:\n AssertionError: If the parameter types are incorrect.\n SparkApiError: If the Cisco Spark cloud returns an error.\n\n \"\"\"\n # Process args\n assert isinstance(name, basestring)\n assert isinstance(targetUrl, basestring)\n assert isinstance(resource, basestring)\n assert isinstance(event, basestring)\n assert filter is None or isinstance(filter, basestring)\n assert secret is None or isinstance(secret, basestring)\n post_data = {}\n post_data[u'name'] = utf8(name)\n post_data[u'targetUrl'] = utf8(targetUrl)\n post_data[u'resource'] = utf8(resource)\n post_data[u'event'] = utf8(event)\n if filter:\n post_data[u'filter'] = utf8(filter)\n if secret:\n post_data[u'secret'] = utf8(secret)\n # API request\n json_obj = self.session.post('webhooks', json=post_data)\n # Return a Webhook object created from the response JSON data\n return Webhook(json_obj)\n\n def get(self, webhookId):\n \"\"\"Get the details of a webhook, by ID.\n\n Args:\n webhookId(unicode, str): The webhookId of the webhook.\n\n Returns:\n Webhook: With the details of the requested webhook.\n\n Raises:\n AssertionError: If the parameter types are incorrect.\n SparkApiError: If the Cisco Spark cloud returns an error.\n\n \"\"\"\n # Process args\n assert isinstance(webhookId, basestring)\n # API request\n json_obj = self.session.get('webhooks/' + webhookId)\n # Return a Webhook object created from the response JSON data\n return Webhook(json_obj)\n\n def update(self, webhookId, **update_attributes):\n \"\"\"Update details for a webhook.\n\n Args:\n webhookId(unicode, str): The webhookId of the webhook to be\n updated.\n\n **update_attributes:\n name(unicode, str): A user-friendly name for this webhook.\n targetUrl(unicode, str): The URL that receives POST requests\n for each event.\n\n Returns:\n Webhook: With the updated Spark webhook details.\n\n Raises:\n AssertionError: If the parameter types are incorrect.\n ciscosparkapiException: If an update attribute is not provided.\n SparkApiError: If the Cisco Spark cloud returns an error.\n\n \"\"\"\n # Process args\n assert isinstance(webhookId, basestring)\n # Process update_attributes keyword arguments\n if not 
update_attributes:\n error_message = \"At least one **update_attributes keyword \" \\\n \"argument must be specified.\"\n raise ciscosparkapiException(error_message)\n put_data = {}\n for param, value in update_attributes.items():\n if isinstance(value, basestring):\n value = utf8(value)\n put_data[utf8(param)] = value\n # API request\n json_obj = self.session.put('webhooks/' + webhookId, json=put_data)\n # Return a Webhook object created from the response JSON data\n return Webhook(json_obj)\n\n def delete(self, webhookId):\n \"\"\"Delete a webhook.\n\n Args:\n webhookId(unicode, str): The webhookId of the webhook to be\n deleted.\n\n Raises:\n AssertionError: If the parameter types are incorrect.\n SparkApiError: If the Cisco Spark cloud returns an error.\n\n \"\"\"\n # Process args\n assert isinstance(webhookId, basestring)\n # API request\n self.session.delete('webhooks/' + webhookId)\n","sub_path":"ciscosparkapi/api/webhooks.py","file_name":"webhooks.py","file_ext":"py","file_size_in_byte":9321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"376136878","text":"import random\n\nimport rootplot.root2matplotlib as r2m\nfrom matplotlib import pyplot as plt\nfrom matplotlib import rc\nrc('text', usetex=True)\n\ndef mkhypo(mu, sigma, testfunc, npoints=20000):\n import ROOT as r\n h = r.TH1D(\"h\", \"h\", 200, 0, 200)\n h2= r.TH1D(\"h2\", \"h2\", 200, 0, 200)\n for i in range(npoints):\n x = random.gauss(mu, sigma)\n if testfunc(x): h2.Fill(x)\n else: h.Fill(x)\n pv = h2.Integral()/(h.Integral()+h2.Integral())\n hist = r2m.Hist(h)\n hist2 = r2m.Hist(h2)\n return hist, hist2, pv\n\ndef mkplot(mu1, sigma1, mu2, sigma2, test_stat, xlim, ofile, show_cls=False):\n my1, my1_gt, pv1 = mkhypo(mu1, sigma1, lambda x: x <= test_stat)\n my2, my2_lt, pv2 = mkhypo(mu2, sigma2, lambda x: x >= test_stat)\n\n # Make a figure with width 6 inches and height 4 inches\n fig = plt.figure(figsize=(8, 6))\n # Create an axes instance\n ax = plt.axes()\n ax.set_xlabel(\"Test Statistic, $x$\")\n ax.set_ylabel(r\"$N_{\\textrm{toys}}$\")\n _,_,null = my1.hist(histtype=\"stepfilled\", ec=\"k\", fc=\"r\", alpha=0.5)\n _,_,alt = my2.hist(histtype=\"stepfilled\", ec=\"k\", fc=\"b\", alpha=0.5)\n _,_,null_p = my1_gt.hist(histtype=\"stepfilled\", fc=\"r\", ec=\"k\", alpha=0.8)\n _,_,alt_p = my2_lt.hist(histtype=\"stepfilled\", fc=\"b\", ec=\"k\", alpha=0.8)\n ax.grid()\n\n ts_line = ax.axvline(test_stat, lw=1, color=\"k\")\n\n ax.annotate(r'Test Statistic in Data, $x_0$', xy=(1.0,0.98), xycoords=ts_line,\n xytext=(5, 0), textcoords='offset points',\n ha=\"left\", va=\"top\",\n rotation=270,\n )\n ax.set_ylim(0, 1250)\n ax.set_xlim(0, xlim)\n if show_cls:\n labels = (r\"Null Hypothesis, $H_0$\",\n r\"Alternate Hypothesis, $H_1$\",\n \"$P(x < x_0|H_0) = 1 - CL_b = %.2f$\" % pv1,\n \"$P(x > x_0|H_1) = CL_{s+b} = %.2f $\" % pv2\n )\n else:\n labels = (r\"Null Hypothesis, $H_0$\",\n r\"Alternate Hypothesis, $H_1$\",\n \"$P(x < x_0|H_0) = %.2f$\" % pv1,\n \"$P(x > x_0|H_1) = %.2f $\" % pv2\n )\n\n ax.legend((null[0], alt[0], null_p[0], alt_p[0]),\n labels\n , loc=2)\n if show_cls:\n ax.text(0.65, 0.9, \"$CL_s = \\\\frac{CL_{s+b}}{CL_b} = %.2f$\" % (pv2/(1-pv1))\n , transform = ax.transAxes, size=\"x-large\"\n , bbox = dict(boxstyle=\"round\",\n fc=\"w\",\n )\n )\n fig.savefig(ofile, bbox_inches='tight')\n\nif __name__ == \"__main__\":\n mkplot(75, 10, 35, 10, 60, 110, \"plots/cls1.pdf\")\n mkplot(45, 10, 42, 10, 65, 110, \"plots/cls2.pdf\", 
show_cls=True)\n","sub_path":"plotCLSFigures.py","file_name":"plotCLSFigures.py","file_ext":"py","file_size_in_byte":2682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"286147894","text":"from rest_framework.exceptions import ValidationError\n\n\nclass BackendException(ValidationError):\n def __init__(self, detail, create=False, update=False, retreive_single=False, delete=False, retreive_list=False, code=None):\n self.create = create\n self.update = update\n self.retreive_single = retreive_single\n self.delete = delete\n self.retreive_list = retreive_list\n\n super().__init__(detail, code)\n\n\nclass AbstractStorageBackend:\n exception_class = BackendException\n","sub_path":"tests/app/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"403997614","text":"\r\ndef work(n,k,w):\r\n if max(w)>k:\r\n return -1\r\n if sum(w)<=k:\r\n return 1\r\n count=0\r\n su=0\r\n for i in range(n):\r\n if su<=k:\r\n su+=w[i]\r\n\r\n if su>k:\r\n su=w[i]\r\n if su==w[i]:\r\n count+=1\r\n return count\r\n \r\n \r\n \r\n \r\n\r\nfor _ in range(int(input())):\r\n n,k=input().split()\r\n n=int(n)\r\n k=int(k)\r\n w=list(map(int,input().strip().split()))[:n]\r\n print(work(n,k,w))\r\n","sub_path":"AUG20B/code_war.py","file_name":"code_war.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"434768621","text":"import tensorflow as tf\nfrom random import shuffle\nimport numpy as np\nfrom numpy import array\nNUM_EXEMPLES = 10000\nNUM_HIDDEN = 24\nBATCH_SIZE=1000\nNO_BATCH = 10\nEPOCHS=100\ntrain_input = ['{0:020b}'.format(i) for i in range(2**15)]\n#train_input = [int(i) for i in train_input]\nshuffle(train_input)\nti = []\nfor i in train_input:\n temp_list=[]\n for j in i:\n temp_list.append([int(j)])\n ti.append(array(temp_list))\ntrain_input = ti\n\ntrain_output = []\nfor i in train_input:\n count = 0\n for j in i:\n if j[0]==1:\n count+=1\n temp_list = [0]*21\n temp_list[count] = 1\n train_output.append(temp_list)\ntrain_output=array(train_output)\ntrain_input=array(train_input).astype('float32')\nx_train,x_test,y_train,y_test = train_input[:NUM_EXEMPLES],train_input[NUM_EXEMPLES:],train_output[:NUM_EXEMPLES],train_output[NUM_EXEMPLES:]\ndata = tf.placeholder(tf.float32,[None,20,1])\ntarget = tf.placeholder(tf.float32,[None,21])\ncell = tf.nn.rnn_cell.LSTMCell(NUM_HIDDEN,state_is_tuple=True)\nval,state = tf.nn.dynamic_rnn(cell,data,dtype=tf.float32)\nval = tf.transpose(val,[1,0,2])\nval = tf.gather(val,19)\n\nweights = tf.Variable(tf.truncated_normal([NUM_HIDDEN,int(target.get_shape()[1])]))\nbiais = tf.Variable(tf.random.uniform((int(target.get_shape()[1]),)))\npredictions = tf.nn.softmax(tf.matmul(val,weights)+biais)\ncross_entropy= -tf.reduce_sum(target*tf.log(predictions))\noptimiser = tf.train.AdamOptimizer()\nminimize = optimiser.minimize(cross_entropy)\n\nmistakes = tf.not_equal(tf.argmax(predictions,1),tf.argmax(target,1))\nerror = tf.reduce_mean(tf.cast(mistakes,tf.float32))\n\ninit_op = tf.global_variables_initializer()\nsaver = tf.train.Saver()\nsess = tf.Session()\nsess.run(init_op)\nfor i in range(EPOCHS):\n ptr=0\n for j in range(NO_BATCH):\n inp,out=x_train[ptr:ptr+BATCH_SIZE] , y_train[ptr:ptr+BATCH_SIZE]\n ptr+=BATCH_SIZE\n sess.run(minimize,{data:inp,target:out})\n print(\"EPOCH - %d and loss is 
%.3f\"%(i,sess.run(error,{data:x_test,target:y_test})))\n\n#print(sess.run(tf.argmax(predictions,axis=1),{data:array([0,1,0,1,1,1,0,1,1,1,1,0,0,1,0,1,1,0,1,1]).reshape(1,20,1).astype('float32')}))\n#saver.save(sess,'./tmp/model.cpkt')\n","sub_path":"LSTM-character.py","file_name":"LSTM-character.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"412019907","text":"from django.db import models\nfrom django.forms import ValidationError\n\nfrom ckeditor_uploader.fields import RichTextUploadingField\nfrom django_2gis_maps import fields as map_fields\n\nfrom core.utils import generate_filename\nfrom core.constants import WORK_DAYS\n\n\nclass Brand(models.Model):\n position = models.PositiveIntegerField(\n default=0, blank=False, null=False,\n verbose_name='№',\n )\n title = models.CharField(max_length=255, verbose_name='Название')\n logo = models.ImageField(upload_to=generate_filename, verbose_name='Лого')\n description = RichTextUploadingField(\n null=True, blank=True, verbose_name='Описание'\n )\n address = models.CharField(\n max_length=100, blank=True, null=True, verbose_name='Адрес'\n )\n link = models.URLField(null=True, blank=True, verbose_name='Ссылка')\n\n def __str__(self):\n return self.title\n\n class Meta:\n ordering = ['position']\n verbose_name = 'Бренд'\n verbose_name_plural = 'Бренды'\n\n\nclass BrandImage(models.Model):\n brand = models.ForeignKey(\n to='Brand', on_delete=models.CASCADE, related_name='images',\n verbose_name='Бренд'\n )\n image = models.ImageField(\n upload_to=generate_filename, verbose_name='Изображение',\n )\n\n def __str__(self):\n return f'Image of {self.brand.title} brand'\n\n class Meta:\n verbose_name = 'Изображение бренда'\n verbose_name_plural = 'Изображения бренда'\n\n\nclass Filial(models.Model):\n position = models.PositiveIntegerField(\n default=0, blank=False, null=False,\n verbose_name='№',\n )\n title = models.CharField(max_length=255, verbose_name='Название')\n address = map_fields.AddressField(max_length=200, verbose_name='Адрес')\n geolocation = map_fields.GeoLocationField(verbose_name='Геолокация')\n filial_1c_code = models.CharField(\n max_length=255, unique=True,\n verbose_name='Уникальный 1C код филиала'\n )\n\n def __str__(self):\n return self.title\n\n class Meta:\n ordering = ['position']\n verbose_name = 'Филиал'\n verbose_name_plural = 'Филиалы'\n\n\nclass FilialImage(models.Model):\n filial = models.ForeignKey(\n to='Filial', on_delete=models.CASCADE, related_name='images',\n verbose_name='Филиал'\n )\n image = models.ImageField(\n upload_to=generate_filename, verbose_name='Изображение'\n )\n is_main = models.BooleanField(default=False, verbose_name='Основная?')\n\n def __str__(self):\n return f'Image of {self.filial.title} filial'\n\n class Meta:\n verbose_name = 'Изображение филиала'\n verbose_name_plural = 'Изображения филиала'\n\n\nclass FilialPhone(models.Model):\n filial = models.ForeignKey(\n to='Filial', on_delete=models.CASCADE, related_name='phone_numbers',\n verbose_name='Филиал'\n )\n phone = models.CharField(max_length=255, verbose_name='Номер телефона')\n is_phone = models.BooleanField(\n default=True, verbose_name='Номер телефона?'\n )\n is_whatsapp = models.BooleanField(\n default=True, verbose_name='Номер Whatsapp?'\n )\n\n def __str__(self):\n return f'Phone of {self.filial.title} filial'\n\n class Meta:\n verbose_name = 'Номер фи��иала'\n verbose_name_plural = 'Номера филиала'\n\n\nclass WorkTime(models.Model):\n 
day = models.SmallIntegerField(\n choices=WORK_DAYS, verbose_name='День недели'\n )\n start_work = models.TimeField(\n verbose_name='Начало рабочего времени', null=True, blank=True\n )\n end_work = models.TimeField(\n verbose_name='Конец рабочего времени', null=True, blank=True\n )\n filial = models.ForeignKey(\n to=Filial, on_delete=models.CASCADE, related_name='works_time',\n verbose_name='Филиал'\n )\n\n class Meta:\n ordering = ['day']\n unique_together = ['day', 'filial']\n verbose_name = 'Рабочий день'\n verbose_name_plural = 'Рабочие дни'\n\n def __str__(self):\n return f'{str(self.filial)}, {self.day}'\n\n def clean(self):\n text = ('Нельзя заполнить только время '\n 'конца или начала, рабочего времени')\n if (\n self.start_work is None\n and self.end_work is not None\n ):\n raise ValidationError(text)\n elif (\n self.start_work is not None\n and self.end_work is None\n ):\n raise ValidationError(text)\n","sub_path":"apps/brand/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"335475786","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# __author__ = 'Arthur|http://wingedwhitetiger.com/'\n\nimport os\n\nimport maya.cmds as cmd\nfrom PySide import QtGui\n\nimport proxyLib.arnold_standin as arnold_standin\nfrom WitProxy.Lib.publicLib import alembicLib\n\nreload(alembicLib)\nreload(arnold_standin)\n\n\nclass ProxyFileDialog(QtGui.QDialog):\n def __init__(self, parent=None):\n super(ProxyFileDialog, self).__init__(parent)\n\n file_dialog = QtGui.QFileDialog()\n file_dialog.setFileMode(QtGui.QFileDialog.ExistingFiles)\n\n self.__file = file_dialog.getOpenFileNames(self, 'Select Alembic file', '/', 'Alembic data files (*.abc)')\n\n def get_file(self):\n return self.__file\n\n\ndef main():\n if cmd.pluginInfo('WitProxy', q=True, loaded=True) and 'witProxyCache' in \\\n cmd.pluginInfo('WitProxy', q=True, dependNode=True):\n win = ProxyFileDialog()\n abc_files = win.get_file()[0]\n win.deleteLater()\n for abc in abc_files:\n proxy_point = alembicLib.get_alembic(abc.encode('utf-8'))\n counts = len(proxy_point)\n if not not counts:\n name = os.path.basename(abc).rsplit('.abc')[0]\n root_node = cmd.createNode('transform', name=name)\n for proxy in range(counts/2):\n cache_node = cmd.createNode('witProxyCache', name=proxy_point[proxy*2+1], parent=root_node)\n cmd.setAttr(cache_node + '.cacheFileName', abc, type='string')\n cmd.setAttr(cache_node + '.cacheGeomPath', proxy_point[proxy * 2].replace('/', '|'), type='string')\n cmd.connectAttr('time1.outTime', cache_node + '.cacheTime')\n # arnold_standin.create_arnold_standin(abc_file[0], os.path.abspath(os.path.join(os.path.dirname(__file__),\n # '../../procedurals/' +\n # dso_name)))\n else:\n pass\n\nif __name__ == '__main__':\n main()\n","sub_path":"WitProxy/Maya/2016/scripts/python/import_cache.py","file_name":"import_cache.py","file_ext":"py","file_size_in_byte":2100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"590535703","text":"r = float(input('Geef afstand: '))\n\nQ1 = 2.0 * 10 ** -6\nQ2 = 1.0 * 10 ** -6\nk = 8.99 * 10**9\n\ncoulombkracht = k * (Q1 * Q2 ) / ( (r * 10 ** -2) ** 2)\n\nresultaat = coulombkracht\n\nprint(resultaat)","sub_path":"04 - Variabelen/Coulombkracht.py","file_name":"Coulombkracht.py","file_ext":"py","file_size_in_byte":194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} 
+{"seq_id":"403075554","text":"from distutils.core import setup\nimport os\n\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\nsetup(\n name='LDScriptures',\n version='1.0.0',\n description='Powerful tool for getting the LDS (mormon) scriptures in your python script.',\n author='CustodiSec',\n author_email='tgb1@protonmail.com',\n url='https://github.com/gabbarreiro/ldscriptures',\n packages=['ldscriptures'],\n package_data = {'ldscriptures': ['ldscriptures/languages.json']},\n data_files = [['ldscriptures', ['README.rst']]],\n long_description=read('README.rst'),\n requires = ['bs4', 'requests', 'autodoc', 'cachetools'], \n keywords = ['mormon', 'lds', 'latter', 'day', 'saints', 'book of mormon', 'scriptures', 'bible', 'pearl of great price',\n 'doctrine and convenants', 'church of jesus christ', 'parse', 'citation', 'scriptures'],\n install_requires= [\n 'requests',\n 'bs4'\n ] \n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"260541876","text":"#!/usr/bin/env python\n#\n# Copyright (c), 2016-2020, SISSA (International School for Advanced Studies).\n# All rights reserved.\n# This file is distributed under the terms of the MIT License.\n# See the file 'LICENSE' in the root directory of the present\n# distribution, or http://opensource.org/licenses/MIT.\n#\n# @author Davide Brunato <brunato@sissa.it>\n#\nimport unittest\n\nfrom xmlschema import XMLSchemaParseError\nfrom xmlschema.validators import XMLSchema11\nfrom xmlschema.testing import XsdValidatorTestCase\n\n\nclass TestXsdAttributes(XsdValidatorTestCase):\n\n def test_wrong_attribute(self):\n self.check_schema(\"\"\"\n <xs:attributeGroup name=\"alpha\">\n <xs:attribute name=\"name\" type=\"xs:string\"/>\n <xs:attribute ref=\"phone\"/> <!-- Missing \"phone\" attribute -->\n </xs:attributeGroup>\n \"\"\", XMLSchemaParseError)\n\n def test_wrong_attribute_group(self):\n self.check_schema(\"\"\"\n <xs:attributeGroup name=\"alpha\">\n <xs:attribute name=\"name\" type=\"xs:string\"/>\n <xs:attributeGroup ref=\"beta\"/> <!-- Missing \"beta\" attribute group -->\n </xs:attributeGroup>\n \"\"\", XMLSchemaParseError)\n\n schema = self.check_schema(\"\"\"\n <xs:attributeGroup name=\"alpha\">\n <xs:attribute name=\"name\" type=\"xs:string\"/>\n <xs:attributeGroup name=\"beta\"/> <!-- attribute \"name\" instead of \"ref\" -->\n </xs:attributeGroup>\n \"\"\", validation='lax')\n self.assertTrue(isinstance(schema.all_errors[1], XMLSchemaParseError))\n\n def test_scope_property(self):\n schema = self.check_schema(\"\"\"\n <xs:attribute name=\"global_attr\" type=\"xs:string\"/>\n <xs:attributeGroup name=\"attrGroup\">\n <xs:attribute name=\"local_attr\" type=\"xs:string\"/>\n </xs:attributeGroup>\n \"\"\")\n self.assertEqual(schema.attributes['global_attr'].scope, 'global')\n self.assertEqual(schema.attribute_groups['attrGroup']['local_attr'].scope, 'local')\n\n def test_value_constraint_property(self):\n schema = self.check_schema(\"\"\"\n <xs:attributeGroup name=\"attrGroup\">\n <xs:attribute name=\"attr1\" type=\"xs:string\"/>\n <xs:attribute name=\"attr2\" type=\"xs:string\" default=\"alpha\"/>\n <xs:attribute name=\"attr3\" type=\"xs:string\" default=\"beta\"/>\n </xs:attributeGroup>\n \"\"\")\n attribute_group = schema.attribute_groups['attrGroup']\n self.assertIsNone(attribute_group['attr1'].value_constraint)\n 
self.assertEqual(attribute_group['attr2'].value_constraint, 'alpha')\n self.assertEqual(attribute_group['attr3'].value_constraint, 'beta')\n\n\nclass TestXsd11Attributes(TestXsdAttributes):\n\n schema_class = XMLSchema11\n\n\nif __name__ == '__main__':\n import platform\n header_template = \"Test xmlschema's XSD attributes with Python {} on {}\"\n header = header_template.format(platform.python_version(), platform.platform())\n print('{0}\\n{1}\\n{0}'.format(\"*\" * len(header), header))\n\n unittest.main()\n","sub_path":"tests/validators/test_attributes.py","file_name":"test_attributes.py","file_ext":"py","file_size_in_byte":3054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"8616432","text":"import urllib.parse\nimport urllib.request\nimport json\nimport uuid\nimport os\nimport sys\n\nfrom flask import g, jsonify, request, current_app\nfrom . import api\nfrom .errors import *\nfrom .. import db\nfrom ..models import User, Permission\nfrom .decorators import login_required\n\n@api.route('/github-login/<code>')\ndef github_login(code):\n secret = current_app.config['FLASK_GITHUB_SECRET']\n client_id = current_app.config['FLASK_GITHUB_CLIENT_ID']\n url = 'https://github.com/login/oauth/access_token'\n data = {\n 'code': code,\n 'client_id': client_id,\n 'client_secret': secret\n }\n params = urllib.parse.urlencode(data).encode('utf-8')\n headers = {\n 'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)',\n 'Accept': 'application/json'\n }\n req = urllib.request.Request(url, params, headers)\n html = urllib.request.urlopen(req).read().decode('utf-8')\n access_data = json.loads(html)\n if access_data.get('error', ''):\n return bad_request('链接已失效,请重新登录', True)\n access_token = access_data['access_token']\n req2 = urllib.request.Request(url='https://api.github.com/user?access_token='+access_token, headers=headers)\n html2 = urllib.request.urlopen(req2).read().decode('utf-8')\n info = json.loads(html2)\n print(info, '~~~~~~~~~~~~~~')\n id_string = 'github' + str(info['id'])\n user = User.query.filter_by(id_string=id_string).first()\n if not user:\n avatar_url = info['avatar_url']\n if avatar_url:\n filename = str(uuid.uuid1())\n dirname, _ = os.path.split(os.path.abspath(sys.argv[0]))\n fold_path = dirname + '/../files/avatar/'\n if not os.path.exists(fold_path):\n os.makedirs(fold_path)\n upload_path = fold_path + filename\n res = urllib.request.urlretrieve(avatar_url, upload_path)\n avatar = filename\n register_info = {\n 'username': info['login'],\n 'id_string': id_string,\n 'avatar': avatar\n }\n if info['email']:\n register_info['email'] = info['email']\n user = User(**register_info)\n try:\n db.session.add(user)\n db.session.commit()\n except:\n db.session.rollback()\n response = jsonify({ 'error': 'create user error', 'message': '创建用户失败,请重新登录' })\n response.status_code = 500\n return response\n db.session.add(user)\n token = user.generate_auth_token(3600 * 24 * 30)\n return jsonify({ 'token': token })\n\n@api.route('/get-user/<user_id>')\ndef get_user_info(user_id):\n user = User.query.get(user_id)\n if not user:\n return not_found('获取不到用户信息')\n return jsonify(user.to_json())\n\n@api.route('/get-self/')\n@login_required\ndef get_self_info():\n if not g.current_user:\n return bad_request('未找到用户信息')\n return jsonify(g.current_user.get_detail())\n\n@api.route('/check-admin/')\ndef checkAdmin():\n return jsonify({ 'admin': bool(g.current_user and 
g.current_user.can(Permission.ADMIN))})","sub_path":"app/api/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":2953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"181783406","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\n'''\nIn Python 3 a socket can only send and receive bytes, while in Python 2.7 str can be used as well.\n'''\n\nimport socket\n\n# create a socket object\ns = socket.socket()\n\ns.settimeout(5) # set the timeout\n\n# connect to the server\nip_port = ('127.0.0.1',9999) # define the ip and port\ns.connect(ip_port) # connect the socket to the server (the ip/port pair must be a tuple) ---- raises an error if the connection fails\n#s.connect_ex(ip_port) # same as above, but returns an error code instead of raising if the connection fails\n\ns.getpeername() # client side: returns the server's socket address (ip, port); only valid after connect\n\n# send a message\nsend_data = '你好'\ns.send(bytes(send_data,encoding='utf-8')) # send a message, returns the number of bytes sent on success ----- Python 3 can only send bytes, Python 2.7 can send str directly\n#s.sendall(bytes(send_data,encoding='utf-8')) # send all data, returns None on success and raises on failure\n\n# receive a message\nrecv_data = s.recv(1024)\n\n\n# close\ns.close()","sub_path":"day09/1-socket基本语法/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"171543629","text":"from ..utilities import json_file\nfrom .utilities import path, calculate_date\nfrom calendar import monthrange\nimport datetime\n\n\ndef change_data(class_name, subject_name, homework_data, homework_date):\n\thomework_path = path.AG + class_name + \"/homework\"\n\thomework_json = json_file.read(homework_path)\n\n\tdate = calculate_date(homework_date, class_name, subject_name)\n\n\tif date == 1:\n\t\treturn 1\n\telif date == 2:\n\t\treturn 2\n\n\tfor element in homework_json[\"assignments\"]:\n\t\tif element[\"subject\"].lower() == subject_name.lower():\n\t\t\telement[\"tasks\"] = [{\n\t\t\t\t'content': homework_data,\n\t\t\t\t'date': str(date)\n\t\t\t},]\n\n\t\t\tjson_file.write(homework_path, homework_json)\n\t\t\treturn 0\n\n\treturn 3\n\n\ndef find_subject(class_name, subject_name):\n\thomework_path = path.AG + class_name + \"/homework\"\n\thomework_json = json_file.read(homework_path)\n\n\tfor element in homework_json[\"assignments\"]:\n\t\tif element[\"subject\"].lower() == subject_name.lower():\n\t\t\treturn element\n\n\treturn False\n\n\ndef add_data(class_name, subject_name, homework_data, homework_date):\n\thomework_path = path.AG + class_name + \"/homework\"\n\thomework_json = json_file.read(homework_path)\n\n\tdate = calculate_date(homework_date, class_name, subject_name)\n\n\tif date == 1:\n\t\treturn 1\n\telif date == 2:\n\t\treturn 2\n\n\ttask = {\n\t\t'content': homework_data,\n\t\t'date': str(date)\n\t}\n\n\tfor element in homework_json[\"assignments\"]:\n\t\tif element[\"subject\"].lower() == subject_name.lower():\n\t\t\telement[\"tasks\"].append(task)\n\t\t\tjson_file.write(homework_path, homework_json)\n\t\t\treturn 0\n\n\treturn 3\n\n\ndef delete(class_name, subject_name, pointer):\n\thomework_path = path.AG + class_name + \"/homework\"\n\thomework_json = json_file.read(homework_path)\n\n\tto_delete = -1\n\tfor i in range(len(homework_json[\"assignments\"])):\n\t\tif homework_json[\"assignments\"][i][\"subject\"].lower() == subject_name.lower():\n\t\t\tto_delete = i\n\n\tif to_delete + 1:\n\t\tif pointer == -1:\n\t\t\tdel homework_json[\"assignments\"][to_delete]\n\t\t\tjson_file.write(homework_path, homework_json)\n\t\t\treturn True\n\t\telse:\n\t\t\tdel homework_json[\"assignments\"][to_delete]['tasks'][pointer]\n\t\t\tjson_file.write(homework_path, homework_json)\n\t\t\treturn True\n\n\treturn False\n\n\ndef 
get(class_name):\n\thomework_path = path.AG + class_name + \"/homework\"\n\thomework_json = json_file.read(homework_path)\n\t\n\ttemplates = {\n\t\t\t\"header\": \"Всего предметов -- {total}:\\n\\n\",\n\t\t\t'task': ' ' * 4 + '{iterator}. {content} на {date}\\n',\n\t\t\t\"tasks\": \"{subject}: \\n{tasks}\\n\"\n\t\t}\n\n\tresponce = {\n\t\t\"tasks\": \"\",\n\t\t\"header\": \"\"\n\t}\n\n\tif len(homework_json[\"assignments\"]) == 0:\n\t\treturn \"Нет домашнего задания.\"\n\n\tresponce[\"header\"] = templates[\"header\"].format(\n\t\t\ttotal = len(homework_json[\"assignments\"])\n\t\t)\n\n\tfor element in homework_json[\"assignments\"]:\n\t\ttask_text = ''\n\t\titerator = 1\n\n\t\tfor task in element['tasks']:\n\t\t\tsplit_date = task['date'].split('-')\n\t\t\tunsplit_date = split_date[2] + '.' + split_date[1] + '.' + split_date[0]\n\n\t\t\ttask_text += templates['task'].format(\n\t\t\t\t\titerator = iterator,\n\t\t\t\t\tcontent = task['content'],\n\t\t\t\t\tdate = unsplit_date\n\t\t\t\t)\n\t\t\titerator += 1\n\n\t\tresponce[\"tasks\"] += templates[\"tasks\"].format(\n\t\t\t\tsubject = element[\"subject\"],\n\t\t\t\ttasks = task_text\n\t\t\t)\n\n\treturn responce[\"header\"] + responce[\"tasks\"]\n\n\ndef add(class_name, subject_name, homework_data, homework_date):\n\thomework_path = path.AG + class_name + \"/homework\"\n\thomework_json = json_file.read(homework_path)\n\n\ttask = {\n\t\t'content': homework_data,\n\t\t'date': ''\n\t}\n\n\tdate = calculate_date(homework_date, class_name, subject_name)\n\n\tif date == 1:\n\t\treturn 1\n\telif date == 2:\n\t\treturn 2\n\n\ttask['date'] = str(date)\n\n\tfor element in homework_json['assignments']:\n\t\tif element['subject'].lower() == subject_name.lower():\n\t\t\telement['tasks'].append(task)\n\t\t\tjson_file.write(homework_path, homework_json)\n\n\t\t\treturn 0\n\n\n\thomework = {\n\t\t\"tasks\": [task, ],\n\t\t\"subject\": subject_name\n\t}\n\t\n\thomework_json[\"assignments\"].append(homework)\n\tjson_file.write(homework_path, homework_json)\n\n\treturn 0\n","sub_path":"agym_bot/Bot/Functions/db/homework.py","file_name":"homework.py","file_ext":"py","file_size_in_byte":3953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"51417420","text":"from Products.CMFCore.utils import getToolByName\nfrom zope.app.component.hooks import getSite\n\nfrom ixds.covalent.interfaces import ICovalentMember\n\n\ndef get_member_by_id(member_id):\n catalog = getToolByName(getSite(), 'portal_catalog')\n result = catalog({\n 'object_provides': ICovalentMember.__identifier__,\n 'id': member_id, })\n if result:\n return result[0].getObject()\n\n\ndef add_member_object(member_folder, member_id, fullname, email,\n sendInvitation=True):\n member_folder.invokeFactory(\n type_name=get_member_portal_type(member_folder),\n id=member_id,\n title=fullname,\n emailAddress=email,\n sendInvitation=sendInvitation, )\n return member_folder[member_id]\n\n\ndef add_members(context, user_infos):\n for info in user_infos:\n add_member_object(context, info['id'], info['title'],\n info['emailAddress'])\n\n\ndef activate_member_object(member_obj):\n # Member gets Owner role here:\n change_ownership(member_obj, member_obj.id)\n\n # Change workflow state. 
Don't use wftool.doActionFor but\n # wftool._invokeWithNotification to avoid security checks:\n wftool = getToolByName(member_obj, 'portal_workflow')\n wfs = wftool.getWorkflowsFor(member_obj)\n wf = wfs[0]\n wftool._invokeWithNotification(\n wfs, member_obj, 'activate',\n wf._changeStateOf,\n (member_obj, wf.transitions.get('activate')), {}, )\n\n member_obj.reindexObjectSecurity()\n\n\ndef add_plone_member(member_obj, password):\n regtool = getToolByName(member_obj, 'portal_registration')\n memberdata = regtool.addMember(\n member_obj.id, password,\n properties={\n 'username': member_obj.id,\n 'fullname': member_obj.title,\n 'email': member_obj.emailAddress, })\n return memberdata\n\n\ndef change_ownership(context, userid):\n \"\"\" Change ownership of context to userid \"\"\"\n # http://keeshink.blogspot.com/2010/04/change-creator-programmatically.html\n # http://collective-docs.readthedocs.org/en/latest/content/ownership.html\n\n acl_users = getToolByName(context, 'acl_users')\n user = acl_users.getUserById(userid)\n\n # change ownership\n context.changeOwnership(user, recursive=False)\n\n # remove owner role from other members\n for owner in context.users_with_local_role('Owner'):\n roles = list(context.get_local_roles_for_userid(owner))\n roles.remove('Owner')\n if roles:\n context.manage_setLocalRoles(owner, roles)\n else:\n context.manage_delLocalRoles([owner])\n\n # assign owner role to new owner\n roles = list(context.get_local_roles_for_userid(userid))\n if 'Owner' not in roles:\n roles.append('Owner')\n context.manage_setLocalRoles(userid, roles)\n\n\ndef set_owner_and_creator(obj, userid):\n \"\"\" Make it look like userid created and owns obj \"\"\"\n change_ownership(obj, userid)\n obj.setCreators(userid,)\n\n\ndef get_member_portal_type(context):\n for allowed_type in context.allowedContentTypes():\n if ICovalentMember.__identifier__ in allowed_type.behaviors:\n return allowed_type.id\n\n\ndef import_plone_users(context):\n pm = getToolByName(context, 'portal_membership')\n users = pm.listMemberIds()\n\n imported = 0\n already_existed = 0\n\n for userId in users:\n if userId in context:\n already_existed += 1\n continue\n\n member = pm.getMemberById(userId)\n\n member_obj = add_member_object(context, userId,\n member.getProperty('fullname'), member.getProperty('email'),\n sendInvitation=False)\n\n activate_member_object(member_obj)\n\n imported += 1\n\n return {'imported': imported, 'already_existed': already_existed}\n","sub_path":"ixds/covalent/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":3762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"343162649","text":"# @Author: Sacha Haidinger <sachahai>\n# @Date: 2020-07-03T09:35:12+10:00\n# @Email: sacha.haidinger@epfl.ch\n# @Project: Learning methods for Cell Profiling\n# @Last modified by: sachahai\n# @Last modified time: 2020-08-31T11:16:59+10:00\n\n'''\nCompute and plot a BACKBONE inside the learnt representation.\nBased on prior knowledge of the dataset, if the latter is expected to show\na smooth trajectory or manifold and a ground truth for continuity is available (time,\nconcentration of treatment...), the backbone is the projected continuum.\nIt can help to visualize the structure of the latent space and to evaluate\nto what extent the expected continuum is retrieved.\n'''\n\n\nimport pandas as pd\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nfrom sklearn import metrics\nimport plotly.express as px\nimport 
plotly.graph_objects as go\nimport plotly.offline\nfrom util.helpers import plot_from_csv\nfrom scipy import stats\nimport pickle as pkl\n\n\n############################################################\n############################################################\n### Backbone generation and distance to initial state score\n############################################################\n############################################################\n\ndef closest_point(point,points,dim=2):\n '''\n Compute the eucl. distance between a point and the closest point in a list of points.\n Return both the index of the closest point and the distance to it.\n '''\n points = np.asarray(points)\n dist_2 = np.sum((points-point)**2, axis=1)\n if dim==2:\n return np.argmin(dist_2), np.sqrt(dist_2[np.argmin(dist_2)])\n if dim==3:\n return np.argmin(dist_2), dist_2[np.argmin(dist_2)]**(1./3.)\n\ndef sqr_distance(point1,point2):\n '''\n Compute the eucl. distance between a point and another point\n '''\n p1 = np.array(point1)\n p2 = np.array(point2)\n sqr_dist = np.sum((p1-p2)**2, axis = 0)\n dist = np.sqrt(sqr_dist)\n\n return dist\n\ndef dot(v,w):\n x,y,z = v\n X,Y,Z = w\n return x*X + y*Y + z*Z\ndef length(v):\n x,y,z = v\n return math.sqrt(x*x + y*y + z*z)\ndef vector(b,e):\n x,y,z = b\n X,Y,Z = e\n return (X-x, Y-y, Z-z)\ndef unit(v):\n x,y,z = v\n mag = length(v)\n return (x/mag, y/mag, z/mag)\ndef distance(p0,p1):\n return length(vector(p0,p1))\ndef scale(v,sc):\n x,y,z = v\n return (x * sc, y * sc, z * sc)\ndef add(v,w):\n x,y,z = v\n X,Y,Z = w\n return (x+X, y+Y, z+Z)\n\ndef pnt2line(pnt, start, end):\n line_vec = vector(start, end)\n pnt_vec = vector(start, pnt)\n line_len = length(line_vec)\n line_unitvec = unit(line_vec)\n pnt_vec_scaled = scale(pnt_vec, 1.0/line_len)\n t = dot(line_unitvec, pnt_vec_scaled)\n if t < 0.0:\n t = 0.0\n elif t > 1.0:\n t = 1.0\n nearest = scale(line_vec, t)\n dist = distance(nearest, pnt_vec)\n nearest = add(nearest, start)\n return (dist, nearest)\n\ndef pnt2closestline(pnt, list_of_segments):\n '''Compute the closest distance between a point and each segment\n in list_of_segments [(start1,end1),(start2,end2),...]\n Return the idx of the closest segment, the distance to the point, and the\n point on the segment.\n '''\n dists = []\n nearests_on_line = []\n\n for segment in list_of_segments:\n res = pnt2line(pnt,segment[0],segment[1])\n dists.append(res[0])\n nearests_on_line.append(res[1])\n\n idx = np.argmin(dists)\n\n return idx, dists[idx], nearests_on_line[idx]\n\n\n\ndef dist_preservation_err(path_to_csv,low_dim_names=['x_coord','y_coord','z_coord'],overwrite_csv=False,save_path=None):\n '''\n From a CSV file containing the VAE latent code of each single cell and the\n ground truth distance to initial state (a ground truth measure of the phenotypical\n change strength), compute a score (rank correlation) based on how the ranks are preserved.\n Indeed, we expect a good manifold to keep a smooth structure that depicts the\n strength of the phenotype.\n The closer the score is to 1, the better.\n\n path_to_csv can be a string (path to csv file) or a DataFrame\n\n NOTE : The csv file must contain ground truth information from BBBC (GT_shape, GT_dist_toInit_state)\n '''\n\n dimensionality = 3 #We assume dim is 3, check it later\n\n if isinstance(path_to_csv,str):\n full_csv = pd.read_csv(path_to_csv)\n else:\n full_csv = path_to_csv\n\n #Define where the source phenotypes are in latent space\n red_cells = full_csv['GT_Shape']<0.15 #cells with a small shape factor are the round red 
cells\n green_cells = full_csv['GT_Shape']>0.35\n\n #Take the 20 cells closest to the source phenotype, to define a green center and a red center\n x_reds = full_csv[red_cells].nsmallest(20,'GT_dist_toInit_state')[low_dim_names[0]].values\n y_reds = full_csv[red_cells].nsmallest(20,'GT_dist_toInit_state')[low_dim_names[1]].values\n x_greens = full_csv[green_cells].nsmallest(20,'GT_dist_toInit_state')[low_dim_names[0]].values\n y_greens = full_csv[green_cells].nsmallest(20,'GT_dist_toInit_state')[low_dim_names[1]].values\n try:\n z_reds = full_csv[red_cells].nsmallest(20,'GT_dist_toInit_state')[low_dim_names[2]].values\n z_greens = full_csv[green_cells].nsmallest(20,'GT_dist_toInit_state')[low_dim_names[2]].values\n except:\n print('2D latent space detected')\n dimensionality=2\n\n figplotly = plot_from_csv(path_to_csv,low_dim_names,dim=dimensionality)\n\n ################################################\n # 2 DIMENSION ##########\n ################################################\n if dimensionality==2:\n red_latent_center = [np.mean(x_reds),np.mean(y_reds)]\n green_latent_center = [np.mean(x_greens),np.mean(y_greens)]\n\n #Define a center of for several different degree of phenotype strengh\n #Indeed, if we measure only distance betwen initial state and stronget phenotype,\n #it will favor completly straight manifold, which is not the aim.\n cluster_phenotype_centers = [] # 7 cluster x 4 midpoint x 2or3 coord\n cluster_list = np.unique(full_csv.GT_label.values)\n for cluster in cluster_list:\n intra_cluster = []\n for midpoint in [0.25,0.5,0.75]:\n #Consider the 15 single cell that are the closest to a phenotype strength midpoint\n cluster_index = full_csv['GT_label']==cluster\n sub_csv = full_csv[cluster_index]\n csv_sorted = sub_csv.iloc[(sub_csv['GT_dist_toInit_state']-midpoint).abs().argsort()[:15]]\n x_clus = np.mean(csv_sorted.x_coord.values)\n y_clus = np.mean(csv_sorted.y_coord.values)\n intra_cluster.append([x_clus,y_clus])\n\n cluster_index = full_csv['GT_label']==cluster\n x_clusters = full_csv[cluster_index].nlargest(15,'GT_dist_toInit_state').x_coord.values\n y_clusters = full_csv[cluster_index].nlargest(15,'GT_dist_toInit_state').y_coord.values\n intra_cluster.append([np.mean(x_clusters),np.mean(y_clusters)])\n\n cluster_phenotype_centers.append(intra_cluster)\n\n trace = go.Scatter(x=[red_latent_center[0],green_latent_center[0]],y=[red_latent_center[1],green_latent_center[1]],\n mode='markers',marker_symbol='x',marker_color='red',\n marker=dict(size=12, opacity=1),\n name=f'Centers')\n\n figplotly.add_traces(trace)\n\n cluster_phenotype_centers=np.asarray(cluster_phenotype_centers)\n trace2 = go.Scatter(x=np.squeeze(cluster_phenotype_centers[:6,:,0]),y=np.squeeze(cluster_phenotype_centers[:6,:,1]),\n mode='markers',marker_symbol='x',marker_color='black',\n marker=dict(size=8, opacity=1),\n name=f'Strong Phenotype')\n\n figplotly.add_traces(trace2)\n\n #Compute and add distance to maximum phenotype per cluster in GT dataframe\n distances = []\n Extremes = np.array([green_latent_center,red_latent_center])\n #Find distance to max Phenotype\n for index, row in full_csv.iterrows():\n #Find the GT strength of phenotype change of that cell\n GT_strength = row['GT_dist_toInit_state']\n GT_initial_state = row['GT_initial_state']\n init_state=Extremes[1]\n if GT_initial_state=='green':\n init_state=Extremes[0]\n\n GT_cluster = row['GT_label']\n #if (GT_strength < 0.25+0.05): #No midpoints are used\n #Dist between point and initial state\n #elif (GT_strength < 0.5 + 0.05): #Use one 
midpoints\n #Add segment initial state to midpoint 1 + segment midpoint 1 to point\n #elif (GT_strength < 0.75 + 0.05): #Use two midpoints\n #Add segment init-midpoint1 + midpoint1-midpoint2 + midpoint2-actualpoint\n #else: #Use the 3 midpoints\n #Add segment init-midpoint1 + midpoint1-midpoint2 + midpoint2-midpoint3 + midpoint3-actualpoint\n ind, dist = closest_point(np.array([row['x_coord'],row['y_coord']]),Extremes)\n distances.append(dist)\n full_csv['latent_dist_toInit_state'] = distances\n\n #Normalize to have the distance of center with strong_phenotype center = to 1\n cluster_list = np.unique(full_csv.GT_label.values)\n for i, cluster in enumerate(cluster_list):\n cluster_index = full_csv['GT_label']==cluster\n ind, normal_dist = closest_point(cluster_phenotype_centers[i,-1],Extremes)\n full_csv['latent_dist_toInit_state'][cluster_index] = full_csv['latent_dist_toInit_state'][cluster_index].values / normal_dist\n\n ################################################\n #% 3 DIMENSION ##########\n ################################################\n if dimensionality==3:\n red_latent_center = [np.mean(x_reds),np.mean(y_reds),np.mean(z_reds)]\n green_latent_center = [np.mean(x_greens),np.mean(y_greens),np.mean(z_greens)]\n\n\n #Define a center of for several different degree of phenotype strengh\n #Indeed, if we measure only distance betwen initial state and stronget phenotype,\n #it will favor completly straight manifold, which is not the aim.\n cluster_phenotype_midpoints = [] # 7 cluster x 3 midpoint x 2 green or red x 3dim\n cluster_max_centers = [] #7 x 3dim\n cluster_list = np.unique(full_csv.GT_label.values)\n for cluster in cluster_list:\n intra_cluster = []\n for midpoint in [0.25,0.5,0.75]:\n #Consider the 15 single cell that are the closest to a phenotype strength midpoint\n cluster_index = full_csv['GT_label']==cluster\n green_index = full_csv['GT_initial_state']=='green'\n red_index = full_csv['GT_initial_state']=='red'\n sub_csv_green = full_csv[(cluster_index) & (green_index)]\n sub_csv_red = full_csv[(cluster_index) & (red_index)]\n csv_sorted_green = sub_csv_green.iloc[(sub_csv_green['GT_dist_toInit_state']-midpoint).abs().argsort()[:15]]\n csv_sorted_red = sub_csv_red.iloc[(sub_csv_red['GT_dist_toInit_state']-midpoint).abs().argsort()[:15]]\n\n x_clus_green = np.mean(csv_sorted_green[low_dim_names[0]].values)\n y_clus_green = np.mean(csv_sorted_green[low_dim_names[1]].values)\n z_clus_green = np.mean(csv_sorted_green[low_dim_names[2]].values)\n\n x_clus_red = np.mean(csv_sorted_red[low_dim_names[0]].values)\n y_clus_red = np.mean(csv_sorted_red[low_dim_names[1]].values)\n z_clus_red = np.mean(csv_sorted_red[low_dim_names[2]].values)\n intra_cluster.append([[x_clus_green,y_clus_green,z_clus_green],[x_clus_red,y_clus_red,z_clus_red]])\n\n cluster_index = full_csv['GT_label']==cluster\n x_clusters = full_csv[cluster_index].nlargest(15,'GT_dist_toInit_state')[low_dim_names[0]].values\n y_clusters = full_csv[cluster_index].nlargest(15,'GT_dist_toInit_state')[low_dim_names[1]].values\n z_clusters = full_csv[cluster_index].nlargest(15,'GT_dist_toInit_state')[low_dim_names[2]].values\n cluster_max_centers.append([np.mean(x_clusters),np.mean(y_clusters),np.mean(z_clusters)])\n\n cluster_phenotype_midpoints.append(intra_cluster)\n\n cluster_phenotype_midpoints = np.array(cluster_phenotype_midpoints)\n cluster_max_centers = np.array(cluster_max_centers)\n\n #Plot the manifold backbone :\n # Initial state to midpoints to strongest phenotype\n\n cluster_list = 
np.unique(full_csv[full_csv['GT_label']!=7].GT_label.values)\n Extremes = np.array([green_latent_center,red_latent_center])\n backbone = [] # 6 x 9 x 3 6 backbone, 9 point 8 segment, 3 coordinates\n traces = []\n for cluster in cluster_list:\n cluster = int(cluster)\n temp = []\n temp_g = [Extremes[0],cluster_phenotype_midpoints[cluster-1,0,0,:],cluster_phenotype_midpoints[cluster-1,1,0,:],cluster_phenotype_midpoints[cluster-1,2,0,:],cluster_max_centers[cluster-1,:]]\n temp_r = [cluster_phenotype_midpoints[cluster-1,2,1,:],cluster_phenotype_midpoints[cluster-1,1,1,:],cluster_phenotype_midpoints[cluster-1,0,1,:],Extremes[1]]\n backbone_c = np.array(temp_g+temp_r)\n backbone.append(backbone_c)\n\n scatter = go.Scatter3d(x=backbone_c[:,0],y=backbone_c[:,1],z=backbone_c[:,2],\n mode='lines+markers',marker_symbol='x',marker=dict(size=10, opacity=1),\n name=f'backbone cluster {cluster}', marker_color=plotly.colors.qualitative.Plotly[cluster-1],\n line_width=6)\n traces.append(scatter)\n\n figplotly.add_traces(traces)\n\n distances = []\n\n #Find distance to max Phenotype\n for index, row in full_csv.iterrows():\n #Find the GT strength of phenotype change of that cell\n GT_strength = row['GT_dist_toInit_state']\n GT_initial_state = row['GT_initial_state']\n ind_init = 1\n if GT_initial_state=='green':\n ind_init = 0\n GT_cluster = int(row['GT_label'])\n\n seg1 = (Extremes[ind_init],cluster_phenotype_midpoints[GT_cluster-1,0,ind_init,:])\n seg2 = (cluster_phenotype_midpoints[GT_cluster-1,0,ind_init,:],cluster_phenotype_midpoints[GT_cluster-1,1,ind_init,:])\n seg3 = (cluster_phenotype_midpoints[GT_cluster-1,1,ind_init,:],cluster_phenotype_midpoints[GT_cluster-1,2,ind_init,:])\n seg4 = (cluster_phenotype_midpoints[GT_cluster-1,2,ind_init,:],cluster_max_centers[GT_cluster-1,:])\n\n list_of_seg = [(seg1[0],seg1[1]),(seg2[0],seg2[1]),(seg3[0],seg3[1]),(seg4[0],seg4[1])]\n actual_point = np.array([row[low_dim_names[0]],row[low_dim_names[1]],row[low_dim_names[2]]])\n #id of clost segment, dist to the segment, coord of closest point on line\n idx, dist_to_seg, nearests_on_line = pnt2closestline(actual_point,list_of_seg)\n nearests_on_line = np.round(np.array(nearests_on_line),4)\n dist = 0\n if (idx == 0): #No midpoints are used and use dist return\n if (np.all(np.round(nearests_on_line,4)==np.round(seg1[0],4))) :\n dist = dist_to_seg\n else:\n dist = sqr_distance(seg1[0],np.array(nearests_on_line))\n\n elif (idx==1): #Use first midpoint\n d1 = sqr_distance(seg1[0],seg1[1])\n d2 = sqr_distance(seg2[0],np.array(nearests_on_line))\n dist = d1+d2\n\n elif (idx==2): #Use two midpoints\n d1 = sqr_distance(seg1[0],seg1[1])\n d2 = sqr_distance(seg2[0],seg2[1])\n d3 = sqr_distance(seg3[0],np.array(nearests_on_line))\n dist = d1+d2+d3\n\n elif (idx == 3): #Use all 3 midpoints\n if not(np.all(np.round(nearests_on_line,4)==np.round(seg4[1],4))):\n d1 = sqr_distance(seg1[0],seg1[1])\n d2 = sqr_distance(seg2[0],seg2[1])\n d3 = sqr_distance(seg3[0],seg3[1])\n d4 = sqr_distance(seg4[0],np.array(nearests_on_line))\n dist = d1+d2+d3+d4\n else:\n d1 = sqr_distance(seg1[0],seg1[1])\n d2 = sqr_distance(seg2[0],seg2[1])\n d3 = sqr_distance(seg3[0],seg3[1])\n d4 = sqr_distance(seg4[0],seg4[1])\n dist = d1+d2+d3+d4+dist_to_seg\n\n distances.append(dist)\n full_csv['latent_dist_toInit_state'] = distances\n\n #Normalize to have the distance of center with strong_phenotype center = to 1\n #Take the path throughout all the midpoints\n cluster_list = np.unique(full_csv.GT_label.values)\n for i, cluster in enumerate(cluster_list):\n 
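# walk the full backbone path (initial state -> three midpoints -> strongest-phenotype center) and divide by its length, so the path endpoint maps to 1\n 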
cluster = int(cluster)\n for j, init_state in enumerate(['green','red']): #distance towards green or towards red\n cluster_index = full_csv['GT_label']==cluster\n init_state_index = full_csv['GT_initial_state']==init_state\n\n d1 = sqr_distance(Extremes[j],cluster_phenotype_midpoints[cluster-1,0,j,:])\n d2 = sqr_distance(cluster_phenotype_midpoints[cluster-1,0,j,:],cluster_phenotype_midpoints[cluster-1,1,j,:])\n d3 = sqr_distance(cluster_phenotype_midpoints[cluster-1,1,j,:],cluster_phenotype_midpoints[cluster-1,2,j,:])\n d4 = sqr_distance(cluster_phenotype_midpoints[cluster-1,2,j,:],cluster_max_centers[cluster-1,:])\n\n normal_dist = d1+d2+d3+d4\n full_csv['latent_dist_toInit_state'][cluster_index & init_state_index] = full_csv['latent_dist_toInit_state'][cluster_index & init_state_index].values / normal_dist\n\n ################################################\n # Save and visual assement ##################\n ################################################\n if overwrite_csv: #Add new column to the csv\n full_csv.to_csv(path_to_csv,index=False)\n\n\n ## Plot the ordered results for visual assessment\n full_csv = full_csv.sort_values(by='GT_dist_toInit_state')\n\n #Disregard cluster 7 because no coherent manifold\n no_cluster_7 = full_csv['GT_label']!=7\n\n line1 = go.Scatter(y=full_csv[no_cluster_7].GT_dist_toInit_state.values,\n mode='lines',name='GT_distance',line=dict(width=4))\n line2 = go.Scatter(y=full_csv[no_cluster_7].latent_dist_toInit_state.values,\n mode='markers',name='Latent_distance',marker=dict(size=3))\n\n #Moving average on distance on latent space to assess monotony\n dist_dataframe = full_csv[no_cluster_7]\n dist_dataframe.reset_index()\n stride = 50\n dist_dataframe['SMA_10']=np.nan\n dist_dataframe.loc[::stride,'SMA_10'] = dist_dataframe.loc[:,'latent_dist_toInit_state'].rolling(window=150).mean()\n\n line3 = go.Scatter(y=dist_dataframe['SMA_10'].values,\n mode='lines',connectgaps=True,name='Rolling avg',line=dict(width=2))\n\n layout=go.Layout(title='test')\n fig_te = go.Figure(data=[line1,line2,line3],layout=layout)\n\n ################################################\n # Numerical value assessment -- Rank Correlation ##########\n ################################################\n # Calculate a score for the distance_to_strong_phenotype fitting\n #Disregard cluster 7 because no coherent manifold\n no_cluster_7 = full_csv['GT_label']!=7\n GT_distance = full_csv[no_cluster_7].GT_dist_toInit_state.values\n latent_distance = full_csv[no_cluster_7].latent_dist_toInit_state.values\n\n spearman_r = stats.spearmanr(GT_distance,latent_distance)\n kendall_r = stats.kendalltau(GT_distance,latent_distance)\n\n spearman_per_cluster = []\n cluster_list = np.unique(full_csv.GT_label.values)\n for cluster in cluster_list:\n cluster_index = full_csv['GT_label']==cluster\n spearman_per_cluster.append(stats.spearmanr(full_csv[cluster_index].GT_dist_toInit_state.values,full_csv[cluster_index].latent_dist_toInit_state.values)[0])\n\n title = f'VAE, dist to initial state metric | Spearman Coeff : {spearman_r[0]:.4f}, Kendall Coeff : {kendall_r[0]:.3f}'\n fig_te.update_layout(margin=dict(l=1.1,r=1.1,b=1.1,t=30),showlegend=True,legend=dict(y=-.1),title=dict(text=title))\n fig_te.update_layout(title={'yref':'paper','y':1,'yanchor':'bottom'},title_x=0.5)\n #fig_te.show()\n\n fig2 = px.bar(x=cluster_list[:6],y=spearman_per_cluster[:6])\n fig2.update_layout(title='Spearman coeff per cluster')\n #fig2.show()\n\n if save_path != None:\n plotly.offline.plot(figplotly, 
filename=f'{save_path}/backbone_plot.html', auto_open=False)\n plotly.offline.plot(fig_te, filename=f'{save_path}/correlation_fit.html', auto_open=False)\n backbone_pkl = save_path+'/backbone_points.pkl'\n with open(backbone_pkl, 'wb') as f:\n pkl.dump(backbone, f, protocol=pkl.HIGHEST_PROTOCOL)\n correlation_score_df = pd.DataFrame({'spearman_r':spearman_r[0],'kendall_r':kendall_r[0]},index=[0])\n correlation_score_df.to_csv(f'{save_path}/correlation_fit.csv')\n\n return backbone, spearman_r[0], kendall_r[0]\n","sub_path":"Code/quantitative_metrics/backbone_metric.py","file_name":"backbone_metric.py","file_ext":"py","file_size_in_byte":21256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"173655358","text":"#! /usr/bin/env python\n# coding=utf-8\n#================================================================\n# Copyright (C) 2018 * Ltd. All rights reserved.\n#\n# Editor : VIM\n# File name : video_demo.py\n# Author : YunYang1994\n# Created date: 2018-11-30 15:56:37\n# Description :\n#\n#================================================================\n\nimport cv2\nimport os\nimport errno\nimport time\nimport numpy as np\nimport core.utils as utils\nimport tensorflow as tf\nfrom PIL import Image\n\ndata_root = '../DelegationGraph/videos'\nresult_root = './results'\nscene_name = 'S01'\ncameras = ['c001', 'c002', 'c003', 'c004', 'c005']\n\n\ndef run_directory(scene_name, setting_name):\n setting_data_dir = data_root + '/' + scene_name + '/' + setting_name\n for camera_name in cameras:\n if setting_name == 'baseline':\n video_path = setting_data_dir + '/' + 'h264_' + camera_name + '.mp4'\n mask_path = None\n else:\n video_path = setting_data_dir + '/' + 'croped_' + camera_name + '.mp4'\n mask_path = setting_data_dir + '/' + camera_name + '_mask.jpg'\n\n output_path = result_root + '/' + scene_name + '/' + setting_name + '/' + 'det_' + camera_name + '.txt'\n\n run_inference(video_path, mask_path, output_path)\n \n\ndef run_inference(video_path, mask_path, output_path):\n\n return_elements = [\"input/input_data:0\", \"input/input_mask:0\", \"pred_sbbox/concat_2:0\", \"pred_mbbox/concat_2:0\", \"pred_lbbox/concat_2:0\"]\n pb_file = \"./yolov3_coco.pb\"\n video_path = video_path\n mask_path = mask_path\n output_path = output_path\n num_classes = 80\n input_size = 416\n graph = tf.Graph()\n return_tensors = utils.read_pb_return_tensors(graph, pb_file, return_elements)\n\n print(video_path, mask_path, output_path)\n\n def generate_mask_data(mask_path, batch_size):\n print(mask_path)\n mask_im = cv2.imread(mask_path)\n mask_im = np.round(utils.image_preporcess(np.copy(mask_im), [input_size, input_size])[:,:,0])\n mask_im = mask_im[np.newaxis, ..., np.newaxis]\n\n images_data = [mask_im for _ in range(batch_size)] \n\n x = np.vstack(images_data).astype(np.float32)\n\n return x\n\n if mask_path == None:\n mask_data = np.ones((1, input_size, input_size, 1)).astype(np.float32)\n else:\n mask_data = generate_mask_data(mask_path, 1)\n\n\n if not os.path.exists(os.path.dirname(output_path)):\n try:\n os.makedirs(os.path.dirname(output_path))\n except OSError as exc: # Guard against race condition\n if exc.errno != errno.EEXIST:\n raise\n\n # erase the detection output file.\n open(output_path, 'w').close()\n\n with tf.Session(graph=graph) as sess:\n vid = cv2.VideoCapture(video_path)\n frame_id = 0\n f = open(output_path, \"a\")\n while True:\n return_value, frame = vid.read()\n if return_value:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n 
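# frame was converted to RGB above (OpenCV decodes video frames as BGR) before wrapping it in a PIL Image\n 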
image = Image.fromarray(frame)\n else:\n f.close()\n print(\"No image!\")\n return 0\n frame_size = frame.shape[:2]\n image_data = utils.image_preporcess(np.copy(frame), [input_size, input_size])\n image_data = image_data[np.newaxis, ...]\n prev_time = time.time()\n\n s = time.time()\n print(image_data.shape)\n pred_sbbox, pred_mbbox, pred_lbbox = sess.run(\n [return_tensors[2], return_tensors[3], return_tensors[4]],\n feed_dict={ return_tensors[0]: image_data,\n return_tensors[1]: mask_data})\n e = time.time()\n\n print(e - s)\n\n pred_bbox = np.concatenate([np.reshape(pred_sbbox, (-1, 5 + num_classes)),\n np.reshape(pred_mbbox, (-1, 5 + num_classes)),\n np.reshape(pred_lbbox, (-1, 5 + num_classes))], axis=0)\n\n bboxes = utils.postprocess_boxes(pred_bbox, frame_size, input_size, 0.3)\n bboxes = utils.nms(bboxes, 0.45, method='nms')\n\n for bbox in bboxes:\n if bbox[-1] not in [2, 5, 7]:\n continue\n if bbox[2] * bbox[3] < 6000:\n continue\n f.write(\"{} {} {} {} {} {}\\n\".format(frame_id, bbox[0], bbox[1], bbox[2], bbox[3], bbox[4]))\n\n frame_id += 1\n\n # image = utils.draw_bbox(frame, bboxes)\n # curr_time = time.time()\n # exec_time = curr_time - prev_time\n # result = np.asarray(image)\n # info = \"time: %.2f ms\" %(1000*exec_time)\n # cv2.namedWindow(\"result\", cv2.WINDOW_AUTOSIZE)\n # result = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n # cv2.imshow(\"result\", result)\n # if cv2.waitKey(1) & 0xFF == ord('q'):\n # f.close() \n # break\n\n\nif __name__ == '__main__':\n for setting_name in os.listdir(data_root + '/' + scene_name):\n run_directory(scene_name, setting_name)\n\n\n","sub_path":"aic_videos.py","file_name":"aic_videos.py","file_ext":"py","file_size_in_byte":5192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"652252555","text":"from __future__ import absolute_import\n\n# standard\nfrom collections import namedtuple\nfrom datetime import datetime\nimport json\nimport sys\nimport traceback\n\n# pypi\nfrom fabric.state import output\n\n# dojo\nfrom dojo_ops import Builder\n\n\n# command line argument values.\n_Args = namedtuple('_Args', 'instance_id, role_name, also_activate, provision')\n\n\ndef _parse_args(*args):\n if not args:\n args = sys.argv[1:]\n\n instance_id = args[0]\n role_name = args[1]\n if len(args) <= 2:\n # default value.\n also_activate = None\n elif '[' in args[2]:\n also_activate = json.loads(args[2])\n else:\n also_activate = args[2]\n provision = True if len(args) <= 3 else bool(int(args[3]))\n\n print('instance-id: {0} / role: {1} / also_activate: {2} / provision: {3}'\n .format(instance_id, role_name, also_activate, provision))\n return _Args(instance_id, role_name, also_activate, provision)\n\n\ndef build_instance(*args):\n \"\"\"Updates an EC2 instance using the dojo fabcloudkit configuration.\n\n Simple wrapper around a call to Builder().update() that allows parameters to be specified from the\n command line, and output to be redirected/captured for logging or email delivery.\n (Fabric does not currently allow capturing log output in a reasonable way.)\n\n The args must be as follows (very little error checking is performed):\n args[0]: instance_id of an EC2 instance\n args[1]: name of the fabcloudkit role\n args[2]: optional: a string, or a json list of strings; each string\n identifies a fabcloudkit role, in addition to args[1],\n that should also be activated on the instance (note that\n these roles are never provisioned or built, just activated).\n Can be \"null\" to indicate empty when also 
specifying args[3].\n args[3]: optional: [0|1], default=1. If 1, indicates that the instance\n should be provisioned for the role args[1]. If 0 then no\n provisioning takes place.\n \"\"\"\n print('BEGIN DOJO INSTANCE UPDATE:')\n print('{0} UTC'.format(datetime.utcnow().isoformat()))\n args = _parse_args(*args)\n print('~~~~~~~~~~~~~~~~~~~~~~~~~~~')\n\n code = 0\n try:\n # quiet down the output volume.\n output['running'] = False\n Builder().update(args.instance_id, args.role_name, args.also_activate, args.provision)\n except:\n traceback.print_exc()\n code = 1\n\n print('~~~~~~~~~~~~~~~~~~~~~~~~~~~')\n if code:\n print('***** UPDATE FAILED *****')\n print('{0} UTC'.format(datetime.utcnow().isoformat()))\n print('END DOJO INSTANCE UPDATE.')\n exit(code)\n\n\nif __name__ == '__main__':\n build_instance()\n","sub_path":"dojo_ops/scripts/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":2739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"251487873","text":"import tensorflow as tf\nimport numpy as np\n\nx_data=np.random.rand(100).astype(np.float32)\ny_data=x_data*0.2+0.3\nprint(x_data)\nweights=tf.Variable(tf.random_uniform([1],-1.0,1.0))\n# print(\"weights\",tf.random_uniform([1],-1.0,1.0))\nbiases=tf.Variable(tf.zeros([1]))\ny=x_data*weights+biases\nloss=tf.reduce_mean(tf.square(y-y_data))\ntestdemo=tf.square(y-y_data)\ntrain=tf.train.GradientDescentOptimizer(0.1).minimize(loss)\ninit=tf.global_variables_initializer()\n\nwith tf.Session() as sess:\n sess.run(init)\n print(\"testdemo\")\n print(sess.run(testdemo))\n print(\"loss\")\n print(sess.run(loss))\n print(sess.run([weights,biases]))\n # for i in range(500):\n # sess.run(train)\n # if i %10==0:\n # print(sess.run(loss))\n # print(sess.run(weights),sess.run(biases))\n\n\n","sub_path":"实验区/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"437979947","text":"import gateway.link\r\n\r\n\r\nhttp_upload = gateway.link.Replicate(\r\n channels = {20,21,23,24,40,99,100,161,500},\r\n start_delay = 0,\r\n transmit_rate = 1,\r\n host_api_url = '/host/',\r\n max_connect_attempts = 50)\r\n\r\nhttp_upload.run()\r\n","sub_path":"edge/upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"430807014","text":"import json\r\nfrom unittest.mock import ANY, MagicMock, PropertyMock, patch\r\n\r\nfrom pytest import fixture\r\n\r\nfrom gasmon.receiver import Receiver, Event\r\n\r\nQUEUE_URL = 'queue-url'\r\n\r\nVALID_MESSAGE = \"\"\"\r\n{ \r\n \"locationId\": \"abc\",\r\n \"eventId\": \"def\",\r\n \"timestamp\": 123456789,\r\n \"value\": 2\r\n}\r\n\"\"\"\r\n\r\nINVALID_MESSAGE = \"\"\"\r\n{\r\n \"not\": \"valid\"\r\n}\r\n\"\"\"\r\n\r\n@fixture\r\ndef receiver_for_tests():\r\n queue_subscription = MagicMock()\r\n type(queue_subscription).queue_url = PropertyMock(return_value=QUEUE_URL)\r\n return Receiver(queue_subscription)\r\n\r\ndef test_receiver_parses_valid_message(receiver_for_tests):\r\n with patch.object(receiver_for_tests, 'sqs_client') as mock_sqs_client:\r\n mock_sqs_client.receive_message.return_value = _build_messages([VALID_MESSAGE])\r\n event = receiver_for_tests.get_events().__next__()\r\n assert event == Event(location_id='abc', event_id='def', timestamp=123456789, value=2)\r\n\r\ndef 
test_receiver_ignores_invalid_message(receiver_for_tests):\r\n with patch.object(receiver_for_tests, 'sqs_client') as mock_sqs_client:\r\n mock_sqs_client.receive_message.return_value = _build_messages([INVALID_MESSAGE, VALID_MESSAGE])\r\n event = receiver_for_tests.get_events().__next__()\r\n assert event == Event(location_id='abc', event_id='def', timestamp=123456789, value=2)\r\n\r\ndef test_receiver_deletes_messages_from_queue(receiver_for_tests):\r\n with patch.object(receiver_for_tests, 'sqs_client') as mock_sqs_client:\r\n mock_sqs_client.receive_message.return_value = _build_messages([VALID_MESSAGE])\r\n event = receiver_for_tests.get_events().__next__()\r\n mock_sqs_client.delete_message_batch.assert_called_with(QueueUrl=QUEUE_URL, Entries=ANY)\r\n\r\ndef _build_messages(message_bodies):\r\n return {'Messages': [{'ReceiptHandle': 'foo', 'Body': json.dumps({'Message': message_body})} for message_body in message_bodies]}","sub_path":"tests/test_receiver.py","file_name":"test_receiver.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"154762056","text":"from lcd_pico import I2cLcd\r\nfrom machine import Pin, I2C\r\nfrom _thread import start_new_thread, allocate_lock\r\nimport utime\r\n\r\nlock = allocate_lock()\r\n\r\n\r\n# init LEDs\r\nled_green = Pin(13, Pin.OUT)\r\nled_amber = Pin(14, Pin.OUT)\r\nled_red = Pin(15, Pin.OUT)\r\n\r\n# init display\r\ndisplay_i2c = I2C(0, scl=Pin(1), sda=Pin(0), freq=400000)\r\nLCD = I2cLcd(display_i2c, 39, 2, 16) # Address = 39, number of lines = 2, number of symbols per line = 16\r\n\r\n# init hand encoder\r\nencoder_clk = Pin(16, Pin.IN) # clock of hand encoder\r\nencoder_dt = Pin(17, Pin.IN) # dt of hand encoder\r\nencoder_sw = Pin(18, Pin.IN, Pin.PULL_UP) # switch of hand encoder\r\n\r\nencoder_stepSpeed = [[200, 1], [175, 2], [150, 3], [137, 4], [125, 5], [120, 6], [115, 7], [110, 8], [105, 9],\r\n [100, 10], [95, 12], [90, 14], [85, 16], [80, 18], [75, 20], [50, 50], [25, 100], [20, 175],\r\n [15, 275], [10, 550]]\r\n\r\nglobal encoder_counter\r\nglobal encoder_acceleration\r\n\r\ndef hand_encoder_thread(clk, dt, stepSpeed):\r\n global encoder_counter\r\n global encoder_acceleration\r\n \r\n clk_lastState = [utime.ticks_ms(), utime.ticks_ms(), clk.value()] #2x time ticks required to avoid big count steps when a single click has a low delta time\r\n encoder_watchdog = 0\r\n\r\n while True:\r\n try:\r\n if encoder_watchdog >= 2500:\r\n led_green.toggle()\r\n encoder_watchdog = 0\r\n clk_value = clk.value()\r\n dt_value = dt.value()\r\n \r\n if clk_value != clk_lastState[2]:\r\n if clk_value == 1:\r\n countStep = 1\r\n currentTimeTicks = utime.ticks_ms()\r\n if encoder_acceleration:\r\n lastTimeTicks = clk_lastState[1]\r\n if clk_lastState[0] > lastTimeTicks: lastTimeTicks = clk_lastState[0]\r\n deltaTime = currentTimeTicks - lastTimeTicks\r\n for speed in stepSpeed:\r\n if deltaTime < speed[0]: countStep = speed[1]\r\n if dt_value == 0:\r\n lock.acquire()\r\n encoder_counter += countStep\r\n lock.release()\r\n else:\r\n lock.acquire()\r\n encoder_counter -= countStep\r\n lock.release()\r\n clk_lastState[0] = clk_lastState[1]\r\n clk_lastState[1] = currentTimeTicks\r\n \r\n clk_lastState[2] = clk_value\r\n utime.sleep_us(100)\r\n encoder_watchdog += 1\r\n except:\r\n print(\"Something went wrong reading the encoder\")\r\n\r\n# start hand_encoder_thread\r\nencoder_counter = 0\r\nencoder_acceleration = True\r\nstart_new_thread(hand_encoder_thread, 
(encoder_clk, encoder_dt, encoder_stepSpeed))\r\n\r\n# display\r\nLCD.backlight_on()\r\nlock.acquire()\r\ncounter = encoder_counter\r\nlock.release()\r\ncounter_old = counter\r\nLCD.clear()\r\nLCD.putstr(str(counter))\r\nprint(counter)\r\n\r\nwhile True:\r\n #try:\r\n led_red.toggle()\r\n lock.acquire()\r\n counter = encoder_counter\r\n lock.release()\r\n if counter != counter_old:\r\n text = str(counter)\r\n number_of_char = len(text)\r\n if number_of_char < 16:\r\n for i in range(16-number_of_char):\r\n text += \" \"\r\n #LCD.hal_write_command(0x02) #set cursor of display to home position\r\n #LCD.cursor_x = 0\r\n #LCD.cursor_y = 0\r\n LCD.move_to(0,0)\r\n LCD.putstr(text)\r\n print(counter)\r\n counter_old = counter\r\n utime.sleep_ms(250)\r\n #except:\r\n #print(\"Something went wrong, controlling the display\")\r\n","sub_path":"main1.py","file_name":"main1.py","file_ext":"py","file_size_in_byte":3724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"373108442","text":"\n# import\n# Tkinter\n\t# http://www.runoob.com/python/python-gui-tkinter.html\n\t# https://wiki.python.org/moin/TkInter\n\t# https://docs.python.org/2/library/tkinter.html\n# wxPython\n# Jython\n\nimport Tkinter as tk\n\n# def\nclass Application(tk.Frame):\n\tdef __init__(self, master=None):\n\t\ttk.Frame.__init__(self, master)\n\t\tself.grid()\n\t\tself.createWidgets()\n\n\tdef createWidgets(self):\n\t\tself.quitButton= tk.Button(self, text='Quit', command=self.quit)\n\t\tself.quitButton.grid()\n\n\n# use\napp= Application()\napp.master.title(\"Minimal Application\")\napp.mainloop()","sub_path":"code/feature/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"204974820","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth import login\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom django.views.generic import ListView, DetailView\nfrom .forms import UserForm, InterviewForm\nfrom .models import Application, Skills, Interview\n# Create your views here.\n\n\ndef home(request):\n user_form = UserForm()\n return render(request, 'home.html', {\n 'user_form': user_form\n })\n\n\ndef signup(request):\n error_message = ''\n if request.method == 'POST':\n # This is how to create a 'user' form object\n # that includes the data from the browser\n form = UserCreationForm(request.POST)\n if form.is_valid():\n # This will add the user to the database\n user = form.save()\n # This is how we log a user in via code\n login(request, user)\n return redirect('home')\n else:\n error_message = 'Invalid sign up - try again'\n # A bad POST or a GET request, so render signup.html with an empty form\n form = UserCreationForm()\n context = {'form': form, 'error_message': error_message}\n return render(request, 'registration/signup.html', context)\n\n\ndef applications_index(request):\n applications = Application.objects.filter(user=request.user)\n return render(request, 'applications/index.html', {\n 'applications': applications\n })\n\n\nclass ApplicationCreate(CreateView):\n model = Application\n fields = ['status', 'date_applied', 'title', 'company',\n 'description', 'notes', 'salary', 'interest_level']\n\n def form_valid(self, form):\n # Assign the logged in user (self.request.user)\n form.instance.user = self.request.user\n # Let the CreateView do its 
usual\n return super().form_valid(form)\n\n\ndef interview_index(request, app_id):\n interviews = Interview.objects.filter(application=app_id)\n return render(request, 'interviews/index.html', {\n 'application_id': app_id,\n 'interviews': interviews\n })\n\n\ndef interview_form(request, app_id):\n return render(request, 'interviews/form.html')\n\ndef interview_create(request, app_id):\n form = InterviewForm(request.POST)\n if form.is_valid():\n # don't save the form to the db until it\n # has the application_id assigned\n new_interview = form.save(commit=False)\n new_interview.application_id = app_id\n new_interview.save()\n return redirect('interview_index', app_id=app_id)\n\n","sub_path":"main_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"33531532","text":"grid = [ [3, 0, 6, 5, 0, 8, 4, 0, 0],\r\n [5, 2, 0, 0, 0, 0, 0, 0, 0],\r\n [0, 8, 7, 0, 0, 0, 0, 3, 1],\r\n [0, 0, 3, 0, 1, 0, 0, 8, 0],\r\n [9, 0, 0, 8, 6, 3, 0, 0, 5],\r\n [0, 5, 0, 0, 9, 0, 6, 0, 0],\r\n [1, 3, 0, 0, 0, 0, 2, 5, 0],\r\n [0, 0, 0, 0, 0, 0, 0, 7, 4],\r\n [0, 0, 5, 2, 0, 6, 3, 0, 0]]\r\n\r\nl=[0,0]\r\n\r\ndef print_grid(arr):\r\n for i in range(9):\r\n for j in range(9):\r\n print(arr[i][j],end=\" \")\r\n print(\"\")\r\n\r\n\r\ndef check_location_is_safe(sudoku, i,j,e):\r\n rowOk = all([e != sudoku[i][x] for x in range(9)])\r\n if rowOk:\r\n columnOk = all([e != sudoku[x][j] for x in range(9)])\r\n if columnOk:\r\n secTopX, secTopY = 3 * (i // 3), 3 * (j // 3)\r\n for x in range(secTopX, secTopX + 3):\r\n for y in range(secTopY, secTopY + 3):\r\n if sudoku[x][y] == e:\r\n return False\r\n return True\r\n return False\r\n\r\ndef find_empty_location(arr):\r\n for row in range(9):\r\n for col in range(9):\r\n if (arr[row][col] == 0):\r\n return row,col\r\n return -1,-1\r\n\r\n\r\ndef solve_sudoku(sudoku,i=0,j=0):\r\n i, j = find_empty_location(sudoku)\r\n if i == -1:\r\n return True\r\n for e in range(1, 10):\r\n if check_location_is_safe(sudoku, i, j, e):\r\n sudoku[i][j] = e\r\n if solve_sudoku(sudoku, i, j):\r\n return True\r\n sudoku[i][j] = 0\r\n return False\r\n\r\nif __name__ == \"__main__\":\r\n if (solve_sudoku(grid)):\r\n print_grid(grid)\r\n else:\r\n print(\"No solution exists\")","sub_path":"sudoku logic.py","file_name":"sudoku logic.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"262417786","text":"import torch\nimport numpy as np\nfrom PIL import Image\nfrom tqdm import tqdm\nimport argparse\nfrom pathlib import Path\n\nimport copy\n\ndef main(args):\n img_path = Path(args.img)\n img = np.array(Image.open(args.img))\n\n print(\"shape :\",img.shape)\n h,w = img.shape[:2]\n\n process_width = int(w * args.deg / 360)\n print(\"process_width :\", process_width)\n\n tmp = copy.deepcopy(img[:, :process_width])\n # tmp = tmp[:, ::-1]\n print(\"tmp :\" ,tmp.shape)\n img[:, :w-process_width] = img[:, process_width:]\n img[:, w-process_width:] = tmp\n\n Image.fromarray(img).save(img_path.parent / (img_path.stem + \"_rotated.png\"))\n\nif __name__ == '__main__':\n\n \"\"\"options\"\"\"\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n # parser.add_argument('--pth', default=\"./trained_model/resnet50_rnn__st3d.pth\",\n # help='path to load saved checkpoint.')\n parser.add_argument('--img', required=True,\n help='Path to an image file to be 
rotated')\n parser.add_argument('--deg', type=int,\n help='degree to rotate the input image')\n\n args = parser.parse_args()\n\n main(args)\n","sub_path":"scripts/rotate_panorama_horizontal.py","file_name":"rotate_panorama_horizontal.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"211510346","text":"from tkinter import *\nimport sqlite3\nfrom tkinter import messagebox\n\ncon = sqlite3.connect(\"database.db\")\ncur = con.cursor()\n\n\nclass AddPeople(Toplevel):\n def __init__(self):\n Toplevel.__init__(self)\n\n self.geometry(\"650x650+40+60\")\n self.title(\"ADD PEOPLE\")\n self.resizable(True, True)\n\n self.top = Frame(self, height=150, bg='white')\n self.top.pack(fill=X)\n self.bottom = Frame(self, height=500, bg='#208de2')\n self.bottom.pack(fill=X)\n\n # top frame design\n self.top_image = PhotoImage(file='icons/images.png')\n self.top_image_label = Label(self.top, image=self.top_image, bg='white')\n self.top_image_label.place(x=10, y=10)\n self.heading = Label(self.top, text='add peoples', font='magneto 20 bold', bg='white')\n self.heading.place(x=100, y=30)\n\n # name\n self.label_name = Label(self.bottom, text=\" name \",font= \"magneto 15 bold\", fg='black', bg=\"white\")\n self.label_name.place(x=50, y=50)\n\n self.entry_name = Entry(self.bottom, width=25, bd=4)\n self.entry_name.insert(0, \"enter Name\")\n self.entry_name.place(x=250, y=50)\n\n # surname\n self.label_surname = Label(self.bottom, text=\" surname \", font=\"magneto 15 bold\", fg='black', bg=\"white\")\n self.label_surname.place(x=50, y=100)\n\n self.entry_surname = Entry(self.bottom, width=25, bd=4)\n self.entry_surname.insert(0, \"enter Surname\")\n self.entry_surname.place(x=250, y=100)\n\n # email\n\n self.label_email = Label(self.bottom, text=\" email \", font=\"magneto 15 bold\", fg='black', bg=\"white\")\n self.label_email.place(x=50, y=150)\n\n self.entry_email = Entry(self.bottom, width=25, bd=4)\n self.entry_email.insert(0, \"enter Email\")\n self.entry_email.place(x=250, y=150)\n\n\n # phone number\n self.label_phone = Label(self.bottom, text=\" phone \", font=\"magneto 15 bold\", fg='black', bg=\"white\")\n self.label_phone.place(x=50, y=200)\n\n self.entry_phone = Entry(self.bottom, width=20, bd=4)\n self.entry_phone.insert(0, \"enter phone\")\n self.entry_phone.place(x=250, y=200)\n\n # address\n self.label_address = Label(self.bottom, text=\" add \", font=\"magneto 15 bold\", fg='black', bg=\"white\")\n self.label_address.place(x=50, y=250)\n\n self.entry_address = Entry(self.bottom, width=25, bd=4)\n self.entry_address.insert(0, \"address\")\n self.entry_address.place(x=250, y=250)\n\n # buttons -- pass the method itself, not the result of calling it\n button = Button(self.bottom, text=\"add in contact\", command=self.add_people)\n button.place(x=270, y=300)\n\n def add_people(self):\n name = self.entry_name.get()\n surname = self.entry_surname.get()\n email = self.entry_email.get()\n phone = self.entry_phone.get()\n #address = self.entry_address.get()\n print(name)\n\n\n\n\n\n\n\n\n\n","sub_path":"addpeople.py","file_name":"addpeople.py","file_ext":"py","file_size_in_byte":2928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"557383933","text":"#!/usr/bin/env python\n\nimport sys\nimport jenkins\nimport json\nimport traceback\nimport datetime\nimport time\nimport subprocess\nimport argparse\nimport getpass\nif sys.version_info >= (3,):\n from urllib.parse import urlencode\n from urllib.request import 
urlopen, Request\nelse:\n from urllib2 import Request, urlopen\n\n\nclass SUTHealthCheck(object):\n\n def __init__(self, args):\n self.host = args.host\n self.port = args.port\n self.url = 'http://%s:%d' % (self.host, self.port)\n self.server = jenkins.Jenkins(self.url, username=getpass.getuser(), password=getpass.getpass().strip())\n return\n\n def getBMCIP(self, computer):\n bmc_ip = None\n try:\n config = self.server.get_node_config(computer)\n except:\n return bmc_ip\n lines = [line.strip() for line in config.splitlines() if line.strip()]\n for i, line in enumerate(lines):\n if 'bmc_ip' in line.lower():\n bmc_ip = lines[i+1].split('>', 1)[1].split('<', 1)[0].strip()\n break\n if 'SUT' in computer and '<description>' in line and 'omputer' in line:\n bmc_ip = line.split('omputer', 1)[1].split(':', 1)[1].strip().split(' ', 1)[0].strip()\n break\n return bmc_ip\n\n def getComputers(self):\n computers = {}\n nodes = self.server.get_nodes()\n for entry in nodes:\n name = entry['name']\n if 'diesel' in name or 'ethanol' in name or 'goldengoose' in name or 'grandstand' in name or 'SUT' in name:\n computers[name] = not entry['offline']\n return computers\n\n def getHostName(self, computer):\n host_name = None\n try:\n config = self.server.get_node_config(computer)\n except:\n print('could not get node_config for %s' % computer)\n return host_name\n lines = [line.strip() for line in config.splitlines() if line.strip()]\n for i, line in enumerate(lines):\n if 'SUT' in computer and '<description>' in line and 'omputer' in line:\n host_name = line.split('omputer', 1)[1].split(' ', 2)[1].strip()\n return host_name\n return host_name\n\n def nslookup(self, name=None, ip=None):\n value = None\n if name:\n try:\n cmd = 'nslookup \"%s\" | grep \"Address: \" | tail -1' % name\n output = subprocess.check_output(cmd, shell=True)\n if 'can\'t find' not in output and 'Address: ' in output:\n value = output.split(' ', 1)[1].strip()\n except Exception as e:\n print('ERROR: nslookup failed for \"%s\": %s\\n%s' % (name, e, traceback.format_exc()))\n elif ip:\n try:\n cmd = 'nslookup \"%s\" | grep \"name = \" | tail -1' % ip\n output = subprocess.check_output(cmd, shell=True)\n if 'name = ' in output:\n value = output.split('name = ', 1)[1].split('.', 1)[0].strip()\n except Exception as e:\n print('ERROR: nslookup failed for \"%s\": %s\\n%s' % (ip, e, traceback.format_exc()))\n return value\n\n def main(self):\n ips = {}\n computers = self.getComputers()\n for computer in computers:\n ipaddr = self.nslookup(name=computer)\n if ipaddr and '.' in ipaddr:\n ips.setdefault(computer, {'ip': ipaddr})\n ips[computer]['name'] = self.nslookup(ip=ipaddr)\n continue\n ipaddr = self.getBMCIP(computer)\n if ipaddr:\n if '.' 
in ipaddr:\n ips.setdefault(computer, {'ip': ipaddr})\n ips[computer]['name'] = self.nslookup(ip=ipaddr)\n else:\n ips.setdefault(computer, {'name': ipaddr})\n ips[computer]['ip'] = self.nslookup(name=ipaddr)\n else:\n ips.setdefault(computer, {'ip': 'unknown', 'name': computer})\n for computer, data in sorted(ips.items()):\n print('%-20s %s (%s)' % (computer, data['ip'], data['name'].lower()))\n return\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='SUT Health Check')\n parser.add_argument('-H', '--host', help='Jenkins master host name')\n parser.add_argument('-p', '--port', type=int, help='Jenkins master port')\n args = parser.parse_args()\n SHC = SUTHealthCheck(args)\n if not SHC.main():\n exit(1)\n\n","sub_path":"automation/jenkins/sut_health_check.py","file_name":"sut_health_check.py","file_ext":"py","file_size_in_byte":4521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}