diff --git "a/4022.jsonl" "b/4022.jsonl" new file mode 100644--- /dev/null +++ "b/4022.jsonl" @@ -0,0 +1,466 @@ +{"seq_id":"31062881811","text":"#!/usr/bin/env python\n\n# This script converts the output files cluster programs such as uclust\n# cd hit to a generalized output format that is also compatible with the\n# QIIME pipeline.\n\n# import the argparse module to handle the input commands\nimport argparse\n\n# get the 3 commandline arguments for the input files and output directory\nparser = argparse.ArgumentParser(description = 'convert cluster file')\n\nparser.add_argument('-c', metavar='cluster file', type=str, \n\t\t\thelp='enter the cluster file path')\nparser.add_argument('-o', metavar='output file', type=str,\n\t\t\thelp='The output file name')\t\t\t\nparser.add_argument('-p', metavar='program used', type=str,\n\t\t\thelp='The program used for clustering (uclust/usearch/cdhit')\nargs = parser.parse_args()\n\ndef get_uc_cluster (cluster_file):\n\t# parse through the uc file and get the cluster information\n\tcluster_dic = {}\n\n\tfor line in open(cluster_file, 'r'):\n\t\tline = line.strip().split('\\t')\n\t\t# get the seeds and sequences who match these seeds\n\t\tif line[0] == 'S':\n\t\t\tcluster_dic[int(line[1])+1] = [line[8]]\n\t\telif line[0] == 'H':\n\t\t\tcluster_dic[int(line[1])+1] += [line[8]]\n\t\n\treturn cluster_dic\n\ndef get_cd_cluster (cluster_file):\n\t# parse through the cd hit file and get the cluster information\n\t\n\tcluster_dic, cluster = {}, 0\n\t\n\tfor line in open(cluster_file + '.clstr', 'r'):\n\t\tline = line.strip()\n\t\t# get the cluster number if a new cluster starts\n\t\tif line[0] == '>':\n\t\t\tcluster = int(line.split(' ')[1])+1\n\t\t\tcluster_dic[cluster] = []\n\t\t# expand the cluster with the sequences in it\n\t\telse:\n\t\t\tseq = line.split('>')[1].split('...')[0]\n\t\t\tcluster_dic[cluster] += [seq]\n\t\t\t\t\n\treturn cluster_dic\n\ndef get_octupus_cluster (cluster_file):\n\t# parse the octupus cluster file and get the cluster information\n\t# cluster file = octuall.seq\n\n\tcluster_dic, cluster = {}, 0\n\n\tfor line in open(cluster_file, 'r'):\n\t\tline = line.strip()\n\t\tif '*' in line:\n\t\t\tcluster = int(line.replace('*octu',''))\n\t\t\tcluster_dic[cluster] = []\n\t\tif '>' in line:\n\t\t\tcluster_dic[cluster] += [line[1:].replace('\\r','')]\n\n\treturn cluster_dic\t\t\t\n\ndef get_tgicl_cluster (cluster_file):\n\t# parse the tgicl cluster file and get the cluster information\n\t\n\tcluster_dic, cluster = {}, 0\n\n\t# parse the (non-singleton) cluster file and add the clusters to the dictionary\n\tfor line in open(cluster_file + '_cl_clusters', 'r'):\n\t\tline = line.strip().split('\\t')\n\t\tif '>' in line[0]:\n\t\t\tcluster = int(line[0][3:])\n\t\telse:\n\t\t\tcluster_dic[cluster] = line\n\n\tcluster_singleton = cluster + 1\n\n\t# parse the singleton cluster file and add the singletons to the dictionary\n\tfor line in open(cluster_file + '.singletons', 'r'):\n\t\tcluster_dic[cluster_singleton] = [line.strip()]\n\t\tcluster_singleton += 1\n\t\n\treturn cluster_dic\t\n\ndef write_output (cluster_dic, output_file):\n\t# convert the clusters to the desired .txt output and\n\t# write them to the output file\n\t\n\t# open the output file\n\toutput = open(output_file, 'w')\n\t\n\tfor i in range(1, (len(cluster_dic)+1)):\n\t\tcluster = '\\t'.join([str(i)] + cluster_dic[i]) + '\\n'\n\t\toutput.write(cluster)\n\t\n\toutput.close()\n\t\t\ndef main ():\n\t\n\tif args.p == 'usearch_old' or args.p == 'usearch':\n\t\t# get the uc file and save it 
in the .txt format\n\t\twrite_output(get_uc_cluster(args.c), args.o)\n\t\n\telif args.p == 'cdhit':\n\t\t# get the clstr file and save it in the .txt format\n\t\twrite_output(get_cd_cluster(args.c), args.o)\n\n\telif args.p == 'octupus':\n\t\t# get the octuall.seq file and save it in the .txt format\n\t\twrite_output(get_octupus_cluster(args.c), args.o)\n\n\telif args.p == 'tgicl':\n\t\t# get the tgicl cluster and singleton files, extract the clusters and merge the results\n\t\twrite_output(get_tgicl_cluster(args.c), args.o)\n\t\t\nif __name__ == \"__main__\":\n main()\n\n\t\t\n","repo_name":"Y-Lammers/Cluster-pipeline","sub_path":"src/cluster_to_txt.py","file_name":"cluster_to_txt.py","file_ext":"py","file_size_in_byte":3713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"17058698766","text":"'''Write an algorithm that reads the start time of a game and the end time of the game\n(considering whole hours only) and computes the duration of the game in hours, given\nthat the maximum duration of a game is 24 hours and that a game may start on one day\nand end on the following day.'''\n\ndef tempPartida(hora_inicio, hora_final):\n if hora_inicio >= hora_final:\n tempo_jogo = (24 - hora_inicio) + hora_final\n else:\n tempo_jogo = hora_final - hora_inicio\n return tempo_jogo\n\ndef main():\n hora_inicio = int(input(\"What is the start time of the game? \"))\n hora_final = int(input(\"What is the end time of the game? \"))\n print(\"Match duration: \", tempPartida(hora_inicio, hora_final), \"hours\")\n\nmain()","repo_name":"larissacsf/Exercises-1","sub_path":"PYTHON/Questao24.py","file_name":"Questao24.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"3538871746","text":"\"\"\" Utilities for modifying strings\"\"\"\n\nimport os\nimport re\n\nfrom theano.compat.six import string_types\nfrom theano.compat.six.moves import xrange\n\nfrom fuel.utils.exc import EnvironmentVariableError, NoDataPathError\nfrom fuel.utils.exc import reraise_as\nfrom fuel.utils.common_strings import environment_variable_essay\n\n\ndef preprocess(string, environ=None):\n \"\"\"\n Preprocesses a string, by replacing `${VARNAME}` with\n `os.environ['VARNAME']` and ~ with the path to the user's\n home directory\n Parameters\n ----------\n string : str\n String object to preprocess\n environ : dict, optional\n If supplied, preferentially accept values from\n this dictionary as well as `os.environ`. That is,\n if a key appears in both, this dictionary takes\n precedence.\n Returns\n -------\n rval : str\n The preprocessed string\n \"\"\"\n if environ is None:\n environ = {}\n\n split = string.split('${')\n\n rval = [split[0]]\n\n for candidate in split[1:]:\n subsplit = candidate.split('}')\n\n if len(subsplit) < 2:\n raise ValueError('Open ${ not followed by } before '\n 'end of string or next ${ in \"' + string + '\"')\n\n varname = subsplit[0]\n try:\n val = (environ[varname] if varname in environ\n else os.environ[varname])\n except KeyError:\n if varname == 'PYLEARN2_DATA_PATH':\n reraise_as(NoDataPathError())\n if varname == 'PYLEARN2_VIEWER_COMMAND':\n reraise_as(EnvironmentVariableError(\n viewer_command_error_essay + environment_variable_essay)\n )\n\n reraise_as(ValueError('Unrecognized environment variable \"' +\n varname + '\". 
Did you mean ' +\n match(varname, os.environ.keys()) + '?'))\n\n rval.append(val)\n\n rval.append('}'.join(subsplit[1:]))\n\n rval = ''.join(rval)\n\n rval = os.path.expanduser(rval)\n\n return rval\n\n\n\n","repo_name":"alexmlamb/fuel","sub_path":"fuel/utils/string_utils.py","file_name":"string_utils.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"71"}
+{"seq_id":"17450776937","text":"from libs.api.common.constants import ResponseCodes\nfrom libs.utils.allure_wrapper import step\n\n\ndef test_renew_auth_token(user_session, common_config):\n with step(\"Obtain new user access token\"):\n user_session.get_token()\n original_token = user_session.token\n with step(\"Renew user access token\"):\n response = user_session.get_token()\n with step(\"Assert SUCCESS response code\"):\n assert response.status_code == ResponseCodes.SUCCESS\n with step(\"Assert response json\"):\n response_json = response.json()\n assert response_json['access_token'] != original_token\n assert response_json['expires_in'] == common_config.user_token_expiration_time\n","repo_name":"Urban-Jungle-Project/urban_jungle_test_project","sub_path":"tests/api/auth/test_renew_auth_token.py","file_name":"test_renew_auth_token.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"14960329393","text":"#!/usr/bin/python3\n\"\"\"Console module\"\"\"\nimport cmd\nfrom models.base_model import BaseModel\nfrom models import storage\nimport uuid\nimport json\n\n\nclass HBNBCommand(cmd.Cmd):\n prompt = \"(hbnb) \"\n\n def default(self, line: str) -> None:\n args = [\n \"BaseModel\", \"User\", \"State\", \"City\", \"Place\", \"Amenity\", \"Review\"\n ]\n com = line.split(\".\")\n if com[0] not in args:\n return super().default(line)\n if (com[1] == \"count()\"):\n a = 0\n for k in storage.all():\n if k.startswith(com[0] + '.'):\n a += 1\n print(a)\n elif (com[1] == \"all()\"):\n print(\"[\", end=\"\")\n a = 0\n for k in storage.all():\n if k.startswith(com[0] + '.'):\n a += 1\n b = 0\n for k in storage.all():\n if k.startswith(com[0] + '.'):\n id = k.split(\".\")\n id = id[1].split(\"]\")\n id = id[0]\n inst = \"{}.{}\".format(com[0], id)\n print(storage.all()[inst], end=\"\")\n if (b < (a - 1)):\n print(\", \", end=\"\")\n\n b += 1\n print(\"]\")\n elif (com[1].startswith(\"show(\")):\n id = com[1].split(\"(\")\n id = id[1].split(\")\")\n id = id[0]\n id = id.strip('\\\"')\n id = id.strip(\"\\'\")\n\n inst = \"{}.{}\".format(com[0], id)\n if inst not in storage.all():\n print(\"** no instance found **\")\n else:\n print(storage.all()[inst])\n\n elif (com[1].startswith(\"destroy(\")):\n id = com[1].split(\"(\")\n id = id[1].split(\")\")\n id = id[0]\n id = id.strip('\\\"')\n id = id.strip(\"\\'\")\n\n inst = \"{}.{}\".format(com[0], id)\n if inst not in storage.all():\n print(\"** no instance found **\")\n else:\n del storage.all()[inst]\n storage.save()\n elif (com[1].startswith(\"update(\")):\n id = com[1].split(\"(\")\n id = id[1].split(\")\")\n id = id[0]\n id = id.split(\",\")\n key = id[1]\n key = key.strip(\"\\\"\")\n key = key.strip(\"\\'\")\n key = key.split('\"')\n key = key[1]\n value = id[2]\n value = value.strip(\"\\\"\")\n value = value.strip(\"\\'\")\n try:\n value = value.split('\"')\n value = value[1]\n except Exception:\n value = int(value[0])\n\n id = id[0]\n id = id.strip('\\\"')\n id = id.strip(\"\\'\")\n inst = \"{}.{}\".format(com[0], 
id)\n obj = storage.all()[inst]\n obj.__dict__[key] = value\n storage.save()\n\n def do_EOF(self, line):\n \"\"\"\n handles the end of file\n \"\"\"\n return True\n\n def do_quit(self, line):\n \"\"\"\n Quit command to exit the program\n \"\"\"\n return True\n\n def do_create(self, line):\n \"\"\"\n Creates a new instance of BaseModel\n \"\"\"\n if line is None or line == \"\":\n print(\"** class name missing **\")\n elif line not in storage.classes():\n print(\"** class doesn't exist **\")\n else:\n ins = storage.classes()[line]()\n ins.save()\n print(ins.id)\n\n def do_show(self, line):\n \"\"\"\n Prints the string representation\n \"\"\"\n if line is None or line == \"\":\n print(\"** class name missing **\")\n else:\n line = line.split(\" \")\n if line[0] not in storage.classes():\n print(\"** class doesn't exist **\")\n elif len(line) < 2:\n print(\"** instance id missing **\")\n else:\n inst = \"{}.{}\".format(line[0], line[1])\n if inst not in storage.all():\n print(\"** no instance found **\")\n else:\n print(storage.all()[inst])\n\n def do_destroy(self, line):\n \"\"\"\n Deletes an instance based on the class name and id\n\n \"\"\"\n if line is None or line == \"\":\n print(\"** class name missing **\")\n else:\n line = line.split(\" \")\n if line[0] not in storage.classes():\n print(\"** class doesn't exist **\")\n elif len(line) < 2:\n print(\"** instance id missing **\")\n else:\n inst = \"{}.{}\".format(line[0], line[1])\n if inst not in storage.all():\n print(\"** no instance found **\")\n else:\n del storage.all()[inst]\n storage.save()\n\n def do_all(self, line):\n \"\"\"\n Prints all string representation\n \"\"\"\n if line is not None and line != \"\":\n line = line.split(\" \")\n if line[0] not in storage.classes():\n print(\"** class doesn't exist **\")\n else:\n obj = []\n for k, v in storage.all().items():\n if type(v).__name__ == line[0]:\n obj.append(str(v))\n print(obj)\n\n def do_update(self, line):\n \"\"\"\n Updates an instance based on the class name and id\n \"\"\"\n if line is None or line == \"\":\n print(\"** class name missing **\")\n return False\n line = line.split(\" \")\n\n if line[0] not in storage.classes():\n print(\"** class doesn't exist **\")\n return False\n if len(line) < 2:\n print(\"** instance id missing **\")\n return False\n if \"{}.{}\".format(line[0], line[1]) not in storage.all():\n print(\"** no instance found **\")\n return False\n if len(line) == 2:\n print(\"** attribute name missing **\")\n return False\n if len(line) == 3:\n try:\n type(eval(line[2])) != dict\n except NameError:\n print(\"** value missing **\")\n return False\n if len(line) == 4:\n obj = storage.all()[\"{}.{}\".format(line[0], line[1])]\n if line[2] in obj.__class__.__dict__.keys():\n valtype = type(obj.__class__.__dict__[line[2]])\n obj.__dict__[line[2]] = valtype(line[3])\n else:\n obj.__dict__[line[2]] = line[3]\n elif type(eval(line[2])) == dict:\n obj = storage.all()[\"{}.{}\".format(line[0], line[1])]\n for k, v in eval(line[2]).items():\n if (k in obj.__class__.__dict__.keys() and\n type(obj.__class__.__dict__[k]) in {str, int, float}):\n valtype = type(obj.__class__.__dict__[k])\n obj.__dict__[k] = valtype(v)\n else:\n obj.__dict__[k] = v\n storage.save()\n\n\nif __name__ == '__main__':\n HBNBCommand().cmdloop()\n","repo_name":"Popsicool/AirBnB_clone","sub_path":"console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":7161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"29321919278","text":"import config\nfrom model.user import User\n\n\ndef gen_js():\n with open(\"static/js/origin.js\") as js_file:\n origin = js_file.read()\n\n admins = User.get_admin()\n if admins:\n admin = admins[0]\n port = \"\"\n if port != 80:\n port = \":\"+ str(config.PORT)\n now = origin.format(domain=config.DOMAIN, token=admin.key, port=port)\n with open(\"static/js/x.js\", \"w\") as now_file:\n now_file.write(now)\n\n\nif __name__ == \"__main__\":\n gen()\n","repo_name":"python333/xss","sub_path":"view/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"8462483663","text":"from flask import Flask, jsonify, render_template, request\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_bootstrap import Bootstrap\nfrom typing import Callable\nimport random\nimport os\n\nAPI_KEY = os.environ.get(\"API_KEY\")\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = os.environ.get(\"SECRET_KEY\")\nBootstrap(app)\n\n# # Connect to Database\napp.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get(\"DATABASE_URL1\", \"sqlite:///quotes.db\")\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\n\nclass MySQLAlchemy(SQLAlchemy):\n Column: Callable\n String: Callable\n Integer: Callable\n\n\ndb = MySQLAlchemy(app)\n\n\n# # Quote TABLE Configuration\nclass Quote(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n quote = db.Column(db.String(2500), unique=True, nullable=False)\n author = db.Column(db.String(250), nullable=False)\n\n def to_dict(self):\n # # Method 1.\n # dictionary = {}\n # # Loop through each column in the data record\n # for column in self.__table__.columns:\n # # Create a new dictionary entry;\n # # where the key is the name of the column\n # # and the value is the value of the column\n # print(getattr(self, column.name))\n # dictionary[column.name] = getattr(self, column.name)\n # return dictionary\n\n # Method 2. 
Alternatively use Dictionary Comprehension to do the same thing.\n return {column.name: getattr(self, column.name) for column in self.__table__.columns}\n\n\ndb.create_all()\n\n\n@app.route(\"/\")\ndef home():\n return render_template(\"index.html\")\n\n\n# # HTTP GET - Read Record\n\n@app.route(\"/random\", methods=[\"GET\"])\ndef get_random_quote():\n quotes = db.session.query(Quote).all()\n random_quote = random.choice(quotes)\n return jsonify(quote=random_quote.to_dict())\n\n\n@app.route(\"/all\", methods=[\"GET\"])\ndef get_all_quotes():\n quotes = db.session.query(Quote).all()\n return jsonify(quotes=[quote.to_dict() for quote in quotes])\n\n\n@app.route(\"/search-by-author\")\ndef get_quote_by_author():\n query_author = request.args.get(\"author\")\n quotes = db.session.query(Quote).filter_by(author=query_author).all()\n if quotes:\n return jsonify(quotes=[quote.to_dict() for quote in quotes])\n else:\n return jsonify(error={\"Not Found\": \"Sorry, this author does not exist in this database.\"})\n\n\n@app.route(\"/search\")\ndef get_quote():\n query_quote = request.args.get(\"quote\")\n quotes = db.session.query(Quote).filter_by(quote=query_quote).all()\n if quotes:\n return jsonify(quotes=[quote.to_dict() for quote in quotes])\n else:\n return jsonify(error={\"Not Found\": \"Sorry, this quote does not exist in this database.\"})\n\n\n# # HTTP POST - Create Record\n@app.route(\"/add\", methods=[\"POST\"])\ndef post_new_quote():\n new_quote = Quote(\n quote=request.form.get(\"quote\"),\n author=request.form.get(\"author\"),\n )\n db.session.add(new_quote)\n db.session.commit()\n return jsonify(response={\"success\": \"Successfully added the new quote.\"})\n\n\n# HTTP PUT/PATCH - Update Record\n@app.route(\"/update-quote/<int:quote_id>\", methods=[\"PATCH\"])\ndef patch_quote(quote_id):\n new_quote = request.args.get(\"new_quote\")\n quote = db.session.query(Quote).get(quote_id)\n if quote:\n quote.quote = new_quote\n db.session.commit()\n # Just add the code after the jsonify method. 200 = Ok\n return jsonify(response={\"success\": \"Successfully updated the quote.\"}), 200\n else:\n # 404 = Resource not found\n return jsonify(error={\"Not Found\": \"Sorry a quote with that id was not found in the database.\"}), 404\n\n\n@app.route(\"/update-quote-author/<int:quote_id>\", methods=[\"PATCH\"])\ndef patch_quote_author(quote_id):\n new_author = request.args.get(\"new_author\")\n quote = db.session.query(Quote).get(quote_id)\n if quote:\n quote.author = new_author\n db.session.commit()\n # Just add the code after the jsonify method. 200 = Ok\n return jsonify(response={\"success\": \"Successfully updated the author of the quote.\"}), 200\n else:\n # 404 = Resource not found\n return jsonify(error={\"Not Found\": \"Sorry a quote with that id was not found in the database.\"}), 404\n\n\n# # HTTP DELETE - Delete Record\n@app.route(\"/delete-quote/<int:quote_id>\", methods=[\"DELETE\"])\ndef delete_quote(quote_id):\n api_key = request.args.get(\"api-key\")\n if api_key == API_KEY:\n quote = db.session.query(Quote).get(quote_id)\n if quote:\n db.session.delete(quote)\n db.session.commit()\n return jsonify(response={\"success\": \"Successfully deleted the quote from the database.\"}), 200\n else:\n return jsonify(error={\"Not Found\": \"Sorry a quote with that id was not found in the database.\"}), 404\n\n else:\n return jsonify(error={\"Forbidden\": \"Sorry, that's not allowed. 
Make sure you have the correct api_key.\"}), 403\n\n\nif __name__ == '__main__':\n app.run(host=\"localhost\", port=5000)\n","repo_name":"jasonaik/out-of-context-quote-generator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"37604402291","text":"from iperf_exec import Iperf\nfrom analyzelog import AnalyzeLog\nfrom SshConnection import SshConnection\n\n\nclass IperfThreads:\n \"\"\"Thread bodies implementation\"\"\"\n\n def cpe_configuration(self, channel, host_index, thread_id, conf_data):\n \"\"\"Method for cpe configuration thread\n\n Args:\n channel (str): router channel\n host_index (int): conf_data index\n thread_id (str): thread description\n conf_data (dict): data from configuration\n\n \"\"\"\n ssh_conn = SshConnection(conf_data, host_index)\n\n print(\"Starting \" + thread_id)\n ssh_conn.connect_to_cpe(channel, SshConnection.WIFI_FREQ)\n\n def run_thread(self, filename, host_index, thread_id,\n conf_data, iperf_choice):\n \"\"\"Method for TCP_upload, TCP_download,\n UDP_upload and UDP_download threads\n\n Args:\n filename (str): temporary file\n host_index (int): conf_data index\n thread_id (str): thread description\n conf_data (dict): data from configuration\n\n \"\"\"\n\n iperf = Iperf(filename, conf_data, host_index)\n\n print(\"Starting \" + thread_id)\n\n if iperf_choice == 'TCP_upload':\n iperf.tcp_upload()\n elif iperf_choice == 'TCP_download':\n iperf.tcp_download()\n elif iperf_choice == 'UDP_upload':\n iperf.udp_upload()\n elif iperf_choice == 'UDP_download':\n iperf.udp_download()\n\n ob = AnalyzeLog()\n ob.get_mean_value(filename, thread_id)\n ob.get_all_data(filename, thread_id)\n\n","repo_name":"MaciejTe/Wi-fi-performance-test","sub_path":"iperf_threads.py","file_name":"iperf_threads.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"35711790920","text":"#! 
/usr/bin/python3\nimport sys\n\nimport iota.harness.api as api\nimport iota.harness.infra.resmgr as resmgr\n\nimport iota.test.apulu.config.api as config_api\nimport iota.test.apulu.utils.dhcp as dhcp_utils\nimport iota.test.utils.arping as arp_utils\n\nimport iota.protos.pygen.topo_svc_pb2 as topo_svc\n\n__max_udp_ports = 1\n__max_tcp_ports = 1\n\nportUdpAllocator = resmgr.TestbedPortAllocator(205)\nportTcpAllocator = resmgr.TestbedPortAllocator(4500)\n\ndef __publish_workloads(workloads=[]):\n workloads = workloads if workloads else api.GetWorkloads()\n wl_list = list(filter(lambda x: x.vnic.IsOriginDiscovered() and not x.vnic.DhcpEnabled, workloads))\n if not arp_utils.SendGratArp(wl_list):\n return api.types.status.FAILURE\n\n return api.types.status.SUCCESS\n\ndef __add_secondary_ip_to_workloads(workloads=[]):\n if not api.IsSimulation():\n req = api.Trigger_CreateAllParallelCommandsRequest()\n else:\n req = api.Trigger_CreateExecuteCommandsRequest(serial = False)\n\n workloads = workloads if workloads else api.GetWorkloads()\n for wl in workloads:\n for sec_ip_addr in wl.sec_ip_addresses:\n api.Trigger_AddCommand(req, wl.node_name, wl.workload_name,\n \"ifconfig %s add %s\" % (wl.interface, sec_ip_addr))\n api.Logger.debug(\"ifconfig add from %s %s %s %s\" % (wl.node_name, wl.workload_name, wl.interface, sec_ip_addr))\n\n resp = api.Trigger(req)\n if resp is None:\n return api.types.status.FAILURE\n\n return api.types.status.SUCCESS\n\ndef __add_iptables_to_workloads(workloads=[]):\n if not api.IsSimulation():\n req = api.Trigger_CreateAllParallelCommandsRequest()\n else:\n req = api.Trigger_CreateExecuteCommandsRequest(serial = False)\n\n workloads = workloads if workloads else api.GetWorkloads()\n for wl in workloads:\n api.Trigger_AddCommand(req, wl.node_name, wl.workload_name,\n \"iptables -A INPUT -p tcp -i %s --src %s -j DROP\" % (wl.interface, wl.ip_prefix))\n api.Logger.info(f\"iptables -A INPUT -p tcp -i {wl.interface} --src {wl.ip_prefix} -j DROP\")\n api.Trigger_AddCommand(req, wl.node_name, wl.workload_name,\n \"iptables -A INPUT -p tcp -i %s --dst %s -j DROP\" % (wl.interface, wl.ip_prefix))\n api.Logger.info(f\"iptables -A INPUT -p tcp -i {wl.interface} --dst {wl.ip_prefix} -j DROP\")\n api.Trigger_AddCommand(req, wl.node_name, wl.workload_name,\n \"iptables -A INPUT -p udp -i %s --src %s -j DROP\" % (wl.interface, wl.ip_prefix))\n api.Logger.info(f\"iptables -A INPUT -p udp -i {wl.interface} --src {wl.ip_prefix} -j DROP\")\n api.Trigger_AddCommand(req, wl.node_name, wl.workload_name,\n \"iptables -A INPUT -p udp -i %s --dst %s -j DROP\" % (wl.interface, wl.ip_prefix))\n api.Logger.info(f\"iptables -A INPUT -p udp -i {wl.interface} --dst {wl.ip_prefix} -j DROP\")\n\n resp = api.Trigger(req)\n if resp is None:\n return api.types.status.FAILURE\n\n return api.types.status.SUCCESS\n\ndef _add_exposed_ports(wl_msg):\n if wl_msg.workload_type != topo_svc.WORKLOAD_TYPE_CONTAINER:\n return\n for p in [\"4500\", \"4501\", \"4507\"]:\n tcp_port = wl_msg.exposed_ports.add()\n tcp_port.Port = p\n tcp_port.Proto = \"tcp\"\n\n for _ in range(__max_udp_ports):\n udp_port = wl_msg.exposed_ports.add()\n udp_port.Port = \"1001\"\n udp_port.Proto = \"udp\"\n\n\ndef __add_workloads(redirect_port):\n\n req = topo_svc.WorkloadMsg()\n req.workload_op = topo_svc.ADD\n\n for ep in config_api.GetEndpoints():\n wl_msg = req.workloads.add()\n # Make the workload_name unique across nodes by appending node-name\n wl_msg.workload_name = ep.name + ep.node_name\n wl_msg.node_name = ep.node_name\n intf 
= wl_msg.interfaces.add()\n if not ep.vnic.DhcpEnabled:\n intf.ip_prefix = ep.ip_addresses[0]\n intf.sec_ip_prefix.extend(ep.ip_addresses[1:])\n # wl_msg.ipv6_prefix = ep.ip_addresses[1]\n intf.mac_address = ep.macaddr\n if ep.vlan != 0:\n intf.interface_type = topo_svc.INTERFACE_TYPE_VSS\n else:\n intf.interface_type = topo_svc.INTERFACE_TYPE_NONE\n intf.encap_vlan = ep.vlan\n interface = ep.interface\n if interface != None: intf.interface = interface\n intf.parent_interface = intf.interface\n wl_msg.workload_type = api.GetWorkloadTypeForNode(wl_msg.node_name)\n wl_msg.workload_image = api.GetWorkloadImageForNode(wl_msg.node_name)\n wl_msg.mgmt_ip = api.GetMgmtIPAddress(wl_msg.node_name)\n if redirect_port:\n _add_exposed_ports(wl_msg)\n api.Logger.info(f\"Workload {wl_msg.workload_name} \"\n f\"Node {wl_msg.node_name} Intf {intf.interface} Parent-Intf {intf.parent_interface} \"\n f\"IP {intf.ip_prefix} MAC {intf.mac_address} \"\n f\"VLAN {intf.encap_vlan}\")\n if len(req.workloads):\n api.Logger.info(\"Adding %d Workloads\" % len(req.workloads))\n resp = api.AddWorkloads(req, skip_bringup=api.IsConfigOnly())\n if resp is None:\n sys.exit(1)\n\n dhcp_wl_list = []\n for ep in config_api.GetEndpoints():\n workload_name = ep.name + ep.node_name\n wl = api.GetWorkloadByName(workload_name)\n if wl is None:\n sys.exit(1)\n\n wl.vnic = ep.vnic\n if wl.vnic.DhcpEnabled:\n dhcp_wl_list.append(wl)\n wl.ip_prefix = ep.ip_addresses[0]\n wl.ip_address = wl.ip_prefix.split('/')[0]\n wl.sec_ip_prefixes = []\n wl.sec_ip_addresses = []\n for secip in ep.ip_addresses[1:]:\n wl.sec_ip_prefixes.append(secip)\n wl.sec_ip_addresses.append(secip.split('/')[0])\n\n if len(dhcp_wl_list):\n if not dhcp_utils.AcquireIPFromDhcp(dhcp_wl_list):\n sys.exit(1)\n\ndef __delete_classic_workloads(target_node = None, workloads = None):\n\n req = topo_svc.WorkloadMsg()\n req.workload_op = topo_svc.DELETE\n\n workloads = workloads if workloads else api.GetWorkloads()\n for wl in workloads:\n if target_node and target_node != wl.node_name:\n api.Logger.debug(\"Skipping delete workload for node %s\" % wl.node_name)\n continue\n\n wl_msg = req.workloads.add()\n wl_msg.workload_name = wl.workload_name\n wl_msg.node_name = wl.node_name\n\n if len(req.workloads):\n resp = api.DeleteWorkloads(req, skip_store=True)\n if resp is None:\n sys.exit(1)\n\ndef __readd_classic_workloads(target_node = None, workloads = []):\n\n req = topo_svc.WorkloadMsg()\n req.workload_op = topo_svc.ADD\n\n workloads = workloads if workloads else api.GetWorkloads()\n for wl in workloads:\n if target_node and target_node != wl.node_name:\n api.Logger.debug(\"Skipping add classic workload for node %s\" % wl.node_name)\n continue\n\n wl_msg = req.workloads.add()\n intf = wl_msg.interfaces.add()\n intf.ip_prefix = wl.ip_prefix\n intf.ipv6_prefix = wl.ipv6_prefix\n intf.sec_ip_prefix.extend(wl.sec_ip_prefixes)\n intf.mac_address = wl.mac_address\n intf.encap_vlan = wl.encap_vlan\n intf.uplink_vlan = wl.uplink_vlan\n wl_msg.workload_name = wl.workload_name\n wl_msg.node_name = wl.node_name\n intf.pinned_port = wl.pinned_port\n intf.interface_type = wl.interface_type\n # Interface to be set to parent intf in vlan case, same as workloads created first time\n interface = wl.parent_interface\n if interface != None: intf.interface = interface\n intf.parent_interface = wl.parent_interface\n wl_msg.workload_type = wl.workload_type\n wl_msg.workload_image = wl.workload_image\n wl_msg.mgmt_ip = api.GetMgmtIPAddress(wl_msg.node_name)\n\n if len(req.workloads):\n resp = 
api.AddWorkloads(req, skip_store=True)\n if resp is None:\n sys.exit(1)\n\ndef ReAddWorkloads(node):\n __delete_classic_workloads(node)\n __readd_classic_workloads(node)\n\ndef DeleteWorkload(wl):\n __delete_classic_workloads(workloads=[wl])\n\ndef ReAddWorkload(wl):\n __readd_classic_workloads(workloads=[wl])\n __add_secondary_ip_to_workloads([wl])\n\ndef Main(args):\n api.Logger.info(\"Adding Workloads\")\n if args != None and hasattr(args, 'trex'):\n redirect_port = args.trex\n else:\n redirect_port = False\n __add_workloads(redirect_port)\n __add_secondary_ip_to_workloads()\n if redirect_port:\n __add_iptables_to_workloads()\n __publish_workloads()\n return api.types.status.SUCCESS\n\nif __name__ == '__main__':\n Main(None)\n","repo_name":"ccdxc/sw","sub_path":"iota/test/apulu/config/bringup_workloads.py","file_name":"bringup_workloads.py","file_ext":"py","file_size_in_byte":8814,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"36795314941","text":"# TASK 1\nimport pandas as pd\nimport seaborn as sns\nfrom sklearn.decomposition import PCA\nimport matplotlib.pyplot as plt\nfrom numpy.linalg import eig\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics import davies_bouldin_score\nfrom sklearn.preprocessing import StandardScaler\ndf = pd.read_csv(\"C:/Users/manju/OneDrive/Data Analysis For Business Intelligence/Modules/Semester_1/Data Mining and Neural Networks/Computational_task/Computational Tak_1/wdbc - Copy.csv\")\n\n# NORMALISE THE DATA\ndummy = pd.get_dummies(df['Diagnosis'])\ndf1 = pd.concat((df,dummy),axis=1)\ndf1 = df1.drop(['Diagnosis','M'],axis=1)\ndf1.rename(columns={'B':'Diagnosis'}, inplace = True)\nprint(df1)\ndf2 = df1.drop(['ID Number', 'Diagnosis'],axis=1)\nprint(df2)\nx = df1['Diagnosis']\nt = df1['Diagnosis'].to_numpy()\nprint(t)\ncolumn = df2.columns\nscaler = StandardScaler()\nscaler.fit(df2)\nstandardized_data = scaler.transform(df2)\nstandardized__data = pd.DataFrame(standardized_data, columns=column)\n\n# Principal Component Analysis\npca = PCA(n_components=3)\nprincipalComponents_data = pca.fit_transform(standardized__data)\nprincipalComponents__Data= pd.DataFrame(data = principalComponents_data,columns = ['principal component 1', 'principal component 2','principal component 3'])\nprint(principalComponents__Data)\n\n\n# Eigen values of correlation matrix\ncorrMatrix = standardized__data.corr()\n#print(\" corrMatrix\")\nprint(\"\\t\\tcorrMatrix\")\nprint(corrMatrix)\nvalues , vectors = eig(corrMatrix)\nprint(\" Eigen values\")\nprint(values)\n#print(vectors)\nplt.plot(values)\nplt.title('Eigen values')\nplt.xlabel('Eigen values')\nplt.ylabel('Count')\nplt.show()\n\n# TASK 2\nprint(principalComponents__Data)\nprincipal_component = pd.concat((principalComponents__Data,x),axis=1)\nprint(principal_component)\nsns.histplot(x ='principal component 1', hue ='Diagnosis', data = principal_component)\nmedian1 = 0\nplt.axvline(median1,color='black',label='Median')\nplt.title(\"Principal Component 1(PC1)\")\nplt.xlabel(\"PC1\")\nplt.ylabel(\"Count\")\nplt.show()\nsns.histplot(x ='principal component 2', hue ='Diagnosis', data = principal_component)\nmedian1 = 0.0547195\nplt.axvline(median1,color='black',label='Median')\nplt.title(\"Principal Component 2(PC2)\")\nplt.xlabel(\"PC2\")\nplt.ylabel(\"Count\")\nplt.show()\nsns.histplot(x ='principal component 3', hue ='Diagnosis', data = principal_component)\nmedian1 = -0.072897\nplt.axvline(median1,color='black',label='Median')\nplt.title(\"Principal Component 
3(PC3)\")\nplt.xlabel(\"PC3\")\nplt.ylabel(\"Count\")\nplt.show()\n\nplt.figure(figsize=(10,10))\nplt.xlabel('Principal Component - 2',fontsize=20)\nplt.ylabel('Principal Component - 1',fontsize=20)\nplt.title(\"Principal Component Analysis(PC1 versus PC2)\",fontsize=20)\ntargets = [1, 0]\ncolors = ['r', 'b']\nfor target, color in zip(targets,colors):\n indicesToKeep = df1['Diagnosis'] == target\n plt.scatter(principalComponents__Data.loc[indicesToKeep, 'principal component 2']\n , principalComponents__Data.loc[indicesToKeep, 'principal component 1'], c = color, s = 50)\nplt.legend(targets,prop={'size': 15})\nplt.show()\n\nplt.figure(figsize=(10,10))\nplt.xlabel('Principal Component - 3',fontsize=20)\nplt.ylabel('Principal Component - 2',fontsize=20)\nplt.title(\"Principal Component Analysis(PC2 versus PC3)\",fontsize=20)\ntargets = [1, 0]\ncolors = ['r', 'b']\nfor target, color in zip(targets,colors):\n indicesToKeep = df1['Diagnosis'] == target\n plt.scatter(principalComponents__Data.loc[indicesToKeep, 'principal component 3']\n , principalComponents__Data.loc[indicesToKeep, 'principal component 2'], c = color, s = 50)\nplt.legend(targets,prop={'size': 15})\nplt.show()\n\nplt.figure(figsize=(10,10))\nplt.xlabel('Principal Component - 3',fontsize=20)\nplt.ylabel('Principal Component - 1',fontsize=20)\nplt.title(\"Principal Component Analysis(PC1 versus PC3)\",fontsize=20)\ntargets = [1, 0]\ncolors = ['r', 'b']\nfor target, color in zip(targets,colors):\n indicesToKeep = df1['Diagnosis'] == target\n plt.scatter(principalComponents__Data.loc[indicesToKeep, 'principal component 3']\n , principalComponents__Data.loc[indicesToKeep, 'principal component 1'], c = color, s = 50)\nplt.legend(targets,prop={'size': 15})\nplt.show()\n\n\n# TASK3 and Task 4\n# K=2\nX= principalComponents__Data.drop(['principal component 3'],axis=1)\ndata = X.to_numpy()\nprint(data)\n\nkmeans = KMeans(init=\"random\",n_clusters=2).fit(X)\ncentroids = kmeans.cluster_centers_\nprint(\" Centroids\")\nprint(centroids)\nlabels = kmeans.labels_\nprint(\" labels\")\nprint(labels)\nDB_index1 = davies_bouldin_score(X, labels)\nprint(\" DB Index\")\nprint(DB_index1)\nplt.scatter(principalComponents__Data['principal component 1'],principalComponents__Data['principal component 2'],c= kmeans.labels_.astype(float), alpha=0.5)\nplt.scatter(centroids[:, 0], centroids[:, 1], c='r', s=50)\nplt.title(\"K_means = 2(PC1 and PC2)\")\nplt.show()\nprint(\" Cross Table\")\nprint(pd.crosstab(t,labels))\nX= principalComponents__Data.drop(['principal component 3'],axis=1)\ndata = X.to_numpy()\nprint(data)\n\n\n# K=3\n\nkmeans = KMeans(init=\"random\",n_clusters=3).fit(X)\ncentroids = kmeans.cluster_centers_\nprint(\" centroids\")\nprint(centroids)\nlabels = kmeans.labels_\nDB_index2 = davies_bouldin_score(X, labels)\nprint(\" DB_index2\")\nprint(DB_index2)\nplt.scatter(principalComponents__Data['principal component 1'],principalComponents__Data['principal component 2'],c= kmeans.labels_.astype(float), alpha=0.5)\nplt.scatter(centroids[:, 0], centroids[:, 1], c='r', s=50)\nplt.title(\"K_means = 3(PC1 and PC2)\")\nplt.show()\nprint(\" Cross table\")\nprint(pd.crosstab(t,labels))\n# K=5\nkmeans = KMeans(init=\"random\",n_clusters=5).fit(X)\ncentroids = kmeans.cluster_centers_\nprint(\" centroids\")\nprint(centroids)\nlabels = kmeans.labels_\nDB_index3 = davies_bouldin_score(X, labels)\nprint(\" DB_index3\")\nprint(DB_index3)\nplt.scatter(principalComponents__Data['principal component 1'],principalComponents__Data['principal component 2'], c= 
kmeans.labels_.astype(float), alpha=0.5)\nplt.scatter(centroids[:, 0], centroids[:, 1], c='r', s=50)\nplt.title(\"K_means = 5(PC1 and PC2)\")\nplt.show()\nprint(\" Cross table\")\nprint(pd.crosstab(t,labels))\n\n\n","repo_name":"manjuvkurup/Cluster-Analysis-of-Breast-Cancer-Wisconsin-Diagnostic-Data","sub_path":"Clustering.py","file_name":"Clustering.py","file_ext":"py","file_size_in_byte":6602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"1515421563","text":"# Django settings for test_project project.\nDEBUG = True\n\nINSTALLED_APPS = (\n 'django_crontab',\n)\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3'\n }\n}\n\nMIDDLEWARE_CLASSES = ()\n\nSECRET_KEY = 'not-so-secret'\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {},\n 'handlers': {\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler'\n }\n },\n 'loggers': {\n 'django_crontab': {\n 'handlers': ['console'],\n 'level': 'INFO',\n }\n }\n}\n\nimport os\nCRONTAB_DJANGO_MANAGE_PATH = os.path.join(os.path.dirname(__file__), '', 'test_manage.py')\n","repo_name":"kraiz/django-crontab","sub_path":"tests/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":812,"dataset":"github-code","pt":"71"} +{"seq_id":"37568359066","text":"import numpy as np\nimport torch as tch\nimport matplotlib\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt \n\n# plt.rc('text', usetex=True)\nplt.switch_backend('Agg')\nplt.rc('font', family='serif')\n\nfrom torch.optim import Adam\nfrom torch.nn import MSELoss\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\nfrom nets import *\nfrom environment import *\nimport os\nfrom tqdm import tqdm\nfrom copy import deepcopy\n\nfrom torch.multiprocessing import Pool, Process, set_start_method\nfrom sklearn.linear_model import LinearRegression\nfrom scipy.spatial.distance import pdist, squareform\nfrom sklearn.decomposition import PCA\n\nPASTEL_GREEN = \"#8fbf8f\"\nPASTEL_RED = \"#ff8080\"\nPASTEL_BLUE = \"#8080ff\"\nPASTEL_MAGENTA = \"#ff80ff\"\n\njet = plt.get_cmap('jet')\nseismic = plt.get_cmap('seismic')\n\ndef evaluate_PCA(pca_model, x):\n try:\n x = x.detach().cpu().numpy()\n except:\n pass\n\n rec = pca_model.inverse_transform(pca_model.transform(x))\n x = np.reshape(x, (-1, x.shape[-1]))\n d = rec-x\n norm_x = np.mean(np.sqrt(np.sum(x**2, axis=-1)), axis=0)\n norm_d = np.mean(np.sqrt(np.sum(d**2, axis=-1)), axis=0)\n\n return norm_x, norm_d\n\ndef plot_mean_std(ax, data, axis=0, c_line='g', c_fill=PASTEL_GREEN, x=None, label=None, log_yscale=False):\n if not log_yscale:\n mean = data.mean(axis=axis)\n std = data.std(axis=axis)\n low = mean - std\n high = mean + std\n else:\n ax.set_yscale('log')\n log_mean = np.log(data).mean(axis=axis)\n log_std = np.log(data).std(axis=axis)\n mean = np.exp(log_mean)\n low = np.exp(log_mean-log_std)\n high = np.exp(log_mean+log_std)\n\n if x is None:\n x = range(mean.shape[0])\n\n ax.plot(x, mean, c=c_line, label=label)\n ax.fill_between(x, low, high, color=c_fill, alpha=.7, zorder=1)\n\n\nBASE_FOLDER = '/home/atf6569/my_scratch/SequenceWorkingMemory/continuous_capacity_study/'\n\n\n\ndef basic_tests(T=5, memsize=24, bs=512, TEMPLATE='T_{}_memsize_{}/seed_{}/', bias_out=True):\n n_seeds = 1\n observation_size = 64\n state_size = 64\n\n save_folder = 'out/'\n os.makedirs(save_folder, exist_ok=True)\n\n for seed in range(n_seeds):\n folder = 
BASE_FOLDER + TEMPLATE.format(T, memsize, seed)\n env = ContinuousDots(T=T, observation_size=observation_size, device=device)\n sequence_encoder = RNNSequenceEncoder(in_size=observation_size, state_size=state_size, out_size=memsize, device=device, bias_out=bias_out)\n dec = Decoder(in_size=memsize, state_size=state_size, device=device)\n env.load(folder+'environment.pt')\n dec.load_state_dict(tch.load(folder+'decoder.pt', map_location=dec.device))\n sequence_encoder.load_state_dict(tch.load(folder+'encoder.pt', map_location=dec.device))\n all_pos = np.zeros((20*bs, T, 2))\n all_encs = np.zeros((20*bs, memsize))\n all_decoder_states = np.zeros((20*bs, T, state_size))\n loss = MSELoss()\n\n for test_batch_idx in range(20):\n X, y, _ = env.get_sequences(bs=bs, T=T) \n encs = sequence_encoder(X)\n # States are actually currents\n _, states, outs = dec.get_internal_states(encs)\n print(loss(outs, y))\n all_pos[bs*test_batch_idx:bs*(test_batch_idx+1)] = y.detach().cpu().numpy()\n all_encs[bs*test_batch_idx:bs*(test_batch_idx+1)] = encs[:, -1].detach().cpu().numpy()\n all_decoder_states[bs*test_batch_idx:bs*(test_batch_idx+1)] = states.detach().cpu().numpy()\n\n print(all_decoder_states.shape)\n print(all_decoder_states.std(axis=(0,1)))\n\n os.makedirs(save_folder+'tuning_curves', exist_ok=True)\n for neuron_idx in range(10):\n fig, axes = plt.subplots(T, T, figsize=(5*T, 5*T))\n for t in range(T):\n activity = all_decoder_states[:, t, neuron_idx]\n vmax = np.max(activity)\n vmin = np.min(activity)\n norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)\n # print('t={}'.format(t), 'vmin, vmax', vmin, vmax)\n \n for t_ref in range(T):\n ax = axes[t, t_ref]\n xt, yt = all_pos[:, t_ref, 0], all_pos[:, t_ref, 1]\n # print(np.min(xt), np.min(yt))\n ax.scatter(xt, yt, c=seismic(norm(activity)), rasterized=True)\n ax.set_xlabel('Activity at time {}'.format(t))\n ax.set_ylabel('Position at time {}'.format(t_ref))\n divider = make_axes_locatable(ax)\n ax_cb = divider.new_horizontal(size=\"5%\", pad=0.05)\n cb1 = matplotlib.colorbar.ColorbarBase(ax_cb, cmap=seismic, norm=norm, orientation='vertical')\n fig.add_axes(ax_cb)\n\n plt.tight_layout()\n fig.savefig(save_folder+'tuning_curves/neuron_{}.pdf'.format(neuron_idx))\n plt.close('all')\n\n\n means = []\n deltas_norms = []\n models = []\n linregs = []\n linreg_scores = []\n\n fig, axes = plt.subplots(2, T, figsize=(5*T, 10))\n for t in range(T):\n pca = PCA()\n states_t = all_decoder_states[:, t]\n loadings = pca.fit_transform(states_t)\n means.append(pca.mean_)\n models.append(deepcopy(pca))\n linreg = LinearRegression()\n positions = np.reshape(all_pos, (all_pos.shape[0], -1))\n linreg.fit(positions, loadings[:, :2*T])\n print(linreg.score(positions, loadings[:, :2*T]))\n linregs.append(deepcopy(linreg))\n linreg_scores.append(linreg.score(positions, loadings[:, :2*T]))\n tmp = states_t - means[-1]\n deltas_norms.append(np.mean(np.sqrt(np.sum(tmp**2, axis=-1))))\n variance_ratios = pca.explained_variance_ratio_ \n axes[0,t].hist(variance_ratios, bins=300)\n axes[0,t].set_xlabel('Variance ratio')\n axes[0,t].set_ylabel('Bin count')\n tmp = [0.] 
+ [c for c in np.cumsum(variance_ratios)]\n axes[1,t].plot(range(len(tmp)), tmp)\n axes[1,t].axvline(x=2*(t+1), ls='--')\n axes[1,t].axvline(x=2*T, ls='--')\n axes[1,t].set_ylabel('Cumulated variance explained')\n axes[1,t].set_xlabel('Number of PCs')\n fig.savefig(save_folder+'intermediate_svds.pdf')\n\n means_norms = [np.sqrt(np.sum(x**2)) for x in means]\n\n scores_pca = np.zeros((T, T))\n scores_linreg = np.zeros((T, T))\n for i in range(T):\n for j in range(max(T,i+1)):\n positions = np.reshape(all_pos, (all_pos.shape[0], -1))\n model = models[i]\n linmodel = linregs[i]\n x = all_decoder_states[:, j]\n loadings = model.transform(x)\n rec = model.inverse_transform(loadings)\n # state_norms = np.mean(np.sqrt(np.sum(x**2, axis=-1)),axis=0)\n # delta_norms = np.mean(np.sqrt(np.sum((x-rec)**2, axis=-1)),axis=0)\n state_norms, delta_norms = evaluate_PCA(model, x)\n print(i, j, delta_norms, state_norms, delta_norms / state_norms)\n scores_pca[i,j] = delta_norms / state_norms\n scores_pca[j,i] = scores_pca[i,j]\n\n\n scores_linreg[i,j] = linmodel.score(positions, loadings[:, :2*T])\n scores_linreg[j,i] = scores_linreg[i,j]\n\n fig, axes = plt.subplots(1, 4, figsize=(22,5))\n dists = squareform(pdist(means, metric='cosine'))\n vmax = np.max(dists)\n vmin = np.min(dists)\n norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)\n axes[0].matshow(dists, cmap='seismic')\n divider = make_axes_locatable(axes[0])\n ax_cb = divider.new_horizontal(size=\"5%\", pad=0.05)\n cb1 = matplotlib.colorbar.ColorbarBase(ax_cb, cmap=seismic, norm=norm, orientation='vertical')\n fig.add_axes(ax_cb)\n\n vmax = np.max(scores_pca)\n vmin = np.min(scores_pca)\n norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)\n axes[1].matshow(scores_pca, cmap='seismic')\n divider = make_axes_locatable(axes[1])\n ax_cb = divider.new_horizontal(size=\"5%\", pad=0.05)\n cb1 = matplotlib.colorbar.ColorbarBase(ax_cb, cmap=seismic, norm=norm, orientation='vertical')\n fig.add_axes(ax_cb)\n axes[1].set_xlabel('States at time i')\n axes[1].set_ylabel('PCA from time j')\n\n vmax = np.max(scores_linreg)\n vmin = np.min(scores_linreg)\n norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)\n axes[2].matshow(scores_linreg, cmap='seismic')\n divider = make_axes_locatable(axes[2])\n ax_cb = divider.new_horizontal(size=\"5%\", pad=0.05)\n cb1 = matplotlib.colorbar.ColorbarBase(ax_cb, cmap=seismic, norm=norm, orientation='vertical')\n fig.add_axes(ax_cb)\n axes[2].set_xlabel('States at time i')\n axes[2].set_ylabel('Linreg from time j')\n\n axes[3].plot(means_norms, c='r', label='Norm of <H>')\n axes[3].plot(deltas_norms, c='g', label='Norm of dH')\n axes[3].set_xlabel('Decoding step')\n axes[3].set_ylabel('Norms')\n axes[3].legend()\n plt.tight_layout()\n plt.savefig(save_folder+'norms.pdf')\n \n all_states = np.reshape(all_decoder_states, (-1, all_decoder_states.shape[-1]))\n print(all_states.shape)\n global_model = PCA()\n loadings = global_model.fit_transform(all_states)\n state_norms, delta_norms = evaluate_PCA(global_model, all_states)\n\n fig, axes = plt.subplots(2, T, figsize=(5*T, 10))\n for t in range(T):\n variance_ratios = global_model.explained_variance_ratio_\n axes[0,t].hist(variance_ratios, bins=300)\n axes[0,t].set_xlabel('Variance ratio')\n axes[0,t].set_ylabel('Bin count')\n tmp = [0.] 
+ [c for c in np.cumsum(variance_ratios)]\n axes[1,t].plot(range(len(tmp)), tmp)\n axes[1,t].axvline(x=2*(t+1), ls='--')\n axes[1,t].axvline(x=2*T, ls='--')\n axes[1,t].set_ylabel('Cumulated variance explained')\n axes[1,t].set_xlabel('Number of PCs')\n fig.savefig(save_folder+'global_PCA.pdf')\n\n tmp = deepcopy(all_decoder_states)\n for t in range(T):\n tmp[:,t] -= np.mean(all_decoder_states[:,t], axis=0)\n\n tmp = np.reshape(tmp, (tmp.shape[0], -1))\n tmp_pos = np.reshape(all_pos, (all_pos.shape[0], -1))\n # print(tmp.shape)\n global_model = PCA()\n loadings = global_model.fit_transform(tmp)\n state_norms, delta_norms = evaluate_PCA(global_model, tmp)\n linreg_global = LinearRegression()\n linreg_global.fit(tmp_pos, loadings[:, :2*T])\n print(linreg_global.score(tmp_pos, loadings[:, :2*T]))\n\n fig, axes = plt.subplots(1, 2, figsize=(10, 5))\n variance_ratios = global_model.explained_variance_ratio_\n axes[0].hist(variance_ratios, bins=300)\n axes[0].set_xlabel('Variance ratio')\n axes[0].set_ylabel('Bin count')\n tmp = [0.] + [c for c in np.cumsum(variance_ratios)]\n axes[1].plot(range(len(tmp)), tmp)\n axes[1].axvline(x=2*T, ls='--')\n axes[1].set_ylabel('Cumulated variance explained')\n axes[1].set_xlabel('Number of PCs')\n fig.savefig(save_folder+'centered_global_PCA.pdf')\n\n\n means_loadings = global_model.transform(means)\n fig, axes = plt.subplots(1, 2*T, figsize=(10*T, 5))\n for loading_idx in range(2*T):\n axes[loading_idx].plot(means_loadings[:, loading_idx])\n fig.savefig(save_folder+'means_loadings_evolutions.pdf')\n\n\n\n\nif __name__ == '__main__':\n set_start_method('spawn')\n\n if tch.cuda.is_available():\n device = tch.device('cuda:0')\n else:\n device = tch.device('cpu')\n\n basic_tests(T=5, memsize=128)\n","repo_name":"AFanthomme/SequenceWorkingMemory","sub_path":"decoder_study.py","file_name":"decoder_study.py","file_ext":"py","file_size_in_byte":11925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"32558882175","text":"phrases = {'Jack': ['WhAt HapEneD tO aLL tHe RuM?!?!', 'If you were waiting for the opportune moment, that was it.', 'Why fight when you can negotiate?', 'Im going to send you to Davy Jones locker!', 'Nobody move! I dropped my brain.'],\n 'DavyJones' : ['Did You Forget? Im A Heartless Wretch!', 'Ha! You Afraid To Get Wet?', 'Summon the Kracken!','Do You Fear Death?',\"Did You Forget? 
I'm A Heartless Wretch!\"]\n}\n\nimport random\nclass Pirate:\n\n def __init__( self , name ):\n self.name = name\n self.strength = 15\n self.speed = 3\n self.health = 100\n\n def show_stats( self ):\n print(f\"Name: {self.name}\\nStrength: {self.strength}\\nSpeed: {self.speed}\\nHealth: {self.health}\\n\")\n\n\n def attack ( self , ninja ):\n print(phrases['Jack'][random.randint(1,4)])\n prob= random.randint(1,9)\n if(prob%2==0):\n self.ninjaDodge(ninja)\n else:\n ninja.health -= 0\n return self\n\n def ninjaDodge (self, ninja):\n dodge= random.randint(1,10)\n if(dodge<=ninja.speed):\n ninja.health -=0\n else:\n ninja.health -= self.strength\n return self\n\n\n\nclass pirate_two(Pirate):\n def __init__(self, name):\n super().__init__(name)\n\n def show_stats( self ):\n super().show_stats()\n # print(f\"Name: {self.name}\\nStrength: {self.strength}\\nSpeed: {self.speed}\\nHealth: {self.health}\\n\")\n\n\n def attack ( self , ninja ):\n print(phrases['DavyJones'][random.randint(1,4)])\n prob= random.randint(1,9)\n if(prob%2==0):\n self.ninjaDodge(ninja)\n else:\n ninja.health -= 0\n return self\n\n def ninjaDodge (self, ninja):\n super().ninjaDodge(ninja)","repo_name":"danielhernandez918/ninjas_vs_pirates","sub_path":"classes/pirate.py","file_name":"pirate.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"75172334630","text":"t=int(input())\nfor _ in range(t):\n n=int(input())\n a=[]\n b=[]\n i=j=4*n-2\n for i in range(4*n-1):\n x,y=input().split()\n a.append(int(x))\n b.append(int(y))\n \n while i>=0:\n s=a[i]\n a.remove(s)\n if s not in a:\n mx=s\n i=-1\n else:\n a.append(s)\n i=-1\n while j>=0:\n m=b[j]\n b.remove(m)\n if m not in b:\n my=m\n j=-1\n else:\n b.append(m)\n j=-1\n print(mx,my)","repo_name":"BALARAMGHOSH/Python","sub_path":"PTMSSNG.py","file_name":"PTMSSNG.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"7191972451","text":"import random\nimport string\nfrom datetime import datetime, timedelta\nfrom typing import Callable, Any, List, Union\n\nimport numpy as np\nfrom pyspark.sql import DataFrame as NativeSparkDataFrame, SparkSession\nfrom pyspark.sql.functions import udf, col\n\nfrom feathub.common import types\nfrom feathub.common.exceptions import FeathubException\nfrom feathub.feature_tables.sources.datagen_source import (\n DataGenSource,\n RandomField,\n SequenceField,\n)\nfrom feathub.processors.spark.dataframe_builder.time_utils import (\n append_unix_time_attribute_column,\n)\nfrom feathub.processors.spark.spark_types_utils import to_spark_type\n\n\ndef _generate_random_field_name(occupied_field_names: List[str]) -> str:\n prefix = \"seed\"\n affix = 0\n while True:\n field_name = prefix + str(affix)\n if field_name in occupied_field_names:\n affix += 1\n continue\n return field_name\n\n\ndef _get_udf_mapping(\n field_type: types.DType, field_config: Union[RandomField, SequenceField]\n) -> Callable[[int], Any]:\n if isinstance(field_config, SequenceField):\n offset = field_config.start\n if field_type == types.Int64 or field_type == types.Int32:\n return lambda x: x + offset\n elif field_type == types.Float64 or field_type == types.Float32:\n return lambda x: float(x + offset)\n elif field_type == types.String:\n return lambda x: str(x + offset)\n else:\n raise FeathubException(f\"Unsupported data type {field_type}\")\n elif isinstance(field_config, 
RandomField):\n if field_type == types.Int64:\n minimum = (\n np.iinfo(np.int64).min\n if field_config.minimum is None\n else field_config.minimum\n )\n maximum = (\n np.iinfo(np.int64).max\n if field_config.maximum is None\n else field_config.maximum\n )\n return lambda seed: random.Random(seed).randint(minimum, maximum)\n elif field_type == types.Int32:\n minimum = (\n np.iinfo(np.int32).min\n if field_config.minimum is None\n else field_config.minimum\n )\n maximum = (\n np.iinfo(np.int32).max\n if field_config.maximum is None\n else field_config.maximum\n )\n return lambda seed: random.Random(seed).randint(minimum, maximum)\n elif field_type == types.Float64:\n minimum = float(\n np.finfo(np.float64).min\n if field_config.minimum is None\n else field_config.minimum\n )\n maximum = float(\n np.finfo(np.float64).max\n if field_config.maximum is None\n else field_config.maximum\n )\n return lambda seed: random.Random(seed).uniform(minimum, maximum)\n elif field_type == types.Float32:\n minimum = float(\n np.finfo(np.float32).min\n if field_config.minimum is None\n else field_config.minimum\n )\n maximum = float(\n np.finfo(np.float32).max\n if field_config.maximum is None\n else field_config.maximum\n )\n return lambda seed: random.Random(seed).uniform(minimum, maximum)\n elif field_type == types.String:\n size = field_config.length\n return lambda seed: \"\".join(\n random.Random(seed).choices(\n string.ascii_letters + string.digits, k=size\n )\n )\n elif field_type == types.Bool:\n return lambda seed: bool(random.Random(seed).getrandbits(1))\n elif field_type == types.Timestamp:\n seconds = field_config.max_past / timedelta(seconds=1)\n return lambda seed: datetime.fromtimestamp(\n random.Random(seed).uniform(0, seconds)\n )\n elif isinstance(field_type, types.VectorType):\n element_mapping = _get_udf_mapping(field_type.dtype, field_config)\n size = field_config.length\n return lambda seed: [element_mapping(i) for i in range(size)]\n elif isinstance(field_type, types.MapType):\n key_mapping = _get_udf_mapping(field_type.key_dtype, field_config)\n value_mapping = _get_udf_mapping(field_type.value_dtype, field_config)\n return lambda seed: {key_mapping(seed): value_mapping(seed)}\n else:\n raise FeathubException(f\"Unsupported data type {field_type}\")\n\n\ndef get_dataframe_from_data_gen_source(\n spark_session: SparkSession, source: DataGenSource\n) -> NativeSparkDataFrame:\n if source.number_of_rows is None:\n raise FeathubException(\n \"SparkProcessor does not support generating unbounded data.\"\n )\n\n seed_field_name = _generate_random_field_name(source.schema.field_names)\n df = spark_session.range(source.number_of_rows).select(\n col(\"id\").alias(seed_field_name)\n )\n\n for field_name in source.schema.field_names:\n field_type = source.schema.get_field_type(field_name)\n field_config = source.field_configs.get(field_name)\n\n mapper = _get_udf_mapping(field_type, field_config)\n\n mapper_udf = udf(mapper, returnType=to_spark_type(field_type))\n df = df.withColumn(field_name, mapper_udf(seed_field_name))\n\n df = df.drop(seed_field_name)\n\n df = (\n df\n if source.timestamp_field is None\n else append_unix_time_attribute_column(\n df,\n source.timestamp_field,\n source.timestamp_format,\n )\n )\n\n return 
df\n","repo_name":"alibaba/feathub","sub_path":"python/feathub/processors/spark/dataframe_builder/datagen_utils.py","file_name":"datagen_utils.py","file_ext":"py","file_size_in_byte":5753,"program_lang":"python","lang":"en","doc_type":"code","stars":266,"dataset":"github-code","pt":"71"} +{"seq_id":"74840093029","text":"import argparse\n\nimport gspread\nimport pandas as pd\nfrom oauth2client.service_account import ServiceAccountCredentials\n\n\ndef auth_user():\n # Define a authorized user with the credentials created.\n scope = [\n \"https://www.googleapis.com/auth/spreadsheets\",\n \"https://www.googleapis.com/auth/drive\",\n ]\n\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n \"credentials.json\", scope\n )\n\n client = gspread.authorize(credentials)\n return client\n\n\ndef create_and_share_sheet(user_mail, spreadsheet_name, csv_file):\n client = auth_user()\n sh = client.create(spreadsheet_name)\n worksheet = sh.worksheet(\"Sheet1\")\n\n # Read from the csv file mentioned in the command.\n df = pd.read_csv(csv_file)\n col = df.columns\n # Define the column headings based on the our csv file.\n end = ord(\"A\") + len(col) - 1\n cell_range = \"A1:\" + chr(end) + \"1\"\n\n # Define cells\n cell_list = worksheet.range(cell_range)\n i = 0\n for cell in cell_list:\n cell.value = col[i]\n i += 1\n\n # Write these column headings to the worksheet\n worksheet.update_cells(cell_list)\n\n # Convert rest of the dataframe to numpy object. (Use pandas version 1.0.3 strictly for this to work!)\n df = df.to_numpy().tolist()\n\n # Write data from numpy object to the worksheet\n for i in range(2, len(df) + 2):\n pos = \"A\" + str(i) + \":\" + chr(end) + str(i)\n cell_list = worksheet.range(pos)\n val = df[i - 2]\n j = 0\n for cell in cell_list:\n cell.value = val[j]\n j += 1\n worksheet.update_cells(cell_list)\n\n # Share the created spreadsheet with the receiver.\n sh.share(user_mail, perm_type=\"user\", role=\"writer\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"generate and share google sheet for give csv\"\n )\n parser.add_argument(\n \"-mail\",\n help=\"Enter the email id of the community admin\",\n dest=\"mail_id\",\n type=str,\n required=True,\n )\n parser.add_argument(\n \"-csv\",\n help=\"Enter path of csv file\",\n dest=\"csv\",\n type=str,\n required=True,\n )\n parser.add_argument(\n \"-spreadsheet_name\",\n help=\"Enter name of spreadsheet\",\n dest=\"ss_name\",\n type=str,\n required=True,\n )\n\n args = parser.parse_args()\n\n community_admin = args.mail_id\n csv_file = args.csv\n spreadsheet_name = args.ss_name\n\n create_and_share_sheet(community_admin, spreadsheet_name, csv_file)\n","repo_name":"Python-World/Python_and_the_Web","sub_path":"Scripts/API/Google Spreadsheet/create_sheet.py","file_name":"create_sheet.py","file_ext":"py","file_size_in_byte":2574,"program_lang":"python","lang":"en","doc_type":"code","stars":666,"dataset":"github-code","pt":"71"} +{"seq_id":"18764661796","text":" \nfrom typing import Tuple\nimport random\n\nk_tpl = tuple([n for n in range(-10, 11) if n != 0])\n\n\ndef get_task() -> Tuple[str, str]:\n functions = (get_linear, get_quadratic)\n return random.choice(functions)()\n\n\ndef get_linear() -> Tuple[str, str]:\n x = random.randint(-50, 50)\n a = random.randint(0, 100)\n k = random.choice(k_tpl)\n b = a + (k * x)\n sign = \"+\" if k >= 0 else \"\"\n equation = f\"{str(a)+f'{sign}' if a != 0 else ''}{k if abs(k) != 1 else ''}x={b}\"\n return (equation, str(x))\n\n\ndef get_quadratic() -> 
Tuple[str, str]:\n a = random.randint(1, 10)\n x1 = random.randint(-10, 10)\n x2 = random.randint(-10, 10)\n b = -1 * a * (x1 + x2)\n c = a * (x1 * x2)\n b_sign = \"+\" if b > 0 else \"\"\n c_sign = \"+\" if c > 0 else \"\"\n\n equation = f\"{a if a != 1 else ''}x\\u00B2{f'{b_sign}'+str(b)+'x' if b != 0 else ''}{f'{c_sign}'+str(c) if c != 0 else ''}=0\"\n return (equation, \"{} {}\".format(*sorted((x1, x2))))\n","repo_name":"geekycats-school/tglib","sub_path":"src/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"16688221207","text":"def remove_dups(astring):\r\n \r\n k = set(astring)\r\n\r\n x = len(astring) - len(set(astring))\r\n\r\n return k, x\r\n\r\n#print(remove_dups(\"mississippi\")) #should print misp\r\n\r\n\r\ndef rot13(mess):\r\n cipher=\"\"\r\n placeholder=0\r\n for ch in mess:\r\n if ord(ch)>109:\r\n placeholder=(ord(ch)%109)+96\r\n cipher+=chr(placeholder)\r\n else:\r\n cipher+=chr(ord(ch)+13)\r\n print(cipher)\r\n \r\nrot13(\"Realdude\")\r\n\r\n\r\n","repo_name":"nate-flasher/Project-Work","sub_path":"course/Freshman_Year/cos120/HOMEWORK/H07/H07.py","file_name":"H07.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"39796313815","text":"from openpyxl import load_workbook\nimport pandas as pd\n\n\ndef load_data():\n wb = load_workbook(filename=\"films.xlsx\")\n drop = (\"Oscars won\", \"No of Oscars won\", \"Guardian film page\", [None])\n\n def load(name):\n data = wb[name].values\n data = pd.DataFrame(columns=next(data), data=data)\n if \"Cat\" not in data.columns:\n data[\"Cat\"] = name.title()\n for column in drop:\n data.drop(columns=column, errors=\"ignore\", inplace=True)\n return data\n\n return pd.concat(load(name).assign(genre=name.capitalize()) for name in wb.sheetnames)\n\n\nif __name__ == \"__main__\":\n df = load_data()\n","repo_name":"tomsmeitink92/hollywood","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"32529580010","text":"import json\nimport pytest\nimport requests\nimport globalvariables as globalvar\nimport WeGuardlogger as WeGuard\nimport cases_validations as config\nimport test_GETutils as utils\n\nbnUrl = \"enterprise/rest/bannernotifications/read\"\n\n@pytest.mark.parametrize('url', [\"\"])\n@pytest.mark.skipif(config.run_test_001_bannerNotification_read == 0, reason=\"test skipped\")\n@pytest.mark.usualtest\n@pytest.mark.positivetest\n@pytest.mark.devicespage\n@pytest.mark.regressiontest\n@pytest.mark.sanitytest\n@pytest.mark.run(order=10015)\ndef test_tc_008_read_bn(url):\n if globalvar.bearerToken == '':\n pytest.skip(\"Empty Bearer token Skipping test\")\n try:\n apiUrl = globalvar.BaseURL + bnUrl\n headers = {'Authorization': 'Bearer' + ' ' + globalvar.bearerToken}\n res = requests.get(url= apiUrl, headers=headers, timeout= globalvar.timeout)\n curl_str1 = utils.getCurlEquivalent(res)\n print(curl_str1)\n WeGuard.logger.debug(\n \"\\n\" \" Response Headers: \" + str(\n res.headers) + \" apiUrl: \" + apiUrl + \" Response code: \" + str(res.status_code) + \"\\n\" + \" Response: \" + str(res.content))\n assert res.status_code == 200\n except BaseException as e:\n WeGuard.logger.error(\"Exception : \" + str(e))\n WeGuard.logger.error(\"--------------------------- TC 
008 Failed to read banner notification ---------------------------\\n\\n\")\n assert False\n\n","repo_name":"CHAKRAPANI-BANDHU/WeGuardAPIs-through-Python-Py-scripts","sub_path":"ALogin_Dashboard_Devices/test_read_bn.py","file_name":"test_read_bn.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"14657107122","text":"import numpy as np\nimport tensorflow as tf\n\n\nclass W2VModel:\n def __init__(self, vocabulary_size, embedding_dimension, save_path=None):\n self.vocabulary_size = vocabulary_size\n self.embedding_dimension = embedding_dimension\n self.graph = tf.Graph()\n self.initialized = False\n\n with self.graph.as_default():\n self.train_data = tf.placeholder(tf.int32)\n self.train_labels = tf.placeholder(tf.int32)\n\n self.embeddings = tf.Variable(\n tf.random_uniform([self.vocabulary_size, self.embedding_dimension],\n -1.0, 1.0))\n self.weights = tf.Variable(\n tf.truncated_normal([self.vocabulary_size, self.embedding_dimension],\n stddev=1.0 / np.sqrt(self.embedding_dimension)))\n self.biases = tf.Variable(tf.zeros([self.vocabulary_size]))\n\n self.embed_input = tf.nn.embedding_lookup(\n self.embeddings, self.train_data)\n\n self.loss = tf.reduce_mean(\n tf.nn.sampled_softmax_loss(weights=self.weights, biases=self.biases,\n inputs=self.embed_input, labels=self.train_labels,\n num_sampled=self.vocabulary_size // 200,\n num_classes=self.vocabulary_size))\n self.optimizer = tf.train.AdagradOptimizer(1.0).minimize(self.loss)\n\n norm = tf.sqrt(tf.reduce_sum(\n tf.square(self.embeddings), 1, keepdims=True))\n self.normalized_embeddings = self.embeddings / norm\n\n self.save_path = save_path\n if self.save_path is not None:\n self.saver = tf.train.Saver()\n\n def train(self, generator, steps, verbose=True):\n with tf.Session(graph=self.graph) as session:\n tf.global_variables_initializer().run()\n if self.save_path is not None:\n try:\n self.saver.restore(session, self.save_path)\n print(\"Weights and embeddings loaded from {}\".format(\n self.save_path))\n except:\n print(\"Saved model not found. 
Starting training from scratch.\")\n\n average_loss = 0\n print(\"Starting training with {} steps.\".format(steps))\n for step in range(1, steps):\n batch_data, batch_labels = next(generator)\n batch_data = np.asarray(batch_data)\n batch_labels = np.asarray(batch_labels).reshape(\n (len(batch_labels), 1))\n _, loss = session.run([self.optimizer, self.loss],\n feed_dict={self.train_data: batch_data,\n self.train_labels: batch_labels})\n average_loss += loss\n\n if verbose:\n if step % 2000 == 0:\n print(\"Step: {}: loss = {}\".format(\n step, average_loss/2000))\n average_loss = 0\n\n self.final_embeddings = self.normalized_embeddings.eval()\n self.saver.save(session, self.save_path)\n","repo_name":"vkk800/neural-fun","sub_path":"w2v/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"7455546723","text":"import os\nfrom dotenv import load_dotenv\nimport psycopg2\n\nload_dotenv(verbose=True)\n\n\nclass Databases():\n def __init__(self):\n self.db = psycopg2.connect(host=os.getenv('HOST'),\n dbname=os.getenv('DBNAME'),\n user=os.getenv('USER'),\n password=os.getenv('PWD'),\n port=os.getenv('PORT'))\n self.cursor = self.db.cursor()\n\n def __del__(self):\n self.db.close()\n self.cursor.close()\n\n def execute(self, query, args={}):\n self.cursor.execute(query, args)\n row = self.cursor.fetchall()\n return row\n\n def commit(self):\n self.cursor.commit()\n\n\nclass CRUD(Databases):\n def insertDB(self, schema, table, colum, data):\n sql = \" INSERT INTO {schema}.{table}({colum}) VALUES ('{data}') ;\".format(schema=schema, table=table,\n colum=colum, data=data)\n try:\n self.cursor.execute(sql)\n self.db.commit()\n except Exception as e:\n print(\" insert DB err \", e)\n\n def readDB(self, schema, table, colum):\n sql = \" SELECT {colum} from {schema}.{table}\".format(colum=colum, schema=schema, table=table)\n try:\n self.cursor.execute(sql)\n result = self.cursor.fetchall()\n except Exception as e:\n result = (\" read DB err\", e)\n\n return result\n\n def updateDB(self, schema, table, colum, value, condition):\n sql = \" UPDATE {schema}.{table} SET {colum}='{value}' WHERE {colum}='{condition}' \".format(schema=schema,\n table=table,\n colum=colum,\n value=value,\n condition=condition)\n try:\n self.cursor.execute(sql)\n self.db.commit()\n except Exception as e:\n print(\" update DB err\", e)\n\n def deleteDB(self, schema, table, condition):\n sql = \" delete from {schema}.{table} where {condition} ; \".format(schema=schema, table=table,\n condition=condition)\n try:\n self.cursor.execute(sql)\n self.db.commit()\n except Exception as e:\n print(\"delete DB err\", e)","repo_name":"johnbuzz98/KUIME_Backend","sub_path":"config/flask_config.py","file_name":"flask_config.py","file_ext":"py","file_size_in_byte":2693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"5511166209","text":"from django.test import TestCase, Client\nfrom django.http import HttpRequest, HttpResponse\nfrom django.db.models import ObjectDoesNotExist\nfrom django.shortcuts import reverse\n\nfrom TAScheduler.models import User, UserType, Course, Section\nfrom TAScheduler.viewsupport.errors import SectionEditError, SectionEditPlace\nfrom TAScheduler.viewsupport.message import Message, MessageQueue\nfrom TAScheduler.acceptance_tests.acceptance_base import TASAcceptanceTestCase\n\n\nclass 
SectionEdit(TASAcceptanceTestCase[SectionEditError]):\n def setUp(self):\n self.client = Client()\n self.session = self.client.session\n\n self.admin_user = User.objects.create(\n username='josiahth',\n password='good-password',\n type=UserType.ADMIN,\n password_tmp=False,\n )\n\n self.course = Course.objects.create(\n code='361',\n name='Software Engineering',\n )\n\n self.section = Section.objects.create(\n code='902',\n course=self.course\n )\n\n self.session['user_id'] = self.admin_user.id\n self.session.save()\n\n self.good_code = '901'\n\n self.edit_url = reverse('sections-edit', args=[self.section.id])\n self.view_url = reverse('sections-view', args=[self.section.id])\n\n def test_edits(self):\n resp = self.client.post(self.edit_url, {\n 'section_code': self.good_code,\n 'course_id': self.course.id,\n })\n\n self.assertRedirects(resp, self.view_url)\n\n self.section.refresh_from_db()\n\n self.assertEqual(self.good_code, self.section.code)\n self.assertEqual(self.course, self.section.course)\n\n def test_rejects_missing_code(self):\n resp = self.client.post(self.edit_url, {\n # 'course_code': self.good_code,\n 'course_id': self.course.id,\n })\n\n error = self.assertContextError(resp)\n\n self.assertEqual(SectionEditPlace.CODE, error.place())\n self.assertEqual('All sections must have a 3 digit code', error.message())\n\n def test_rejects_short_code(self):\n resp = self.client.post(self.edit_url, {\n 'course_code': self.good_code[:2],\n 'course_id': self.course.id,\n })\n\n error = self.assertContextError(resp)\n\n self.assertEqual(SectionEditPlace.CODE, error.place())\n self.assertEqual('All sections must have a 3 digit code', error.message())\n\n","repo_name":"tzortzos/361proj","sub_path":"TAScheduler/acceptance_tests/sections/test_edit.py","file_name":"test_edit.py","file_ext":"py","file_size_in_byte":2469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"72501236711","text":"from graph_api_service import GraphApiService\nfrom measure.measure_model import MeasurePropertyIn, BasicMeasureOut, \\\n MeasuresOut, MeasureOut, MeasureIn, MeasureRelationIn\nfrom measure_name.measure_name_service import MeasureNameService\nfrom models.not_found_model import NotFoundByIdModel\nfrom models.relation_information_model import RelationInformation\n\n\nclass MeasureService:\n \"\"\"\n Object to handle logic of measure requests\n\n Attributes:\n graph_api_service (GraphApiService): Service used to communicate with Graph API\n measure_name_service (MeasureNameService): Service to manage measure name models\n \"\"\"\n graph_api_service = GraphApiService()\n measure_name_service = MeasureNameService()\n\n def save_measure(self, measure: MeasureIn):\n \"\"\"\n Send request to graph api to create new measure\n\n Args:\n measure (MeasureIn): Measure to be added\n\n Returns:\n Result of request as measure object\n \"\"\"\n node_response = self.graph_api_service.create_node(\"`Measure`\")\n\n if node_response[\"errors\"] is not None:\n return MeasureOut(**measure.dict(), errors=node_response[\"errors\"])\n\n measure_id = node_response[\"id\"]\n\n if measure.measure_name_id is not None and \\\n type(self.measure_name_service.get_measure_name(measure.measure_name_id)) is not NotFoundByIdModel:\n self.graph_api_service.create_relationships(start_node=measure_id,\n end_node=measure.measure_name_id,\n name=\"hasMeasureName\")\n\n measure.measure_name_id = None\n self.graph_api_service.create_properties(measure_id, measure)\n\n return 
self.get_measure(measure_id)\n\n def get_measures(self):\n \"\"\"\n Send request to graph api to get measures\n\n Returns:\n Result of request as list of measures objects\n \"\"\"\n get_response = self.graph_api_service.get_nodes(\"`Measure`\")\n\n measures = []\n\n for measure_node in get_response[\"nodes\"]:\n properties = {'id': measure_node['id']}\n for property in measure_node[\"properties\"]:\n if property[\"key\"] in [\"datatype\", \"range\", \"unit\"]:\n properties[property[\"key\"]] = property[\"value\"]\n\n measure = BasicMeasureOut(**properties)\n measures.append(measure)\n\n return MeasuresOut(measures=measures)\n\n def get_measure(self, measure_id: int):\n \"\"\"\n Send request to graph api to get given measure\n\n Args:\n measure_id (int): Id of measure\n\n Returns:\n Result of request as measure object\n \"\"\"\n get_response = self.graph_api_service.get_node(measure_id)\n\n if get_response[\"errors\"] is not None:\n return NotFoundByIdModel(id=measure_id, errors=get_response[\"errors\"])\n if get_response[\"labels\"][0] != \"Measure\":\n return NotFoundByIdModel(id=measure_id, errors=\"Node not found.\")\n\n measure = {'id': get_response['id'], 'relations': [],\n 'reversed_relations': []}\n for property in get_response[\"properties\"]:\n if property[\"key\"] in [\"datatype\", \"range\", \"unit\"]:\n measure[property[\"key\"]] = property[\"value\"]\n\n relations_response = self.graph_api_service.get_node_relationships(measure_id)\n\n for relation in relations_response[\"relationships\"]:\n if relation[\"start_node\"] == measure_id:\n measure['relations'].append(RelationInformation(second_node_id=relation[\"end_node\"],\n name=relation[\"name\"],\n relation_id=relation[\"id\"]))\n else:\n measure['reversed_relations'].append(RelationInformation(second_node_id=relation[\"start_node\"],\n name=relation[\"name\"],\n relation_id=relation[\"id\"]))\n\n return MeasureOut(**measure)\n\n def delete_measure(self, measure_id: int):\n \"\"\"\n Send request to graph api to delete given measure\n\n Args:\n measure_id (int): Id of measure\n\n Returns:\n Result of request as measure object\n \"\"\"\n get_response = self.get_measure(measure_id)\n\n if type(get_response) is NotFoundByIdModel:\n return get_response\n\n self.graph_api_service.delete_node(measure_id)\n return get_response\n\n def update_measure(self, measure_id: int, measure: MeasurePropertyIn):\n \"\"\"\n Send request to graph api to update given measure\n\n Args:\n measure_id (int): Id of measure\n measure (MeasurePropertyIn): Properties to update\n\n Returns:\n Result of request as measure object\n \"\"\"\n get_response = self.get_measure(measure_id)\n\n if type(get_response) is NotFoundByIdModel:\n return get_response\n\n self.graph_api_service.delete_node_properties(measure_id)\n self.graph_api_service.create_properties(measure_id, measure)\n\n measure_result = {\"id\": measure_id, \"relations\": get_response.relations,\n \"reversed_relations\": get_response.reversed_relations}\n measure_result.update(measure.dict())\n\n return MeasureOut(**measure_result)\n\n def update_measure_relationships(self, measure_id: int,\n measure: MeasureRelationIn):\n \"\"\"\n Send request to graph api to update given measure\n\n Args:\n measure_id (int): Id of measure\n measure (MeasureRelationIn): Relationships to update\n\n Returns:\n Result of request as measure object\n \"\"\"\n get_response = self.get_measure(measure_id)\n\n if type(get_response) is NotFoundByIdModel:\n return get_response\n\n if measure.measure_name_id is not None and 
\\\n type(self.measure_name_service.get_measure_name(\n measure.measure_name_id)) is not NotFoundByIdModel:\n self.graph_api_service.create_relationships(start_node=measure_id,\n end_node=measure.measure_name_id,\n name=\"hasMeasureName\")\n return self.get_measure(measure_id)\n","repo_name":"GRISERA/grisera-framework","sub_path":"grisera_api/measure/measure_service.py","file_name":"measure_service.py","file_ext":"py","file_size_in_byte":6595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"41607668447","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\nimport unittest\nfrom nassl import _nassl\nfrom nassl.ssl_client import SslClient, OpenSslVerifyEnum\nimport socket\n\n\nclass X509_EXTENSION_Tests(unittest.TestCase):\n\n def test_new_bad(self):\n self.assertRaises(NotImplementedError, _nassl.X509_EXTENSION, (None))\n\n\nclass X509_EXTENSION_Tests_Online(unittest.TestCase):\n\n def test(self):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.settimeout(5)\n sock.connect(('www.google.com', 443))\n\n sslClient = SslClient(sock=sock, ssl_verify=OpenSslVerifyEnum.NONE)\n sslClient.do_handshake()\n x509ext = sslClient.get_peer_certificate().get_extensions()[0]\n\n self.assertIsNotNone(x509ext.get_data())\n self.assertIsNotNone(x509ext.get_object())\n self.assertIsNotNone(x509ext.get_critical())\n\n\ndef main():\n unittest.main()\n\nif __name__ == '__main__':\n main()","repo_name":"gronau-it-cloud-computing/nassl","sub_path":"tests/X509_EXTENSION_tests.py","file_name":"X509_EXTENSION_tests.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"71"} +{"seq_id":"31330106409","text":"\"\"\"\nmake_hsexp.py\n Function to create all possible HSExp pulses (tip-down/tip-up & pos/neg offset)\n\"\"\"\n\nfrom types import SimpleNamespace\n\nimport numpy as np\nfrom pypulseq import Opts\n\nfrom bmctool import GAMMA_HZ\nfrom bmctool.utils.pulses.calculate_phase import calculate_phase\nfrom bmctool.utils.pulses.create_arbitrary_pulse_with_phase import create_arbitrary_pulse_with_phase\nfrom bmctool.utils.pulses.make_hypsec_half_passage import calculate_amplitude as hypsec_amp\n\n\ndef calculate_window_modulation(t: np.ndarray,\n t0: float) \\\n -> np.ndarray:\n \"\"\"\n Calculates modulation function for HSExp pulses.\n :param t: time points of the different sample points [s]\n :param t0: reference time point (= last point for half passage pulse) [s]\n :return:\n \"\"\"\n return 0.42 - 0.5 * np.cos(np.pi * t / t0) + 0.08 * np.cos(2 * np.pi * t / t0)\n\n\ndef calculate_frequency(t: np.ndarray,\n t0: float,\n bandwidth: float,\n ef: float,\n freq_factor: int) \\\n -> np.ndarray:\n \"\"\"\n Calculates modulation function for HSExp pulses.\n :param t: time points of the different sample points [s]\n :param t0: reference time point (= last point for half passage pulse) [s]\n :param bandwidth: bandwidth of the pulse [Hz]\n :param ef: dimensionless parameter to control steepness of the exponential curve\n :param freq_factor: factor (-1 or +1) to switch between positive and negative offsets\n \"\"\"\n\n return -freq_factor * bandwidth * np.pi * np.exp(-t / t0 * ef)\n\n\ndef make_hsexp(amp: float = 1.0,\n t_p: float = 12e-3,\n mu: float = 65,\n bandwidth: float = 2500,\n t_window: float = 3.5e-3,\n ef: float = 3.5,\n tip_down: bool = True,\n pos_offset: bool = True,\n system: Opts = 
Opts(),\n gamma_hz: float = GAMMA_HZ) \\\n -> SimpleNamespace:\n \"\"\"\n Creates a radio-frequency pulse event with amplitude and phase modulation of a HSExp pulse.\n :param amp: maximum amplitude value [µT]\n :param t_p: pulse pulse_duration [s]\n :param mu: parameter µ of hyperbolic secant pulse\n :param bandwidth: bandwidth of hyperbolic secant pulse [Hz]\n :param t_window: pulse_duration of window function\n :param ef: dimensionless parameter to control steepness of the exponential curve\n :param tip_down: flag to switch between tip down (True) and tip up (False) pulses\n :param pos_offset: flag to switch between positive (True) and negative (False) offsets\n :param system: system limits of the MR scanner\n :param gamma_hz: gyromagnetic ratio [Hz]\n \"\"\"\n\n samples = int(t_p * 1e6)\n t_pulse = np.divide(np.arange(1, samples + 1), samples) * t_p # time point array\n\n # find start index of window function\n idx_window = np.argmin(np.abs(t_pulse - t_window))\n\n if tip_down:\n shift_idx = -1\n else:\n shift_idx = 0\n\n # calculate amplitude of hyperbolic secant (HS) pulse\n w1 = hypsec_amp(t_pulse, t_pulse[shift_idx], amp, mu, bandwidth)\n\n # calculate and apply modulation function to convert HS into HSExp pulse\n window_mod = calculate_window_modulation(t_pulse[:idx_window], t_pulse[idx_window])\n if tip_down:\n w1[:idx_window] = w1[:idx_window] * window_mod\n else:\n w1[-idx_window:] = w1[-idx_window:] * np.flip(window_mod)\n\n # calculate freq modulation of pulse\n if tip_down and pos_offset:\n dfreq = calculate_frequency(t_pulse, t_pulse[-1], bandwidth, ef, 1)\n elif tip_down and not pos_offset:\n dfreq = calculate_frequency(t_pulse, t_pulse[-1], bandwidth, ef, -1)\n elif not tip_down and pos_offset:\n dfreq = calculate_frequency(np.flip(t_pulse), t_pulse[-1], bandwidth, ef, 1)\n elif not tip_down and not pos_offset:\n dfreq = calculate_frequency(np.flip(t_pulse), t_pulse[-1], bandwidth, ef, -1)\n\n # make freq modulation end (in case of tip-down) or start (in case of tip-up) with dw = 0\n diff_idx = np.argmin(np.abs(dfreq))\n dfreq -= dfreq[diff_idx]\n\n # calculate phase (= integrate over dfreq)\n dphase = calculate_phase(dfreq, t_p, samples, shift_idx=shift_idx, pos_offsets=pos_offset)\n\n # create pypulseq rf pulse object\n signal = w1 * np.exp(1j * dphase) # create complex array with amp and phase\n flip_angle = gamma_hz * 2 * np.pi\n hsexp = create_arbitrary_pulse_with_phase(signal=signal, flip_angle=flip_angle, system=system)\n\n return hsexp\n\n\ndef generate_hsexp_dict(amp: float = 1.0,\n t_p: float = 12e-3,\n mu: float = 65,\n bandwidth: float = 2500,\n t_window: float = 3.5e-3,\n ef: float = 3.5,\n system: Opts = Opts(),\n gamma_hz: float = GAMMA_HZ) \\\n -> dict:\n \"\"\"\n Creates a dictionary with the 4 different hsexp pulses (tip-down/up and pos/neg offsets)\n :param amp: maximum amplitude value [µT]\n :param t_p: pulse pulse_duration [s]\n :param mu: parameter µ of hyperbolic secant pulse\n :param bandwidth: bandwidth of hyperbolic secant pulse [Hz]\n :param t_window: pulse_duration of window function\n :param ef: dimensionless parameter to control steepness of the exponential curve\n :param system: system limits of the MR scanner\n :param gamma_hz: gyromagnetic ratio [Hz]\n :return:\n \"\"\"\n\n pulse_dict = {} # create empty dict for the 4 different pulses\n\n # tip-down positive offset\n pre_pos = make_hsexp(amp=amp,\n t_p=t_p,\n mu=mu,\n bandwidth=bandwidth,\n t_window=t_window,\n ef=ef,\n tip_down=True,\n pos_offset=True,\n system=system,\n 
gamma_hz=gamma_hz)\n\n pulse_dict.update({'pre_pos': pre_pos})\n\n # tip-down negative offset\n pre_neg = make_hsexp(amp=amp,\n t_p=t_p,\n mu=mu,\n bandwidth=bandwidth,\n t_window=t_window,\n ef=ef,\n tip_down=True,\n pos_offset=False,\n system=system,\n gamma_hz=gamma_hz)\n\n pulse_dict.update({'pre_neg': pre_neg})\n\n # tip-up positive offsets\n post_pos = make_hsexp(amp=amp,\n t_p=t_p,\n mu=mu,\n bandwidth=bandwidth,\n t_window=t_window,\n ef=ef,\n tip_down=False,\n pos_offset=True,\n system=system,\n gamma_hz=gamma_hz)\n\n pulse_dict.update({'post_pos': post_pos})\n\n # tip-up negative offsets\n post_neg = make_hsexp(amp=amp,\n t_p=t_p,\n mu=mu,\n bandwidth=bandwidth,\n t_window=t_window,\n ef=ef,\n tip_down=False,\n pos_offset=False,\n system=system,\n gamma_hz=gamma_hz)\n\n pulse_dict.update({'post_neg': post_neg})\n\n return pulse_dict\n","repo_name":"schuenke/BMCTool","sub_path":"bmctool/utils/pulses/make_hsexp.py","file_name":"make_hsexp.py","file_ext":"py","file_size_in_byte":7506,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"71"} +{"seq_id":"71421871271","text":"#!/usr/bin/env python3\n\nfrom __future__ import print_function\n\nfrom random import choice\nimport random as rnd\nimport vizdoom as vzd\nfrom argparse import ArgumentParser\nimport matplotlib.pyplot as plt\nfrom decimal import *\nimport numpy as np\nimport datetime\nimport seaborn as sns\nimport sys, select\nfrom math import ceil\n\nDEFAULT_CONFIG = \"../scenarios/my_way_home_onespawn.cfg\"\nPLOT = True\nLOAD = False\nSAVE = True\n\n\ndef state_to_bucket(state):\n new_state = (state.game_variables[0], state.game_variables[1], state.game_variables[2])\n # print(new_state)\n x = int((new_state[0]-min_x)/disc_diff)\n y = int((new_state[1]-min_y) / disc_diff)\n # print(tuple([x, y]))\n # return tuple([x, y])\n theta = int((new_state[2]) / disc_angle)\n # print(tuple([x, y, theta]))\n return tuple([x, y, theta])\n\n\ndef select_action(state):\n # print(\"state\" + str(state))\n if np.max(q_table[state]) == np.min(q_table[state]):\n action = rnd.randint(0, len(actions)-1)\n else:\n action = int(np.argmax(q_table[state]))\n return action\n\n\ndef update_value_functions(full=False, path=[]):\n print(\"@@@@@@@@@@@@@@@@@\")\n update = True\n counter = 0\n rnd.shuffle(STATE_SPACE)\n path.reverse()\n while update is True and counter <= 100:\n print(\"Value iteration iteration #\"+str(counter))\n counter += 1\n update = False\n for state in path+STATE_SPACE:\n temp_max = -np.inf\n for action in range(NUM_ACTIONS):\n temp = r_table[state+(action,)]\n non_zero_next = np.transpose(np.nonzero(p_table[state+(action,)]))\n # if non_zero_next.size != 0:\n # print(\"Current state= \" + str(state))\n # print(non_zero_next)\n temp += sum([p_table[state+(action,)+tuple(next_state)]*value_table_temp[tuple(next_state)] for next_state in non_zero_next])\n temp_max = max(temp_max, temp)\n if n_visits[state+(action,)] < M_VISITS_TO_KNOWN:\n q_table[state + (action,)] = R_MAX\n else:\n q_table[state+(action,)] = temp\n if round(value_table_temp[state], 4) != round(temp_max, 4):\n # print(\"crr_max= \"+str(value_table_temp[state])+\" new_max= \"+str(temp_max))\n value_table_temp[state] = temp_max\n if full is True:\n # print(\"State that cause another iteration: \"+str(state))\n update = True\n\n\ndef save_matrices():\n f = open('prob.npy', 'wb')\n np.save(f, p_table)\n f.close()\n f = open('value.npy', 'wb')\n np.save(f, value_table_temp)\n f.close()\n f = open('q_function.npy', 'wb')\n np.save(f, 
q_table)\n f.close()\n f = open('reward.npy', 'wb')\n np.save(f, r_table)\n f.close()\n f = open('state_visits.npy', 'wb')\n np.save(f, n_visits)\n f.close()\n f = open('transitions.npy', 'wb')\n np.save(f, n_transitions)\n f.close()\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser(\"ViZDoom example showing how to use information about objects and map.\")\n parser.add_argument(dest=\"config\",\n default=DEFAULT_CONFIG,\n nargs=\"?\",\n help=\"Path to the configuration file of the scenario.\"\n \" Please see \"\n \"../scenarios/*cfg for more scenarios.\")\n\n args = parser.parse_args()\n\n game = vzd.DoomGame()\n\n # Use other config file if you wish.\n game.load_config(args.config)\n game.set_render_hud(False)\n game.set_screen_resolution(vzd.ScreenResolution.RES_640X480)\n game.set_window_visible(False)\n\n # Enables information about all objects present in the current episode/level.\n game.set_objects_info_enabled(True)\n\n # Enables information about all sectors (map layout).\n game.set_sectors_info_enabled(True)\n\n game.clear_available_game_variables()\n game.add_available_game_variable(vzd.GameVariable.POSITION_X)\n game.add_available_game_variable(vzd.GameVariable.POSITION_Y)\n game.add_available_game_variable(vzd.GameVariable.ANGLE)\n\n actions = [[0, True, False, False],\n [90, False, True, False],\n [-90, False, False, True]]\n ticks = 5\n game.init()\n state = game.get_state()\n blocking = []\n for s in state.sectors:\n blocking += [[l.x1, l.y1, l.x2, l.y2] for l in s.lines if l.is_blocking]\n max_x = max([max(b[0], b[2]) for b in blocking])\n min_x = min([min(b[0], b[2]) for b in blocking])\n max_y = max([max(b[1], b[3]) for b in blocking])\n min_y = min([min(b[1], b[3]) for b in blocking])\n print(\"x range: \" + str(min_x) + \" \" + str(max_x))\n print(\"y range: \" + str(min_y) + \" \" + str(max_y))\n disc_diff = 20.0\n\n min_angle = 0\n max_angle = 360\n disc_angle = 90\n\n update_area = 10\n\n plot_every = 50\n\n # NUM_BUCKETS = (int((max_x-min_x)/disc_diff), int((max_y-min_y)/disc_diff))\n NUM_BUCKETS = (ceil((max_x-min_x)/disc_diff), ceil((max_y-min_y)/disc_diff), int((max_angle-min_angle)/disc_angle))\n print(\"NUM_BUCKETS\"+str(NUM_BUCKETS))\n NUM_ACTIONS = len(actions)\n print(\"NUM_ACTIONS\"+str(NUM_ACTIONS))\n # STATE_SPACE = [(x, y) for x in range(NUM_BUCKETS[0]) for y in range(NUM_BUCKETS[1])]\n STATE_SPACE = [(x, y, theta) for x in range(NUM_BUCKETS[0]) for y in range(NUM_BUCKETS[1]) for theta in range(NUM_BUCKETS[2])]\n\n R_MAX = 1.0\n M_VISITS_TO_KNOWN = 1 # Increase for more exploration\n\n episodes = 300\n sleep_time = 1.0 / vzd.DEFAULT_TICRATE # = 0.028\n\n if LOAD is False:\n r_table = np.full(NUM_BUCKETS + (NUM_ACTIONS,), R_MAX, dtype=float)\n p_table = np.zeros(NUM_BUCKETS + (NUM_ACTIONS,) + NUM_BUCKETS, dtype=float)\n\n # for state in STATE_SPACE:\n # for action in range(NUM_ACTIONS):\n # p_table[state + (action,) + state] = 0.2\n\n n_visits = np.zeros(NUM_BUCKETS + (NUM_ACTIONS,), dtype=float)\n n_transitions = np.zeros(NUM_BUCKETS + (NUM_ACTIONS,) + NUM_BUCKETS, dtype=float)\n\n q_table = np.full(NUM_BUCKETS + (NUM_ACTIONS,), 2*R_MAX, dtype=float)\n value_table_temp = np.zeros(NUM_BUCKETS, dtype=float)\n else:\n f = open('prob.npy', 'rb')\n p_table = np.load(f)\n f.close()\n f = open('value.npy', 'rb')\n value_table_temp = np.load(f)\n f.close()\n f = open('q_function.npy', 'rb')\n q_table = np.load(f)\n f.close()\n f = open('reward.npy', 'rb')\n r_table = np.load(f)\n f.close()\n f = open('state_visits.npy', 'rb')\n n_visits = np.load(f)\n 
f.close()\n f = open('transitions.npy', 'rb')\n n_transitions = np.load(f)\n f.close()\n\n episode_path = []\n\n for i in range(episodes):\n print(\"Episode #\" + str(i + 1)+\". 2 seconds to end run\")\n q, o, e = select.select([sys.stdin], [], [], 2)\n if (q):\n save_matrices()\n game.close()\n exit()\n f = open(str(datetime.datetime.now()).split()[0]+'.log', 'a')\n if PLOT and i % plot_every == 0:\n plt.show()\n # Not needed for the first episode but the loop is nicer.\n game.new_episode()\n state = game.get_state()\n final_state = 0\n total_reward = 0\n first_step = True\n while not game.is_episode_finished():\n # Gets the state\n state = game.get_state()\n bucket = state_to_bucket(state)\n episode_path.append(bucket)\n action = select_action(bucket)\n reward = game.make_action(actions[action], ticks)\n # print(\"reward \"+str(reward)+\"\\n\")\n total_reward += reward\n\n n_visits[bucket + (action,)] += 1\n if not game.is_episode_finished():\n new_state = game.get_state()\n new_bucket = state_to_bucket(new_state)\n n_transitions[bucket+(action,)+new_bucket] += 1\n if n_visits[bucket + (action,)] >= M_VISITS_TO_KNOWN:\n r_table[bucket + (action,)] = reward\n for next_bucket in STATE_SPACE:\n p_table[bucket + (action,) + next_bucket] = n_transitions[bucket + (action,) + next_bucket] / \\\n n_visits[bucket + (action,)]\n if state.number % 30 == 0:\n update_value_functions(False, episode_path)\n\n # print(\"State #\" + str(state.number))\n final_state = state.number\n # print(\"Player position: x:\", state.game_variables[0], \", y:\", state.game_variables[1], \", angle:\",\n # state.game_variables[2])\n # print(\"Objects:\")\n\n if PLOT and i % plot_every == 0:\n # Print information about objects present in the episode.\n for o in state.objects:\n if o.name == \"DoomPlayer\":\n plt.plot(o.position_x, o.position_y, color='green', marker='o')\n else:\n plt.plot(o.position_x, o.position_y, color='red', marker='o')\n\n # print(\"=====================\")\n # print(\"Sectors:\")\n # Print information about sectors.\n if first_step is True:\n for s in state.sectors:\n for l in s.lines:\n if l.is_blocking:\n plt.plot([l.x1, l.x2], [l.y1, l.y2], color='black', linewidth=2)\n first_step = False\n\n # Show map\n # plt.show()\n plt.draw()\n plt.pause(0.001)\n\n print(\"Episode finished!\")\n update_value_functions(True, episode_path)\n f.write(\"Episode \"+str(i)+\" State #\"+str(final_state)+\" Total reward: \"+str(total_reward)+\"\\n\")\n f.close()\n if PLOT and i % plot_every == 0:\n plt.savefig(\"Episode\"+str(i))\n plt.close()\n sns.set()\n ax = sns.heatmap(np.transpose(np.sum(n_visits, axis=(2, 3))))\n ax.invert_yaxis()\n fig = ax.get_figure()\n fig.savefig(\"Episode\"+str(i)+\"_HeatMap\")\n plt.close()\n if i == 0:\n plt.show()\n for s in state.sectors:\n for l in s.lines:\n if l.is_blocking:\n plt.plot([l.x1, l.x2], [l.y1, l.y2], color='black', linewidth=2)\n plt.xticks([min_x+disc_diff*x for x in range(NUM_BUCKETS[0])], \"\")\n plt.yticks([min_y+disc_diff*y for y in range(NUM_BUCKETS[1])], \"\")\n # plt.axes.xaxis.set_ticklabels([])\n # plt.axes.yaxis.set_ticklabels([])\n plt.grid(True)\n plt.savefig(\"Disc_Map\")\n plt.close()\n if SAVE is True:\n f = open('prob.npy', 'wb')\n np.save(f, p_table)\n f.close()\n f = open('value.npy', 'wb')\n np.save(f, value_table_temp)\n f.close()\n f = open('q_function.npy', 'wb')\n np.save(f, q_table)\n f.close()\n f = open('reward.npy', 'wb')\n np.save(f, r_table)\n f.close()\n f = open('state_visits.npy', 'wb')\n np.save(f, n_visits)\n f.close()\n f = 
open('transitions.npy', 'wb')\n np.save(f, n_transitions)\n f.close()\n # It will be done automatically anyway but sometimes you need to do it in the middle of the program...\n game.close()\n","repo_name":"AmitTsvi/MazeNavigationByDiscretization","sub_path":"Uniform/UniformRMAX.py","file_name":"UniformRMAX.py","file_ext":"py","file_size_in_byte":11542,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"10044461811","text":"import asyncio\nimport base64\nimport urllib.parse\n\nfrom starlette.types import Message, ASGIApp\n\nfrom uvicorn.lifespan import Lifespan\n\nclass LambdaFunction:\n\n def __init__(self, asgi: ASGIApp):\n self._asgi = asgi\n\n def lambda_handler(self, event, context):\n loop = asyncio.get_event_loop()\n lifespan = Lifespan(self._asgi)\n loop.create_task(lifespan.run())\n loop.run_until_complete(lifespan.wait_startup())\n\n connection_scope = self.get_connection_scope(\n event=event,\n context=context\n )\n\n async def _receive() -> Message:\n body = event['body']\n if event['isBase64Encoded']:\n body = base64.standard_b64decode(body)\n return {\n 'type': 'http.request',\n 'body': body,\n 'more_body': False\n }\n\n response = {}\n\n async def _send(message: Message) -> None:\n if message['type'] == 'http.response.start':\n response[\"statusCode\"] = message['status']\n response[\"isBase64Encoded\"] = False\n response[\"headers\"] = {k.decode('utf-8'):v.decode('utf-8') for k, v in message['headers']}\n if message['type'] == 'http.response.body':\n response[\"body\"] = message['body'].decode('utf-8')\n\n asgi = self._asgi(connection_scope)\n loop.run_until_complete(asgi(_receive, _send))\n loop.run_until_complete(lifespan.wait_shutdown())\n\n return response\n\n def _unwrap_multi_value_parameters(self, parameters: dict):\n for key, value in parameters.items():\n if isinstance(value, list):\n for sub_value in value:\n yield key, sub_value\n\n else:\n yield key, value\n\n def get_query_string(self, event: dict):\n parameters: dict = event.get('queryStringParameters') or {}\n parameters.update(event.get('multiValueQueryStringParameters') or {})\n pairs = list(self._unwrap_multi_value_parameters(parameters))\n return urllib.parse.urlencode(pairs)\n\n def get_connection_scope(self, event, context):\n return {\n 'type': 'http',\n 'http_version': '1.1',\n 'scheme': 'http',\n 'method': event['httpMethod'],\n 'root_path': '',\n 'path': event['path'],\n 'query_string': self.get_query_string(event),\n 'headers': event['headers'].items(),\n 'x-aws-lambda': {\n 'requestContext': event['requestContext'],\n 'lambdaContext': context\n }\n }\n","repo_name":"alokinsoft/starlette-lambda","sub_path":"starlette_lambda/aws.py","file_name":"aws.py","file_ext":"py","file_size_in_byte":2655,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"71"} +{"seq_id":"70457070311","text":"# -*- coding: UTF-8 -*-\n\n# author: luoboovo\n# contact: fuyu16032001@gmail.com\n# datetime: 2021/05/11\n# software: PyCharm\n# = = =\n# = = =\n# = = =\n# ===========\n# = 萝 =\n# = 卜 =\n# = 神 =\n# = 保 =\n# = 佑 =\n# = 永 =\n# = 无 =\n# = bug =\n# = =\n# = =\n# =\nimport sys\nimport unittest\nfrom typing import List\n\nimport pandas as pd\n\nfrom DoWork import DoWork\nfrom DoWork01 import DoWork1\nfrom ddt import data, file_data, ddt, unpack\n\nfrom HTMLTestRunner import HTMLTestRunner\n\n\ndef get_csvdata(path):\n df = pd.read_csv(path)\n data = list(df['testcase'].astype('str'))\n data1 = []\n for i in data:\n if len(i) == 
1:\n data1.append(list(i))\n else:\n j = []\n j.append(i)\n data1.append(j)\n return data1\n\n\n@ddt\nclass MyTest(unittest.TestCase): # inherits from unittest.TestCase\n def tearDown(self):\n # runs after every test case\n print('test finished')\n print('----------------')\n\n def setUp(self):\n # runs before every test case\n print('----------------')\n print('test started')\n\n # @classmethod\n # def tearDownClass(self):\n # # must use the @classmethod decorator; runs once after all tests have finished\n # print('4444444')\n #\n # @classmethod\n # def setUpClass(self):\n # # must use the @classmethod decorator; runs once before all tests run\n # print('33333')\n # @unittest.skipIf(sys.platform=='win132','la la la la!')\n # @unittest.skipIf(sys.version=='3.9.2 (tags/v3.9.2:1a79785, Feb 19 2021, 13:44:55) [MSC v.1928 64 bit (AMD64)]','this is version 3.9!')\n @data('1','2')\n # @unpack\n def test_01(self, a):\n print(a)\n\n # @file_data('test_data.yaml')\n # def test_a(self, *args):\n # print(args[0])\n a = get_csvdata('test_data.csv')\n @data(*a)\n @unpack\n def test_02(self,c):\n print(c)\n\n # def test_a_run(self):\n # print('input 4, 6, 9')\n # print(DoWork(4, 6, 9))\n # print('input 10,6')\n # print(DoWork1(10, 6))\n #\n # def test_b_run(self):\n # print('input 4, 4, 9')\n # print(DoWork(4, 4, 9))\n # print('input 7,4')\n # print(DoWork1(7, 4))\n #\n # def test_c_run(self):\n # print('input 4, 6, 11')\n # print(DoWork(4, 6, 11))\n # print('input 17,11')\n # print(DoWork1(17, 11))\n #\n # def test_d_run(self):\n # print('input 5, 6, 9')\n # print(DoWork(5, 6, 9))\n # print('input 1,1')\n # print(DoWork1(1, 1))\n #\n # def test_e_run(self):\n # print('input 5, 4, 9')\n # print(DoWork(5, 4, 9))\n # print('input -1,-1')\n # print(DoWork1(-1, -1))\n #\n # def test_f_run(self):\n # print('input 5, 6, 11')\n # print(DoWork(5, 6, 11))\n #\n # def test_g_run(self):\n # print('input 3, 4, 11')\n # print(DoWork(3, 4, 11))\n #\n # def test_h_run(self):\n # print('input 3, 4, 9')\n # print(DoWork(3, 4, 9))\n #\n # def test_i_run(self):\n # print('input 3, 6, 9')\n # print(DoWork(3, 6, 9))\n #\n # def test_k_run(self):\n # print('input 3, 6, 9')\n # print(DoWork(3, 6, 9))\n\n#\n# if __name__ == '__main__':\n# suite=unittest.TestSuite()\n# suite.addTest(unittest.TestLoader().loadTestsFromName(\"MyTest.test_01\"))\n# suite.addTest(unittest.TestLoader().loadTestsFromTestCase(MyTest))\n# with open('result.html','wb') as f:\n# # runner=unittest.TextTestRunner(stream=f,verbosity=2)\n# runner=HTMLTestRunner(stream=f, title=\"Test report\", description=\"Test case execution results\")\n# runner.run(suite)\n","repo_name":"luobodage/PythonBasis","sub_path":"正式开班/测试/unittest测试/test_DoWork.py","file_name":"test_DoWork.py","file_ext":"py","file_size_in_byte":3824,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"71"} +{"seq_id":"22815540961","text":"import torch \nfrom torch import nn \nfrom pathlib import Path\n\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\ntorch.manual_seed(seed = 100)\n\n# Create *known* parameters\nweight_tensor = torch.rand(size = (1,6))\nbias = 0.3\n\n# six features and 50 rows\nX = torch.rand(size = (50,6)) \nprint('Shape of X : ',X.shape)\nprint('Shape of weight tensor : ',weight_tensor.shape)\n\ny = X.matmul(weight_tensor.T) + bias\nprint('Shape of the output after X*W + b : ',y.shape)\n# print(y)\n\n# Create train/test split\ntrain_split = int(0.8 * len(X))\nX_train, y_train = X[:train_split], y[:train_split]\nX_test, y_test = X[train_split:], y[train_split:]\n\nprint(len(X_train), len(y_train), len(X_test), len(y_test))\n\ntorch.manual_seed(seed = 100)\n\nclass LinearRegressionModel(nn.Module):\n def __init__(self):\n super().__init__()\n self.weight = 
nn.Parameter(torch.rand(size = (1,6)))\n self.bias = nn.Parameter(torch.rand(1))\n\n def forward(self,X:torch.tensor):\n return X.matmul(self.weight.T)+self.bias\n\nmodel = LinearRegressionModel()\nmodel_initial_parameters = list(model.parameters())\nprint('Model initial parameters : ',model_initial_parameters)\n\n# Make predictions with model\nwith torch.inference_mode(): \n y_preds = model(X_test)\n\n\nloss_fn = nn.L1Loss()\noptimizer = torch.optim.SGD(params=model.parameters(),lr = 0.01)\n\nepochs = 100 \ntrain_loss_values = []\ntest_loss_values = []\nepoch_count = [] \n\nfor epoch in range(epochs):\n model.train() \n y_pred = model(X_train)\n loss = loss_fn(y_pred,y_train)\n optimizer.zero_grad() \n loss.backward()\n optimizer.step()\n\n model.eval()\n with torch.inference_mode():\n test_pred = model(X_test)\n test_loss = loss_fn(test_pred,y_test.type(torch.float))\n\n if epoch % 10 ==0:\n epoch_count.append(epoch)\n train_loss_values.append(loss.detach().numpy())\n test_loss_values.append(test_loss.detach().numpy())\n print(f\"Epoch:{epoch} | MAE Train Loss: {loss} | MAE Test loss: {test_loss}\")\n\nprint(model.state_dict())\nprint(weight_tensor)\n\n\nmodel.eval()\n\n# Make predictions on the test data\nwith torch.inference_mode():\n final_predictions = model(X_test)\n\nprint(final_predictions)\nprint(y_test)\n\n\n\n# 1. Create models directory \nMODEL_PATH = Path(\"models\")\nMODEL_PATH.mkdir(parents=True, exist_ok=True)\n\n# 2. Create model save path \nMODEL_NAME = \"01_pytorch_workflow_model.pth\"\nMODEL_SAVE_PATH = MODEL_PATH / MODEL_NAME\n\n# 3. Save the model state dict \nprint(f\"Saving model to: {MODEL_SAVE_PATH}\")\ntorch.save(obj=model.state_dict(),f=MODEL_SAVE_PATH)\n\n\n# Instantiate a fresh instance of LinearRegressionModelV2\nloaded_model = LinearRegressionModel()\nloaded_model.load_state_dict(torch.load(MODEL_SAVE_PATH))\nloaded_model.to(\"cpu\")\nprint(f\"Loaded model:\\n{loaded_model}\")\nprint(f\"Model on device:\\n{next(loaded_model.parameters()).device}\")\n\n\n","repo_name":"thomasgit13/Deep_Learning","sub_path":"Basics/regression.py","file_name":"regression.py","file_ext":"py","file_size_in_byte":2875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"8630751229","text":"import data_manager\nimport display\nimport common as c\nfrom datetime import date\n\n\ndef get_category(filename):\n CATEGORY_INDEX = 0\n category_list = []\n category = data_manager.data_import(filename)\n\n for element in category:\n category_list.append(element[CATEGORY_INDEX])\n return category_list\n\n\ndef expenses(expenses, expense_categories):\n expense_options = ['Add expense', 'Remove expense', 'Edit expense', \\\n 'Display expenses in the current month', 'Back to main menu']\n selected_option = 0\n\n while selected_option != '5':\n try:\n display.print_menu(expense_options)\n selected_option = input('Enter a number: ')\n\n if selected_option == '1':\n add_record(expense_categories, expenses)\n elif selected_option == '2':\n remove_record(expenses)\n elif selected_option == '3':\n edit_record(expenses, expense_categories)\n elif selected_option == '4':\n display_current_month(expenses)\n\n except ValueError:\n print('Wrong value. Please, try again.')\n except IndexError:\n print('Wrong number. Please, try again. 
')\n\n\ndef sum_amount(data):\n sum_amount = 0\n\n for element in data:\n element[c.AMOUNT_INDEX] = float(element[c.AMOUNT_INDEX])\n sum_amount += element[c.AMOUNT_INDEX]\n return str(sum_amount)\n\n\ndef add_record(category, data_):\n YEAR_INDEX = 0\n MONTH_INDEX = 1\n DAY_INDEX = 2\n record = [c.generate_id(data_)]\n\n amount = float(input('Enter the amount [0.0]: '))\n amount = format(amount, '.2f')\n record.append(str(amount))\n\n display.print_menu(category)\n selected_category = int(input('Choose category number: '))\n record.append(category[selected_category-1])\n\n operation_details = input('Enter the operation details: ')\n record.append(operation_details)\n\n expense_date = date.today()\n expense_date = expense_date.timetuple()\n record.append(str(expense_date[YEAR_INDEX]))\n record.append(str(expense_date[MONTH_INDEX]))\n record.append(str(expense_date[DAY_INDEX]))\n\n data_.append(record)\n print('Record added correctly. \\n')\n return data_\n\n\ndef remove_record(data):\n display.print_table(data)\n removed_record = input('Enter the id of the record you want to remove: ')\n\n for element in data[:]:\n if removed_record == element[c.ID_INDEX]:\n data.remove(element)\n return data\n\n\ndef edit_record(data, category):\n options = ['Amount', 'Categories', 'Details', 'Year', 'Month', 'Day']\n\n display.print_table(data)\n edited_record = input('Enter the id of the record you want to edit: ')\n for element in data[:]:\n if edited_record == element[c.ID_INDEX]:\n display.print_menu(options)\n selected_option = int(input('Enter the number of the element you want to edit: '))\n\n if selected_option == 1:\n new_amount = format(float(input('Enter the amount [0.0]: ')), '.2f')\n element[selected_option] = str(new_amount)\n elif selected_option == 2:\n display.print_menu(category)\n selected_category = int(input('Choose category number: '))\n element[selected_option] = category[selected_category-1]\n elif selected_option == 3:\n new_details = input('Enter new details: ')\n element[selected_option] = new_details\n elif selected_option in (4, 5, 6):\n new_date = int(input('Enter new date: '))\n element[selected_option] = str(new_date)\n\n return data\n\n\ndef display_current_month(data):\n MONTH_INDEX = 1\n current_month_list = []\n\n current_date = date.today()\n current_date = current_date.timetuple()\n current_month = str(current_date[MONTH_INDEX])\n\n for element in data:\n if element[c.MONTH_INDEX] == current_month:\n current_month_list.append(element)\n if current_month_list == []:\n print('No record to display.')\n else:\n display.print_table(current_month_list)\n sum_amount_ = sum_amount(current_month_list)\n print('Sum: ' + sum_amount_ + ' EUR\\n')\n\n\ndef incomes(incomes, income_categories):\n income_options = ['Add income', 'Remove income', 'Edit income', 'Display incomes in the current month',\\\n 'Back to main menu']\n selected_option = 0\n\n while selected_option != '5':\n try:\n display.print_menu(income_options)\n selected_option = input('Enter a number: ')\n\n if selected_option == '1':\n add_record(income_categories, incomes)\n elif selected_option == '2':\n remove_record(incomes)\n elif selected_option == '3':\n edit_record(incomes, income_categories)\n elif selected_option == '4':\n display_current_month(incomes)\n except ValueError:\n print('Wrong value. Please, try again.')\n except IndexError:\n print('Wrong number. Please, try again. 
')\n","repo_name":"Voytay/Petproject","sub_path":"income_expenses.py","file_name":"income_expenses.py","file_ext":"py","file_size_in_byte":5082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"12038117830","text":"\"\"\"\nRejection sampler draw\n\"\"\"\n\nimport numpy as np\nimport pickle\nimport fitsio as fio\nimport skysampler.old_emulator as emulator\nimport skysampler.utils as utils\n\nVERSION = \"01-gr-ri\"\n\n\nwide_data_path = \"/e/eser2/vargatn/EMULATOR/GAMMA/multi-indexer-gamma_v001_clust__z0_l1.p\"\ndeep_data_path = \"/e/eser2/vargatn/DES/SIM_DATA/run-vd09-SN-C3_trim_extcorr.fits\"\nRMAX = 5.\n\nNSAMPLES = 2e6\nNCHUNKS = 400\nNPROCESS = 30\n\ndeep_c_settings = {\n \"columns\": [\n (\"COLOR_G_R\", ((\"bdf_mag\", 0), (\"bdf_mag\", 1), \"-\")),\n (\"COLOR_R_I\", ((\"bdf_mag\", 1), (\"bdf_mag\", 2), \"-\")),\n ],\n \"logs\": [False, False],\n \"limits\": [(-2, 4), (-2, 4)],\n \"emulator\": {\n \"bandwidth\": 0.1\n },\n \"fname\": deep_data_path,\n}\ndeep_smc_settings = {\n \"columns\": [\n (\"GABS\", ((\"bdf_g\", 0), (\"bdf_g\", 1), \"SQSUM\")),\n (\"SIZE\", (\"bdf_T\", 1, \"+\")),\n (\"FRACDEV\", \"bdf_fracdev\"),\n (\"MAG_I\", (\"bdf_mag\", 2)),\n (\"COLOR_G_R\", ((\"bdf_mag\", 0), (\"bdf_mag\", 1), \"-\")),\n (\"COLOR_R_I\", ((\"bdf_mag\", 1), (\"bdf_mag\", 2), \"-\")),\n (\"COLOR_I_Z\", ((\"bdf_mag\", 2), (\"bdf_mag\", 3), \"-\")),\n ],\n \"logs\": [False, True, False, False, False, False, False],\n \"limits\": [(0, 1), (-1, 5), (-3, 4), (16, 26), (-2, 4), (-2, 4), (-2, 4)],\n \"emulator\": {\n \"bandwidth\": 0.15,\n \"ref_axes\": 3,\n \"nslices\": 5,\n \"tomographic_weights\": ((0, 1, 1, 0, 0),),\n \"nbins\": 100,\n \"eta\": 1. * np.array([1., 0., 0., 0., 1., 0., 1.]),\n \"window_size\": 15,\n },\n \"fname\": deep_data_path,\n}\nwide_cr_settings = {\n \"columns\": [\n (\"COLOR_G_R\", (\"MOF_CM_MAG_CORRECTED_G\", \"MOF_CM_MAG_CORRECTED_R\", \"-\")),\n (\"COLOR_R_I\", (\"MOF_CM_MAG_CORRECTED_R\", \"MOF_CM_MAG_CORRECTED_I\", \"-\")),\n (\"LOGR\", \"DIST\"),\n ],\n \"logs\": [False, False, True],\n \"limits\": [(-2, 4), (-2, 4), (1e-3, RMAX),],\n \"emulator\": {\n \"bandwidth\": 0.1,\n \"ref_axes\": 2,\n \"nslices\": 7,\n \"tomographic_weights\": ((0, 1, 0, 0, 0, 0, 0),),\n \"nbins\": 100,\n \"eta\": 1 * np.array([0, 2, 1]),\n \"window_size\": 10,\n },\n \"fname\": wide_data_path,\n}\nwide_r_settings = {\n \"columns\": [\n (\"LOGR\", \"DIST\"),\n ],\n \"logs\": [True,],\n \"limits\": [(1e-3, RMAX),],\n \"emulator\": {\n \"bandwidth\": 0.1,\n \"nbins\": 100,\n \"eta\": 0.1,\n \"window_size\": 15,\n },\n \"fname\": wide_data_path,\n}\n\nprior_cols = {\n \"cols_dc\": [\"COLOR_G_R\", \"COLOR_R_I\"],\n \"cols_wr\": [\"LOGR\",],\n \"cols_wcr\": [\"COLOR_G_R\", \"COLOR_R_I\", \"LOGR\",],\n}\n\n\nif __name__ == \"__main__\":\n mdl = pickle.load(open(wide_data_path, \"rb\"))\n wide_cr_settings = emulator.construct_wide_container(mdl, wide_cr_settings)\n wide_r_settings = emulator.construct_wide_container(mdl, wide_r_settings)\n\n deep_smc_settings = emulator.construct_deep_container(deep_data_path, deep_smc_settings)\n deep_c_settings = emulator.construct_deep_container(deep_data_path, deep_c_settings)\n\n sample, infodicts = emulator.make_infodicts(wide_cr_settings,\n wide_r_settings,\n deep_c_settings,\n deep_smc_settings,\n nsamples=NSAMPLES, cols=prior_cols,\n nchunks=NCHUNKS)\n\n fname = \"/e/eser2/vargatn/EMULATOR/GAMMA/resamples/resample_\" + VERSION + \".p\"\n res = {\n \"sample\": sample,\n \"infodicts\": infodicts,\n }\n 
pickle.dump(res, open(fname, \"wb\"))\n print(fname)\n\n chunked_infodicts = utils.partition(infodicts, int(np.ceil(NCHUNKS / NPROCESS)))\n for i, chunk in enumerate(chunked_infodicts):\n result = emulator.run_scores(chunk)\n res = {\n \"result\": result,\n }\n fname = \"resample_\" + VERSION + \"_chunk_{:02d}.p\".format(i)\n print(fname)\n pickle.dump(res, open(fname, \"wb\"))\n","repo_name":"vargatn/skysampler","sub_path":"bin/old_rejection_sampler.py","file_name":"old_rejection_sampler.py","file_ext":"py","file_size_in_byte":3974,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"40542250724","text":"from typing import List\n\ndef exchange(lst1: List[int], lst2: List[int]) -> str:\n \"\"\"In this problem, you will implement a function that takes two lists of numbers,\n and determines whether it is possible to perform an exchange of elements\n between them to make lst1 a list of only even numbers.\n There is no limit on the number of exchanged elements between lst1 and lst2.\n If it is possible to exchange elements between the lst1 and lst2 to make\n all the elements of lst1 to be even, return \"YES\".\n Otherwise, return \"NO\".\n For example:\n >>> exchange([1, 2, 3, 4], [1, 2, 3, 4])\n 'YES'\n >>> exchange([1, 2, 3, 4], [1, 5, 3, 4])\n 'NO'\n It is assumed that the input lists will be non-empty.\n \"\"\"\n # Check if lst1 already contains only even numbers\n if all(num % 2 == 0 for num in lst1):\n return \"YES\"\n \n # Find all odd numbers in lst1 and their indices\n odd_nums = [(num, i) for i, num in enumerate(lst1) if num % 2 != 0]\n \n # Find all even numbers in lst2 and their indices\n even_nums = [(num, i) for i, num in enumerate(lst2) if num % 2 == 0]\n \n # If there are not enough even numbers in lst2 to replace all odd numbers in lst1\n if len(even_nums) < len(odd_nums):\n return \"NO\"\n \n # Sort odd_nums in descending order of their values\n odd_nums.sort(reverse=True)\n \n # Sort even_nums in ascending order of their values\n even_nums.sort()\n \n # Replace odd numbers in lst1 with even numbers from lst2\n for odd_num, odd_index in odd_nums:\n for even_num, even_index in even_nums:\n if even_num > odd_num:\n lst1[odd_index] = even_num\n lst2[even_index] = odd_num\n even_nums.remove((even_num, even_index))\n break\n \n # Check if lst1 now contains only even numbers\n if all(num % 2 == 0 for num in lst1):\n return \"YES\"\n else:\n return \"NO\"\n\ndef check(candidate):\n assert candidate([1, 2, 3, 4], [1, 2, 3, 4]) == 'YES'\n assert candidate([1, 2, 3, 4], [1, 5, 3, 4]) == 'NO'\n assert candidate([1, 2, 3, 4], [2, 1, 4, 3]) == 'YES'\n assert candidate([5, 7, 3], [2, 6, 4]) == 'YES'\n assert candidate([5, 7, 3], [2, 6, 3]) == 'NO'\n assert candidate([3, 2, 6, 1, 8, 9], [3, 5, 5, 1, 1, 1]) == 'NO'\n assert candidate([100, 200], [200, 200]) == 'YES'\n\ndef test_check():\n check(exchange)\n\ntest_check()\n","repo_name":"esslushy/ChatGPT-Python-Programs","sub_path":"ChatGPT-0301/HumanEval_110_exchange-18-Exception.py","file_name":"HumanEval_110_exchange-18-Exception.py","file_ext":"py","file_size_in_byte":2419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"34331164652","text":"from itertools import product\n\ndef solve(s1, s2, res_string, allow_leading0 = False):\n\n nums = [i for i in range(0, 10)]\n letters = [letter for letter in set(s1+s2+res_string)]\n total_letters = len(letters)\n\n for val in product(nums, repeat=total_letters):\n\n f = 
dict(zip(letters, val))\n if not allow_leading0 and (f[s1[0]] == 0 or f[s2[0]] == 0 or f[res_string[0]] == 0):\n continue\n\n num1 = 0\n num2 = 0\n res = 0\n\n for power, letter in enumerate(reversed(s1)):\n num1 += f[letter]*(10**power)\n for power, letter in enumerate(reversed(s2)):\n num2 += f[letter]*(10**power)\n for power, letter in enumerate(reversed(res_string)):\n res += f[letter]*(10**power)\n if num1+num2 == res:\n yield f\n pass\n\n# works but takes a few minutes\n\"\"\" for res in solve(\"SEND\", \"MORE\", \"MONEY\"):\n print(res) \"\"\"\n#print all solutions\nfor res in solve(\"KIOTO\", \"OSAKA\", \"TOKIO\"):\n print(res, '\\n')\n\n# get ten solutions\npuzzle = solve(\"TRZY\", \"TRZY\", \"SZEŚĆ\")\nfor i in range(10):\n print(next(puzzle), '\\n')\n","repo_name":"jpultorak/pythonUWR","sub_path":"L4/zad.py","file_name":"zad.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"3528176218","text":"from django.contrib import admin\nfrom django.utils.safestring import mark_safe\nfrom django_audit_fields.admin import audit_fieldset_tuple\nfrom edc_model_admin.dashboard import ModelAdminSubjectDashboardMixin\nfrom edc_model_admin.model_admin_simple_history import SimpleHistoryAdmin\n\nfrom ..admin_site import inte_screening_admin\nfrom ..forms import DailyClosingLogForm\nfrom ..models import DailyClosingLog\n\n\n@admin.register(DailyClosingLog, site=inte_screening_admin)\nclass DailyClosingLogAdmin(ModelAdminSubjectDashboardMixin, SimpleHistoryAdmin):\n form = DailyClosingLogForm\n date_hierarchy = \"log_date\"\n show_object_tools = True\n additional_instructions = mark_safe(\n \"This form should be completed \"\n \"at the end of each clinic day.\"\n )\n\n fieldsets = (\n [None, {\"fields\": (\"log_date\", \"site\")}],\n [\n \"Daily Closing Log\",\n {\n \"fields\": (\n \"clinic_services\",\n \"attended\",\n \"selection_method\",\n \"approached\",\n \"agreed_to_screen\",\n \"comment\",\n )\n },\n ],\n audit_fieldset_tuple,\n )\n\n list_display = (\n \"log_date\",\n \"clinic_services\",\n \"number_attended\",\n \"number_approached\",\n \"number_agreed_to_screen\",\n )\n\n list_filter = (\"log_date\", \"clinic_services\", \"created\", \"modified\")\n\n radio_fields = {\n \"clinic_services\": admin.VERTICAL,\n \"selection_method\": admin.VERTICAL,\n }\n\n def number_attended(self, obj):\n return obj.attended\n\n number_attended.short_description = \"Attended\"\n\n def number_approached(self, obj):\n return obj.approached\n\n number_approached.short_description = \"Approached\"\n\n def number_agreed_to_screen(self, obj):\n return obj.agreed_to_screen\n\n number_agreed_to_screen.short_description = \"Agreed to Screen\"\n\n def formfield_for_foreignkey(self, db_field, request, **kwargs):\n if db_field.name == \"site\":\n try:\n site_id = request.site.id\n except AttributeError:\n site_id = None\n kwargs[\"queryset\"] = db_field.related_model.objects.filter(pk=site_id)\n return super().formfield_for_foreignkey(db_field, request, **kwargs)\n\n def view_on_site(self, obj):\n return True\n","repo_name":"inte-africa-trial/inte-edc","sub_path":"inte_screening/admin/daily_closing_log_admin.py","file_name":"daily_closing_log_admin.py","file_ext":"py","file_size_in_byte":2446,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"21268575664","text":"from __future__ import print_function, 
division\nimport argparse\nimport itertools\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport torch\nimport torch.nn.functional as F\n\nimport Trustworthy_AD.misc as misc\nfrom loss import evidential_classification\n\nplt.rcParams['font.sans-serif'] = ['SimSun']\nplt.rcParams['axes.unicode_minus'] = False\nsns.set(font='SimSun', style='white', )\n\n\ndef a_value(probabilities, zero_label=0, one_label=1):\n    \"\"\"\n    Compute the A-value (pairwise AUC).\n    Args:\n        probabilities: ground-truth label and per-class predicted probabilities for every test sample\n        zero_label: the first class, 0\n        one_label: the second class, 1\n    \"\"\"\n    expanded_points = []\n    for instance in probabilities: # iterate over all test samples (ground-truth label and per-class probabilities)\n        if instance[0] == zero_label or instance[0] == one_label:\n            expanded_points.append((instance[0], instance[1][zero_label]))\n    sorted_ranks = sorted(expanded_points, key=lambda x: x[1])\n\n    n0, n1, sum_ranks = 0, 0, 0\n    # Iterate through ranks and increment counters for overall count and ranks\n    for index, point in enumerate(sorted_ranks):\n        if point[0] == zero_label:\n            n0 += 1\n            sum_ranks += index + 1 # Add 1 as ranks are one-based\n        elif point[0] == one_label:\n            n1 += 1\n        else:\n            pass # Not interested in this class\n\n    return (sum_ranks - (n0 * (n0 + 1) / 2.0)) / float(n0 * n1) # Eqn 3\n\n\ndef MAUC(data, no_classes):\n    # Find all pairwise comparisons of labels\n    class_pairs = [x for x in itertools.combinations(range(no_classes), 2)]\n\n    # Have to take average of A value with both classes acting as label 0 as\n    # this gives different outputs for more than 2 classes\n    sum_avals = 0\n    for pairing in class_pairs:\n        sum_avals += (\n            a_value(data, zero_label=pairing[0], one_label=pairing[1]) +\n            a_value(data, zero_label=pairing[1], one_label=pairing[0])) / 2.0\n\n    return sum_avals * (2 / float(no_classes * (no_classes - 1))) # Eqn 7\n\n\ndef calcBCA(estimLabels, trueLabels, no_classes):\n    bcaAll = []\n    for c0 in range(no_classes):\n        # c0 can be either CTL, MCI or AD\n\n        # one example when c0=CTL\n        # TP - label was estimated as CTL, and the true label was also CTL\n        # FP - label was estimated as CTL, but the true label was not CTL\n        TP = np.sum((estimLabels == c0) & (trueLabels == c0))\n        TN = np.sum((estimLabels != c0) & (trueLabels != c0))\n        FP = np.sum((estimLabels == c0) & (trueLabels != c0))\n        FN = np.sum((estimLabels != c0) & (trueLabels == c0))\n\n        # sometimes the sensitivity or specificity can be NaN, if the user\n        # doesn't forecast one of the classes.\n        # In this case we assume a default value for sensitivity/specificity\n        if (TP + FN) == 0:\n            sensitivity = 0.5\n        else:\n            sensitivity = (1. * TP) / (TP + FN)\n\n        if (TN + FP) == 0:\n            specificity = 0.5\n        else:\n            specificity = (1. 
* TN) / (TN + FP)\n\n        bcaCurr = 0.5 * (sensitivity + specificity)\n        bcaAll += [bcaCurr]\n\n    return np.mean(bcaAll)\n\n\ndef nearest(series, target):\n    \"\"\" Return index in *series* with value closest to *target* \"\"\"\n    return (series - target).abs().idxmin()\n\n\ndef ece_binary(probabilities, target, n_bins=10, draw=False):\n    pos_frac, mean_confidence, bin_count, non_zero_bins = _binary_calibration(target.flatten(), probabilities.flatten(),\n                                                                              n_bins)\n    # returns each bin's accuracy, mean confidence, sample count, and whether the bin contains any samples\n\n    bin_proportions = bin_count / bin_count.sum() # weights in the ECE formula\n    ece = (np.abs(mean_confidence - pos_frac) * bin_proportions).sum()\n\n    _, _, bin_count_accconf, non_zero_bins_accconf = _binary_calibration(target.flatten(), probabilities.flatten(),\n                                                                         20)\n    acc_total = target.sum() / len(target) # overall accuracy\n    confidence_total = probabilities.mean()\n\n    if draw:\n        j = 0\n        ave_acc = []\n        gap_bottom = []\n        gap_height = []\n        bin_count_total = []\n\n        # compute the data to display\n        x = np.arange(1 / (2 * n_bins), 1 + 1 / (2 * n_bins), 1 / (n_bins))\n        # reliability diagram data\n        for i in range(n_bins):\n            if non_zero_bins[i]:\n                ave_acc.append(pos_frac[j])\n                gap_height.append(abs(pos_frac[j] - x[i]))\n                if pos_frac[j] > x[i]:\n                    gap_bottom.append(x[i])\n                else:\n                    gap_bottom.append(pos_frac[j])\n                j = j + 1\n            else:\n                ave_acc.append(0.)\n                gap_bottom.append(0.)\n                gap_height.append(0.)\n        # accuracy/confidence histogram data\n        j = 0\n        for i in range(20):\n            if non_zero_bins_accconf[i]:\n                bin_count_total.append(bin_count_accconf[j])\n                j = j + 1\n            else:\n                bin_count_total.append(0.)\n\n        ave_acc = np.array(ave_acc)\n        gap_bottom = np.array(gap_bottom)\n        gap_height = np.array(gap_height)\n        bin_count_total = np.array(bin_count_total) / len(target)\n\n        # draw the reliability diagram\n        # fig1 = plt.figure(figsize=(7, 6))\n        # ax1 = fig1.add_subplot(111)\n        fig, ax1 = plt.subplots(figsize=(11, 11))\n        ax1.set_title('MinimalRNN reliability diagram', fontproperties='SimSun', fontsize=40)\n        ax1.set_xlim([0, 1])\n        ax1.set_ylim([0, 1])\n        ax1.bar(x, ave_acc, color='#6DD5FA', width=1 / (n_bins), edgecolor='#134857', linewidth=1, label=u'Outputs')\n        ax1.set_ylabel(u'Accuracy', fontsize='37')\n        ax1.set_xlabel(u'Confidence', fontsize='37')\n\n        plt.bar(x, gap_height, bottom=gap_bottom, color='#fb8b05', width=1 / (n_bins), edgecolor='#652b1c', linewidth=1,\n                label=u'Gaps', alpha=0.6, hatch='/')\n        plt.xticks(fontsize=30)\n        plt.yticks(fontsize=30)\n        plt.grid()\n\n        ax2 = ax1.twinx() # combined plot\n        ax2.set_xlim([0, 1])\n        ax2.set_ylim([0, 1])\n        x1 = [0, 1]\n        y1 = [0, 1]\n        ax2.plot(x1, y1, 'k--', ms=10, lw=3, alpha=0.8, label=u'well calibrated') # set line width and marker style\n        # plt.legend(loc='upper left')\n        plt.xticks(fontsize=30)\n        plt.yticks(fontsize=30)\n        fig.legend(loc='upper left', bbox_to_anchor=(0, 1), bbox_transform=ax1.transAxes, fontsize='32')\n        plt.show()\n\n        # plot average confidence and accuracy\n        x = np.arange(1 / (2 * 20), 1 + 1 / (2 * 20), 1 / (20))\n        fig2, ax1 = plt.subplots(figsize=(11, 11))\n        ax1.set_title('MinimalRNN average confidence and accuracy', fontproperties='SimSun', fontsize=40)\n        ax1.set_xlim([0, 1])\n        ax1.set_ylim([0, 1])\n        ax1.bar(x, bin_count_total, color='#6DD5FA', width=1 / (20), edgecolor='#134857', linewidth=1, label=u'Outputs')\n        ax1.set_ylabel(u'Sample proportion %', fontsize='37')\n        ax1.set_xlabel(u'Confidence', fontsize='37')\n        plt.xticks(fontsize=30)\n        plt.yticks(fontsize=30)\n        plt.grid()\n\n        ax2 = ax1.twinx() # combined plot\n        ax2.set_xlim([0, 1])\n        ax2.set_ylim([0, 1])\n        x1 = [acc_total, acc_total]\n        y1 = [0, 1]\n        ax2.plot(x1, y1, 'k--', ms=10, lw=3, alpha=0.8, label=u'Accuracy') # set line width and marker style\n        x2 = [confidence_total, confidence_total]\n        y2 = [0, 1]\n        ax2.plot(x2, y2, 'k:', ms=10, lw=5, alpha=0.8, 
label=u'Avg confidence') # set line width and marker style\n        fig2.legend(loc='upper left', fontsize='32')\n        plt.xticks(fontsize=30)\n        plt.yticks(fontsize=30)\n        plt.show()\n\n        return ece\n    else:\n        return ece\n\n\ndef _binary_calibration(target, probs_positive_cls, n_bins=10):\n    bins = np.linspace(0., 1. + 1e-8, n_bins + 1) # split into bins\n    binids = np.digitize(probs_positive_cls, bins) - 1 # position of each confidence value among the bins, starting from 0\n\n    bin_sums = np.bincount(binids, weights=probs_positive_cls, minlength=n_bins) # sum of confidences per bin\n    bin_true = np.bincount(binids, weights=target, minlength=n_bins) # number of correct samples per bin (binary case here; for multi-class, target should mark whether the prediction is correct)\n    bin_total = np.bincount(binids, minlength=n_bins) # number of samples per bin\n\n    nonzero = bin_total != 0 # avoid division by zero\n    prob_true = (bin_true[nonzero] / bin_total[nonzero]) # accuracy\n    prob_pred = (bin_sums[nonzero] / bin_total[nonzero]) # mean confidence\n\n    return prob_true, prob_pred, bin_total[nonzero], nonzero\n\n\ndef mask(pred, true):\n    \"\"\" Drop entries without ground truth data (i.e. NaN in *true*) \"\"\"\n    try:\n        index = ~np.isnan(true)\n    except Exception:\n        print('true', true)\n        print('pred', pred)\n        raise\n    ret = pred[index], true[index]\n    assert ret[0].shape[0] == ret[1].shape[0]\n    return ret\n\n\ndef parse_data(_ref_frame, _pred_frame, EDL, uncertainty_threshold=1):\n    \"\"\"\n    EDL indicates whether evidential deep learning (EDL) is used.\n    uncertainty_threshold is the rejection threshold: samples whose uncertainty is below it are evaluated, those above it are rejected. The default of 1 evaluates every sample.\n    \"\"\"\n    true_label_and_prob = []\n    prob_with_mask = []\n    alpha_with_mask = []\n    uncertainty_with_mask = []\n    pred_diag = []\n    pred_adas = []\n    pred_vent = []\n    true_diag = []\n    true_adas = []\n    true_vent = []\n\n    num_sample = len(np.unique(_ref_frame.RID)) # number of samples in the validation/test set\n\n    for i in range(len(_ref_frame)):\n        cur_row = _ref_frame.iloc[i]\n        subj_data = _pred_frame[_pred_frame.RID == cur_row.RID].reset_index(drop=True)\n        dates = subj_data['Forecast Date']\n        matched_row = subj_data.iloc[nearest(dates, cur_row.CognitiveAssessmentDate)]\n\n        if EDL:\n            uncertainty = matched_row[['uncertainty']].values\n            if uncertainty > uncertainty_threshold: # uncertainty-based rejection\n                continue\n\n            alpha = matched_row[[\n                'CN relative alpha', 'MCI relative alpha',\n                'AD relative alpha'\n            ]].values\n\n        prob = matched_row[[\n            'CN relative probability', 'MCI relative probability',\n            'AD relative probability'\n        ]].values\n\n        pred_diag.append(np.argmax(prob))\n        pred_adas.append(matched_row['ADAS13'])\n        pred_vent.append(subj_data.iloc[nearest(dates, cur_row.ScanDate)]['Ventricles_ICV'])\n        true_diag.append(cur_row.Diagnosis)\n        true_adas.append(cur_row.ADAS13)\n        true_vent.append(cur_row.Ventricles)\n\n        if not np.isnan(cur_row.Diagnosis):\n            true_label_and_prob += [(cur_row.Diagnosis, prob)]\n            if EDL:\n                alpha_with_mask.append(alpha)\n                uncertainty_with_mask.append(uncertainty)\n            else:\n                prob_with_mask.append(prob)\n\n    pred_diag = np.array(pred_diag)\n    pred_adas = np.array(pred_adas)\n    pred_vent = np.array(pred_vent)\n    true_diag = np.array(true_diag)\n    true_adas = np.array(true_adas)\n    true_vent = np.array(true_vent)\n\n    if EDL:\n        alpha_with_mask = np.array(alpha_with_mask, dtype=float)\n        uncertainty_with_mask = np.array(uncertainty_with_mask, dtype=float)\n    else:\n        prob_with_mask = np.array(prob_with_mask, dtype=float)\n\n    pred_diag, true_diag = mask(pred_diag, true_diag)\n    pred_adas, true_adas = mask(pred_adas, true_adas)\n    pred_vent, true_vent = mask(pred_vent, true_vent)\n\n    if EDL:\n        return true_label_and_prob, pred_diag, pred_adas, pred_vent, \\\n               true_diag, true_adas, true_vent, alpha_with_mask, uncertainty_with_mask, num_sample\n    else:\n        return true_label_and_prob, pred_diag, pred_adas, pred_vent, 
\\\n               true_diag, true_adas, true_vent, prob_with_mask, num_sample\n\n\ndef is_date_column(col):\n    \"\"\" Is the column of type datetime \"\"\"\n    return np.issubdtype(col.dtype, np.datetime64)\n\n\ndef eval_submission(ref_frame, pred_frame, EDL, draw=False, total_epoch=300, uncertainty_threshold=1):\n    \"\"\" Evaluate mAUC, BCA, ADAS13 MAE, and ventricles MAE \"\"\"\n    assert is_date_column(ref_frame['CognitiveAssessmentDate'])\n    assert is_date_column(ref_frame['ScanDate'])\n    assert is_date_column(pred_frame['Forecast Date'])\n\n    if EDL:\n        true_labels_and_prob, p_diag, p_adas, p_vent, t_diag, t_adas, t_vent, alpha, uncertainty, num_sample = \\\n            parse_data(ref_frame, pred_frame, EDL, uncertainty_threshold)\n    else:\n        true_labels_and_prob, p_diag, p_adas, p_vent, t_diag, t_adas, t_vent, prob, num_sample = \\\n            parse_data(ref_frame, pred_frame, EDL)\n\n    if len(p_diag) == 0:\n        mauc = float('NaN')\n        bca = float('NaN')\n    else:\n        mauc = MAUC(true_labels_and_prob, no_classes=3)\n        bca = calcBCA(p_diag, t_diag.astype(int), no_classes=3)\n\n    # classification loss and ECE on the validation/test set\n    if EDL:\n        if len(p_diag) == 0:\n            ece = float('NaN')\n            cls_loss = float('NaN')\n        else:\n            # ECE error\n            target = np.array(p_diag == t_diag, dtype=int)\n            confidence = np.array(1 - uncertainty) # confidence is 1 - uncertainty\n            ece = ece_binary(confidence, target, draw=draw)\n\n            # classification loss\n            alpha = torch.from_numpy(alpha)\n            label = torch.from_numpy(t_diag).to(torch.int64)\n            cls_loss = evidential_classification(alpha, label, np.log(50) / np.log(100) * total_epoch,\n                                                 total_epoch) / num_sample # EDL loss; at evaluation time the KL annealing coefficient is set to 1, so both terms have equal weight\n\n    else:\n        # ECE error\n        target = np.array(p_diag == t_diag, dtype=int)\n        confidence = prob.max(1) # confidence is the maximum of the logits, i.e. the probability of the predicted class\n        ece = ece_binary(confidence, target, draw=draw)\n\n        # classification loss\n        prob = torch.from_numpy(prob)\n        label = torch.from_numpy(t_diag).to(torch.int64)\n        cls_loss = F.cross_entropy(prob, label, reduction='sum') / num_sample\n\n    if len(p_diag) == 0:\n        adas = float('NaN')\n        vent = float('NaN')\n    else:\n        adas = np.mean(np.abs(p_adas - t_adas))\n        vent = np.mean(np.abs(p_vent - t_vent))\n\n    return {'mAUC': mauc, 'bca': bca, 'adasMAE': adas, 'ventsMAE': vent, 'cls_loss': cls_loss, 'ECE': ece}\n\n\ndef get_arg_parser(i):\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--reference', '-r', default='C:/Users/16046/Desktop/Programming/python/深度学习/证据深度学习/'\n                                                     'Trustworthy_AD/Trustworthy_AD/folds/fold' + str(i) + '_test.csv')\n    parser.add_argument('--prediction', '-p', default='C:/Users/16046/Desktop/Programming/python/深度学习/证据深度学习/'\n                                                      'Trustworthy_AD/Trustworthy_AD/output/Ours/prediction-ECE-测试' + str(\n        i) + '.csv')\n    parser.add_argument('--EDL', action='store_true', default=True) # whether to use uncertainty prediction\n    return parser\n\n\ndef main():\n    mAUC = []\n    bca = []\n    ECE = []\n    adasMAE = []\n    ventsMAE = []\n    for i in range(20):\n        args = get_arg_parser(i).parse_args()\n        result = eval_submission(misc.read_csv(args.reference), misc.read_csv(args.prediction), args.EDL, draw=True,\n                                 total_epoch=300, uncertainty_threshold=1) # evaluate and report the results\n\n        print('fold', i, '>>')\n        print('########### Metrics for clinical status ##################')\n        print('mAUC', result['mAUC'], 'bca', result['bca'], 'ECE', result['ECE'])\n        print('########### Mean Absolute Error (MAE) ##################')\n        print('adasMAE', result['adasMAE'], 'ventsMAE', result['ventsMAE'])\n        mAUC.append(result['mAUC'])\n        bca.append(result['bca'])\n        ECE.append(result['ECE'])\n        adasMAE.append(result['adasMAE'])\n        ventsMAE.append(result['ventsMAE'])\n        print('\\n')\n    mAUC = np.array(mAUC)\n    bca = 
np.array(bca)\n    ECE = np.array(ECE)\n    adasMAE = np.array(adasMAE)\n    ventsMAE = np.array(ventsMAE)\n    print('mAUC mean:', mAUC.mean(), 'mAUC std:', mAUC.std())\n    print('bca mean:', bca.mean(), 'bca std:', bca.std())\n    print('ECE mean:', ECE.mean(), 'ECE std:', ECE.std())\n    print('adasMAE mean:', adasMAE.mean(), 'adasMAE std:', adasMAE.std())\n    print('ventsMAE mean:', ventsMAE.mean(), 'ventsMAE std:', ventsMAE.std())\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"Mr-Talon/Trustworthy-AD","sub_path":"Trustworthy_AD/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":16689,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"24481552018","text":"from django.db.models.signals import pre_init\nimport django.dispatch\n\ndef f1(sender,signal,sql):\n\n    with open(\"logs/sql.log\",\"a\",encoding=\"utf-8\") as f:\n        f.write(\"%s took %s\\n\"%(sql.get(\"sql\"),sql.get(\"time\")))\n\n# pre_init.connect(f1)\n\ncmdb_log=django.dispatch.Signal(providing_args=['sql'])\n\ncmdb_log.connect(f1)","repo_name":"yiruiduan/cmdb","sub_path":"sg.py","file_name":"sg.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"30881793787","text":"from fastapi import APIRouter, Body, Request, Response, HTTPException\nfrom fastapi.encoders import jsonable_encoder\nfrom fastapi.responses import JSONResponse\nfrom typing import Optional, Union\nfrom bson import ObjectId\nfrom json import loads as loads_json\nfrom models.issue import IssueCreateModel, IssueUpdateModel, IssueGetAllModel, IssueGetModel, IssueFilterModel\nfrom models.shared import PaginationModel, SortModel\nfrom classification.main import get_prediction\n\nrouter = APIRouter(prefix=\"/issue\")\n\n\n# Create issue\n@router.post(\"/\", response_description=\"Create new issue\")\ndef create_issue(request: Request, issue: IssueCreateModel = Body(...)):\n    if not request.app.permission[\"authenticated\"]:\n        return JSONResponse(status_code=401, content=jsonable_encoder({\"detail\": \"Authentication failed! Token not provided\"}))\n\n    # only admin and QA can create issues\n    if request.app.permission[\"role\"] != \"ADMIN\" and request.app.permission[\"role\"] != \"QA\":\n        return JSONResponse(status_code=401, content=jsonable_encoder({\"detail\": \"You do not have access to add issues\"}))\n\n    try:\n        issue = issue.dict()\n        issue['dev_type'] = get_prediction(issue['description'])\n\n        request.app.database[\"issues\"].insert_one(issue)\n        return JSONResponse(status_code=201, content={\"detail\": \"Issue created successfully\"})\n\n    except Exception as error:\n        raise HTTPException(status_code=500, detail=str(error))\n\n\n# Update issue\n@router.put(\"/{id}\", response_description=\"Update issue\")\ndef update_issue(id: str, request: Request, issue: Optional[IssueUpdateModel] = Body(None)):\n    if not request.app.permission[\"authenticated\"]:\n        return JSONResponse(status_code=401, content=jsonable_encoder({\"detail\": \"Authentication failed! 
Token not provided\"}))\n\n    # only admin can update issues\n    # if request.app.permission[\"role\"] != \"ADMIN\":\n    #     return JSONResponse(status_code=401, content=jsonable_encoder({ \"detail\": \"access denied\" }))\n\n    try:\n        issue = issue.dict(exclude_none=True)\n\n        if \"dev_type\" in issue and \"description\" in issue:\n            issue[\"dev_type\"] = get_prediction(issue[\"description\"])\n\n        update_result = request.app.database[\"issues\"].update_one(\n            {\"_id\": ObjectId(id)},\n            {\"$set\": issue}\n        )\n\n        if update_result.modified_count != 1:\n            return JSONResponse(status_code=422, content=jsonable_encoder({\"detail\": \"Failed to update issue\"}))\n\n        return JSONResponse(status_code=200, content={\"detail\": \"Issue updated successfully\"})\n\n    except Exception as error:\n        raise HTTPException(status_code=500, detail=str(error))\n\n\n# Get highest issue ref\n@router.get(\"/highest-ref\", response_description=\"Get highest issue ref\")\ndef get_highest_issue_ref(request: Request, feature_id: str):\n    if not request.app.permission[\"authenticated\"]:\n        return JSONResponse(status_code=401, content=jsonable_encoder({\"detail\": \"Authentication failed! Token not provided\"}))\n\n    if not feature_id:\n        return {\"ref_number\": 0}\n\n    try:\n        result = request.app.database[\"issues\"].aggregate([\n            {\n                \"$match\": {\n                    \"ref\": {\"$regex\": \"^QA-[0-9]+$\"},\n                    \"feature_id\": ObjectId(feature_id)\n                }\n            },\n            {\n                \"$project\": {\n                    \"ref\": 1,\n                    \"ref_number\": {\n                        \"$toInt\": {\"$substr\": [\"$ref\", 3, -1]}\n                    }\n                }\n            },\n            {\n                \"$sort\": {\n                    \"ref_number\": -1\n                }\n            },\n            {\n                \"$limit\": 1\n            }\n        ])\n\n        ref = list(result)\n        return {\"ref_number\": ref[0][\"ref_number\"] if ref else 0}\n\n    except Exception as error:\n        raise HTTPException(status_code=500, detail=str(error))\n\n\n# Get all issues\n@router.get(\"/\", response_description=\"Get all issues\", response_model=IssueGetAllModel)\ndef get_all_issue(request: Request, feature_id: str, pagination: Union[str, None] = \"{}\", filters: Union[str, None] = \"{}\", sort: Union[str, None] = \"{}\"):\n    if not request.app.permission[\"authenticated\"]:\n        return JSONResponse(status_code=401, content=jsonable_encoder({\"detail\": \"Authentication failed! 
Token not provided\"}))\n\n    # block users other than admin who do not have access to the feature\n    if (request.app.permission[\"role\"] != \"ADMIN\" and feature_id not in request.app.permission[\"feature_ids\"]):\n        return JSONResponse(status_code=401, content=jsonable_encoder({\"detail\": \"You do not have access to this feature\"}))\n\n    if not feature_id:\n        return {\"total_documents\": 0, \"issues\": []}\n\n    try:\n        pagination = PaginationModel(**loads_json(pagination)).dict()\n        filters = IssueFilterModel(**loads_json(filters)).dict(exclude_none=True)\n        sort = SortModel(**loads_json(sort)).dict()\n\n        page_index = pagination[\"index\"]\n        page_limit = pagination[\"limit\"]\n        skip_count = page_index * page_limit\n\n        sort_field = sort[\"field\"] if sort[\"field\"] else \"ref\"\n        sort_dir = sort[\"direction\"]\n\n        issues = list(request.app.database[\"issues\"].aggregate([\n            {\n                \"$match\": {\"feature_id\": ObjectId(feature_id), **filters}\n            },\n            {\n                \"$lookup\": {\n                    \"from\": \"features\",\n                    \"localField\": \"feature_id\",\n                    \"foreignField\": \"_id\",\n                    \"as\": \"feature\"\n                }\n            },\n            {\n                \"$addFields\": {\n                    \"feature\": {\n                        \"$ifNull\": [\n                            {\"$arrayElemAt\": [\"$feature\", 0]}, None\n                        ]\n                    }\n                }\n            },\n            # {\n            #     \"$project\": { \"feature\": 0 }\n            # },\n            {\n                \"$lookup\": {\n                    \"from\": \"users\",\n                    \"localField\": \"reporter_id\",\n                    \"foreignField\": \"_id\",\n                    \"as\": \"reporter\"\n                }\n            },\n            {\n                \"$addFields\": {\n                    \"reporter\": {\n                        \"$ifNull\": [\n                            {\"$arrayElemAt\": [\"$reporter\", 0]}, None\n                        ]\n                    }\n                }\n            },\n            {\n                \"$lookup\": {\n                    \"from\": \"users\",\n                    \"localField\": \"dev_id\",\n                    \"foreignField\": \"_id\",\n                    \"as\": \"dev\"\n                }\n            },\n            {\n                \"$addFields\": {\n                    \"dev\": {\n                        \"$ifNull\": [\n                            {\"$arrayElemAt\": [\"$dev\", 0]}, None\n                        ]\n                    }\n                }\n            },\n            {\n                \"$lookup\": {\n                    \"from\": \"users\",\n                    \"localField\": \"qa_id\",\n                    \"foreignField\": \"_id\",\n                    \"as\": \"qa\"\n                }\n            },\n            {\n                \"$addFields\": {\n                    \"qa\": {\n                        \"$ifNull\": [\n                            {\"$arrayElemAt\": [\"$qa\", 0]}, None\n                        ]\n                    }\n                }\n            },\n            {\n                \"$skip\": skip_count\n            },\n            {\n                \"$limit\": page_limit\n            },\n            {\n                \"$sort\": {\n                    sort_field: sort_dir\n                }\n            },\n        ]))\n\n        total_documents = 0\n\n        if filters:\n            total_documents = len(issues)\n\n        if not filters:\n            total_documents = request.app.database[\"issues\"].count_documents({})\n\n        return {\n            \"total_documents\": total_documents,\n            \"issues\": issues\n        }\n\n    except Exception as error:\n        raise HTTPException(status_code=500, detail=str(error))\n\n\n# Get one issue\n@router.get(\"/{id}\", response_description=\"Get one issue\", response_model=IssueGetModel)\ndef get_one_issue(id: str, request: Request):\n    if not request.app.permission[\"authenticated\"]:\n        return JSONResponse(status_code=401, content=jsonable_encoder({\"detail\": \"Authentication failed! 
Token not provided\"}))\n\n    try:\n        issues = list(request.app.database[\"issues\"].aggregate([\n            {\n                \"$match\": {\n                    \"_id\": ObjectId(id)\n                }\n            },\n            {\n                \"$lookup\": {\n                    \"from\": \"features\",\n                    \"localField\": \"feature_id\",\n                    \"foreignField\": \"_id\",\n                    \"as\": \"feature\"\n                }\n            },\n            {\n                \"$addFields\": {\n                    \"feature\": {\n                        \"$ifNull\": [\n                            {\"$arrayElemAt\": [\"$feature\", 0]}, None\n                        ]\n                    }\n                }\n            },\n            {\n                \"$lookup\": {\n                    \"from\": \"users\",\n                    \"localField\": \"reporter_id\",\n                    \"foreignField\": \"_id\",\n                    \"as\": \"reporter\"\n                }\n            },\n            {\n                \"$addFields\": {\n                    \"reporter\": {\n                        \"$ifNull\": [\n                            {\"$arrayElemAt\": [\"$reporter\", 0]}, None\n                        ]\n                    }\n                }\n            },\n            {\n                \"$lookup\": {\n                    \"from\": \"users\",\n                    \"localField\": \"dev_id\",\n                    \"foreignField\": \"_id\",\n                    \"as\": \"dev\"\n                }\n            },\n            {\n                \"$addFields\": {\n                    \"dev\": {\n                        \"$ifNull\": [\n                            {\"$arrayElemAt\": [\"$dev\", 0]}, None\n                        ]\n                    }\n                }\n            },\n            {\n                \"$lookup\": {\n                    \"from\": \"users\",\n                    \"localField\": \"qa_id\",\n                    \"foreignField\": \"_id\",\n                    \"as\": \"qa\"\n                }\n            },\n            {\n                \"$addFields\": {\n                    \"qa\": {\n                        \"$ifNull\": [\n                            {\"$arrayElemAt\": [\"$qa\", 0]}, None\n                        ]\n                    }\n                }\n            }\n        ]))\n\n        if not len(issues):\n            return JSONResponse(status_code=404, content=jsonable_encoder({\"detail\": \"Issue not found\"}))\n\n        return issues[0]\n\n    except Exception as error:\n        raise HTTPException(status_code=500, detail=str(error))\n\n\n# Delete issue\n@router.delete(\"/{id}\", response_description=\"Delete issue\")\ndef delete_issue(id: str, request: Request, response: Response):\n    if not request.app.permission[\"authenticated\"]:\n        return JSONResponse(status_code=401, content=jsonable_encoder({\"detail\": \"Authentication failed! Token not provided\"}))\n\n    # only admin can delete issues\n    if request.app.permission[\"role\"] != \"ADMIN\":\n        return JSONResponse(status_code=401, content=jsonable_encoder({\"detail\": \"You do not have access to delete this issue\"}))\n\n    try:\n        delete_result = request.app.database[\"issues\"].delete_one({\"_id\": ObjectId(id)})\n\n        if delete_result.deleted_count != 1:\n            return JSONResponse(status_code=422, content=jsonable_encoder({\"detail\": \"Failed to delete issue\"}))\n\n        return JSONResponse(status_code=200, content=jsonable_encoder({\"detail\": \"Issue deleted successfully\"}))\n\n    except Exception as error:\n        raise HTTPException(status_code=500, detail=str(error))\n","repo_name":"dindakalista/Skripsi_DindaKalista","sub_path":"backend/routers/issue.py","file_name":"issue.py","file_ext":"py","file_size_in_byte":11861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"3389595490","text":"import direct.showbase.ShowBase as p3d_SB\nimport Scene, Fighter\n\nclass MyApp(p3d_SB.ShowBase):\n    def __init__(self):\n        p3d_SB.ShowBase.__init__(self)\n        \n        render.setShaderAuto()\n        base.disableMouse()\n        \n        myscene = Scene.Scene()\n        myscene.setupScene()\n        \n        myfighter = Fighter.Fighter()\n        myfighter.spawnFighter()\n        \n        taskMgr.doMethodLater(0, myfighter.controlFighter, 'controlFighter1')\n        \nif __name__ == \"__main__\":\n    myapp = MyApp()\n    myapp.run()","repo_name":"hoppfull/Learning-Python","sub_path":"Panda3D/panda1/ex15 - game1 -p2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"4164831970","text":"from datetime import datetime\nfrom hashlib import sha512, md5\nimport json\nimport os\nfrom pathlib import Path\nimport re\nimport time\nimport 
traceback\nimport shutil\n\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom selenium.common.exceptions import TimeoutException, WebDriverException\nfrom selenium.webdriver.support.expected_conditions import _find_element\n\nfrom constants import POSTS, DELAY, NUM_RETRIES, GENERATED_NUMBERS_PATH\n\n\n# def retry_on_error()\n\n\ndef only_digits(string):\n return re.sub('[^0-9]', '', string)\n\n\nclass Filename:\n def __init__(self, p):\n f_name, *_ = p.name.split('.')\n self.name, date, time = f_name.split('_')[-3:]\n self.datetime = datetime(*[*[int(x) for x in date.split('-')], *[int(x) for x in time.split('-')]])\n\n def later_than(self, other_datetime):\n return self.datetime >= other_datetime\n\n def earlier_than(self, other_datetime):\n return self.datetime <= other_datetime\n\n def in_between(self, other_datetime_min, other_datetime_max):\n return self.later_than(other_datetime_min) and self.earlier_than(other_datetime_max)\n\n\ndef compare_strings(soup_1, soup_2):\n hash_1, hash_2 = [sha512(soup.encode()).digest() for soup in [soup_1, soup_2]]\n return hash_1 == hash_2\n\n\ndef get_csv(p, sep, logger):\n if p.name.endswith('.json'):\n # with p.open('r') as f:\n # print(json.load(f))\n # print(f.read())\n return pd.read_json(str(p), orient='records')\n elif p.name.endswith('.csv'):\n try:\n return pd.read_csv(str(p), sep, index_col=False)\n except pd.errors.ParserError as exc:\n logger.error(\n '%s: %s. Traceback: %s', type(exc).__name__, str(exc),\n '; '.join(traceback.format_tb(exc.__traceback__)))\n else:\n logger.error(f\"The format of file '{p.name}' should be CSV or JSON.\")\n\n\ndef get_soup(p):\n with p.open('rb') as f:\n return BeautifulSoup(f.read(), 'html.parser')\n\n\ndef rename_path_with_suffix(path, suffix):\n ppath, extension = path.split('.')\n new_path = ppath + suffix + '.' + extension\n os.rename(path, new_path)\n\n\ndef remove_if_needed(\n p_2, country, t, arg_1, arg_2, num_1, num_2, logger\n):\n remove = False\n if country in POSTS:\n if compare_strings(arg_1, arg_2):\n remove = True\n else:\n if num_1 and num_2:\n if num_1 == num_2:\n remove = True\n elif compare_strings(arg_1, arg_2):\n remove = True\n if remove:\n try:\n os.remove(p_2)\n except FileNotFoundError:\n logger.error(\"File '%s' not found.\", p_2)\n except PermissionError:\n logger.error(\"You have no permission to remove the file '%s'.\", p_2)\n else:\n logger.info(\"The file '%s' has changed. Keeping the new version.\", p_2)\n numbers = {}\n with open('logs/numbers.json', 'rb') as f:\n numbers = json.load(f)\n with open('logs/numbers.json', 'w') as f:\n if not numbers.get(country):\n numbers[country] = {t: num_2}\n else:\n numbers[country][t] = num_2\n json.dump(numbers, f)\n\n\ndef remove_latest_if_csv_unchanged(p_1, p_2, country, sep, t, Csv, logger):\n csv_1, csv_2 = [get_csv(p, sep, logger) for p in [p_1, p_2]]\n num_1 = None\n num_2 = None\n try:\n csv_1, num_1 = getattr(Csv, country.lower())(csv_1)\n csv_2, num_2 = getattr(Csv, country.lower())(csv_2)\n except AssertionError:\n logger.warning(\"The structure of the file '%s' have changed. Check the source.\", p_2)\n os.rename(str(p_2), os.path.join(p_2.parents[0], 'processing_exception', p_2.name))\n except Exception as exc:\n logger.error('%s: %s. 
Traceback: %s', type(exc).__name__, str(exc), '; '.join(traceback.format_tb(exc.__traceback__)))\n os.rename(p_2, os.path.join(p_2.parents[0], 'processing_exception', p_2.name))\n else:\n logger.debug('%s: %s %s', country, num_1, num_2)\n remove_if_needed(\n p_2, country, t, csv_1, csv_2, num_1, num_2, logger\n )\n\n\ndef remove_latest_if_page_unchanged(p_1, p_2, country, t, Soup, logger):\n try:\n soup_1, soup_2 = [get_soup(p) for p in [p_1, p_2]]\n except UnicodeDecodeError:\n logger.warning('Unicode decode error.')\n return\n num_1 = None\n num_2 = None\n try:\n soup_1, num_1 = getattr(Soup, country.lower())(soup_1)\n soup_2, num_2 = getattr(Soup, country.lower())(soup_2)\n except AssertionError:\n logger.warning(\"The structure of the file '%s' has changed. Check the source.\", p_2)\n os.rename(p_2, os.path.join(p_2.parents[0], 'processing_exception', p_2.name))\n except Exception as exc:\n logger.error('%s: %s. Traceback: %s', type(exc).__name__, str(exc), '; '.join(traceback.format_tb(exc.__traceback__)))\n os.rename(p_2, os.path.join(p_2.parents[0], 'processing_exception', p_2.name))\n else:\n logger.debug('%s: %s %s', country, num_1, num_2)\n remove_if_needed(\n p_2, country, t, soup_1, soup_2, num_1, num_2, logger\n )\n\n\n# Fixing a mess\ndef copy_if_changed(\n p_2, country, arg_1, arg_2, num_1, num_2, logger\n):\n copy = True\n if country in POSTS:\n if compare_strings(arg_1, arg_2):\n copy = False\n else:\n if num_1 and num_2:\n if num_1 == num_2:\n copy = False\n elif compare_strings(arg_1, arg_2):\n copy = False\n if copy:\n name = Path(p_2).name\n name = name if name.split('.')[-1] == 'html' else '.'.join(name.split('.')[:-1])\n logger.info(f'NAME: data/{name}')\n shutil.copyfile(p_2, f'data/{name}')\n logger.info(\"The file '%s' has changed. Keeping the new version.\", p_2)\n numbers = {}\n with open('logs/numbers.json', 'rb') as f:\n numbers = json.load(f)\n with open('logs/numbers.json', 'w') as f:\n t = Filename(p_2).datetime.strftime('%Y-%m-%d_%H-%M-%S')\n if not numbers.get(country):\n numbers[country] = {t: num_2}\n else:\n numbers[country][t] = num_2\n json.dump(numbers, f)\n\n\ndef copy_latest_if_csv_changed(p_1, p_2, country, sep, Csv, logger):\n logger.info(p_2)\n csv_1, csv_2 = [get_csv(p, sep, logger) for p in [p_1, p_2]]\n num_1 = None\n num_2 = None\n try:\n csv_1, num_1 = getattr(Csv, country.lower())(csv_1)\n csv_2, num_2 = getattr(Csv, country.lower())(csv_2)\n except AssertionError:\n logger.warning(\"The structure of the file '%s' has changed. Check the source.\", p_2)\n except Exception as exc:\n logger.error('%s: %s. Traceback: %s', type(exc).__name__, str(exc), '; '.join(traceback.format_tb(exc.__traceback__)))\n else:\n logger.debug('%s: %s %s', country, num_1, num_2)\n copy_if_changed(\n p_2, country, csv_1, csv_2, num_1, num_2, logger\n )\n\n\ndef copy_latest_if_page_changed(p_1, p_2, country, Soup, logger):\n logger.info(p_2)\n try:\n soup_1, soup_2 = [get_soup(p) for p in [p_1, p_2]]\n except UnicodeDecodeError:\n logger.warning('Unicode decode error.')\n return\n num_1 = None\n num_2 = None\n try:\n soup_1, num_1 = getattr(Soup, country.lower())(soup_1)\n soup_2, num_2 = getattr(Soup, country.lower())(soup_2)\n except AssertionError:\n logger.warning(\"The structure of the file '%s' has changed. Check the source.\", p_2)\n except Exception as exc:\n logger.error('%s: %s. 
Traceback: %s', type(exc).__name__, str(exc), '; '.join(traceback.format_tb(exc.__traceback__)))\n else:\n logger.debug('%s: %s %s', country, num_1, num_2)\n copy_if_changed(\n p_2, country, soup_1, soup_2, num_1, num_2, logger\n )\n\n\n# Send to numbers\ndef send_to_numbers(p, country, num):\n numbers = {}\n with open(GENERATED_NUMBERS_PATH, 'rb') as f:\n numbers = json.load(f)\n with open(GENERATED_NUMBERS_PATH, 'w') as f:\n t = Filename(p).datetime.strftime('%Y-%m-%d_%H-%M-%S')\n if not numbers.get(country):\n numbers[country] = {t: num}\n else:\n numbers[country][t] = num\n json.dump(numbers, f)\n\n\ndef send_to_numbers_csv(p, country, sep, Csv, logger):\n csv = get_csv(p, sep, logger)\n num = None\n try:\n csv, num = getattr(Csv, country.lower())(csv)\n except AssertionError:\n logger.warning(\"The structure of the file '%s' has changed. Check the source.\", p)\n except Exception as exc:\n logger.error(\"'%s' %s: %s\", p, type(exc).__name__, str(exc))\n else:\n logger.debug('%s: %s %s', country, num)\n send_to_numbers(p, country, num)\n\n\ndef send_to_numbers_html(p, country, Soup, logger):\n try:\n soup = get_soup(p)\n except UnicodeDecodeError:\n logger.warning('Unicode decode error.')\n return\n num = None\n try:\n soup, num = getattr(Soup, country.lower())(soup)\n except AssertionError:\n logger.warning(\"The structure of the file '%s' has changed. Check the source.\", p)\n except Exception as exc:\n logger.error(\"'%s' %s: %s\", p, type(exc).__name__, str(exc))\n else:\n logger.debug('%s: %s %s', country, num)\n if num is None:\n if soup is not None:\n send_to_numbers(p, country, md5(soup.encode()).hexdigest())\n else:\n pass\n else:\n send_to_numbers(p, country, num)\n\n\n# class text_match(object):\n# def __init__(self, locator, regexp):\n# self.locator = locator\n# self.regexp = regexp\n\n# def __call__(self, driver):\n# element_text = _find_element(driver, self.locator).text\n# if self.regexp is None:\n# return element_text\n# else:\n# return re.search(re.compile(self.regexp), element_text)\n\n\ndef wait_until_xpath(\n browser, url, xpath, logger, *, before_wait=None, errors=0, sleep=1\n):\n try:\n browser.get(url)\n if before_wait is not None:\n before_wait(browser)\n # WebDriverWait(browser, DELAY).until(text_match((By.XPATH, xpath), pattern))\n WebDriverWait(browser, DELAY).until(\n EC.presence_of_element_located((\n By.XPATH,\n xpath\n ))\n )\n time.sleep(sleep)\n return False\n except TimeoutException:\n errors += 1\n logger.warning(\"Request for '%s' timed out. Retrying.\", url)\n if errors < NUM_RETRIES:\n wait_until_xpath(\n browser, url, xpath, logger,\n before_wait=before_wait, errors=errors, sleep=sleep)\n else:\n logger.error(\"Info for '%s' could not be loaded.\", url)\n return True\n except Exception as exc:\n logger.error(\"'%s' %s: %s\", url, type(exc).__name__, str(exc))\n","repo_name":"digitalepidemiologylab/covid-scraper","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":11088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"40430147853","text":"def check(N):\n answer= []\n for i in range(2, N):\n if N % i == 0:\n answer.append(i)\n return answer\n\ndef abs(li, k):\n N = k * 2\n while True:\n if li == check(N):\n print(N)\n break\n else:\n N += k\n\na = int(input())\nli = list(map(int, input().split()))\nli.sort()\nabs(li, max(li))","repo_name":"DalkomCandy/Python","sub_path":"01. 
Modules/Test/특정 약수를 가지는 수.py","file_name":"특정 약수를 가지는 수.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"8212897302","text":"from __future__ import print_function\nimport os\nimport time\nimport signal\nimport shlex\nimport subprocess\nimport fcntl\nfrom infrasim import CommandRunFailed\nfrom infrasim.log import infrasim_log, LoggerType\nfrom infrasim.helper import run_in_namespace, double_fork\nfrom infrasim.filelock import FileLock\nfrom infrasim.colors import icolors\n\n\nclass Task(object):\n def __init__(self):\n # priroty should be range from 0 to 5\n # +-----+-----+-----+----+-----+\n # | 0 | 1 | 2 | 3 | 4 |\n # +-----+-----+-----+----+-----+\n # |High | | Low |\n # +-----+-----+-----+----+-----+\n self.__task_priority = None\n self.__workspace = None\n self.__task_name = None\n self.__log_path = \"\"\n self.__logger = infrasim_log.get_logger(LoggerType.model.value)\n\n # If any task set the __asyncronous to True,\n # this task shall only be maintained with information\n # no actual run shall be taken\n self.__asyncronous = False\n self.__netns = None\n self.checking_time = 1\n\n @property\n def netns(self):\n return self.__netns\n\n @netns.setter\n def netns(self, ns):\n self.__netns = ns\n\n def set_priority(self, priority):\n self.__task_priority = priority\n\n def get_priority(self):\n return self.__task_priority\n\n def set_task_name(self, name):\n self.__task_name = name\n\n def get_task_name(self):\n return self.__task_name\n\n def get_commandline(self):\n self.__logger.exception(\"get_commandline not implemented\")\n raise NotImplementedError(\"get_commandline not implemented\")\n\n def set_workspace(self, directory):\n self.__workspace = directory\n\n def get_workspace(self):\n return self.__workspace\n\n def set_log_path(self, log_path):\n self.__log_path = log_path\n\n @property\n def logger(self):\n return self.__logger\n\n @logger.setter\n def logger(self, logger):\n self.__logger = logger\n\n def set_asyncronous(self, asyncr):\n self.__asyncronous = asyncr\n\n def get_pid_file(self):\n return \"{}/.{}.pid\".format(self.__workspace, self.__task_name)\n\n def get_task_pid(self):\n pid = \"-1\"\n try:\n with open(self.get_pid_file(), \"r\") as f:\n pid = f.readline().strip()\n except Exception:\n pid = \"-1\"\n finally:\n pid = \"-1\" if pid == '' else pid\n return int(pid)\n\n def __task_is_running(self, pid):\n return pid > 0 and os.path.exists(\"/proc/{}\".format(pid))\n\n def task_is_running(self, pid=-1):\n pid = self.get_task_pid() if pid < 0 else pid\n return self.__task_is_running(pid)\n\n def __wait_task_completed(self, lock, pid=-1, timeout=15):\n timeout = timeout - self.checking_time + 1\n\n start = time.time()\n while True:\n if time.time() - start > timeout:\n break\n\n if self.task_is_running(pid):\n break\n\n lock.release()\n time.sleep(0.5)\n lock.acquire()\n\n # in case the process created, but exit accidently, so\n # check again\n return self.task_is_running(pid)\n\n def __print_task(self, pid, name, state, color=icolors.GREEN):\n print(\"{}{}{}\".format(icolors.WHITE, \"[\", icolors.NORMAL), end='')\n print(\" {}{:<6}{} \".format(color, pid, icolors.NORMAL), end='')\n print(\"{}{}{}\".format(icolors.WHITE, \"]\", icolors.NORMAL), end='')\n print(\" {} is {}.\".format(name, state))\n\n @run_in_namespace\n def run(self):\n pid_file = self.get_pid_file()\n lock = FileLock(\"{}.lck\".format(pid_file))\n if self.__asyncronous:\n with lock.acquire():\n 
if self.__wait_task_completed(lock):\n self.__print_task(self.get_task_pid(), self.__task_name, \"running\")\n self.__logger.info(\"[ {:<6} ] {} is running\".format(self.get_task_pid(),\n self.__task_name))\n self.post_run()\n else:\n self.__print_task(' - ', self.__task_name, \"not running\", icolors.RED)\n return\n\n cmdline = self.get_commandline()\n\n self.__logger.info(\"{}'s command line: {}\".\n format(self.__task_name, cmdline))\n\n with lock.acquire():\n if self.task_is_running():\n self.__print_task(self.get_task_pid(), self.__task_name, \"running\")\n self.__logger.info(\"[ {:<6} ] {} is already running\".format(self.get_task_pid(),\n self.__task_name))\n return\n elif os.path.exists(pid_file):\n # If the qemu quits exceptionally when starts, pid file is also\n # created, but actually the qemu died.\n os.remove(pid_file)\n\n pid = self.execute_command(cmdline, self.__logger, log_path=self.__log_path, duration=self.checking_time)\n\n if self.__wait_task_completed(lock, pid):\n self.__print_task(pid, self.__task_name, \"running\")\n self.__logger.info(\"[ {:<6} ] {} starts to run\".format(pid, self.__task_name))\n\n with open(pid_file, \"w\") as f:\n if os.path.exists(\"/proc/{}\".format(pid)):\n f.write(\"{}\".format(pid))\n\n def terminate(self):\n pid_file = self.get_pid_file()\n lock = FileLock(\"{}.lck\".format(pid_file))\n with lock.acquire():\n task_pid = self.get_task_pid()\n try:\n if self.__task_is_running(task_pid):\n os.kill(task_pid, signal.SIGTERM)\n time.sleep(1)\n if self.__task_is_running(task_pid):\n os.kill(task_pid, signal.SIGKILL)\n self.__print_task(task_pid, self.__task_name, \"stopped\", icolors.RED)\n self.__logger.info(\"[ {:<6} ] {} stop\".format(task_pid, self.__task_name))\n else:\n self.__print_task(' - ', self.__task_name, \"stopped\", icolors.RED)\n self.__logger.info(\"[ {:<6} ] {} is stopped\".format(\"\", self.__task_name))\n\n if os.path.exists(pid_file):\n os.remove(pid_file)\n except OSError:\n if not self.__task_is_running(task_pid):\n if os.path.exists(pid_file):\n os.remove(pid_file)\n\n self.__print_task(task_pid, self.__task_name, \"stopped\", icolors.RED)\n self.__logger.info(\"[ {:<6} ] {} is stopped\".format(task_pid, self.__task_name))\n else:\n self.__print_task(task_pid, self.__task_name, \"running\")\n self.__logger.info(\"[ {:<6} ] {} stop failed.\".format(task_pid, self.__task_name))\n\n def status(self):\n pid_file = self.get_pid_file()\n lock = FileLock(\"{}.lck\".format(pid_file))\n with lock.acquire():\n task_pid = self.get_task_pid()\n if not self.__task_is_running(task_pid):\n if os.path.exists(pid_file):\n os.remove(pid_file)\n self.__print_task(' - ' if task_pid < 0 else task_pid, self.__task_name, \"stopped\", icolors.RED)\n else:\n self.__print_task(task_pid, self.__task_name, \"running\")\n\n @staticmethod\n @double_fork\n def execute_command(command, logger, log_path=\"\", duration=1):\n args = shlex.split(command)\n proc = subprocess.Popen(args, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=False)\n\n flags = fcntl.fcntl(proc.stderr, fcntl.F_GETFL)\n fcntl.fcntl(proc.stderr, fcntl.F_SETFL, flags | os.O_NONBLOCK)\n time.sleep(duration)\n\n errout = None\n try:\n errout = proc.stderr.read()\n except IOError:\n pass\n if errout is not None:\n if log_path:\n with open(log_path, 'w') as fp:\n fp.write(errout)\n else:\n logger.error(errout)\n\n if not os.path.isdir(\"/proc/{}\".format(proc.pid)):\n logger.exception(\"command {} run failed\".format(command))\n raise 
CommandRunFailed(command, errout)\n\n return proc.pid\n\n def post_run():\n pass\n","repo_name":"InfraSIM/infrasim-compute","sub_path":"infrasim/model/core/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":8618,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"71"} +{"seq_id":"33410493137","text":"__author__ = 'wzy'\n__date__ = '2020/2/29 17:18'\n\nfrom django.urls import path\n\nfrom .views import UserListView,UploadImageView,UpdatePwdView,SendEmailcodeView,UpdateEmailView,UserCourseView,FavorCourseView,FavorOrgView,FavorTeacherView,MessageView\n\n\nurlpatterns = [\n path('list/', UserListView.as_view(), name='user_list'),\n path('upload_image/', UploadImageView.as_view(), name='upload_image'),\n path('update_pwd/', UpdatePwdView.as_view(), name='update_pwd'),\n path('sendemail_code/', SendEmailcodeView.as_view(), name='sendemail_code'),\n path('update_email/', UpdateEmailView.as_view(), name='update_email'),\n path('mycourse/', UserCourseView.as_view(), name='mycourse'),\n path('fav_course/', FavorCourseView.as_view(), name='fav_course'),\n path('fav_teacher/', FavorTeacherView.as_view(), name='fav_teacher'),\n path('fav_org/', FavorOrgView.as_view(), name='fav_org'),\n path('mymessage/', MessageView.as_view(), name='mymessage'),\n]\n\nhandler404 = 'users.views.page_not_found'\nhandler500 = 'users.views.page_error'","repo_name":"colden-rabbit/edut_online","sub_path":"apps/users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"19477691935","text":"from collections import deque\n\nn = int(input())\nboard = [list(map(int, input())) for _ in range(n)]\nhouse = []\n\ndr = [0, 0, -1, 1]\ndc = [-1, 1, 0, 0]\n\n\ndef bfs(r, c):\n queue = deque()\n queue.append([r, c])\n board[r][c] = 0\n cnt = 0\n while queue:\n r, c = queue.popleft()\n for i in range(4):\n nr, nc = r + dr[i], c + dc[i]\n if 0 <= nr < n and 0 <= nc < n and board[nr][nc]:\n queue.append([nr, nc])\n board[nr][nc] = 0\n cnt += 1\n return cnt\n\n\nfor r in range(n):\n for c in range(n):\n if board[r][c]:\n house.append(bfs(r, c))\n\nhouse.sort()\nprint(len(house))\nprint('\\n'.join(map(str, house)))\n","repo_name":"marsboy02/algorithm-solving","sub_path":"workbook/1983-DFS+BFS 필수 문제/2668-단지번호붙이기.py","file_name":"2668-단지번호붙이기.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"44563733663","text":"from math import lcm\n\nclass Monkey():\n def __init__(self,specs):\n self.inspections = 0\n self.id = int(specs[0].split()[1][0])\n #could be 19 if whitespace included\n self.items: list = list(map(int,specs[1][16:].split(\",\")))\n self.operation: str = specs[2].split()[3:]\n self.test_divide_by: int = int(specs[3].split()[3])\n self.test_true: int = int(specs[4].split()[5])\n self.test_false: int = int(specs[5].split()[5])\n \n def test_item(self,item):\n return True if item % self.test_divide_by == 0 else False\n \n def throw_to(self, item):\n return self.test_true if self.test_item(item) == True else self.test_false\n\n def find_equivalent(self,num):\n factors = [2,3,5,7,11,13,17,19]\n #newnum = 1\n #isdivisible = list(filter(lambda x: int(num/3) % x == 0,factors))\n #for n in range(len(factors)):\n #if isdivisible[n] == True:\n #newnum *= factors[n]\n #print(\"Number\",num, \"is divisible by:\",isdivisible, \"Smallest 
number:\",lcm(*isdivisible))\n return num % lcm(*factors)\n #return lcm(*isdivisible) \n\n def update_worry_level(self,item):\n opcodes = {\"+\" : lambda x, y: x+y, \"*\" : lambda x,y: x*y}\n #return item + int(self.operation[2]) if self.operation[2] != \"old\" else item\n if self.operation[2] != \"old\":\n return opcodes[self.operation[1]](item,int(self.operation[2])) \n else: return item*item\n\n def update_worry_level_large(self,item):\n opcodes = {\"+\" : lambda x, y: x+y, \"*\" : lambda x,y: x*y}\n if self.operation[2] != \"old\":\n result = opcodes[self.operation[1]](item,int(self.operation[2]))\n else: result = item*item\n return self.find_equivalent(result)\n\n def play_round(self,monkeys):\n #For every item\n for i in range(len(self.items)-1,-1,-1):\n # inpect (increment inspect)\n self.inspections += 1\n # update worry lvl by operation\n #self.items[i] = self.update_worry_level(self.items[i])\n self.items[i] = self.update_worry_level_large(self.items[i])\n # update worry lvl by int(worry_lvl / 3)\n #print(\"On item num:\", i+1,\"out of\",len(self.items))\n #self.items[i] = int(self.items[i] / 3)\n # determine who to and throw_to\n monkey_to_throw_to = self.throw_to(self.items[i])\n # remove item from list and give it to other monkey from monkeys list\n monkeys[monkey_to_throw_to].items.append(self.items[i])\n self.items.pop()\n # repeat until all items thrown\n\n\n\ndef create_monkeys():\n buffer = []\n monkey_specs = []\n monkeys = []\n with open(\"./input.txt\", 'r') as file:\n for line in file.readlines():\n if line == \"\\n\":\n monkey_specs.append(buffer) \n buffer = []\n else:\n buffer.append(line.removesuffix('\\n'))\n if len(buffer) > 0:\n monkey_specs.append(buffer)\n for specs in monkey_specs:\n monkeys.append(Monkey(specs)) \n return monkeys\n\nmonkes = create_monkeys()\nlist_of_inspections = []\nfor n in range(10000):\n for i in range(len(monkes)):\n monkes[i].play_round(monkes)\nfor monke in monkes:\n print(f\"Monke id: {monke.id}, Monke items: {monke.items}, Monke inspection count: {monke.inspections}\")\n\ninspections = sorted(list(map(lambda x: x.inspections,monkes)))\nprint(inspections, inspections[-1]*inspections[-2])\n","repo_name":"MirruK/AoC2022","sub_path":"AoCday11/monkey_business.py","file_name":"monkey_business.py","file_ext":"py","file_size_in_byte":3538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"2702195763","text":"def swap(data, x, y):\n temp = data[x]\n data[x] = data[y]\n data[y] = temp\n\ndef main():\n input_str = \"43,19,90,19,19,87,42,42,21,22\"\n data = input_str.split(\",\")\n print(data)\n for i in range(len(data)):\n min_index = i\n for j in range(i + 1, len(data)):\n if data[j] < data[min_index]:\n min_index = j\n if i != min_index:\n swap(data, i, min_index)\n print(data) \n\nif __name__ == '__main__':\n main()","repo_name":"James992927108/practice","sub_path":"Sort/SelectionSort.py","file_name":"SelectionSort.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"71476397031","text":"\"\"\"\nNatural language Taggers for Bulgarian. 
They are all model-based.\n\"\"\"\nimport os\nimport re\nfrom typing import List, Dict, Any\nfrom abc import ABC, abstractmethod\n\nimport torch\nfrom torch import nn\nimport gdown\nfrom transformers import logging\nfrom transformers import AutoTokenizer, AutoModelForTokenClassification\n\nfrom bgnlp.models import LemmaBert\nfrom bgnlp.tools.mixins import SubwordMixin\nfrom bgnlp.tools.tokenizers import (\n CharacterBasedPreTokenizer, CharacterBasedTokenizer\n)\nfrom bgnlp.tools.configs import ModelConfig, PosTaggerConfig\n\n\n# Logging only error messages from HuggingFace.\nlogging.set_verbosity_error()\n\n\nclass BaseTagger(ABC):\n\n @abstractmethod\n def get_tokenizer(self):\n pass\n\n @abstractmethod\n def get_model(self):\n pass\n\n @abstractmethod\n def predict(self):\n pass\n\n def load_model(self, model_obj):\n if os.path.exists(self.config.model_path):\n model_obj.load_state_dict(torch.load(self.config.model_path, map_location=self.config.device))\n else:\n # Downloading the model if it doesn't exist locally.\n # The model is not deployed with the PyPI package - hence, the download below.\n gdown.download(\n self.config.model_url, \n self.config.model_path, \n quiet=False\n )\n model_obj.load_state_dict(torch.load(self.config.model_path, map_location=self.config.device))\n\n return model_obj\n\n\nclass PosTagger(BaseTagger, SubwordMixin):\n \"\"\"Part-of-speech tagger. Tagging is done using a BERT model trained on \n [Wiki1000+ Bulgarian corpus](http://dcl.bas.bg/wikiCorpus.html).\n \n Args:\n config (ModelConfig): Configuration of the PosTagger.\n \"\"\"\n\n def __init__(self, config: ModelConfig):\n self.config = config\n self.tokenizer = self.get_tokenizer()\n self.model = self.get_model().to(self.config.device)\n\n # TODO: Have better descriptions. Figure out the full meanings of the tags.\n self.TAGS_MAPPING = {\n \"N\": {\n \"en\": \"noun\",\n \"bg\": \"съществително име\"\n },\n \"A\": {\n \"en\": \"adjective\",\n \"bg\": \"прилагателно име\"\n },\n \"P\": {\n \"en\": \"pronoun\",\n \"bg\": \"местоимение\"\n },\n \"B\": {\n \"en\": \"numeral\",\n \"bg\": \"числително име\"\n },\n \"V\": {\n \"en\": \"verb\",\n \"bg\": \"глагол\"\n },\n \"D\": {\n \"en\": \"adverb\",\n \"bg\": \"наречие\"\n },\n \"C\": {\n \"en\": \"conjunction\",\n \"bg\": \"съюз\"\n },\n \"T\": {\n \"en\": \"particle\",\n \"bg\": \"частица\"\n },\n \"R\": {\n \"en\": \"preposition\",\n \"bg\": \"предлог\"\n },\n \"I\": {\n \"en\": \"interjection\",\n \"bg\": \"междуметие\"\n },\n \"U\": {\n \"en\": \"punctuation\",\n \"bg\": \"препинателен знак\"\n }\n }\n\n def __call__(self, text: str, max_len=64) -> List[Dict[str, str]]:\n \"\"\"Tag each one of the words in `text` with a part-of-speech tag.\n\n Args:\n text (str): Text in Bulgarian.\n max_len (int, optional): The maximum number of words that you can have in `text`. 
Defaults to 64.\n\n Returns:\n List[Dict[str, str]]: List of dictionaries for each word and its tags.\n\n Example::\n\n >>> from bgnlp import pos\n >>> pos(\"Това е библиотека за обработка на естествен език.\")\n [{\n \"word\": \"Това\",\n \"tag\": \"PDOsn\",\n \"bg_desc\": \"местоимение\",\n \"en_desc\": \"pronoun\"\n }, {\n \"word\": \"е\",\n \"tag\": \"VLINr3s\",\n \"bg_desc\": \"глагол\",\n \"en_desc\": \"verb\"\n }, {\n \"word\": \"библиотека\",\n \"tag\": \"NCFsof\",\n \"bg_desc\": \"съществително име\",\n \"en_desc\": \"noun\"\n }, {\n \"word\": \"за\",\n \"tag\": \"R\",\n \"bg_desc\": \"предлог\",\n \"en_desc\": \"preposition\"\n }, {\n \"word\": \"обработка\",\n \"tag\": \"NCFsof\",\n \"bg_desc\": \"съществително име\",\n \"en_desc\": \"noun\"\n }, {\n \"word\": \"на\",\n \"tag\": \"R\",\n \"bg_desc\": \"предлог\",\n \"en_desc\": \"preposition\"\n }, {\n \"word\": \"естествен\",\n \"tag\": \"Asmo\",\n \"bg_desc\": \"прилагателно име\",\n \"en_desc\": \"adjective\"\n }, {\n \"word\": \"език\",\n \"tag\": \"NCMsom\",\n \"bg_desc\": \"съществително име\",\n \"en_desc\": \"noun\"\n }, {\n \"word\": \".\",\n \"tag\": \"U\",\n \"bg_desc\": \"препинателен знак\",\n \"en_desc\": \"punctuation\"\n }]\n \"\"\"\n self.max_len = max_len\n\n return self.predict(text)\n\n def predict(self, text: str) -> List[Dict[str, str]]:\n \"\"\"Tag each one of the words in `text` with a part-of-speech tag.\n\n Args:\n text (str): Text in Bulgarian.\n\n Returns:\n List[Dict[str, str]]: List of dictionaries for each word and its tags.\n \"\"\"\n text = PosTagger._preprocess_text(text)\n\n tokens_data = self.tokenizer(\n text,\n max_length=self.max_len,\n padding=\"max_length\"\n )\n\n # Preparing the input to the model.\n input_ids, attn = tokens_data[\"input_ids\"], tokens_data[\"attention_mask\"]\n input_ids = torch.LongTensor(input_ids).unsqueeze(0).to(self.config.device)\n attn = torch.LongTensor(attn).unsqueeze(0).to(self.config.device)\n\n # Making a prediction.\n self.model.eval()\n with torch.no_grad():\n pred = self.model(input_ids, attention_mask=attn).logits\n pred = pred.argmax(-1).squeeze(0)\n self.model.train()\n\n pred = [\n self.config.id2label[int(id_)] \n for id_ in pred\n ]\n\n return self._format_prediction(\n input_tokens=self.tokenizer.convert_ids_to_tokens(\n tokens_data[\"input_ids\"]\n ), \n prediction=pred\n )\n\n def get_tokenizer(self) -> AutoTokenizer:\n \"\"\"Get the tokenizer for the used model.\n\n Returns:\n AutoTokenizer: HuggingFace AutoTokenizer.\n \"\"\"\n return AutoTokenizer.from_pretrained(self.config.base_model_id)\n\n def get_model(self) -> nn.Module:\n \"\"\"Get the model used for tagging. When this method is called for the first\n time, the model is downloaded. Afterwards, it should be part of your package.\n\n Returns:\n nn.Module: PyTorch Module.\n \"\"\"\n bert = AutoModelForTokenClassification.from_pretrained(\n self.config.base_model_id, \n num_labels=len(self.config.label2id), \n label2id=self.config.label2id\n )\n bert.resize_token_embeddings(len(self.tokenizer))\n bert = self.load_model(bert)\n\n return bert\n\n @staticmethod\n def _preprocess_text(text: str):\n \"\"\"Prepare the string `text` for the model. 
\n \n This includes:\n - Surrounding punctuation with whitespace\n - Converting multiple consecutive whitespaces into one.\n\n Args:\n text (str): _description_\n\n Returns:\n str: _description_\n \"\"\"\n text = re.sub(r\"([.,!?:;])+\", r\" \\1 \", text)\n text = re.sub(r\"(\\s+)\", \" \", text)\n\n return text.strip()\n \n def _format_prediction(self, input_tokens: List[str], prediction: List[str]) -> List[Dict[str, str]]:\n \"\"\"Format the prediction returned from the model.\n\n Since the tokenizer of the model is a subword tokenizer, the words\n are split into multiple subwords. The task of this method is to merge \n them and then create a dictionaries with their tags.\n\n Args:\n input_tokens (List[str]): Input words (tokens) as strings.\n prediction (List[str]): Predicted tags.\n\n Returns:\n List[Dict[str, str]]: All found words and their tags.\n \"\"\"\n tags = []\n\n # This method is taken from the SubwordMixin class.\n # Here, I am passing all tokens except for the 1st one - [CLS].\n tokens = self.subwords_to_words(input_tokens[1:])\n\n # Getting all predicted tokens (except [CLS]) up until [SEP].\n for token in prediction[1:]:\n if token == \"[SEP]\":\n break\n tags.append(token)\n\n result = []\n for word, tag in zip(tokens, tags):\n result.append({\n \"word\": word,\n \"tag\": tag,\n \"bg_desc\": self._get_tag_description(lang=\"bg\", tag=tag),\n \"en_desc\": self._get_tag_description(lang=\"en\", tag=tag)\n })\n\n return result\n\n def _get_tag_description(self, lang: str, tag: str) -> str:\n \"\"\"Map `tag` to its description based on `lang`\n\n Args:\n lang (str): Language - either 'bg' or 'en'.\n tag (str): PoS tag as a string.\n\n Returns:\n str: The hardcoded tag description, based on `self.TAGS_MAPPING`.\n \"\"\"\n first_tag = tag[0]\n description = self.TAGS_MAPPING[first_tag][lang]\n\n return description\n\n\nclass Lemmatizer(BaseTagger):\n \"\"\"Lemmatize a word. This is only for single-word lemmatization. If you want \n to lemmatize multiple words, please use :ref:`LemmaTagger`.\n\n Args:\n config (ModelConfig): Configuration of the :ref:`LemmaBert` model.\n \"\"\"\n\n def __init__(self, config: ModelConfig):\n self.config = config\n self.tokenizer = self.get_tokenizer()\n self.model = self.get_model()\n\n def __call__(self, word: str, pos: str) -> str:\n \"\"\"Convert `word` into its lemma.\n\n Args:\n word (str): Word in Bulgarian.\n pos (str): Its part-of-speech tag.\n\n Returns:\n str: The lemma of `word`.\n\n Example::\n >>> from bgnlp import LemmaTaggerConfig\n >>> from bgnlp.tools.taggers import Lemmatizer\n >>> lemma = Lemmatizer(config=LemmaTaggerConfig())\n >>> lemma(\"езикът\", \"Ns\")\n език\n \"\"\"\n return self.predict(word, pos)\n\n def get_tokenizer(self) -> CharacterBasedTokenizer:\n \"\"\"Get the tokenizer used by `LemmaBert`. 
It is a character-based one.\n\n Returns:\n CharacterBasedTokenizer: The tokenizer.\n \"\"\"\n vocab = torch.load(self.config.vocab_path)\n pretokenizer = CharacterBasedPreTokenizer()\n\n tokenizer = CharacterBasedTokenizer(\n pretokenizer=pretokenizer,\n vocab=vocab\n )\n\n return tokenizer\n\n def get_model(self) -> nn.Module:\n \"\"\"Get the `LemmaBert` model.\n\n Returns:\n nn.Module: PyTorch Module.\n \"\"\"\n bert = LemmaBert(\n vocab_size=len(self.tokenizer), \n output_size=len(self.tokenizer),\n device=self.config.device\n ).to(self.config.device)\n bert = self.load_model(bert)\n\n return bert\n\n def predict(self, word: str, pos: str) -> str:\n \"\"\"Convert `word` into its lemma.\n\n Args:\n word (str): Word in Bulgarian.\n pos (str): Its part-of-speech tag.\n\n Returns:\n str: The lemma of `word`.\n \"\"\"\n # Preparing the input.\n tokens, attention_mask = self.tokenizer(word, pos)\n\n tokens = torch.LongTensor(tokens).unsqueeze(0)\n attention_mask = torch.LongTensor(attention_mask).unsqueeze(0)\n\n self.model.eval()\n\n with torch.no_grad():\n pred = self.model(tokens, attention_mask=attention_mask).argmax(-1).squeeze(0).tolist()\n pred = \"\".join(self.tokenizer.vocab.lookup_tokens(pred))\n pred = re.findall(r\"\\[CLS\\](.+?)\\[SEP\\]\", pred)[0]\n\n self.model.train()\n\n if word[0].isupper():\n pred = pred.capitalize()\n\n return pred\n\n\nclass LemmaTagger:\n \"\"\"Find the lemmas of a string with one or more words.\n\n Args:\n config (ModelConfig): Configuration of the :ref:`LemmaBert` model.\n \"\"\"\n\n def __init__(self, config: ModelConfig):\n self.config = config\n\n def __call__(self, text: str, as_string: bool = False, additional_info: bool = False) -> List[Dict[str, str]]:\n \"\"\"Find the lemmas of `text`. `text` should preferably be a semantically correct sentence or sentences, since the lemma sometimes changes based on context.\n\n Args:\n text (str): String with one or more Bulgarian words.\n as_string (bool, optional): Whether the lemmatization result should be returned as a string or as a list of dictionaries. Defaults to False.\n additional_info (bool, optional): Whether the output should consist of more data about each word (mainly PoS information). Defaults to False.\n\n Returns:\n List[Dict[str, str]]: List of dictionaries. Each dictionary has a `word` key and a `lemma` key holding its lemma. If `additional_info` is True, the dictionary also includes PoS data.\n\n Example::\n >>> from bgnlp import lemmatize\n >>> text = \"Добре дошли!\"\n >>> # Return the lemmas as a dictionary.\n >>> print(\"Input:\", text)\n Input: Добре дошли!\n >>> print(\"Output:\", lemmatize(text))\n Output: [{'word': 'Добре', 'lemma': 'Добре'}, {'word': 'дошли', 'lemma': 'дойда'}, {'word': '!', 'lemma': '!'}]\n\n >>> # Or return the lemmas as a string.\n >>> print(\"Output:\", lemmatize(text, as_string=True))\n Output: Добре дойда!\n \"\"\"\n self.additional_info = additional_info\n\n pos = PosTagger(config=PosTaggerConfig())\n lemma = Lemmatizer(config=self.config)\n\n if as_string:\n return self._str_predict(\n text=text,\n pos_model=pos,\n lemma_model=lemma\n )\n \n return self._dict_predict(\n text=text, \n pos_model=pos,\n lemma_model=lemma\n )\n\n def _dict_predict(self, text: str, pos_model: PosTagger, lemma_model: Lemmatizer) -> List[Dict[str, str]]:\n \"\"\"Find the lemmas of each word in `text`. Then, return a dictionary\n with each word and its lemma.\n\n Args:\n text (str): Bulgarian text.\n pos_model (PosTagger): Part-of-Speech model.\n lemma_model (Lemmatizer): Lemmatization model.\n\n Returns:\n List[Dict[str, str]]: List of dictionaries. 
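For example: [{'word': 'езикът', 'lemma': 'език'}].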
Each dictionary has a word and its lemma.\n \"\"\"\n result = []\n\n for pos_result in pos_model(text):\n pos_result[\"lemma\"] = lemma_model(\n word=pos_result[\"word\"],\n pos=pos_result[\"tag\"]\n )\n if self.additional_info:\n result.append(pos_result)\n else:\n result.append({\n \"word\": pos_result[\"word\"],\n \"lemma\": pos_result[\"lemma\"]\n })\n\n return result\n\n def _str_predict(self, text: str, pos_model: PosTagger, lemma_model: Lemmatizer) -> str:\n \"\"\"Find the lemmas of `text` and return a string with them.\n\n Args:\n text (str): Bulgarian text.\n pos_model (PosTagger): Part-of-speech model.\n lemma_model (Lemmatizer): Lemmatization model.\n\n Returns:\n str: String with the lemmas of `text`.\n \"\"\"\n result = []\n\n for pos_result in pos_model(text):\n pos_result[\"lemma\"] = lemma_model(\n word=pos_result[\"word\"],\n pos=pos_result[\"tag\"]\n )\n\n result.append(pos_result[\"lemma\"])\n\n result = \" \".join(result)\n # Removing the left whitespace around punctuation.\n result = re.sub(r\"\\s([,.\\?\\!\\:\\;]+)\\s?\", r\"\\1 \", result)\n\n return result\n\n\nclass NerTagger(BaseTagger, SubwordMixin):\n \"\"\"Named Entity Recognition (NER) tagging for Bulgarian text.\n\n Args:\n config (ModelConfig): Configuration of the NerTagger.\n \"\"\"\n\n def __init__(self, config: ModelConfig):\n self.config = config\n self.tokenizer = self.get_tokenizer()\n self.model = self.get_model()\n\n def __call__(self, text: str) -> List[Dict[str, str]]:\n \"\"\"Find entities in `text`. These entities may be:\n - `PER` - Person\n - `ORG` - Organization\n - `LOC` - Location\n\n Args:\n text (str): String of Bulgarian words.\n\n Returns:\n List[Dict[str, str]]: List of dictionaries. Each dictionary has a word and its NER tag.\n\n Example::\n >>> from bgnlp import ner\n >>> text = \"Барух Спиноза е роден в Амстердам\"\n >>> print(f\"Input: {text}\")\n >>> print(\"Result:\", ner(text))\n Input: Барух Спиноза е роден в Амстердам\n Result: [{'word': 'Барух Спиноза', 'entity_group': 'PER'}, {'word': 'Амстердам', 'entity_group': 'LOC'}]\n\n \"\"\"\n text = self._preprocess_text(text)\n return self.predict(text)\n\n def get_tokenizer(self):\n return AutoTokenizer.from_pretrained(self.config.model_path)\n\n def get_model(self):\n return AutoModelForTokenClassification.from_pretrained(self.config.model_path)\n\n def predict(\n self, \n text: str, \n label2id={\n 0: \"O\",\n 1: \"B-PER\", 2: \"I-PER\", \n 3: \"B-ORG\", 4: \"I-ORG\", \n 5: \"B-LOC\", 6: \"I-LOC\"\n }\n ) -> List[Dict[str, str]]:\n tokens_data = self.tokenizer(text)\n tokens = self.tokenizer.convert_ids_to_tokens(tokens_data[\"input_ids\"])\n words = self.subwords_to_words(tokens)\n\n input_ids = torch.LongTensor(tokens_data[\"input_ids\"]).unsqueeze(0)\n attention_mask = torch.LongTensor(tokens_data[\"attention_mask\"]).unsqueeze(0)\n\n out = self.model(input_ids, attention_mask=attention_mask).logits\n out = out.argmax(-1).squeeze(0).tolist()\n\n prediction = [label2id[idx] if idx in label2id else idx for idx in out]\n\n return self._merge_words_and_predictions(\n words=words, entities=prediction\n )\n \n def _merge_words_and_predictions(self, words: List[str], entities: List[str]) -> List[Dict[str, str]]:\n result = []\n curr_word = []\n\n for i, (word, entity) in enumerate(zip(words[1:], entities[1:])):\n if \"B-\" in entity:\n if curr_word:\n curr_word = \" \".join(curr_word)\n result.append({\n \"word\": curr_word,\n \"entity_group\": entities[i][2:]\n })\n curr_word = [word]\n else:\n curr_word.append(word)\n\n if \"I-\" 
in entity:\n curr_word.append(word)\n \n if \"O\" == entity:\n if curr_word:\n curr_word = \" \".join(curr_word)\n result.append({\n \"word\": self._remove_punctuation(curr_word),\n \"entity_group\": entities[i][2:]\n })\n \n curr_word = []\n\n return result\n \n def _preprocess_text(self, text: str) -> str:\n # Remove the whitespace before punctuation.\n text = re.sub(r\"\\s+([,\\.\\?!;:\\'\\\"\\(\\)\\[\\]„”])\", r\"\\1\", text)\n # Leave out only a single whitespace.\n text = re.sub(r\"\\s+\", \" \", text)\n \n return text\n\n def _remove_punctuation(self, text: str) -> str:\n return re.sub(r\"([,\\.\\?!;:\\'\\\"\\(\\)\\[\\]„”])\", \"\", text)\n\n\nclass KeywordsTagger(BaseTagger):\n \"\"\"Keyword Extraction tagger for Bulgarian texts.\n\n Args:\n config (ModelConfig): The model configuration.\n \"\"\"\n\n def __init__(self, config: ModelConfig):\n self.config = config\n self.model = self.get_model()\n self.tokenizer = self.get_tokenizer()\n\n def __call__(self, text: str, threshold: float = 0.5) -> List[Dict[str, Any]]:\n \"\"\"Extract keywords from Bulgarian texts.\n\n Args:\n text (str): The source text from which you are going to extract keywords.\n threshold (float, optional): Threshold based on which some of the keywords with lower probabilities might be excluded. Defaults to 0.5.\n \n Returns:\n List[Dict[str, Any]]: List of dictionaries describing each keyword in `text`.\n\n Example::\n >>> from bgnlp import extract_keywords\n >>> with open(\"input_text.txt\", \"r\", encoding=\"utf-8\") as f:\n >>> text = f.read()\n >>> # Here threshold is optional, it defaults to 0.5.\n >>> extract_keywords(text, threshold=0.6)\n [{'keyword': 'Еманюел Макрон', 'score': 0.8759163320064545},\n {'keyword': 'Г-7', 'score': 0.5938143730163574},\n {'keyword': 'Япония', 'score': 0.607077419757843}]\n \"\"\"\n return self.predict(text, threshold)\n\n def get_tokenizer(self):\n return AutoTokenizer.from_pretrained(self.config.model_path)\n\n def get_model(self):\n return AutoModelForTokenClassification.from_pretrained(self.config.model_path)\n\n def predict(self, text: str, threshold: float = 0.5) -> List[Dict[str, Any]]:\n \"\"\"Extract keywords from Bulgarian texts.\n\n Args:\n text (str): The source text from which you are going to extract keywords.\n threshold (float, optional): Threshold based on which some of the keywords with lower probabilities might be excluded. Defaults to 0.5.\n \n Returns:\n List[Dict[str, Any]]: List of dictionaries describing each keyword in `text`.\n\n Example::\n >>> from bgnlp import extract_keywords\n >>> with open(\"input_text.txt\", \"r\", encoding=\"utf-8\") as f:\n >>> text = f.read()\n >>> # Here threshold is optional, it defaults to 0.5.\n >>> extract_keywords(text, threshold=0.6)\n [{'keyword': 'Еманюел Макрон', 'score': 0.8759163320064545},\n {'keyword': 'Г-7', 'score': 0.5938143730163574},\n {'keyword': 'Япония', 'score': 0.607077419757843}]\n \"\"\"\n keywords = self._extract_keywords(text, threshold=threshold)\n\n return self._format_keywords(keywords)\n \n def _format_keywords(self, keywords: List[Dict[str, Any]]) -> List[Dict[str, Any]]:\n \"\"\"Mainly responsible for the merging of subkeywords into keywords, i.e. when\n the keyword consists of multiple words - 'Адам Фаузи', the two subkeywords 'Адам' and 'Фаузи'\n are merged into one. 
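For instance (scores illustrative), 'Адам' with score 0.8 and 'Фаузи' with score 0.6 would become the single keyword 'Адам Фаузи' with score 0.7.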
This method also merges the probabilities by calculating\n their average.\n\n Args:\n keywords (List[Dict[str, Any]]): Keywords with their `entity_group` and probability `score`.\n\n Returns:\n List[Dict[str, Any]]: Merged keywords (in some cases) with their probability scores.\n \"\"\"\n formatted_keywords = []\n # This is used for keywords that have multiple words.\n current_keywords = []\n scores = []\n\n for i, kw in enumerate(keywords):\n if kw[\"entity_group\"] == \"B-KWD\":\n if i > 0:\n formatted_keywords.append({\n \"keyword\": \" \".join(current_keywords),\n # Calculating the average score of all keywords in `current_keywords`.\n \"score\": sum(scores) / len(scores)\n })\n current_keywords = []\n scores = []\n current_keywords.append(kw[\"entity\"])\n scores.append(kw[\"score\"])\n\n if kw[\"entity_group\"] == \"I-KWD\":\n current_keywords.append(kw[\"entity\"])\n scores.append(kw[\"score\"])\n\n # When the last keyword is of any type - it should be added to \n # `formatted_keywords`.\n if i == len(keywords) - 1:\n formatted_keywords.append({\n \"keyword\": \" \".join(current_keywords),\n # Calculating the average score of all keywords in `current_keywords`.\n \"score\": sum(scores) / len(scores)\n })\n\n return formatted_keywords\n\n def _extract_keywords(\n self,\n text: str,\n max_len: int = 300,\n id2group = {\n # Indicates that this is not a keyword.\n 0: \"O\",\n # Beginning of a keyword.\n 1: \"B-KWD\",\n # Additional keywords (might also indicate the end of a keyword sequence).\n # You can merge these with the beginning keyword `B-KWD`.\n 2: \"I-KWD\",\n },\n threshold: float = 0.5\n ) -> List[Dict[str, Any]]:\n \"\"\"Here the text is preprocessed, tokenized and then sent to the model\n for inference. There are comments on each step.\n\n Args:\n text (str): Raw text.\n max_len (int, optional): Maximum sequence length passed to the tokenizer. Defaults to 300.\n id2group (dict, optional): ID to Group mapping for the entity groups. Defaults to { 0: \"O\", 1: \"B-KWD\", 2: \"I-KWD\", }.\n threshold (float, optional): Threshold based on which some of the keywords with lower probabilities might be excluded. 
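A candidate is kept only when its softmax probability is strictly greater than this value.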
Defaults to 0.5.\n\n Returns:\n List[Dict[str, Any]]: Each found entity/keyword with its entity group and probability score.\n \"\"\"\n # Preprocess the text.\n # Surround punctuation with whitespace and convert multiple whitespaces\n # into single ones.\n text = re.sub(r\"([,\\.?!;:\\'\\\"\\(\\)\\[\\]„”])\", r\" \\1 \", text)\n text = re.sub(r\"\\s+\", r\" \", text)\n words = text.split()\n\n # Tokenize the processed `text` (this includes padding or truncation).\n tokens_data = self.tokenizer(\n text.strip(), \n padding=\"max_length\", \n max_length=max_len, \n truncation=True, \n return_tensors=\"pt\"\n )\n input_ids = tokens_data.input_ids\n attention_mask = tokens_data.attention_mask\n\n # Predict the keywords.\n out = self.model(input_ids, attention_mask=attention_mask).logits\n # Softmax the last dimension so that the probabilities add up to 1.0.\n out = out.softmax(-1)\n # Based on the probabilities, generate the most probable keywords.\n out_argmax = out.argmax(-1)\n prediction = out_argmax.squeeze(0).tolist()\n probabilities = out.squeeze(0)\n \n return [\n {\n # Since the list of words does not have a [CLS] token, the index `i`\n # is one step forward, which means that if we want to access the \n # appropriate word we should use the index `i - 1`.\n \"entity\": words[i - 1],\n \"entity_group\": id2group[idx],\n \"score\": float(probabilities[i, idx])\n } \n for i, idx in enumerate(prediction) \n if (idx == 1 or idx == 2) and float(probabilities[i, idx]) > threshold\n ]\n","repo_name":"auhide/bgnlp","sub_path":"bgnlp/tools/taggers.py","file_name":"taggers.py","file_ext":"py","file_size_in_byte":28162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"16286383720","text":"#!/usr/bin/env python\r\n\r\n\"\"\"\r\nsetup.py file for SWIG Wrapper for MCFSimplex\r\n\"\"\"\r\n\r\nfrom distutils.core import setup, Extension\r\nimport numpy as np # for numpy array conversion\r\n\r\npyMCFSimplex_module = Extension('_pyMCFSimplex',\r\n sources=['pyMCFSimplex_wrap.cxx', 'MCFSimplex.cpp'],\r\n )\r\n\r\nsetup (name = 'pyMCFSimplex',\r\n version = '0.9',\r\n author = \"G#.Blog - Johannes Sommer\",\r\n author_email = \"info@sommer-forst.de\",\r\n url = \"http://www.sommer-forst.de/blog\",\r\n description = \"pyMCFSimplex is a Python Wrapper for MCFSimplex\",\r\n long_description = \r\n\"\"\"\r\npyMCFSimplex is a Python Wrapper for the Minimum Cost Flow Problem Solver \r\n'MCFSimplex' coded and maintained at the University of Pisa.\r\n\"\"\",\r\n include_dirs = [np.get_include()], # Header for numpy\r\n ext_modules = [pyMCFSimplex_module],\r\n license = \"LGPL 2.1\",\r\n platforms = [\"win32\",\"linux-x86_64\"],\r\n py_modules = [\"pyMCFSimplex\"],\r\n )\r\n","repo_name":"frangio68/Min-Cost-Flow-Class","sub_path":"pyMCFSimplex-0.9/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"71"} +{"seq_id":"37364072305","text":"import tensorflow as tf\nimport argparse\nimport math\nimport numpy as np\n\nfrom tensorflow.contrib import lookup\nfrom tensorflow.contrib.learn import ModeKeys\nfrom tensorflow.contrib.tensorboard.plugins import projector\n\ntf.logging.set_verbosity(\"INFO\")\n\nfrom word_vec.utils.tf_hooks.post_run import PostRunTaskHook\n'''\nNotes:\nTwo methods:\n- CBOW: Continuous Bag of Words, i.e. given context words find the target word\n- Skip Gram, i.e. given a word find the target context words\n\n\"The cat is 
sitting on the mat\"\n\nCBOW : is | ( The cat ) ( sitting on )\n -----------------------------------------------\n w(t) w(t-2) w(t-1) w(t+1) w(t+2)\n \n Find centre word \"is\" given \"The\", \"cat\", \"sitting\" and \"on\"\n\nSkip Gram: ( The cat ) ( sitting on ) | is\n ---------------------------------------------\n w(t-2) w(t-1) w(t+1) w(t+2) w(t)\n \n Find context words of \"is\", here \"The\", \"cat\", \"sitting\" and \"on\"\n \n Features:\n (is, The)\n (is, cat)\n (is, sitting)\n (is, given)\n \n \nModel: Word Embedding Matrix [Vocab Size, Embedding Size] \n\n'''\n\nclass SkipGramConfig():\n def __init__(self,\n vocab_size,\n words_vocab_file,\n embedding_size,\n num_word_sample,\n learning_rate,\n model_dir):\n tf.app.flags.FLAGS = tf.app.flags._FlagValues()\n tf.app.flags._global_parser = argparse.ArgumentParser()\n flags = tf.app.flags\n self.FLAGS = flags.FLAGS\n\n flags.DEFINE_string(\"UNKNOWN_WORD\", \"\", \"\")\n\n flags.DEFINE_integer(\"VOCAB_SIZE\", vocab_size, \"\")\n flags.DEFINE_string(\"WORDS_VOCAB_FILE\", words_vocab_file, \"\")\n\n flags.DEFINE_integer(\"EMBED_SIZE\", embedding_size, \"\")\n flags.DEFINE_integer(\"NUM_WORD_SAMPLE\", num_word_sample, \"\")\n\n flags.DEFINE_float(\"LEARNING_RATE\", float(learning_rate), \"\")\n # flags.DEFINE_float(\"KEEP_PROP\", out_keep_propability, \"\")\n\n flags.DEFINE_string(\"MODEL_DIR\", model_dir, \"\")\n\n\n\nclass SkipGram(tf.estimator.Estimator):\n '''\n Skip Gram implementation\n '''\n def __init__(self,\n config:SkipGramConfig):\n super(SkipGram, self).__init__(\n model_fn=self._model_fn,\n model_dir=config.FLAGS.MODEL_DIR,\n config=tf.contrib.learn.RunConfig(log_step_count_steps=100,\n save_summary_steps=100,\n gpu_memory_fraction=0.5,\n save_checkpoints_steps=1000,\n tf_random_seed=42,\n log_device_placement=True))\n\n self.w2v_config = config\n\n self.embed_mat_hook = None #Hook to store the embedding matrix as numpy audio_utils\n\n def _model_fn(self, features, labels, mode, params):\n\n center_words = features\n target_words = labels\n\n\n # Define model's architecture\n with tf.variable_scope(\"center-words-2-ids\"):\n table = lookup.index_table_from_file(vocabulary_file=self.w2v_config.FLAGS.WORDS_VOCAB_FILE,\n num_oov_buckets=0,\n default_value=0,\n name=\"table\")\n tf.logging.info('table info: {}'.format(table))\n\n words = tf.string_split(center_words)\n densewords = tf.sparse_tensor_to_dense(words, default_value=self.w2v_config.FLAGS.UNKNOWN_WORD)\n center_word_ids = table.lookup(densewords)\n\n tf.logging.info(\"center_word_ids -----> {}\".format(center_word_ids))\n #[batch_size,?] 
-> [batch_size, 1] -> [batch_size,]\n center_word_ids = tf.squeeze(tf.reshape(center_word_ids, shape=(-1, 1)))\n\n\n with tf.variable_scope(\"target-words-2-ids\"):\n table = lookup.index_table_from_file(vocabulary_file=self.w2v_config.FLAGS.WORDS_VOCAB_FILE,\n num_oov_buckets=0,\n default_value=0,\n name=\"table\")\n tf.logging.info('table info: {}'.format(table))\n\n words = tf.string_split(target_words)\n densewords = tf.sparse_tensor_to_dense(words, default_value=self.w2v_config.FLAGS.UNKNOWN_WORD)\n target_word_ids = table.lookup(densewords)\n\n target_word_ids = tf.reshape(target_word_ids, shape=(-1, 1))\n\n tf.logging.info(\"target_word_ids -----> {}\".format(target_word_ids))\n\n with tf.name_scope(\"embed\"):\n embed_matrix = tf.Variable(tf.random_uniform([self.w2v_config.FLAGS.VOCAB_SIZE,\n self.w2v_config.FLAGS.EMBED_SIZE], -1.0, 1.0),\n name=\"embed_matrix\")\n\n tf.logging.info(\"embed_matrix -----> {}\".format(embed_matrix))\n\n\n with tf.name_scope(\"loss\"):\n embed = tf.nn.embedding_lookup(embed_matrix, center_word_ids, name=\"embed\")\n\n tf.logging.info(\"embed -----> {}\".format(embed))\n\n\n # construct variables for NCE loss\n nce_weight = tf.Variable(tf.truncated_normal([self.w2v_config.FLAGS.VOCAB_SIZE,\n self.w2v_config.FLAGS.EMBED_SIZE],\n stddev= 1/math.sqrt( self.w2v_config.FLAGS.EMBED_SIZE ** 0.5)),\n name=\"nce_weight\")\n\n tf.logging.info(\"nce_weight -----> {}\".format(nce_weight))\n\n nce_bias = tf.Variable(tf.zeros([ self.w2v_config.FLAGS.VOCAB_SIZE]), name=\"nce_bias\")\n tf.logging.info(\"nce_bias -----> {}\".format(nce_bias))\n\n\n # Loss, training and eval operations are not needed during inference.\n loss = None\n train_op = None\n eval_metric_ops = {}\n\n if mode != ModeKeys.INFER:\n # define loss function to be NCE loss function\n loss = tf.reduce_mean(tf.nn.nce_loss(weights=nce_weight,\n biases=nce_bias,\n labels=target_word_ids,\n inputs=embed,\n num_sampled=self.w2v_config.FLAGS.NUM_WORD_SAMPLE,\n num_classes=self.w2v_config.FLAGS.VOCAB_SIZE),\n name='loss')\n\n train_op = tf.contrib.layers.optimize_loss(\n loss=loss,\n global_step=tf.contrib.framework.get_global_step(),\n optimizer=tf.train.GradientDescentOptimizer,\n learning_rate=self.w2v_config.FLAGS.LEARNING_RATE)\n\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=None,\n loss=loss,\n train_op=train_op,\n eval_metric_ops=None\n )\n\n def set_store_hook(self, tensor_name=\"embed/embed_matrix:0\"):\n def save_embed_mat(sess):\n graph = sess.graph\n embed_mat = graph.get_tensor_by_name(tensor_name)\n\n embed_mat = sess.run(embed_mat)\n np.save(\"tmp/word2vec_v1.npy\", embed_mat)\n\n\n self.embed_mat_hook = PostRunTaskHook()\n self.embed_mat_hook.user_func = save_embed_mat\n\n\n def get_store_hook(self):\n self.set_store_hook()\n return self.embed_mat_hook\n\n\n\n","repo_name":"dhiraa/sarvam","sub_path":"src/nlp/word_vec/skip_gram.py","file_name":"skip_gram.py","file_ext":"py","file_size_in_byte":7710,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"8246721540","text":"import numpy as np\nimport quantum_states as qs\nimport math as m\nfrom time import time\nimport sparse as sp\n\n#Hard-coded basic gates (for 1 qubit)\ngates = {\n\n 'H' : 1/m.sqrt(2)*np.array([[1,1],\n [1,-1]]),\n 'I' : np.identity(2),\n 'X' : np.array([[0,1],\n [1,0]]),\n 'Y' : np.array([[0,-1j],\n [1j,0]]),\n 'Z' : np.array([[1,0],\n [0,-1]])\n}\n\n#Hard-coded basic gates (for 1 qubit), in sparse form\nsgates = {\n\n 'H' : sp.Sparse(gates['H']),\n 'I' 
: sp.Sparse(gates['I']),\n 'X' : sp.Sparse(gates['X']),\n 'Y' : sp.Sparse(gates['Y']),\n 'Z' : sp.Sparse(gates['Z'])\n\n}\n\n\n\n### Matrix Addition! -------------------------------------------------------------------------------------------------\n\ndef matrixSum(matA,matB):\n \"\"\" Function summing two matrices.\n\n Parameters\n ----------\n matA : numpy array or sp.Sparse\n First matrix in sum.\n matB : numpy array or sp.Sparse\n Second matrix in sum.\n\n Returns\n -------\n numpy array or sp.Sparse\n Sum of matA + matB.\n \"\"\"\n\n if isinstance(matA, np.ndarray) & isinstance(matB, np.ndarray):\n if matA.shape != matB.shape:\n print(\"Non NxN matching matrices\")\n else:\n matZ = np.zeros((matA.shape[0],matA.shape[1]))\n for i in range(matA.shape[0]):\n for j in range(matA.shape[1]):\n matZ[i][j] = matA[i][j]+matB[i][j]\n return matZ\n elif isinstance(matA, sp.Sparse) & isinstance(matB, sp.Sparse):\n for b in matB.matrixDict:\n if b in matA.matrixDict:\n matA.matrixDict[b] += matB.matrixDict[b]\n else:\n matA.matrixDict[b] = matB.matrixDict[b]\n return matA\n else:\n raise TypeError(\"Incorrect type for one or more matrices in sum: \\\n numpy array or custom sparse matrix please\")\n\n### Matrix multiplication! -------------------------------------------------------------------------------------------\n\ndef matrixProduct(matA,matB):\n \"\"\"Two matrix multiplication function.\n\n Parameters\n ----------\n matA : numpy array or sp.Sparse\n Leftmost matrix in product.\n matB : numpy array or sp.Sparse\n Rightmost matrix in product.\n\n Returns\n -------\n numpy array or sp.Sparse\n Matrix being a product of (matA x matB).\n \"\"\"\n\n if isinstance(matA, np.ndarray) & isinstance(matB, np.ndarray):\n if matA.shape[1] != matB.shape[0]:\n print(f\"Non axN Nxb matching matrices : {matA.shape[0]}x{matA.shape[1]} and {matB.shape[0]}x{matB.shape[1]}\")\n else:\n matZ = np.zeros((matA.shape[0],matB.shape[1]))\n for i in range(matZ.shape[0]):\n for j in range(matZ.shape[1]):\n for n in range(matA.shape[1]):\n matZ[i][j] += matA[i][n]*matB[n][j]\n return matZ\n elif isinstance(matA, sp.Sparse) & isinstance(matB, sp.Sparse):\n matZ = {}\n for a in matA.matrixDict:\n for b in matB.matrixDict:\n if a[0] == b[1]:\n if (b[0],a[1]) in matZ:\n matZ[(b[0],a[1])] += matA.matrixDict[a]*matB.matrixDict[b]\n else:\n matZ[(b[0],a[1])] = matA.matrixDict[a]*matB.matrixDict[b]\n return sp.Sparse(matZ, (matA.size[0],matB.size[1]))\n else:\n raise TypeError(\"Incorrect type for one or more matrices in product: \\\n numpy array or custom sparse matrix please\")\n\n### Determinant of Matrix! 
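(recursive Laplace cofactor expansion along the first row)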
-------------------------------------------------------------------------------------------\n\ndef matrixDet(mat):\n \"\"\"Calculates determinant of n-dimensional square matrix.\n\n Parameters\n ----------\n mat : numpy array or sp.Sparse\n Square matrix whose determinant will be found.\n\n Returns\n -------\n int\n Determinant of inputted square matrix (mat).\n \"\"\"\n\n if isinstance(mat, np.ndarray):\n return determinant(mat)\n elif isinstance(mat, sp.Sparse):\n #cons = np.array([ (-1)**((x+1)//2) for x in range(m.factorial(mat.size))])\n return determinant(mat.asMatrix)\n else:\n raise TypeError(\"Incorrect type for matrix to calculate determinant: \\\n numpy array or custom sparse matrix please\")\n\ndef determinant(mat):\n if mat.shape[0] != mat.shape[1]:\n print(\"Non NxN matrices\")\n else:\n if mat.shape[1] == 1:\n return mat[0][0]\n elif mat.shape[1] == 2:\n return mat[0][0] * mat[1][1] - mat[0][1] * mat[1][0]\n else:\n tempNum = 0\n for rNum, rItem in enumerate(mat[0]):\n matN = np.delete(mat, 0, 0)\n matN = np.delete(matN, rNum, 1)\n tempNum += (1 - 2 * (rNum % 2)) * rItem * matrixDet(matN)\n return tempNum\n\n\n\n### Matrix Inversion! ------------------------------------------------------------------------------------------------\n\ndef matrixInv(mat):\n \"\"\"\n Find the matrix inverse for square matrix mat.\n\n Parameters\n ----------\n mat : numpy array or sp.Sparse\n Matrix whose inverse will be found.\n\n Returns\n -------\n numpy array or sp.Sparse\n Inverted matrix whose operation reverses that of mat.\n \"\"\"\n if isinstance(mat, np.ndarray):\n return inverter(mat)\n elif isinstance(mat, sp.Sparse):\n #cons = np.array([ (-1)**((x+1)//2) for x in range(m.factorial(mat.size))])\n return sp.Sparse(inverter(mat.asMatrix))\n else:\n raise TypeError(\"Incorrect type for matrix to invert: \\\n numpy array or custom sparse matrix please\")\n\n\ndef inverter(mat):\n if mat.shape[0] != mat.shape[1]:\n raise ValueError(\"Matrix is not square. Please use NxN matrices for inversion\")\n else:\n det = matrixDet(mat)\n matZ = np.zeros(mat.shape)\n for i in range(mat.shape[0]):\n for j in range(mat.shape[0]):\n matN = np.delete(mat, i, 1)\n matN = np.delete(matN, j, 0)\n matZ[i][j] = (1/det)*(1-2*((i+j)%2))*matrixDet(matN)\n return matZ\n\n\n### Tensor Product ---------------------------------------------------------------------------------------------------\n\ndef tensorProduct(vecA,vecB):\n \"\"\"Function calculating tensor product of two vectors in vectorial form\n\n Parameters\n ----------\n vecA : numpy array\n Leftmost vector in tensor product.\n vecB : numpy array\n Rightmost vector in tensor product.\n\n Returns\n -------\n numpy array\n Vector representing tensor product of vecA (x) vecB\n \"\"\"\n\n lA = len(vecA)\n lB = len(vecB)\n T = np.zeros(lA*lB)\n for i in range (lA):\n for j in range (lB):\n T[i*lB+j] = vecA[i]*vecB[j]\n return T\n\n\ndef kroneckerProduct(matA,matB):\n \"\"\"Function calculating the kronecker product between two matrices\n\n I.e. 
higher-dimensional tensor product.\n\n Parameters\n ----------\n matA : numpy array or sp.Sparse\n Leftmost matrix in kronecker product.\n matB : numpy array or sp.Sparse\n Rightmost matrix in product.\n\n Returns\n -------\n numpy array or sp.Sparse\n Kronecker product of matA (x) matB.\n \"\"\"\n if isinstance(matA, np.ndarray) & isinstance(matB, np.ndarray):\n matZ = np.zeros((matA.shape[0]*matB.shape[0], matA.shape[1]*matB.shape[1]))\n for i in range(matZ.shape[0]):\n for j in range(matZ.shape[1]):\n matZ[i][j] = matA[i//matB.shape[0]][j//matB.shape[1]]*matB[i%matB.shape[0]][j%matB.shape[1]]\n return matZ\n elif isinstance(matA, sp.Sparse) & isinstance(matB, sp.Sparse):\n matZ = {}\n for a in matA.matrixDict:\n for b in matB.matrixDict:\n matZ[( b[0]+a[0]*matB.size[0] , b[1]+a[1]*matB.size[1] )] = matA.matrixDict[a]*matB.matrixDict[b]\n return sp.Sparse(matZ, (matA.size[0]*matB.size[0],matA.size[1]*matB.size[1]))\n else:\n raise TypeError(\"Incorrect type for matrices in kronecker product: \\\n numpy array or custom sparse matrix please\")\n\n### Helper Functions ----------------------------------------------------------------------------------------------------\ndef vecMatProduct(mat,vec):\n \"\"\" Takes a matrix and a single array vector and formats them for the matrixProduct() function.\n\n Parameters\n ----------\n mat : numpy array or sp.Sparse\n 2D Matrix.\n vec : numpy array\n 1D Vector.\n\n Returns\n -------\n numpy array\n Formatted product.\n \"\"\"\n if isinstance(mat, np.ndarray):\n vecR = np.resize(vec,(len(vec),1))\n return matrixProduct(mat,vecR)[:,0]\n elif isinstance(mat, sp.Sparse):\n V = [0]*len(vec)\n for pos in mat.matrixDict:\n V[pos[0]] += mat.matrixDict[pos]*vec[pos[1]]\n return np.array(V)\n else:\n raise TypeError(\"Incorrect type for matrix/vector to format together: \\\n numpy array or custom sparse matrix please\")\n\n\ndef constructGate(code, Sparse = False):\n \"\"\" Function constructing matrix representing gate dynamically\n\n Works by parsing a carefully formatted string (code), with characters representing the gate\n at each qubit and returns the operation as a matrix.\n First character is the gate to be applied to the most significant qubit, etc.\n i.e. 
the code \"HHI\" represents the operation HxHxI(qubit1xqubit2xqubit3)\n where x denotes the tensor product\n\n Parameters\n ----------\n code : str\n Sequence/\"code\" used to generate specific paralel gate.\n\n Returns\n -------\n numpy array or sp.Sparse\n Matrix which when acted on a particular register will have the same\n effect as applying the theoretical quantum gate.\n \"\"\"\n\n matrix = np.array([[1]]) # This is starts by making a 1x1 identity matrix so the first kronecker product is the first gate.\n if Sparse:\n matrix = sp.Sparse(matrix) # If sparse makes the matrix sparse.\n TofN = 0 # This is for storing the control number, number of qubits that are connected to the controlled gate eg: CCNot Gate => 3X.\n for char in code:\n if char.isdigit(): # Sets the control number.\n TofN = int(str(TofN)+char)\n elif TofN != 0: # If a control number was set this creatses the controlled gate matrix\n if Sparse: # Two methods for sparse or not.\n gate = sgates[char] # Gets the sparse gate matrix from dictioanary.\n l = 2**TofN-gate.size[0] # These two lines create and identity sparse matrix but then force it to be 2x2 longer.\n Tof = sp.Sparse(np.identity(l), (l+gate.size[0],l+gate.size[0])) # sp.Sparse takes two parameters; a matrix and a double this being the shape.\n for pos in gate.matrixDict: # This part adds the sparse gate matrix to the new forced sparse identiy.\n Tof.matrixDict[((Tof.size[0])-(gate.size[0])+pos[0]%(gate.size[0]) \\\n , (Tof.size[1])-(gate.size[1])+pos[1]%(gate.size[1]))] \\\n = gate.matrixDict[(pos[0]%(gate.size[0]),pos[1]%(gate.size[1]))]\n else:\n Tof = np.identity(2**TofN) # For non sparse we start with an identity.\n gate = gates[char] # Gets gate from dictionary\n for x in range(len(gates)): # This adds the 2x2 gate matrix to the end of the identity. \n for y in range(len(gates)):\n Tof[len(Tof)-len(gate)+x%len(gate)][len(Tof)-len(gate) \\\n +y%len(gate)] = gate[x%len(gate)][y%len(gate)]\n matrix = kroneckerProduct(matrix,Tof) # Whether sparse or not this does the kronecker product of the existing matrix with the new controlled gate matrix.\n TofN = 0\n else: # This is the main part if there is no control element.. \n if Sparse: # This changes the gate dictionary depending on whether we are using sparse matrices or not.\n matrix = kroneckerProduct(matrix,sgates[char]) # Then whether we are sparse or not it does the kronecker product on the matrix.\n else:\n matrix = kroneckerProduct(matrix,gates[char])\n return matrix\n","repo_name":"Mackachoo/Quantum-Computing-Project","sub_path":"operations.py","file_name":"operations.py","file_ext":"py","file_size_in_byte":12536,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"17466588756","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*\n\n\"\"\"\nCOMS W4701 Artificial Intelligence - Programming Homework 3\n\nAn AI player for Othello. This is the template file that you need to \ncomplete and submit. 
\n\n@author: Roxanne Farhad @raf2192\n\"\"\"\n\nimport random\nimport sys\nimport time\nimport heapq\n\n# You can use the functions in othello_shared to write your AI \nfrom othello_shared import get_possible_moves, play_move, compute_utility\n\n\n############ MINIMAX ###############################\n\n\"\"\"\nComputes the minimax value of a MAX node\n\"\"\"\ndef minimax_max_node(board):\n\n \"\"\"\n\n If the value being tested is a terminal node -> return it \n then for every possible action i.e, child nodes:\n assign the value v = max(v, min_node(state, action))\n\n for every child node - play the move and check the score.\n Play move returns the new board - the score becomes the value, \n and then put the new board into the min function\n\n 1. test state\n\n // the max node is always dark - 1 \n \n \"\"\"\n v = float(\"-inf\")\n moves = get_possible_moves(board, 1)\n\n if(len(moves) == 0):\n # this means that the dark player can play no moves i.e. no child nodes\n return compute_utility(board)\n else: \n for move in moves:\n movei = move[0]\n movej = move[1]\n newBoard = play_move(board, 1, movei, movej)\n newV = minimax_min_node(newBoard)\n if(v < newV):\n v = newV\n \n # at the end of this then best move becomes the best option to take \n # then the move has to be played\n\n return v\n\n\n\"\"\"\nComputes the minimax value of a MIN node\n\"\"\"\ndef minimax_min_node(board):\n\n v = float(\"inf\")\n moves = get_possible_moves(board, 2)\n\n if(len(moves) == 0):\n return compute_utility(board)\n else:\n for move in moves:\n movei = move[0]\n movej = move[1]\n newBoard = play_move(board, 2, movei, movej)\n newV = minimax_max_node(newBoard)\n if(v > newV):\n v = newV\n\n return v\n\n\"\"\"\nGiven a board and a player color, decide on a move. \nThe return value is a tuple of integers (i,j), where\ni is the column and j is the row on the board. 
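Dark (color 1) picks the move that maximizes the final disc-count utility and light (color 2) the move that minimizes it, assuming optimal play by the opponent.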
\n\"\"\"\ndef select_move_minimax(board, color):\n\n if(color == 1):\n v = float(\"-inf\")\n moves = get_possible_moves(board, 1)\n for move in moves:\n new_board = play_move(board, 1, move[0], move[1])\n newV = minimax_min_node(new_board)\n if(newV > v):\n v = newV\n bestMove = move\n else:\n v = float(\"inf\")\n moves = get_possible_moves(board, 2)\n for move in moves:\n newBoard = play_move(board, 2, move[0], move[1])\n newV = minimax_max_node(newBoard)\n if(v > newV):\n v = newV\n bestMove = move\n \n return bestMove\n############ ALPHA-BETA PRUNING #####################\n\n\"\"\"\nComputes the minimax value of a MAX node with alpha-beta pruning\n\"\"\"\ndef alphabeta_max_node(board, alpha, beta, level=1, limit=float(\"inf\")):\n \n v = float(\"-inf\")\n a = alpha\n b = beta\n\n heap = []\n moveDict = {}\n\n moves = get_possible_moves(board, 1)\n\n if(len(moves) == 0 or level == limit):\n return compute_utility(board)\n\n for move in moves:\n newBoard = play_move(board, 1, move[0], move[1])\n mVal = compute_utility(newBoard)\n moveVal = (mVal, move)\n heapq.heappush(heap, moveVal)\n\n heap.reverse()\n\n level += 1\n for move in heap: \n newBoard = play_move(board, 1, move[1][0], move[1][1])\n newV = alphabeta_min_node(newBoard, a, b, level, limit)\n v = max(v, newV)\n if(v >= b):\n return v\n else:\n a = max(a, v)\n\n return v\n\n\"\"\"\nComputes the minimax value of a MIN node with alpha-beta pruning\n\"\"\"\ndef alphabeta_min_node(board, alpha, beta, level=1, limit=float(\"inf\")):\n\n v = float(\"inf\")\n a = alpha\n b = beta\n\n heap = []\n moveDict = {}\n\n moves = get_possible_moves(board, 2)\n\n if(len(moves) == 0 or level == limit):\n return compute_utility(board)\n\n for move in moves:\n newBoard = play_move(board, 2, move[0], move[1])\n mVal = compute_utility(newBoard)\n moveVal = (mVal, move)\n heapq.heappush(heap, moveVal)\n\n level += 1\n for move in heap:\n newBoard = play_move(board, 2, move[1][0], move[1][1])\n newV = alphabeta_max_node(newBoard, a, b, level, limit)\n v = min(v, newV)\n if(v <= a):\n return v\n else: \n b = min(b, v)\n\n return v\n\n\"\"\"\nGiven a board and a player color, decide on a move. \nThe return value is a tuple of integers (i,j), where\ni is the column and j is the row on the board. \n\"\"\"\ndef select_move_alphabeta(board, color, limit=float(\"inf\")):\n\n a = float(\"-inf\")\n b = float(\"inf\")\n\n realLim = limit\n\n if(color == 1):\n\n heap = [] \n\n v = float(\"-inf\") \n moves = get_possible_moves(board, 1)\n for move in moves:\n new_board = play_move(board, 1, move[0], move[1])\n mVal = compute_utility(new_board)\n moveVal = (mVal, move)\n heapq.heappush(heap, moveVal) \n\n for move in heap:\n new_board = play_move(board, 1, move[1][0], move[1][1])\n newV = alphabeta_min_node(new_board, a, b, 2, realLim)\n if(newV > v):\n v = newV\n bestMove = move[1]\n\n else:\n\n heap = []\n\n v = float(\"inf\")\n moves = get_possible_moves(board, 2)\n for move in moves:\n newBoard = play_move(board, 2, move[0], move[1])\n mVal = compute_utility(newBoard)\n moveVal = (mVal, move)\n heapq.heappush(heap, moveVal) \n\n heap.reverse()\n\n for move in heap:\n newBoard = play_move(board, 2, move[1][0], move[1][1])\n newV = alphabeta_max_node(newBoard, a, b, 2, realLim)\n if(v > newV):\n v = newV\n bestMove = move[1]\n\n return bestMove\n\n\n####################################################\ndef run_ai():\n \"\"\"\n This function establishes communication with the game manager. \n It first introduces itself and receives its color. 
\n Then it repeatedly receives the current score and current board state\n until the game is over. \n \"\"\"\n print(\"Minimax AI\") # First line is the name of this AI \n color = int(input()) # Then we read the color: 1 for dark (goes first), \n # 2 for light. \n\n while True: # This is the main loop \n # Read in the current game status, for example:\n # \"SCORE 2 2\" or \"FINAL 33 31\" if the game is over.\n # The first number is the score for player 1 (dark), the second for player 2 (light)\n next_input = input() \n status, dark_score_s, light_score_s = next_input.strip().split()\n dark_score = int(dark_score_s)\n light_score = int(light_score_s)\n\n if status == \"FINAL\": # Game is over. \n break # Exit the main loop once the game is over. \n else: \n board = eval(input()) # Read in the input and turn it into a Python\n # object. The format is a list of rows. The \n # squares in each row are represented by \n # 0 : empty square\n # 1 : dark disk (player 1)\n # 2 : light disk (player 2)\n \n # Select the move and send it to the manager \n #movei, movej = select_move_minimax(board, color)\n movei, movej = select_move_alphabeta(board, color)\n print(\"{} {}\".format(movei, movej)) \n\n\nif __name__ == \"__main__\":\n run_ai()\n","repo_name":"RoxyFarhad/AI","sub_path":"HW3/Othello/raf2192_ai.py","file_name":"raf2192_ai.py","file_ext":"py","file_size_in_byte":7824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"40519447688","text":"\"\"\"\nCreated on 26.08.2023\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler, PolynomialFeatures\nfrom sklearn.metrics import mean_squared_error, r2_score\nfrom sklearn.linear_model import LinearRegression\n\n# Data set\nnp.random.seed()\nn = 100\nmaxdegree = 15 # 15th degree polynomial\nx = np.linspace(-3, 3, n).reshape(-1, 1)\ny = np.exp(-x**2) + 1.5 * np.exp(-(x-2)**2) + np.random.normal(0, 0.1, x.shape)\n\n\"\"\"task a) and b): 5th order polynomial\"\"\"\n# Create design matrix\nX = np.zeros((n, 6))\nX[:, 0] = 1\nX[:, 1] = x[:, 0]\nX[:, 2] = x[:, 0]**2\nX[:, 3] = x[:, 0]**3\nX[:, 4] = x[:, 0]**4\nX[:, 5] = x[:, 0]**5\n\n# Split into training and test data\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n\n# Scale data\nscaler = StandardScaler()\nscaler.fit(X_train)\nX_train_scaled = scaler.transform(X_train)\nX_test_scaled = scaler.transform(X_test)\n\n# Lin. regression. Not scaled, then scaled\nbeta = np.linalg.inv(X_train.T @ X_train) @ X_train.T @ y_train\ny_tilde = X_train @ beta\ny_predict = X_test @ beta\n\"\"\"I get an error when I try to create a beta_scaled with the scaled matrix, so the scores are wrong\"\"\"\ny_tilde_scaled = X_train_scaled @ beta\ny_predict_scaled = X_test_scaled @ beta\n\n# Metrics. 
Not scaled, then scaled\nprint(f\"TASK A)\\n----------------------\")\nprint(f\"(Not scaled)\")\nprint(f\"Train MSE: {mean_squared_error(y_train, y_tilde):g}\")\nprint(f\"Train R2: {r2_score(y_train, y_tilde):g}\")\nprint(f\"Test MSE: {mean_squared_error(y_test, y_predict):g}\")\nprint(f\"Test R2: {r2_score(y_test, y_predict):g}\")\nprint(f\"\\n(Scaled)\")\nprint(f\"Train MSE: {mean_squared_error(y_train, y_tilde_scaled):g}\")\nprint(f\"Train R2: {r2_score(y_train, y_tilde_scaled):g}\")\nprint(f\"Test MSE: {mean_squared_error(y_test, y_predict_scaled):g}\")\nprint(f\"Test R2: {r2_score(y_test, y_predict_scaled):g}\")\n\n\"\"\"Task c) 15th order polynomial\"\"\"\nmse_train = np.zeros(maxdegree)\nr2_train = np.zeros(maxdegree)\nmse_test = np.zeros(maxdegree)\nr2_test = np.zeros(maxdegree)\n\nfor degree in range(1, maxdegree+1):\n # Create the design matrix for the current degree\n X_deg = PolynomialFeatures(degree).fit_transform(x)\n # Split into training and test data\n X_train, X_test, y_train, y_test = train_test_split(X_deg, y, test_size=0.2)\n # pinv instead of inv for numerical stability at high degrees\n beta = np.linalg.pinv(X_train.T @ X_train) @ X_train.T @ y_train\n y_tilde = X_train @ beta\n y_predict = X_test @ beta\n\n mse_train[degree-1] = mean_squared_error(y_train, y_tilde)\n r2_train[degree-1] = r2_score(y_train, y_tilde)\n mse_test[degree-1] = mean_squared_error(y_test, y_predict)\n r2_test[degree-1] = r2_score(y_test, y_predict)\n\n# Plot mse and r2 against the degree of the polynomial\nplt.plot(range(1, maxdegree+1), mse_train, label=\"Train MSE\")\nplt.plot(range(1, maxdegree+1), mse_test, label=\"Test MSE\")\nplt.plot(range(1, maxdegree+1), r2_train, label=\"Train R2\")\nplt.plot(range(1, maxdegree+1), r2_test, label=\"Test R2\")\nplt.legend()\nplt.xlabel(\"Degree of polynomial\")\nplt.ylabel(\"Error\")\nplt.show()\n","repo_name":"LassePladsen/FYS-STK3155-4155","sub_path":"Weekly exercises/34-35/ex3.py","file_name":"ex3.py","file_ext":"py","file_size_in_byte":3016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"74746901348","text":"import os\nimport cv2\nimport json\nimport pickle\nimport numpy as np\n\nfrom PIL import Image\nfrom collections import defaultdict\n\nfrom dataset import get_dataset\nfrom clients import Client, SiloBNClient\n\n\ndef read_dir(data_dir):\n data = defaultdict(lambda: None)\n\n files = os.listdir(data_dir)\n files = [f for f in files if f.endswith('.json')]\n for f in files:\n file_path = os.path.join(data_dir, f)\n with open(file_path, 'r') as inf:\n cdata = json.load(inf)\n data.update(cdata['user_data'])\n\n return data\n\n\ndef read_data(train_data_dir, test_data_dir):\n \"\"\"\n train_data: Form: {'user_id': {'x': [list of images], 'y': [list of labels]}}\n \"\"\"\n train_data = read_dir(train_data_dir)\n test_data = read_dir(test_data_dir)\n\n return train_data, test_data\n\n\ndef define_client(args):\n if args.algorithm == 'FedAvg':\n return Client\n if args.algorithm == 'SiloBN':\n return SiloBNClient\n raise NotImplementedError\n\n\ndef create_clients(args, train_data, test_data, model, world_size, rank, device, logger, writer, num_gpu, train,\n ckpt_path, disable_ddp=False):\n if args.dataset == 'idda' or args.dataset == 'cityscapes':\n train_transform, test_transform, test_bisenetv2, dataset = get_dataset(args, train)\n else:\n transform, test_bisenetv2, dataset = get_dataset(args, train)\n train_transform = test_transform = []\n\n clients = []\n users = train_data.keys() if train else test_data.keys()\n\n client_func = define_client(args)\n\n for i, user in enumerate(users):\n\n data = 
train_data[user] if train else test_data[user]\n batch_size = args.batch_size if train else args.test_batch_size\n\n if args.dataset == 'cityscapes':\n ds = dataset(data=data, transform=train_transform, test_transform=test_transform,\n test_bisenetv2=test_bisenetv2, double=args.double_dataset, quadruple=args.quadruple_dataset,\n use_cv2_transform=args.cv2_transform, dom_gen=args.dom_gen, split_name=args.clients_type)\n elif args.dataset == 'idda':\n ds = dataset(data=data, transform=train_transform, test_transform=test_transform,\n test_bisenetv2=test_bisenetv2, crop_size=(1856, 1024), remap=args.remap, dom_gen=args.dom_gen,\n use_cv2_transform=args.cv2_transform, setting_type=args.setting_type,\n split_type=args.clients_type, user=user)\n else:\n raise NotImplementedError\n\n client = client_func(user, ds, model, logger, writer, args, batch_size, world_size, rank, num_gpu,\n device=device, ckpt_path=ckpt_path, name=args.name, disable_ddp=disable_ddp)\n\n clients.append(client)\n\n return clients\n\n\ndef extract_amp_spectrum(img_np):\n fft = np.fft.fft2(img_np, axes=(0, 1))\n return np.abs(fft)\n\n\ndef create_domgen_bank(args, train_data):\n if args.dataset == 'idda':\n base_path = os.path.join('..', 'data', 'idda', 'data', 'IDDAsmall')\n elif args.dataset == 'cityscapes':\n base_path = os.path.join('..', 'data', 'cityscapes', 'data', 'leftImg8bit')\n else:\n raise NotImplementedError\n if args.dom_gen == 'cfsi':\n if not os.path.isdir(\n os.path.join('..', 'data', args.dataset, 'data', args.clients_type, args.setting_type, 'bank_A')):\n os.makedirs(\n os.path.join('..', 'data', args.dataset, 'data', args.clients_type, args.setting_type, 'bank_A'))\n for cid in train_data.keys():\n for x in train_data[cid]['x']:\n img = Image.open(os.path.join(base_path, x))\n img_np = np.asarray(img, np.float32)\n\n amp = extract_amp_spectrum(img_np)\n sample = x.split('/')[-1].split('.')[0]\n np.save(\n os.path.join('..', 'data', args.dataset, 'data', args.clients_type, args.setting_type,\n 'bank_A', '{}_amp_{}'.format(cid, sample)), amp)\n elif args.dom_gen == 'lab':\n if not os.path.isdir(\n os.path.join('..', 'data', args.dataset, 'data', args.clients_type, args.setting_type, 'bank_lab')):\n os.makedirs(\n os.path.join('..', 'data', args.dataset, 'data', args.clients_type, args.setting_type, 'bank_lab'))\n for cid in train_data.keys():\n for x in train_data[cid]['x']:\n to_save = {}\n sample = x.split('/')[-1].split('.')[0]\n img = cv2.imread(os.path.join(base_path, x))\n img_lab = cv2.cvtColor(img, cv2.COLOR_BGR2Lab)\n mean_t = np.mean(img_lab, axis=(0, 1))\n std_t = np.std(img_lab, axis=(0, 1))\n to_save['mean'] = mean_t\n to_save['std'] = std_t\n file_name = '%s_%s.pkl' % (cid, sample)\n file_path = os.path.join('..', 'data', args.dataset, 'data', args.clients_type, args.setting_type,\n 'bank_lab', file_name)\n with open(file_path, 'wb') as f:\n pickle.dump(to_save, f)\n f.close()\n\n\ndef setup_clients(args, logger, writer, model, world_size, rank, num_gpu, device=None, ckpt_path=None,\n disable_ddp=False):\n if args.dataset == 'cityscapes':\n train_data_dir = os.path.join('..', 'data', args.dataset, 'data', args.clients_type, 'train')\n test_data_dir = os.path.join('..', 'data', args.dataset, 'data', args.clients_type, 'test')\n elif args.dataset == 'idda':\n train_data_dir = os.path.join('..', 'data', args.dataset, 'data', args.clients_type, args.setting_type, 'train')\n test_data_dir = os.path.join('..', 'data', args.dataset, 'data', args.clients_type, args.setting_type, 'test')\n else:\n 
train_data_dir = os.path.join('..', 'data', args.dataset, 'data', 'train')\n test_data_dir = os.path.join('..', 'data', args.dataset, 'data', 'test')\n\n train_data, test_data = read_data(train_data_dir, test_data_dir)\n\n if args.dom_gen is not None:\n create_domgen_bank(args, train_data)\n\n if args.framework == 'centralized' and args.algorithm == 'FedAvg':\n train_data_all = {'x': [], 'y': []}\n for c in train_data.keys():\n train_data_all['x'].extend(train_data[c]['x'])\n train_data_all['y'].extend(train_data[c]['y'])\n train_data = {'centralized_user': train_data_all}\n\n train_clients = create_clients(args, train_data, test_data, model, world_size, rank, device, logger, writer,\n num_gpu, train=True, ckpt_path=ckpt_path, disable_ddp=disable_ddp)\n\n test_clients = create_clients(args, train_data, test_data, model, world_size, rank, device, logger, writer, num_gpu,\n train=False, ckpt_path=ckpt_path, disable_ddp=disable_ddp)\n\n return train_clients, test_clients\n","repo_name":"Erosinho13/FedDrive","sub_path":"src/utils/client_utils.py","file_name":"client_utils.py","file_ext":"py","file_size_in_byte":7106,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"71"} +{"seq_id":"2688067789","text":"#!/usr/bin/env python\n\n\"\"\"\nThis extractor triggers when a file is added to a dataset in Clowder.\n\nIt checks for _left and _right BIN files to convert them into\nJPG and TIF formats.\n \"\"\"\n\nimport os\nimport shutil\nimport tempfile\n\nfrom pyclowder.utils import CheckMessage\nfrom pyclowder.datasets import download_metadata, upload_metadata, remove_metadata, submit_extraction\nfrom terrautils.metadata import get_extractor_metadata, get_terraref_metadata, get_season_and_experiment\nfrom terrautils.extractors import TerrarefExtractor, is_latest_file, check_file_in_dataset, load_json_file, \\\n build_metadata, build_dataset_hierarchy_crawl, upload_to_dataset, file_exists, contains_required_files\nfrom terrautils.formats import create_geotiff, create_image\nfrom terrautils.spatial import geojson_to_tuples, geojson_to_tuples_betydb\nimport terraref.stereo_rgb\n\n\nclass StereoBin2JpgTiff(TerrarefExtractor):\n def __init__(self):\n super(StereoBin2JpgTiff, self).__init__()\n\n # parse command line and load default logging configuration\n self.setup(sensor='rgb_geotiff')\n\n def check_message(self, connector, host, secret_key, resource, parameters):\n if \"rulechecked\" in parameters and parameters[\"rulechecked\"]:\n return CheckMessage.download\n\n self.start_check(resource)\n\n if not is_latest_file(resource):\n self.log_skip(resource, \"not latest file\")\n return CheckMessage.ignore\n\n # Check for a left and right BIN file - skip if not found\n if not contains_required_files(resource, ['_left.bin', '_right.bin']):\n self.log_skip(resource, \"missing required files\")\n return CheckMessage.ignore\n\n # Check metadata to verify we have what we need\n md = download_metadata(connector, host, secret_key, resource['id'])\n if get_terraref_metadata(md):\n if not self.overwrite and get_extractor_metadata(md, self.extractor_info['name'], self.extractor_info['version']):\n # Make sure outputs properly exist\n timestamp = resource['dataset_info']['name'].split(\" - \")[1]\n left_tiff = self.sensors.create_sensor_path(timestamp, opts=['left'])\n right_tiff = self.sensors.create_sensor_path(timestamp, opts=['right'])\n if file_exists(left_tiff) and file_exists(right_tiff):\n self.log_skip(resource, \"metadata v%s and outputs already exist\" % 
self.extractor_info['version'])\n # Have TERRA-REF metadata, but not any from this extractor\n return CheckMessage.download\n else:\n self.log_error(resource, \"no terraref metadata found; sending to cleaner\")\n submit_extraction(connector, host, secret_key, resource['id'], \"terra.metadata.cleaner\")\n return CheckMessage.ignore\n\n def process_message(self, connector, host, secret_key, resource, parameters):\n self.start_message(resource)\n\n # Get left/right files and metadata\n img_left, img_right, terra_md_full = None, None, None\n for fname in resource['local_paths']:\n if fname.endswith('_dataset_metadata.json'):\n all_dsmd = load_json_file(fname)\n terra_md_full = get_terraref_metadata(all_dsmd, 'stereoTop')\n elif fname.endswith('_left.bin'):\n img_left = fname\n elif fname.endswith('_right.bin'):\n img_right = fname\n if None in [img_left, img_right, terra_md_full]:\n raise ValueError(\"could not locate all files & metadata in processing\")\n\n timestamp = resource['dataset_info']['name'].split(\" - \")[1]\n\n # Fetch experiment name from terra metadata\n season_name, experiment_name, updated_experiment = get_season_and_experiment(timestamp, 'stereoTop', terra_md_full)\n if None in [season_name, experiment_name]:\n raise ValueError(\"season and experiment could not be determined\")\n\n # Determine output directory\n self.log_info(resource, \"Hierarchy: %s / %s / %s / %s / %s / %s / %s\" % (season_name, experiment_name, self.sensors.get_display_name(),\n timestamp[:4], timestamp[5:7], timestamp[8:10], timestamp))\n target_dsid = build_dataset_hierarchy_crawl(host, secret_key, self.clowder_user, self.clowder_pass, self.clowderspace,\n season_name, experiment_name, self.sensors.get_display_name(),\n timestamp[:4], timestamp[5:7], timestamp[8:10],\n leaf_ds_name=self.sensors.get_display_name() + ' - ' + timestamp)\n left_tiff = self.sensors.create_sensor_path(timestamp, opts=['left'])\n right_tiff = self.sensors.create_sensor_path(timestamp, opts=['right'])\n uploaded_file_ids = []\n\n # Attach LemnaTec source metadata to Level_1 product if necessary\n target_md = download_metadata(connector, host, secret_key, target_dsid)\n if not get_extractor_metadata(target_md, self.extractor_info['name']):\n self.log_info(resource, \"uploading LemnaTec metadata to ds [%s]\" % target_dsid)\n remove_metadata(connector, host, secret_key, target_dsid, self.extractor_info['name'])\n terra_md_trim = get_terraref_metadata(all_dsmd)\n if updated_experiment is not None:\n terra_md_trim['experiment_metadata'] = updated_experiment\n terra_md_trim['raw_data_source'] = host + (\"\" if host.endswith(\"/\") else \"/\") + \"datasets/\" + resource['id']\n level1_md = build_metadata(host, self.extractor_info, target_dsid, terra_md_trim, 'dataset')\n upload_metadata(connector, host, secret_key, target_dsid, level1_md)\n\n try:\n left_shape = terraref.stereo_rgb.get_image_shape(terra_md_full, 'left')\n gps_bounds_left = geojson_to_tuples(terra_md_full['spatial_metadata']['left']['bounding_box'])\n right_shape = terraref.stereo_rgb.get_image_shape(terra_md_full, 'right')\n gps_bounds_right = geojson_to_tuples(terra_md_full['spatial_metadata']['right']['bounding_box'])\n except KeyError:\n self.log_error(resource, \"spatial metadata not properly identified; sending to cleaner\")\n submit_extraction(connector, host, secret_key, resource['id'], \"terra.metadata.cleaner\")\n return\n\n if (not file_exists(left_tiff)) or self.overwrite:\n # Perform actual processing\n self.log_info(resource, \"creating %s\" % 
left_tiff)\n left_image = terraref.stereo_rgb.process_raw(left_shape, img_left, None)\n create_geotiff(left_image, gps_bounds_left, left_tiff, None, True,\n self.extractor_info, terra_md_full, compress=True)\n self.created += 1\n self.bytes += os.path.getsize(left_tiff)\n # Check if the file should be uploaded, even if it was already created\n found_in_dest = check_file_in_dataset(connector, host, secret_key, target_dsid, left_tiff)\n if not found_in_dest:\n self.log_info(resource, \"uploading %s\" % left_tiff)\n fileid = upload_to_dataset(connector, host, self.clowder_user, self.clowder_pass, target_dsid, left_tiff)\n uploaded_file_ids.append(host + (\"\" if host.endswith(\"/\") else \"/\") + \"files/\" + fileid)\n\n\n if (not file_exists(right_tiff)) or self.overwrite:\n # Perform actual processing\n self.log_info(resource, \"creating %s\" % right_tiff)\n right_image = terraref.stereo_rgb.process_raw(right_shape, img_right, None)\n create_geotiff(right_image, gps_bounds_right, right_tiff, None, True,\n self.extractor_info, terra_md_full, compress=True)\n self.created += 1\n self.bytes += os.path.getsize(right_tiff)\n # Check if the file should be uploaded, even if it was already created\n found_in_dest = check_file_in_dataset(connector, host, secret_key, target_dsid, right_tiff)\n if not found_in_dest:\n self.log_info(resource, \"uploading %s\" % right_tiff)\n fileid = upload_to_dataset(connector, host, self.clowder_user, self.clowder_pass, target_dsid, right_tiff)\n uploaded_file_ids.append(host + (\"\" if host.endswith(\"/\") else \"/\") + \"files/\" + fileid)\n\n # Trigger additional extractors\n self.log_info(resource, \"triggering downstream extractors\")\n submit_extraction(connector, host, secret_key, target_dsid, \"terra.stereo-rgb.rgbmask\")\n submit_extraction(connector, host, secret_key, target_dsid, \"terra.stereo-rgb.nrmac\")\n submit_extraction(connector, host, secret_key, target_dsid, \"terra.plotclipper_tif\")\n\n # Tell Clowder this is completed so subsequent file updates don't daisy-chain\n if len(uploaded_file_ids) > 0:\n extractor_md = build_metadata(host, self.extractor_info, target_dsid, {\n \"files_created\": uploaded_file_ids\n }, 'dataset')\n self.log_info(resource, \"uploading extractor metadata to raw dataset\")\n remove_metadata(connector, host, secret_key, resource['id'], self.extractor_info['name'])\n try:\n upload_metadata(connector, host, secret_key, resource['id'], extractor_md)\n except:\n self.log_info(resource, \"problem uploading extractor metadata...\")\n\n self.end_message(resource)\n\nif __name__ == \"__main__\":\n extractor = StereoBin2JpgTiff()\n extractor.start()\n","repo_name":"terraref/extractors-stereo-rgb","sub_path":"bin2tif/terra_bin2tif.py","file_name":"terra_bin2tif.py","file_ext":"py","file_size_in_byte":9653,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"12228228112","text":"\"\"\"\nCounts letter occurences in the README.md.\n\nhttps://spark.apache.org/docs/latest/quick-start.html\n\nRun with:\npython example.py\n\"\"\"\n\nfrom pyspark.sql import SparkSession\n\nreadme = \"README.md\"\n\nspark = SparkSession.builder.appName(\"SimpleExample\").getOrCreate()\ndf = spark.read.text(readme).cache()\n\nnumAs = df.filter(df.value.contains('a')).count()\nnumBs = df.filter(df.value.contains('b')).count()\n\nprint(\"Lines with a: %i, lines with b: %i\" % (numAs, 
numBs))\n\nspark.stop()\n","repo_name":"domtriola/software-notes","sub_path":"technologies/spark/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"40988650846","text":"# Import used libraries\nimport pandas as pd\nimport numpy as np\nimport cv2\nimport os\nimport csv\nimport matplotlib.pyplot as plt\nimport sklearn\nfrom math import ceil\nfrom sklearn.model_selection import train_test_split\nimport random\n\n# This setting is applied to remove \"CUDNN_STATUS_ALLOC_FAILED\" error on my local environment.\n# For more info: https://stackoverflow.com/a/65203824\nos.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'\n\n# Generator to yield X, y dataset\n# This generator is created to reduce the memory consumption while training the model\ndef generator(samples, batch_size=32):\n num_samples = len(samples)\n while 1: # Loop forever so the generator never terminates\n # Shuffle the samples\n sklearn.utils.shuffle(samples)\n\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset+batch_size]\n \n # Create empty lists\n images = []\n measurements = []\n\n # For each sample in the batch\n for batch_sample in range(len(batch_samples)):\n # Get the full path of the image\n imgName = batch_samples.iloc[batch_sample].img_path\n imgPath = img_folder + imgName\n # Store the image and steering\n img = cv2.imread(imgPath)\n meas = batch_samples.iloc[batch_sample].steering\n # Append the original image\n images.append(img)\n measurements.append(meas)\n # Augment data by flipping the images and invert measurements\n images.append(cv2.flip(img,1))\n measurements.append(meas*-1.0)\n\n # Convert to numpy arrays\n X = np.array(images)\n y = np.array(measurements)\n # Shuffle again and yield\n X, y = sklearn.utils.shuffle(X, y)\n yield X, y\n\n# Set the folder paths\ndata_folder = 'data\\\\'\nimg_folder = data_folder + 'IMG\\\\'\nlog_path = data_folder + 'driving_log.csv'\n\n# Read the csv file as a pandas dataframe, and set the name of the columns\ncolumn_names = ['Center_Image', 'Left_Image', 'Right_Image', 'Steering', 'Throttle', 'Brake', 'Speed']\ndf_log = pd.read_csv(log_path, names=column_names)\n\n# Data Evaluation\nprint(df_log.shape)\nprint(df_log.describe())\nplt.figure()\ndf_log.Steering.plot(kind='hist')\nplt.ylabel('Samples')\nplt.xlabel('Steering')\nplt.savefig('images\\\\steeringhist.png')\nplt.close()\n\n# Isolate image name for center, left and right images\ndf_log.Center_Image = df_log.Center_Image.apply(lambda x: x.split('\\\\')[-1])\ndf_log.Left_Image = df_log.Left_Image.apply(lambda x: x.split('\\\\')[-1])\ndf_log.Right_Image = df_log.Right_Image.apply(lambda x: x.split('\\\\')[-1])\n\n# Drop unnecessary columns\ndf_log.drop(columns=['Throttle', 'Brake', 'Speed'], inplace=True)\n\n# Create a new dataframe for cleanup\ndf_new = pd.DataFrame()\n\n# Set steering value for left and right images with correction\ncorrection = 0.2\nleftSteering = df_log.Steering + correction\nrightSteering = df_log.Steering - correction\n\n# Create a new image column with all center, left, right images\ndf_new['img_path'] = pd.concat([df_log.Center_Image, df_log.Left_Image, df_log.Right_Image], ignore_index=True)\n# Create a steering column with corresponding measurement values\ndf_new['steering'] = pd.concat([df_log.Steering, leftSteering, rightSteering], ignore_index=True)\n\n# Import Keras libraries\nfrom keras.models import Model, Sequential\nfrom 
keras.layers import Flatten, Dense, Lambda, Cropping2D\nfrom keras.layers.convolutional import Convolution2D\nfrom keras.layers.pooling import MaxPooling2D\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\nfrom sklearn.model_selection import train_test_split\n\n# set batch size\nbatch_size = 32\n\n# Split the dataset to training and validation samples\ntrain_samples, validation_samples = train_test_split(df_new, test_size=0.2)\n\n# compile and train the model using the generator function\ntrain_generator = generator(train_samples, batch_size=batch_size)\nvalidation_generator = generator(validation_samples, batch_size=batch_size)\n\n# Create Model and add layers to it\n# The model architecture used here is based on NVIDIA article:\n# End-to-end Deep Learning for Self-Driving Cars\n# https://developer.nvidia.com/blog/deep-learning-self-driving-cars/\nmodel = Sequential()\nmodel.add(Lambda(lambda x: ((x/255.0) - 0.5), input_shape=(160,320,3)))\nmodel.add(Cropping2D(cropping=((50,20),(0,0))))\nmodel.add(Convolution2D(24, kernel_size=(5,5), strides=(2,2), activation='relu'))\nmodel.add(Convolution2D(36, kernel_size=(5,5), strides=(2,2), activation='relu'))\nmodel.add(Convolution2D(48, kernel_size=(5,5), strides=(2,2), activation='relu'))\nmodel.add(Convolution2D(64, kernel_size=(3,3), activation='relu'))\nmodel.add(Convolution2D(64, kernel_size=(3,3), activation='relu'))\nmodel.add(Flatten())\nmodel.add(Dense(100))\nmodel.add(Dense(50))\nmodel.add(Dense(10))\nmodel.add(Dense(1))\n\n# Compile the model\nmodel.compile(loss='mse', optimizer='adam')\n\n# Print summary\nprint(model.summary())\n\ncheckpointpath = 'tmp'\n# Set the callbacks\nmy_callbacks= [\n EarlyStopping(monitor = 'val_loss', patience=1),\n ModelCheckpoint(filepath=checkpointpath, monitor='val_loss', save_best_only=True)\n]\n \n# Train model with generator\nhistory_object = model.fit(x=train_generator, \\\n validation_data=validation_generator, \\\n epochs=10, \\\n verbose=1, \\\n validation_steps=ceil(len(validation_samples) * 2/batch_size), \\\n steps_per_epoch=ceil(len(train_samples) * 2/batch_size), \\\n callbacks=my_callbacks)\n\n\nplt.figure()\nplt.plot(history_object.history['loss'])\nplt.plot(history_object.history['val_loss'])\nplt.title('model mean squared error loss')\nplt.ylabel('mean squared error loss')\nplt.xlabel('epoch')\nplt.legend(['training set', 'validation set'], loc='upper right')\nplt.savefig('images\\\\loss.png', bbox_inches='tight')\n\n# Save the model\nmodel.save('model.h5')\n","repo_name":"vktemel/behavioralCloning","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"14831666689","text":"\"\"\"!\n@file\nFile containing all commonly used checks for PyPO user input.\n\"\"\"\n\nimport numpy as np\nimport os\nimport pathlib\nimport re\n\nimport PyPO.Config as Config\nimport PyPO.WorldParam as world\nfrom PyPO.Enums import FieldComponents, CurrentComponents\n\nnThreads_cpu = os.cpu_count() - 1 if os.cpu_count() > 1 else 1\nPO_modelist = [\"JM\", \"EH\", \"JMEH\", \"EHP\", \"FF\", \"scalar\"]\n\ndef getIndex(name, nameList):\n \"\"\"!\n Get the regular expression for checking if an object already exists.\n Counts the amount of occurrences in order to avoid conflicting names.\n\n @param name Name of object.\n @param nameList List of names to check.\n\n @returns num Increment of highest occurrence of number.\n \"\"\"\n\n regex = f\"(?= 1:\n errStr += 
errMsg_value(\"ecc\", elemDict[\"ecc\"], elemDict[\"name\"])\n \n else:\n errStr += errMsg_field(\"ecc\", elemDict[\"name\"])\n \n elif elemDict[\"pmode\"] == \"manual\":\n if \"coeffs\" in elemDict:\n errStr += block_ndarray(\"coeffs\", elemDict, (3,))\n else:\n errStr += errMsg_field(\"coeffs\", elemDict[\"name\"])\n\n else:\n args = [\"focus\", \"manual\"]\n errStr += errMsg_option(\"pmode\", elemDict[\"pmode\"], elemDict[\"name\"], args=args)\n\n elif elemDict[\"type\"] == 3:\n if not \"name\" in elemDict:\n elemDict[\"name\"] = \"Plane\"\n \n num = getIndex(elemDict[\"name\"], nameList)\n if num > 0:\n elemDict[\"name\"] = elemDict[\"name\"] + \"_{}\".format(num)\n \n if \"gmode\" in elemDict:\n if elemDict[\"gmode\"] == \"xy\" or elemDict[\"gmode\"] == 0:\n if \"lims_x\" in elemDict:\n errStr += block_ndarray(\"lims_x\", elemDict, (2,))\n else:\n errStr += errMsg_field(\"lims_x\", elemDict[\"name\"])\n\n if \"lims_y\" in elemDict:\n errStr += block_ndarray(\"lims_y\", elemDict, (2,))\n else:\n errStr += errMsg_field(\"lims_y\", elemDict[\"name\"])\n\n elif elemDict[\"gmode\"] == \"uv\" or elemDict[\"gmode\"] == 1:\n if \"lims_u\" in elemDict:\n errStr += block_ndarray(\"lims_u\", elemDict, (2,))\n\n if elemDict[\"lims_u\"][0] < 0:\n errStr += errMsg_value(\"lims_u\", elemDict[\"lims_u\"][0], elemDict[\"name\"])\n\n if elemDict[\"lims_u\"][1] < 0:\n errStr += errMsg_value(\"lims_u\", elemDict[\"lims_u\"][1], elemDict[\"name\"])\n else:\n errStr += errMsg_field(\"lims_u\", elemDict[\"name\"])\n\n if \"lims_v\" in elemDict:\n errStr += block_ndarray(\"lims_v\", elemDict, (2,))\n\n if elemDict[\"lims_v\"][0] < 0:\n errStr += errMsg_value(\"lims_v\", elemDict[\"lims_v\"][0], elemDict[\"name\"])\n \n if elemDict[\"lims_v\"][1] > 360:\n errStr += errMsg_value(\"lims_v\", elemDict[\"lims_v\"][1], elemDict[\"name\"])\n else:\n errStr += errMsg_field(\"lims_v\", elemDict[\"name\"])\n\n if \"ecc_uv\" in elemDict:\n if not ((isinstance(elemDict[\"ecc_uv\"], float) or isinstance(elemDict[\"ecc_uv\"], int))):\n errStr += errMsg_type(\"ecc_uv\", type(elemDict[\"ecc_uv\"]), elemDict[\"name\"], [float, int])\n\n if elemDict[\"ecc_uv\"] < 0 or elemDict[\"ecc_uv\"] > 1:\n errStr += errMsg_value(\"ecc_uv\", elemDict[\"ecc_uv\"], elemDict[\"name\"])\n\n if \"rot_uv\" in elemDict:\n if not ((isinstance(elemDict[\"rot_uv\"], float) or isinstance(elemDict[\"rot_uv\"], int))):\n errStr += errMsg_type(\"rot_uv\", type(elemDict[\"rot_uv\"]), elemDict[\"name\"], [float, int])\n \n if \"gcenter\" in elemDict:\n errStr += block_ndarray(\"gcenter\", elemDict, (2,))\n\n elif elemDict[\"gmode\"] == \"AoE\" or elemDict[\"gmode\"] == 2:\n if \"lims_Az\" in elemDict:\n errStr += block_ndarray(\"lims_Az\", elemDict, (2,))\n else:\n errStr += errMsg_field(\"lims_Az\", elemDict[\"name\"])\n\n if \"lims_El\" in elemDict:\n errStr += block_ndarray(\"lims_El\", elemDict, (2,))\n else:\n errStr += errMsg_field(\"lims_El\", elemDict[\"name\"])\n \n else:\n args = [\"xy\", \"uv\", \"AoE (plane only)\"]\n errStr += errMsg_option(\"gmode\", elemDict[\"gmode\"], elemDict[\"name\"], args=args)\n\n else:\n errStr += errMsg_field(\"gmode\", elemDict[\"name\"])\n\n if \"gridsize\" in elemDict:\n errStr += block_ndarray(\"gridsize\", elemDict, (2,))\n\n if not (isinstance(elemDict[\"gridsize\"][0], np.int64) or isinstance(elemDict[\"gridsize\"][0], np.int32)):\n errStr += errMsg_type(\"gridsize[0]\", type(elemDict[\"gridsize\"][0]), elemDict[\"name\"], [np.int64, np.int32])\n\n if not (isinstance(elemDict[\"gridsize\"][1], np.int64) or 
isinstance(elemDict[\"gridsize\"][1], np.int32)):\n errStr += errMsg_type(\"gridsize[1]\", type(elemDict[\"gridsize\"][1]), elemDict[\"name\"], [np.int64, np.int32])\n \n if elemDict[\"gridsize\"][0] < 0 or elemDict[\"gridsize\"][1] < 0:\n clog.warning(f\"Negative gridsize encountered in {elemDict['name']}. Changing sign.\")\n elemDict[\"gridsize\"] = np.absolute(elemDict[\"gridsize\"])\n\n else:\n errStr += errMsg_field(\"gridsize\", elemDict[\"name\"])\n\n\n if errStr:\n errList = errStr.split(\"\\n\")[:-1]\n\n for err in errList:\n clog.error(err)\n raise InputReflError()\n \n else:\n return 0\n\ndef check_TubeRTDict(TubeRTDict, nameList, clog):\n \"\"\"!\n Check a tubular input frame dictionary.\n\n @param TubeRTDict A TubeRTDict object.\n @param namelist List containing names of frames in System.\n @param clog CustomLogger object.\n\n @see TubeRTDict\n \"\"\"\n\n errStr = \"\"\n \n if \"name\" not in TubeRTDict:\n TubeRTDict[\"name\"] = \"TubeFrame\"\n \n num = getIndex(TubeRTDict[\"name\"], nameList)\n\n if num > 0:\n TubeRTDict[\"name\"] = TubeRTDict[\"name\"] + \"_{}\".format(num)\n \n if \"nRays\" in TubeRTDict:\n if not isinstance(TubeRTDict[\"nRays\"], int):\n errStr += errMsg_type(\"nRays\", type(TubeRTDict[\"nRays\"]), \"TubeRTDict\", int)\n \n elif TubeRTDict[\"nRays\"] < 0:\n clog.warning(f\"Negative value {TubeRTDict['nRays']} encountered in TubeRTDict. Changing sign\")\n TubeRTDict[\"nRays\"] *= -1\n\n else:\n errStr += errMsg_field(\"nRays\", \"TubeRTDict\")\n\n if \"nRing\" in TubeRTDict:\n if not isinstance(TubeRTDict[\"nRing\"], int):\n errStr += errMsg_type(\"nRing\", type(TubeRTDict[\"nRays\"]), \"TubeRTDict\", int)\n \n elif TubeRTDict[\"nRing\"] < 0:\n clog.warning(f\"Negative value {TubeRTDict['nRing']} encountered in TubeRTDict. Changing sign\")\n TubeRTDict[\"nRing\"] *= -1\n\n else:\n errStr += errMsg_field(\"nRing\", \"TubeRTDict\")\n\n\n if \"angx0\" in TubeRTDict:\n if not ((isinstance(TubeRTDict[\"angx0\"], float) or isinstance(TubeRTDict[\"angx0\"], int))):\n errStr += errMsg_type(\"angx0\", type(TubeRTDict[\"angx0\"]), \"TubeRTDict\", [float, int])\n\n else:\n errStr += errMsg_field(\"angx0\", \"TubeRTDict\")\n\n\n if \"angy0\" in TubeRTDict:\n if not ((isinstance(TubeRTDict[\"angy0\"], float) or isinstance(TubeRTDict[\"angy0\"], int))):\n errStr += errMsg_type(\"angy0\", type(TubeRTDict[\"angy0\"]), \"TubeRTDict\", [float, int])\n\n else:\n errStr += errMsg_field(\"angy0\", \"TubeRTDict\")\n\n\n if \"x0\" in TubeRTDict:\n if not ((isinstance(TubeRTDict[\"x0\"], float) or isinstance(TubeRTDict[\"x0\"], int))):\n errStr += errMsg_type(\"x0\", type(TubeRTDict[\"x0\"]), \"TubeRTDict\", [float, int])\n \n elif TubeRTDict[\"x0\"] < 0:\n clog.warning(f\"Encountered negative value {TubeRTDict['x0']} in field 'x0' in TubeRTDict {TubeRTDict['name']}. Changing sign.\")\n TubeRTDict[\"x0\"] *= -1\n\n else:\n errStr += errMsg_field(\"x0\", \"TubeRTDict\")\n\n\n if \"y0\" in TubeRTDict:\n if not ((isinstance(TubeRTDict[\"y0\"], float) or isinstance(TubeRTDict[\"y0\"], int))):\n errStr += errMsg_type(\"y0\", type(TubeRTDict[\"y0\"]), \"TubeRTDict\", [float, int])\n \n elif TubeRTDict[\"y0\"] < 0:\n clog.warning(f\"Encountered negative value {TubeRTDict['y0']} in field 'y0' in TubeRTDict {TubeRTDict['name']}. 
Changing sign.\")\n TubeRTDict[\"y0\"] *= -1\n\n else:\n errStr += errMsg_field(\"y0\", \"TubeRTDict\")\n\n if errStr:\n errList = errStr.split(\"\\n\")[:-1]\n\n for err in errList:\n clog.error(err)\n raise InputRTError()\n\ndef check_GRTDict(GRTDict, nameList, clog):\n \"\"\"!\n Check a Gaussian input frame dictionary.\n\n @param GRTDict A GRTDict object.\n @param namelist List containing names of frames in System.\n @param clog CustomLogger object.\n\n @see GRTDict\n \"\"\"\n\n errStr = \"\"\n \n if \"name\" not in GRTDict:\n GRTDict[\"name\"] = \"GaussFrame\"\n \n num = getIndex(GRTDict[\"name\"], nameList)\n\n if num > 0:\n GRTDict[\"name\"] = GRTDict[\"name\"] + \"_{}\".format(num)\n\n if \"nRays\" in GRTDict:\n if not isinstance(GRTDict[\"nRays\"], int):\n errStr += errMsg_type(\"nRays\", type(GRTDict[\"nRays\"]), \"GRTDict\", int)\n\n elif GRTDict[\"nRays\"] < 0:\n clog.warning(f\"Negative value {GRTDict['nRays']} encountered in GRTDict. Changing sign\")\n GRTDict[\"nRays\"] *= -1\n\n else:\n errStr += errMsg_field(\"nRays\", \"GRTDict\")\n\n if \"lam\" in GRTDict:\n if GRTDict[\"lam\"] == 0 + 0j:\n clog.info(f\"Never heard of a complex-valued wavelength of zero, but good try.. Therefore changing wavelength now to 'lam' equals {np.pi:.42f}!\")\n GRTDict[\"lam\"] = np.pi\n\n if not (isinstance(GRTDict[\"lam\"], float) or isinstance(GRTDict[\"lam\"], int)):\n errStr += errMsg_type(\"lam\", type(GRTDict[\"lam\"]), \"GRTDict\", [float, int])\n \n elif GRTDict[\"lam\"] < 0:\n clog.warning(f\"Encountered negative value {GRTDict['lam']} in field 'lam' in GRTDict {GRTDict['name']}. Changing sign.\")\n GRTDict[\"lam\"] *= -1\n\n else:\n errStr += errMsg_field(\"lam\", \"GRTDict\")\n\n if \"x0\" in GRTDict:\n if not ((isinstance(GRTDict[\"x0\"], float) or isinstance(GRTDict[\"x0\"], int))):\n errStr += errMsg_type(\"x0\", type(GRTDict[\"x0\"]), \"GRTDict\", [float, int])\n\n elif GRTDict[\"x0\"] < 0:\n clog.warning(f\"Encountered negative value {GRTDict['x0']} in field 'x0' in GRTDict {GRTDict['name']}. Changing sign.\")\n GRTDict[\"x0\"] *= -1\n\n else:\n errStr += errMsg_field(\"x0\", \"GRTDict\")\n\n\n if \"y0\" in GRTDict:\n if not ((isinstance(GRTDict[\"y0\"], float) or isinstance(GRTDict[\"y0\"], int))):\n errStr += errMsg_type(\"y0\", type(GRTDict[\"y0\"]), \"GRTDict\", [float, int])\n \n elif GRTDict[\"y0\"] < 0:\n clog.warning(f\"Encountered negative value {GRTDict['y0']} in field 'y0' in GRTDict {GRTDict['name']}. Changing sign.\")\n GRTDict[\"y0\"] *= -1\n\n else:\n errStr += errMsg_field(\"y0\", \"GRTDict\")\n\n if \"n\" in GRTDict:\n if not ((isinstance(GRTDict[\"n\"], float) or isinstance(GRTDict[\"n\"], int))):\n errStr += errMsg_type(\"n\", type(GRTDict[\"n\"]), \"GRTDict\", [float, int])\n\n if errStr:\n errList = errStr.split(\"\\n\")[:-1]\n\n for err in errList:\n clog.error(err)\n raise InputRTError()\n\ndef check_runRTDict(runRTDict, elements, frames, clog, extern=True):\n \"\"\"!\n Check a ray-trace propagation input dictionary.\n\n @param runRTDict A runRTDict.\n @param elements List containing names of surfaces in System.\n @param frames List containing names of frames in System.\n @param clog CustomLogger object.\n @param extern Do not raise InputRTError if \"extern\" = False.\n\n @returns errStr The errorstring. 
Only returned if \"extern\" = True.\n \"\"\"\n\n errStr = \"\"\n \n cuda = has_CUDA()\n\n if \"fr_in\" not in runRTDict:\n errStr += errMsg_field(\"fr_in\", \"runRTDict\")\n\n else:\n errStr = check_frameSystem(runRTDict[\"fr_in\"], frames, clog, errStr)\n \n if \"t_name\" not in runRTDict:\n errStr += errMsg_field(\"t_name\", \"runRTDict\")\n else:\n errStr = check_elemSystem(runRTDict[\"t_name\"], elements, clog, errStr)\n\n if \"fr_out\" not in runRTDict:\n errStr += errMsg_field(\"fr_out\", \"runRTDict\")\n\n else:\n num = getIndex(runRTDict[\"fr_out\"], frames)\n\n if num > 0:\n runRTDict[\"fr_out\"] = runRTDict[\"fr_out\"] + \"_{}\".format(num)\n\n if \"tol\" not in runRTDict:\n runRTDict[\"tol\"] = 1e-3\n\n elif \"tol\" in runRTDict:\n if isinstance(runRTDict[\"tol\"], float) or isinstance(runRTDict[\"tol\"], int):\n if runRTDict[\"tol\"] < 0:\n clog.warning(\"Negative tolerances are not allowed. Changing sign.\")\n runRTDict[\"tol\"] *= -1\n \n else:\n runRTDict[\"tol\"] = 1e-3\n\n if \"t0\" not in runRTDict:\n runRTDict[\"t0\"] = 1\n\n else:\n if not (isinstance(runRTDict[\"t0\"], float) or isinstance(runRTDict[\"t0\"], int)):\n runRTDict[\"t0\"] = 1\n\n\n if \"device\" not in runRTDict:\n runRTDict[\"device\"] = \"CPU\"\n \n if \"device\" in runRTDict:\n if runRTDict[\"device\"] != \"CPU\" and runRTDict[\"device\"] != \"GPU\":\n clog.warning(f\"Device {runRTDict['device']} unknown. Defaulting to CPU.\")\n runRTDict[\"device\"] = \"CPU\"\n\n if runRTDict[\"device\"] == \"GPU\" and not cuda:\n clog.warning(f\"No PyPO CUDA libraries found. Defaulting to CPU.\")\n runRTDict[\"device\"] = \"CPU\"\n\n if runRTDict[\"device\"] == \"CPU\":\n if \"nThreads\" in runRTDict:\n if isinstance(runRTDict[\"nThreads\"], int):\n if runRTDict[\"nThreads\"] > nThreads_cpu:\n clog.warning(f\"Insufficient CPU threads available, automatically reducing threadcount.\")\n runRTDict[\"nThreads\"] = nThreads_cpu\n\n else:\n runRTDict[\"nThreads\"] = nThreads_cpu\n\n elif runRTDict[\"device\"] == \"GPU\":\n if \"nThreads\" not in runRTDict:\n runRTDict[\"nThreads\"] = 256\n\n if extern:\n if errStr:\n errList = errStr.split(\"\\n\")[:-1]\n\n for err in errList:\n clog.error(err)\n raise RunRTError()\n \n else:\n return errStr\n\ndef check_PSDict(PSDict, nameList, clog):\n \"\"\"!\n Check a point source input beam dictionary.\n\n @param PSDict A PSDict object.\n @param namelist List containing names of fields in System.\n @param clog CustomLogger object.\n\n @see PSDict\n \"\"\"\n\n errStr = \"\"\n \n if \"name\" not in PSDict:\n PSDict[\"name\"] = \"PointSourcePO\"\n \n num = getIndex(PSDict[\"name\"], nameList)\n\n if num > 0:\n PSDict[\"name\"] = PSDict[\"name\"] + \"_{}\".format(num)\n\n if \"lam\" in PSDict:\n if PSDict[\"lam\"] == 0 + 0j:\n clog.info(f\"Never heard of a complex-valued wavelength of zero, but good try.. Therefore changing wavelength now to 'lam' equals {np.pi:.42f}!\")\n PSDict[\"lam\"] = np.pi\n\n if not ((isinstance(PSDict[\"lam\"], float) or isinstance(PSDict[\"lam\"], int))):\n errStr += errMsg_type(\"lam\", type(PSDict[\"lam\"]), \"PSDict\", [float, int])\n \n elif PSDict[\"lam\"] < 0:\n clog.warning(f\"Encountered negative value {PSDict['lam']} in field 'lam' in PSDict {PSDict['name']}. 
Changing sign.\")\n PSDict[\"lam\"] *= -1\n\n else:\n errStr += errMsg_field(\"lam\", \"PSDict\")\n\n if \"phase\" in PSDict:\n if not ((isinstance(PSDict[\"phase\"], float) or isinstance(PSDict[\"phase\"], int))):\n errStr += errMsg_type(\"phase\", type(PSDict[\"phase\"]), \"PSDict\", [float, int])\n\n else:\n PSDict[\"phase\"] = 0\n\n if \"pol\" in PSDict:\n errStr += block_ndarray(\"pol\", PSDict, (3,))\n\n else:\n PSDict[\"pol\"] = np.array([1, 0, 0])\n\n if \"E0\" in PSDict:\n if not ((isinstance(PSDict[\"E0\"], float) or isinstance(PSDict[\"E0\"], int))):\n errStr += errMsg_type(\"E0\", type(PSDict[\"E0\"]), \"PSDict\", [float, int])\n\n else:\n PSDict[\"E0\"] = 1\n\n if errStr:\n errList = errStr.split(\"\\n\")[:-1]\n\n for err in errList:\n clog.error(err)\n raise InputPOError()\n\ndef check_GPODict(GPODict, nameList, clog):\n \"\"\"!\n Check a Gaussian input beam dictionary.\n\n @param GPODict A GPODict object.\n @param namelist List containing names of fields in System.\n @param clog CustomLogger object.\n\n @see GPODict\n \"\"\"\n\n errStr = \"\"\n \n if \"name\" not in GPODict:\n GPODict[\"name\"] = \"GaussianBeamPO\"\n \n num = getIndex(GPODict[\"name\"], nameList)\n\n if num > 0:\n GPODict[\"name\"] = GPODict[\"name\"] + \"_{}\".format(num)\n\n if \"lam\" in GPODict:\n if GPODict[\"lam\"] == 0 + 0j:\n clog.info(f\"Never heard of a complex-valued wavelength of zero, but good try.. Therefore changing wavelength now to 'lam' equals {np.pi:.42f}!\")\n GPODict[\"lam\"] = np.pi\n\n if not ((isinstance(GPODict[\"lam\"], float) or isinstance(GPODict[\"lam\"], int))):\n errStr += errMsg_type(\"lam\", type(GPODict[\"lam\"]), \"GPODict\", [float, int])\n \n elif GPODict[\"lam\"] < 0:\n clog.warning(f\"Encountered negative value {GPODict['lam']} in field 'lam' in GPODict {GPODict['name']}. Changing sign.\")\n GPODict[\"lam\"] *= -1\n\n else:\n errStr += errMsg_field(\"lam\", \"GPODict\")\n\n if \"w0x\" in GPODict:\n if not ((isinstance(GPODict[\"w0x\"], float) or isinstance(GPODict[\"w0x\"], int))):\n errStr += errMsg_type(\"w0x\", type(GPODict[\"w0x\"]), \"GPODict\", [float, int])\n\n elif GPODict[\"w0x\"] < 0:\n clog.warning(f\"Encountered negative value {GPODict['w0x']} in field 'w0x' in GPODict {GPODict['name']}. Changing sign.\")\n GPODict[\"w0x\"] *= -1\n\n else:\n errStr += errMsg_field(\"w0x\", \"GPODict\")\n\n\n if \"w0y\" in GPODict:\n if not ((isinstance(GPODict[\"w0y\"], float) or isinstance(GPODict[\"w0y\"], int))):\n errStr += errMsg_type(\"w0y\", type(GPODict[\"w0y\"]), \"GPODict\", [float, int])\n \n elif GPODict[\"w0y\"] < 0:\n clog.warning(f\"Encountered negative value {GPODict['w0y']} in field 'w0y' in GPODict {GPODict['name']}. Changing sign.\")\n GPODict[\"w0y\"] *= -1\n\n else:\n errStr += errMsg_field(\"w0y\", \"GPODict\")\n\n if \"n\" in GPODict:\n if not ((isinstance(GPODict[\"n\"], float) or isinstance(GPODict[\"n\"], int))):\n errStr += errMsg_type(\"n\", type(GPODict[\"n\"]), \"GPODict\", [float, int])\n\n elif GPODict[\"n\"] < 1 and GPODict >= 0:\n clog.warning(\"Refractive indices smaller than unity are not allowed. 
Changing to 1.\")\n\n else:\n GPODict[\"n\"] = 1\n\n if \"dxyz\" in GPODict:\n if not ((isinstance(GPODict[\"dxyz\"], float) or isinstance(GPODict[\"dxyz\"], int))):\n errStr += errMsg_type(\"dxyz\", type(GPODict[\"dxyz\"]), \"GPODict\", [float, int])\n\n else:\n GPODict[\"dxyz\"] = 0\n\n if \"pol\" in GPODict:\n errStr += block_ndarray(\"pol\", GPODict, (3,))\n\n else:\n GPODict[\"pol\"] = np.array([1, 0, 0])\n\n if \"E0\" in GPODict:\n if not ((isinstance(GPODict[\"E0\"], float) or isinstance(GPODict[\"E0\"], int))):\n errStr += errMsg_type(\"E0\", type(GPODict[\"E0\"]), \"GPODict\", [float, int])\n\n else:\n GPODict[\"E0\"] = 1\n\n if errStr:\n errList = errStr.split(\"\\n\")[:-1]\n\n for err in errList:\n clog.error(err)\n raise InputPOError()\n\ndef check_runPODict(runPODict, elements, fields, currents, scalarfields, frames, clog):\n \"\"\"!\n Check a physical optics propagation input dictionary.\n\n @param runPODict A runPODict.\n @param elements List containing names of surfaces in System.\n @param currents List containing names of currents in System.\n @param scalarfields List containing names of scalarfields in System.\n @param clog CustomLogger object.\n \"\"\"\n\n errStr = \"\"\n\n cuda = has_CUDA()\n \n if not \"exp\" in runPODict:\n runPODict[\"exp\"] = \"fwd\"\n\n if \"t_name\" in runPODict:\n check_elemSystem(runPODict[\"t_name\"], elements, clog)\n\n else:\n errStr += errMsg_field(\"t_name\", \"runPODict\")\n \n if \"mode\" not in runPODict:\n errStr += errMsg_field(\"mode\", \"runPODict\")\n \n else:\n if runPODict[\"mode\"] not in PO_modelist:\n errStr += f\"{runPODict['mode']} is not a valid propagation mode.\\n\"\n\n if \"s_current\" in runPODict:\n errStr = check_currentSystem(runPODict[\"s_current\"], currents, clog, errStr)\n \n if \"s_scalarfield\" in runPODict:\n errStr = check_scalarfieldSystem(runPODict[\"s_scalarfield\"], scalarfields, clog, errStr)\n \n if runPODict[\"mode\"] == \"JM\":\n if \"name_JM\" not in runPODict:\n errStr += errMsg_field(\"name_JM\", \"runPODict\")\n \n else:\n num = getIndex(runPODict[\"name_JM\"], currents)\n\n if num > 0:\n runPODict[\"name_JM\"] = runPODict[\"name_JM\"] + \"_{}\".format(num)\n \n if runPODict[\"mode\"] == \"EH\":\n if \"name_EH\" not in runPODict:\n errStr += errMsg_field(\"name_EH\", \"runPODict\")\n \n else:\n num = getIndex(runPODict[\"name_EH\"], fields)\n\n if num > 0:\n runPODict[\"name_EH\"] = runPODict[\"name_EH\"] + \"_{}\".format(num)\n \n if runPODict[\"mode\"] == \"JMEH\":\n if \"name_EH\" not in runPODict:\n errStr += errMsg_field(\"name_EH\", \"runPODict\")\n \n else:\n num = getIndex(runPODict[\"name_EH\"], fields)\n\n if num > 0:\n runPODict[\"name_EH\"] = runPODict[\"name_EH\"] + \"_{}\".format(num)\n \n if \"name_JM\" not in runPODict:\n errStr += errMsg_field(\"name_JM\", \"runPODict\")\n \n num = getIndex(runPODict[\"name_JM\"], currents)\n\n if num > 0:\n runPODict[\"name_JM\"] = runPODict[\"name_JM\"] + \"_{}\".format(num)\n \n if runPODict[\"mode\"] == \"EHP\":\n if \"name_EH\" not in runPODict:\n errStr += errMsg_field(\"name_EH\", \"runPODict\")\n \n num = getIndex(runPODict[\"name_EH\"], fields)\n\n if num > 0:\n runPODict[\"name_EH\"] = runPODict[\"name_EH\"] + \"_{}\".format(num)\n \n if \"name_P\" not in runPODict:\n errStr += errMsg_field(\"name_P\", \"runPODict\")\n \n num = getIndex(runPODict[\"name_P\"], frames)\n\n if num > 0:\n runPODict[\"name_P\"] = runPODict[\"name_P\"] + \"_{}\".format(num)\n\n if runPODict[\"mode\"] == \"FF\":\n if \"name_EH\" not in runPODict:\n errStr += 
errMsg_field(\"name_EH\", \"runPODict\")\n \n num = getIndex(runPODict[\"name_EH\"], fields)\n\n if num > 0:\n runPODict[\"name_EH\"] = runPODict[\"name_EH\"] + \"_{}\".format(num)\n \n if runPODict[\"mode\"] == \"scalar\":\n if \"name_field\" not in runPODict:\n errStr += errMsg_field(\"name_field\", \"runPODict\")\n \n num = getIndex(runPODict[\"name_field\"], scalarfields)\n\n if num > 0:\n runPODict[\"name_field\"] = runPODict[\"name_field\"] + \"_{}\".format(num)\n \n if \"t_name\" not in runPODict:\n errStr += errMsg_field(\"t_name\", \"runRTDict\")\n else:\n errStr = check_elemSystem(runPODict[\"t_name\"], elements, clog, errStr)\n \n if \"epsilon\" not in runPODict:\n runPODict[\"epsilon\"] = 1\n\n if \"device\" not in runPODict:\n if cuda:\n runPODict[\"device\"] = \"GPU\"\n\n else:\n runPODict[\"device\"] = \"CPU\"\n\n if \"device\" in runPODict:\n if runPODict[\"device\"] != \"CPU\" and runPODict[\"device\"] != \"GPU\":\n clog.warning(f\"Device {runPODict['device']} unknown. Defaulting to CPU.\")\n runPODict[\"device\"] = \"CPU\"\n\n if runPODict[\"device\"] == \"GPU\" and not cuda:\n clog.warning(f\"No PyPO CUDA libraries found. Defaulting to CPU.\")\n runPODict[\"device\"] = \"CPU\"\n\n if runPODict[\"device\"] == \"CPU\":\n \n if \"nThreads\" in runPODict:\n if isinstance(runPODict[\"nThreads\"], int):\n if runPODict[\"nThreads\"] > nThreads_cpu:\n clog.warning(f\"Insufficient CPU threads available, automatically reducing threadcount.\")\n runPODict[\"nThreads\"] = nThreads_cpu\n\n else:\n runPODict[\"nThreads\"] = nThreads_cpu\n\n elif runPODict[\"device\"] == \"GPU\":\n if \"nThreads\" not in runPODict:\n runPODict[\"nThreads\"] = 256\n\n if errStr:\n errList = errStr.split(\"\\n\")[:-1]\n for err in errList:\n clog.error(err)\n \n raise RunPOError()\n\ndef check_hybridDict(hybridDict, elements, frames, fields, clog):\n \"\"\"!\n Check a hybrid propagation input dictionary.\n\n @param hybridDict A hybridDict.\n @param elements List containing names of surfaces in System.\n @param frames List containing names of frames in System.\n @param fields List containing names of frames in System.\n @param clog CustomLogger object.\n \"\"\"\n\n errStr = \"\"\n \n errStr += check_runRTDict(hybridDict, elements, frames, clog, extern=False)\n\n if \"field_in\" not in hybridDict:\n errStr += errMsg_field(\"field_in\", \"hybridDict\")\n else:\n errStr = check_fieldSystem(hybridDict[\"field_in\"], fields, clog, errStr)\n \n if \"field_out\" not in hybridDict:\n errStr += errMsg_field(\"field_out\", \"hybridDict\")\n else:\n num = getIndex(hybridDict[\"field_out\"], fields)\n\n if num > 0:\n hybridDict[\"field_out\"] = hybridDict[\"field_out\"] + \"_{}\".format(num)\n\n if \"start\" not in hybridDict:\n hybridDict[\"start\"] = None\n\n elif \"start\" in hybridDict:\n if hybridDict[\"start\"] is not None:\n errStr += block_ndarray(\"start\", hybridDict, (3,), cust_name=\"hybridDict\")\n \n if \"interp\" not in hybridDict:\n hybridDict[\"interp\"] = True\n \n elif \"interp\" in hybridDict:\n if not isinstance(hybridDict[\"interp\"], bool):\n errStr += errMsg_type(\"interp\", type(hybridDict[\"interp\"]), \"hybridDict\", bool)\n \n if \"comp\" not in hybridDict:\n hybridDict[\"comp\"] = FieldComponents.NONE\n \n if errStr:\n errList = errStr.split(\"\\n\")[:-1]\n\n for err in errList:\n clog.error(err)\n raise HybridPropError()\n\ndef check_aperDict(aperDict, clog):\n \"\"\"!\n CHeck if aperture dictionary is valid.\n\n @param aperDict An aperture dictionary.\n @param clog CustomLogger 
object.\n\n @see aperDict\n \"\"\"\n\n errStr = \"\"\n\n if \"plot\" in aperDict:\n if not isinstance(aperDict[\"plot\"], bool):\n errStr += errMsg_type(\"plot\", type(aperDict[\"plot\"]), \"aperDict\", bool)\n\n else:\n aperDict[\"plot\"] = True\n\n if \"center\" in aperDict:\n errStr += block_ndarray(\"center\", aperDict, (2,), cust_name=\"aperDict\")\n\n else:\n aperDict[\"center\"] = np.zeros(2)\n\n if not \"outer\" in aperDict:\n errStr += errMsg_field(\"outer\", \"aperDict\")\n \n if not \"inner\" in aperDict:\n errStr += errMsg_field(\"inner\", \"aperDict\")\n\n if errStr:\n errList = errStr.split(\"\\n\")[:-1]\n\n for err in errList:\n clog.error(err)\n raise ApertureError()\n\ndef check_ellipseLimits(ellipsoid, clog):\n \"\"\"!\n Check if ellipsoid limits are valid points.\n If not, reduces limits to acceptable values.\n\n @param ellipsoid A reflDict containing description of ellipsoid surface.\n @param clog CustomLogger object.\n \"\"\"\n\n buff = 1000\n idx_lim = 0\n if ellipsoid[\"coeffs\"][1] < ellipsoid[\"coeffs\"][0]:\n idx_lim = 1\n\n if ellipsoid[\"gmode\"] == 0:\n if np.absolute(ellipsoid[\"lims_x\"][0]) > ellipsoid[\"coeffs\"][idx_lim]:\n sgn = np.sign((ellipsoid[\"lims_x\"][0]))\n clog.warning(f\"Lower x-limit of {ellipsoid['lims_x'][0]:.3f} incompatible with ellipsoid {ellipsoid['name']}. Changing to {sgn*ellipsoid['coeffs'][idx_lim]}.\")\n ellipsoid[\"lims_x\"][0] = sgn * (ellipsoid[\"coeffs\"][idx_lim] + ellipsoid[\"coeffs\"][0] / buff)\n \n if np.absolute(ellipsoid[\"lims_x\"][1]) > ellipsoid[\"coeffs\"][idx_lim]:\n sgn = np.sign((ellipsoid[\"lims_x\"][1]))\n clog.warning(f\"Upper x-limit of {ellipsoid['lims_x'][1]:.3f} incompatible with ellipsoid {ellipsoid['name']}. Changing to {sgn*ellipsoid['coeffs'][idx_lim]}.\")\n ellipsoid[\"lims_x\"][1] = sgn * (ellipsoid[\"coeffs\"][idx_lim] - ellipsoid[\"lims_x\"][1] / buff)\n \n if np.absolute(ellipsoid[\"lims_y\"][0]) > ellipsoid[\"coeffs\"][idx_lim]:\n sgn = np.sign((ellipsoid[\"lims_y\"][0]))\n clog.warning(f\"Lower y-limit of {ellipsoid['lims_y'][0]:.3f} incompatible with ellipsoid {ellipsoid['name']}. Changing to {sgn*ellipsoid['coeffs'][idx_lim]}.\")\n ellipsoid[\"lims_y\"][0] = sgn * (ellipsoid[\"coeffs\"][idx_lim] + ellipsoid[\"lims_y\"][0] / buff)\n \n if np.absolute(ellipsoid[\"lims_y\"][1]) > ellipsoid[\"coeffs\"][idx_lim]:\n sgn = np.sign((ellipsoid[\"lims_y\"][1]))\n clog.warning(f\"Upper y-limit of {ellipsoid['lims_y'][1]:.3f} incompatible with ellipsoid {ellipsoid['name']}. Changing to {sgn*ellipsoid['coeffs'][idx_lim]}.\")\n ellipsoid[\"lims_y\"][1] = sgn * (ellipsoid[\"coeffs\"][idx_lim] - ellipsoid[\"lims_y\"][1] / buff)\n\n elif ellipsoid[\"gmode\"] == 1:\n if np.absolute(ellipsoid[\"lims_u\"][0]) > ellipsoid[\"coeffs\"][idx_lim]:\n clog.warning(f\"Lower u-limit of {ellipsoid['lims_u'][0]:.3f} incompatible with ellipsoid {ellipsoid['name']}. Changing to {ellipsoid['coeffs'][idx_lim]}.\")\n ellipsoid[\"lims_u\"][0] = ellipsoid[\"coeffs\"][idx_lim] - ellipsoid[\"lims_u\"][0] / buff\n \n if np.absolute(ellipsoid[\"lims_u\"][1]) > ellipsoid[\"coeffs\"][idx_lim]:\n clog.warning(f\"Upper u-limit of {ellipsoid['lims_u'][1]:.3f} incompatible with ellipsoid {ellipsoid['name']}. 
Changing to {ellipsoid['coeffs'][idx_lim]}.\")\n ellipsoid[\"lims_u\"][1] = ellipsoid[\"coeffs\"][idx_lim] - ellipsoid[\"lims_u\"][1] / buff\n\ndef check_sameBound(beams, checkDict, clog):\n \"\"\"!\n Check if beams to be merged are defined on same surface.\n If not, raise MergeBeam Error.\n\n @param beams Fields/currents to be merged.\n @param checkDict System c=dictionary containing fields/currents.\n @param clog CustomLogger object.\n \"\"\"\n\n errStr = \"\"\n surf0 = checkDict[beams[0]].surf\n for i in range(len(beams) - 1):\n if checkDict[beams[i+1]].surf != surf0:\n errStr += errMsg_mergebeam(beams[i+1], surf0, checkDict[beams[i+1]].surf)\n \n if errStr:\n errList = errStr.split(\"\\n\")[:-1]\n for err in errList:\n clog.error(err)\n \n raise MergeBeamError()\n\ndef check_associations(associations, fieldName, frameName, surf, clog):\n \"\"\"!\n Check if field and frame are associated on the same surface.\n Used for hybrid propagations.\n Currently, can only have one single association per surface!\n\n @param associations All present associations in system.\n @param fieldName Name of field to be propagated.\n @param frameName Name of frame to be propagated.\n @param surf Name of surface from which a hybrid propagation is performed.\n @param clog CustomLogger object.\n \"\"\"\n\n if surf not in associations.keys():\n clog.error(f\"Surface {surf} not found in associations.\")\n \n raise HybridPropError\n \n else:\n if (fieldName not in associations[surf]) or (frameName not in associations[surf]):\n clog.error(f\"Field {fieldName} and frame {frameName} are not associated.\")\n \n raise HybridPropError\n\n\n\n\n\n","repo_name":"PyPO-dev/PyPO","sub_path":"src/PyPO/Checks.py","file_name":"Checks.py","file_ext":"py","file_size_in_byte":48702,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"38292687113","text":"\nfrom utils.utils import *\n\n\nclass TwoColorability():\n def __init__(self, graph: Graph):\n self.graph = graph\n\n def getNeighborNodesColors(self, node: Node):\n colors = 0\n for key in node.outflowEdges:\n targetNode: Node = key\n colors |= targetNode.color\n return colors\n\n def isTwoColorable(self, node: Node):\n if node.visited:\n return True\n colors = self.getNeighborNodesColors(node)\n if colors == 3:\n return False\n node.color = 3 & ~colors\n node.visited = True\n for key in node.outflowEdges:\n targetNode: Node = self.graph.nodes[key]\n if targetNode.visited:\n continue\n if not self.isTwoColorable(targetNode):\n return False\n return True\n\n def isTwoColorable(self):\n # clear color of all nodes\n for node in self.graph.nodes.values():\n node.color = 0\n\n for node in self.graph.nodes.values():\n if node.visited:\n continue\n if not self.isTwoColorable(node):\n print(\"graph is not two colorable\")\n return False\n print(\"graph is two colorable\")\n return True\n\n\n","repo_name":"yongjunchai/algorithms-illuminated","sub_path":"algorithmIlliminated_4/k_sat.py","file_name":"k_sat.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"18257672211","text":"import sys\nimport bisect\nfrom collections import deque\nimport itertools\nimport math\nimport heapq\nimport random\n\n# import sys\n# sys.setrecursionlimit(10**6)\nfrom sys import stdin\n# readline = stdin.readline\n# sr=lambda: readline()\n\nimport time\nimport random\nsr=lambda: input()\nir=lambda: int(sr())\nlr=lambda: list(map(int, 
sr().split()))\n\ninf=10**18\n# mod=10**9+7\nmod = 998244353\n\n# import numpy as np\n\nif __name__=='__main__':\n    n,k=lr()\n    if n==0:\n        print(0)\n        sys.exit()\n    mod = 10**5\n    visited = [False for i in range(mod)]\n    now = n\n    base_nums = []\n    while not visited[now]:\n        visited[now] = True\n        base_nums.append(now)\n        add = 0\n        ret = now\n        for i in range(7):\n            add+=ret%10\n            ret = ret//10\n        now+=add\n        now%=mod\n    roop_first_num = now\n    roop_flg = False\n    roop_nums = []\n    for num in base_nums:\n        if num == roop_first_num:\n            roop_flg=True\n        if roop_flg:\n            roop_nums.append(num)\n    roop_len = len(roop_nums)\n    ret_len = len(base_nums)-roop_len\n    if k < ret_len:\n        # (reconstructed from context) the k-th term falls in the pre-cycle prefix\n        print(base_nums[k])\n    else:\n        # otherwise index into the repeating cycle\n        print(roop_nums[(k-ret_len)%roop_len])\n    if start > end:\n        start, end = end, start\n        print(\"Wrong order of date, automatically switched\")\n\n    params = {\n        'start_date': start.strftime('%Y-%m-%dT%H:%M:%S')+'+00:00',\n        'end_date': end.strftime('%Y-%m-%dT%H:%M:%S')+'+00:00'\n    }\n\n    r = requests.get(url, headers={'Authorization':get_pwr_gen_token()}, params=params)\n    if r.status_code == 200:\n        data = r.json()\n        df_list = []\n        for x in data['actual_generations_per_production_type']:\n            ts = pd.DataFrame(x['values']).set_index('end_date')['value'].to_frame(x['production_type'])\n            ts.index = pd.to_datetime(ts.index)\n            df_list += [ts]\n        res = pd.concat(df_list, axis=1)\n        res['TOTAL'] = res.sum(1)\n        return res\n    else:\n        print(\"Query Error, see content of the results to understand what happened\")\n        return r","repo_name":"simbnrs/rte-explorer","sub_path":"rte_explorer/get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
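The first program in the merged record above answers a "k-th term of an eventually periodic sequence" query: it walks the digit-sum map until a state repeats, then reads the answer from the pre-cycle prefix or from the cycle. A self-contained sketch of that technique, with illustrative names (kth_term and the sample call are not from the record):

def kth_term(start, step, k):
    # Walk the deterministic map, recording terms until a state repeats;
    # the first repeated state marks the entry point of the cycle.
    seen, terms = {}, []
    state = start
    while state not in seen:
        seen[state] = len(terms)
        terms.append(state)
        state = step(state)
    mu = seen[state]              # length of the non-repeating prefix
    cycle = terms[mu:]            # the repeating part
    return terms[k] if k < mu else cycle[(k - mu) % len(cycle)]

# The digit-sum map used above, truncated mod 10**5, queried at a huge index:
step = lambda x: (x + sum(int(d) for d in str(x))) % 10**5
print(kth_term(7, step, 10**12))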
+{"seq_id":"23722869711","text":"# https://www.codetree.ai/missions/2/problems/slanted-rectangle/description?utm_source=clipboard&utm_medium=text\n# Slanted rectangle: the set of cells visited while moving diagonally from one starting cell and making a counterclockwise loop\n# The loop must start from the bottom cell and run counterclockwise, moving at least once in each direction (it must not leave the grid)\n# Find the loop that maximizes the sum of the numbers on the visited cells\n\nn = int(input())\nnums = []\nfor _ in range(n):\n    nums.append(list(map(int, input().split())))\n\n\ndef in_range(x, y):\n    return 0 <= x and x < n and 0 <= y and y < n\n\n\ndef get_score(x, y, k, l):\n    dxs, dys = [-1, -1, 1, 1], [1, -1, -1, 1]\n    move_nums = [k, l, k, l]\n\n    sum_nums = 0\n\n    # Walk along the boundary\n    for dx, dy, move_num in zip(dxs, dys, move_nums):\n        for _ in range(move_num):\n            x, y = x + dx, y + dy\n\n            # Return 0 as soon as the walk leaves the grid\n            if not in_range(x, y):\n                return 0\n\n            sum_nums += nums[x][y]\n    return sum_nums\n\n\nret = 0\n# (i, j) -> directions 1, 2, 3, 4\n# Draw the rectangle by moving [k, l, k, l] steps\nfor i in range(n):\n    for j in range(n):\n        for k in range(1, n):\n            for l in range(1, n):\n                ret = max(ret, get_score(i, j, k, l))\n\nprint(ret)\n\n# Iterate over the starting cells while growing the side lengths one at a time\n# possible from (2, 1) up to (n, n)\n\n# direction 1: (i - 1, j + 1)\n# direction 2: (i - 1, j - 1)\n# direction 3: (i + 1, j - 1)\n# direction 4: (i + 1, j + 1)\n\n# dxs = [-1, -1, 1, 1]\n# dys = [1, -1, -1, 1]\n\n# max_sum = 0\n\n'''\n# 2 * 2 rhombus walk \nfor col in range(2, n):\n    for row in range(1, n-1):\n        temp_sum = 0 \n        for dx, dy in zip(dxs, dys):\n            if col + dx < 0 or col + dx >= n or row + dy < 0 or row + dy >= n:\n                break\n            col += dx\n            row += dy\n\n            temp_sum += nums[col][row]\n\n        print('temp_sum: ', temp_sum)\n        max_sum = max(max_sum, temp_sum)\nprint('max_sum: ', max_sum)\n'''\n'''\n# My attempt - wrong \n# Grow the horizontal side \nfor i in range(1, n-1): # since it can only grow up to n-2 \n    # Grow the vertical side \n    for j in range(1, n-1):\n        for col in range(2, n):\n            for row in range(1, n-1):\n                temp_sum = 0 \n                for dx, dy in zip(dxs, dys):\n                    if col + (dx * i) < 0 or col + (dx * i) >= n or row + (dy * j) < 0 or row + (dy * j) >= n:\n                        break\n                    col += (dx * i)\n                    row += (dy * j)\n\n                    temp_sum += nums[col][row]\n                # print('temp_sum: ', i, j, col, row, temp_sum)\n                max_sum = max(max_sum, temp_sum)\n    # print('max_sum: ', max_sum)\nprint(max_sum)\n'''","repo_name":"Park-EunBi/algorithm","sub_path":"2023/CodeTree/알고리즘_입문/01.Simulation/05.기울어진_직사각형.py","file_name":"05.기울어진_직사각형.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
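The get_score walk above traces a closed lattice path from per-direction step vectors and step counts. A small self-contained check of that boundary-walk pattern on a fixed grid (walk_sum and the sample grid are illustrative, not from the record):

def walk_sum(grid, x, y, k, l):
    n = len(grid)
    total = 0
    # Four diagonal legs of lengths k, l, k, l trace the slanted rectangle.
    for (dx, dy), steps in zip([(-1, 1), (-1, -1), (1, -1), (1, 1)], [k, l, k, l]):
        for _ in range(steps):
            x, y = x + dx, y + dy
            if not (0 <= x < n and 0 <= y < n):
                return 0  # the walk left the grid
            total += grid[x][y]
    return total

grid = [[1, 2, 3],
        [4, 5, 6],
        [7, 8, 9]]
# k = l = 1 starting at (2, 1) visits (1, 2), (0, 1), (1, 0), (2, 1): 6 + 2 + 4 + 8
assert walk_sum(grid, 2, 1, 1, 1) == 20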
+{"seq_id":"3921080488","text":"\nimport sqlite3\n\n'''\nThis class provides a thin wrapper around the built-in SQLite database for\ncreate/read/update/delete operations, simplifying work with the following tables:\n    port-device binding table: port_device_pair\n        |__ port: int prime\n        |__ device: text\n        |__ type: text\n        |__ protocol: text\n    history table: hitstory_record\n        |__ time: text\n        |__ port: int\n        |__ power: float\n\n'''\n'''\n    test.db reference\n    CREATE TABLE hitstory_record (\n    time    DATETIME,\n    port    INTEGER REFERENCES port_device_pair (port),\n    power   REAL\n);\n'''\n'''\n    Basic usage: first open the local database (say the connection is conn),\n    then create a cursor,\n    have the cursor execute the SQL statement,\n    and call commit on conn (essential!),\n    otherwise the change is only visible to the in-memory cursor.\n\n    SQLite allows several readers at once but only a single writer, so DBOperation\n    will need to be extended with transaction support in the future.\n\n    Current benchmark: at most 16 rows inserted per second\n'''\nclass DBOperation:\n    'Database operations'\n    def __init__(self):\n        self.conn =[]\n        self.cursor=[]\n    def OpenDB(self,dbpath):\n        self.conn = sqlite3.connect(dbpath)\n    \n    def Close(self):\n        self.conn.close()\n    \n    def addPair(self,port,device,_type,protocol):\n        self.cursor = self.conn.cursor()\n        try:\n            sql = ''' insert into port_device_pair\n            (port, device, type,protocol)\n            values\n            (:m_port, :m_device,:m_type, :m_protocol)'''\n\n            self.cursor.execute(sql,{'m_port':port,'m_device':device,\"m_type\":_type,\"m_protocol\":protocol})\n            self.conn.commit()\n        except:\n            print(\"Error - addPair\")\n        else:\n            self.cursor.close()\n\n\n    def getPairs(self): # return all rows\n        self.cursor = self.conn.cursor()\n\n        sql='''select * from port_device_pair'''\n        result = self.cursor.execute(sql)\n        rst = result.fetchall()\n        self.cursor.close()\n        return rst\n    def delPair(self,port):\n        self.cursor = self.conn.cursor()\n        sql =''' delete from port_device_pair where port=(:m_port)'''\n        self.cursor.execute(sql,{'m_port':port})\n        self.conn.commit()\n        self.cursor.close()\n    def updatePair(self,port,device,_type,protocol): \n        self.delPair(port)\n        self.addPair(port,device,_type,protocol)\n\n\n","repo_name":"2Beyong/python_SCADA_example","sub_path":"DBOperation.py","file_name":"DBOperation.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"33427814045","text":"from Stats_manager import Stats_manager\nfrom Item import Item\n\n\nclass Inventory:\n\n    def __init__(self):\n        self.__items = list()\n\n    def add_item(self, item):\n        if item in self.__items:\n            item.count += 1\n        else:\n            self.__items.append(item)\n\n    def add_items(self, items):\n        for item in items:\n            self.add_item(item)\n\n    def remove_item(self, item):\n        if item not in self.__items:\n            raise Exception(\"The specified item does not exist in the inventory\")\n\n        if item.count > 1:\n            item.count -= 1\n        else:\n            self.__items.remove(item)\n\n    def get_item_from_name(self, item_name):\n        sought_item = None\n        for item in self.__items:\n            if item_name == item.name:\n                sought_item = item\n\n        return sought_item\n\n    def use_item(self, item, stats_manager):\n        if item not in self.__items:\n            raise Exception(\"The specified item does not exist in the inventory\")\n\n        item.do_it(stats_manager)\n        self.remove_item(item)\n\n    def get_items_name_to_list(self):\n        names = list()\n\n        for item in self.__items:\n            names.append(item.name + \" \" + str(item.count) + \" pcs\")\n\n        return names\n\n    def get_item_from_index(self, index):\n        item = None\n        try:\n            item = self.__items[index]\n            return item\n        except:\n            raise Exception(\"No item with the specified index exists in the inventory\")\n\n\n\n\n\n","repo_name":"iampolushko/Eternal_land","sub_path":"Inventory.py","file_name":"Inventory.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"71396113191","text":"import os\n\nimport cv2\n\nif not os.path.exists('./extract_result/'):\n    os.mkdir('./extract_result/')\n\n\ndef get_video_pic(path_to_videoname):\n    # Open the video\n    cap = cv2.VideoCapture(path_to_videoname)\n    # Seek to its middle frame\n    cap.set(1, int(cap.get(7) / 2))\n    # read() returns a (status, frame) pair; status is True when the frame\n    # was grabbed successfully and False otherwise.\n    status, frame = cap.read()\n    if status:\n        cv2.imwrite('./extract_result/cover.jpg', frame)\n    # Release video resources.\n    cap.release()\n\n\nget_video_pic('../pikachu.mp4')\n","repo_name":"Lornatang/DayHR","sub_path":"Python/AI-ToolBox/preprocess ToolBox/keyframes_extract_tool/signal/keyframe_extraction.py","file_name":"keyframe_extraction.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"71"}
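The keyframe extractor above drives OpenCV with the raw property ids 1 (frame position) and 7 (frame count). The same mid-frame grab reads more clearly with the named constants; a minimal sketch (function name and output path are illustrative):

import cv2

def grab_middle_frame(video_path, out_path="cover.jpg"):
    cap = cv2.VideoCapture(video_path)
    try:
        # cv2.CAP_PROP_FRAME_COUNT is property id 7, cv2.CAP_PROP_POS_FRAMES is id 1.
        frame_count = cap.get(cv2.CAP_PROP_FRAME_COUNT)
        cap.set(cv2.CAP_PROP_POS_FRAMES, int(frame_count / 2))
        status, frame = cap.read()
        if status:
            cv2.imwrite(out_path, frame)
        return status
    finally:
        cap.release()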
+{"seq_id":"9808387501","text":"from __future__ import annotations\nfrom typing import TYPE_CHECKING\nfrom primitives.graphical_object import GraphicalObject\nfrom objects.object_type import ObjectType\nif TYPE_CHECKING:\n    from cairo import Context\n    from primitives.matrix import Matrix, Vector2\n\nclass Line2D(GraphicalObject):\n    # Define Constructor\n    def __init__(self, point_a: Vector2, point_b: Vector2) -> None:\n        # Call Super Constructor\n        super().__init__()\n        # Define Attributes\n        self.point_a = point_a\n        self.point_b = point_b\n        # Define Pipeline Attributes\n        self.pipeline_point_a = point_a\n        self.pipeline_point_b = point_b\n    def __str__(self) -> str:\n        if self.in_pipeline:\n            return f\"[({self.pipeline_point_a.get_x()}, {self.pipeline_point_a.get_y()}), ({self.pipeline_point_b.get_x()}, {self.pipeline_point_b.get_y()})]\"\n        else:\n            return f\"[({self.point_a.get_x()}, {self.point_a.get_y()}), ({self.point_b.get_x()}, {self.point_b.get_y()})]\"\n    # Type Definition\n    @staticmethod\n    def get_type() -> ObjectType:\n        return ObjectType.LINE_2D\n    # Define Pipeline Methods\n    def pipeline(self):\n        # Reset Pipeline Points\n        self.pipeline_point_a = self.point_a\n        self.pipeline_point_b = self.point_b\n        # Call Super\n        super().pipeline()\n    def pipeline_apply(self):\n        if self.in_pipeline:\n            # Persist Pipeline Points\n            self.point_a = self.pipeline_point_a\n            self.point_b = self.pipeline_point_b\n            # Call Super\n            super().pipeline_apply()\n    # Define Methods\n    def draw(self, cairo: Context):\n        # Get Points\n        point_a = self.pipeline_point_a if self.in_pipeline else self.point_a\n        point_b = self.pipeline_point_b if self.in_pipeline else self.point_b\n        # Cast points into homogeneous space\n        homo2d_point_a = point_a.as_vec3(1)\n        homo2d_point_b = point_b.as_vec3(1)\n        # Cast to Vector2\n        (x1, y1) = homo2d_point_a.try_into_vec2().as_tuple()\n        (x2, y2) = homo2d_point_b.try_into_vec2().as_tuple()\n        # Set Color\n        cairo.set_source_rgba(*self.color)\n        # Draw line in canvas\n        cairo.move_to(x1, y1)\n        cairo.line_to(x2, y2)\n        cairo.stroke()\n    \n    def transform(self, transformation: Matrix):\n        # Transform Points\n        if self.in_pipeline:\n            # Pipelines\n            self.pipeline_point_a = (self.pipeline_point_a.as_vec3(1) * transformation).try_into_vec2()\n            self.pipeline_point_b = (self.pipeline_point_b.as_vec3(1) * transformation).try_into_vec2()\n        else:\n            # Raw Transform\n            self.point_a = (self.point_a.as_vec3(1) * transformation).try_into_vec2()\n            self.point_b = (self.point_b.as_vec3(1) * transformation).try_into_vec2()\n        # Return Chain\n        return self\n\n    def get_center_coords(self) -> Vector2:\n        # Get Points\n        point_a = self.pipeline_point_a if self.in_pipeline else self.point_a\n        point_b = self.pipeline_point_b if self.in_pipeline else self.point_b\n        # Return Center\n        center_coord = (point_a.as_vec3(1) + point_b.as_vec3(1)) * 0.5\n        return center_coord.try_into_vec2()","repo_name":"TrendingTechnology/py-igs","sub_path":"py_igs/objects/line_2d.py","file_name":"line_2d.py","file_ext":"py","file_size_in_byte":3250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"71"}
+{"seq_id":"38476723648","text":"import serial\nimport numpy\nimport matplotlib.pyplot as plt\nfrom drawnow import *\n\nardu_data = serial.Serial('COM3', 38400) # Initialize COM and Baud rate\n\n## Create arrays for plotting data\ngx_ar = [] \ngy_ar = []\ngz_ar = []\n\nax_ar = []\nay_ar = []\naz_ar = []\n\ndata_count = 0 # Counter for x axis\nplt.style.use('classic')\n\ndef graph(): # Create a function to make plot\n    plt.ion() # Get ready to plot live data \n    plt.subplot(211) # Subplot for gyro values\n    plt.grid(True) # Add grid\n    plt.ylabel('Gyro Values') # Rename y axis label\n    #plt.ylim(-20000, 20000) # Limit y axis \n    plt.plot(gx_ar, 'r', label = \" X axis \") # Plotting gyro values for each axis\n    plt.plot(gy_ar, 'k', label = \" Y axis \") \n    plt.plot(gz_ar, 'b', label = \" Z axis \") \n    plt.legend(loc = 'upper left') # Location of plot legend\n\n    plt.subplot(212) # Subplot for accelerometer 
values\n plt.grid(True)\n plt.ylabel('Accelerometer Values')\n #plt.ylim(-20000, 20000)\n plt.plot(ax_ar, 'r', label = \" X axis \")\n plt.plot(ay_ar, 'k', label = \" Y axis \") \n plt.plot(az_ar, 'b', label = \" Z axis \") \n plt.legend(loc = 'upper left')\n\n\nwhile True: # Loop forever\n while (ardu_data.inWaiting() == 0): # Wait until there is data\n pass\n ardu_string = ardu_data.readline() # Read the coming line\n ardu_decoded = ardu_string.decode('utf-8') # Decode data to make it readable\n data_array = ardu_decoded.split(',') # Split incoming data to group them\n \n ## Convert values to float\n ax = float(data_array[0]) \n ay = float(data_array[1])\n az = float(data_array[2])\n\n gx = float(data_array[3]) \n gy = float(data_array[4])\n gz = float(data_array[5])\n\n ## Append the values to their array\n gx_ar.append(gx)\n gy_ar.append(gy)\n gz_ar.append(gz)\n ax_ar.append(ax)\n ay_ar.append(ay)\n az_ar.append(az)\n\n plt.pause(0.001) # Pause to catch up\n drawnow(graph) # Draw the graph\n\n data_count = data_count + 1 # Add one to x axis counter\n\n if(data_count > 50): # If counter > 25 delete first element of the array\n gx_ar.pop(0)\n gy_ar.pop(0)\n gz_ar.pop(0)\n ax_ar.pop(0)\n ay_ar.pop(0)\n az_ar.pop(0)\n\n","repo_name":"mertcanakin/arduino-datalogger","sub_path":"live-plotter/plotter.py","file_name":"plotter.py","file_ext":"py","file_size_in_byte":2521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"2072267611","text":"\"\"\"Initialise workspace and environment\"\"\"\nimport json\nfrom azureml.core.environment import CondaDependencies, Environment\nfrom azureml.core import Workspace\n\n\ndef dep_from_pkg_list(pkg_list):\n \"\"\"Get conda dependencies from list of packages\n\n Parameters\n ----------\n pkg_list : list\n list of conda packages\n\n Returns\n -------\n CondaDependencies\n collection of conda dependencies\n \"\"\"\n return CondaDependencies().create(conda_packages=pkg_list)\n\n# Initialise variables\nENV_NAME = 'heart-failure'\nrun_pkg = ['pip', 'joblib', 'pandas', 'numpy', 'pyodbc', 'scikit-learn']\ntest_pkg = run_pkg + ['pytest', 'pylint', 'requests']\nworkspace = Workspace.from_config()\n\n# Create dependencies for running\nrun_dependencies = dep_from_pkg_list(run_pkg)\nrun_dependencies.save('./conda_env.yaml')\n\n# Push to environment\nenv = Environment.from_conda_specification(ENV_NAME, './conda_env.yaml')\nenv.python.conda_dependencies=run_dependencies\nenv.register(workspace=workspace)\n\n# Create dependencies for testing\ntest_dependencies = dep_from_pkg_list(test_pkg)\ntest_dependencies.save('./conda_env_test.yaml')\n\n# Initialise metadata\nwith open('./metadata.json', 'w') as f:\n json.dump({'env_name': ENV_NAME}, f)\n","repo_name":"mhmohona/MicrosoftML-ProjectShowcasing","sub_path":"project/Quy Vu/src/initialise.py","file_name":"initialise.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"71"} +{"seq_id":"71947342951","text":"import pandas as pd\nimport numpy as np\nfrom inspectpd.inspect_object.inspect_object import inspect_object\n\n# inspect_cat \ndef inspect_cat(df) :\n '''\n Summary and comparison of the levels in categorical columns\n \n Parameters\n ----------\n \n df: A pandas dataframe.\n \n Returns \n ----------\n \n A pandas dataframe with columns:\n + col_name: object\n column of strings containing column names of df\n + cnt: int64\n integer column containing count of unique levels \n found 
in each column of df.\n + common: object\n column of strings containing the name of the most common level\n + common_pcnt: float64\n the percentage of each column occupied by the most common level \n shown in common.\n + levels: object\n a list containing relative frequency dataframes for each column in df.\n '''\n \n # get the string / categorical columns\n df_cat = df.select_dtypes(['category', 'object'])\n # new df with columns names as first col\n out = pd.DataFrame(df_cat.columns, columns = ['col_name'])\n # tabulate values in each column\n levels_list = []\n for col in df_cat.columns :\n col_vals = df_cat[col] \\\n .value_counts(dropna = False) \\\n .reset_index(drop = False)\n col_vals.columns = ['value', 'cnt'] \n col_vals['pcnt'] = 100 * col_vals.cnt / np.sum(col_vals.cnt)\n col_vals = col_vals.sort_values(['pcnt'], ascending = False)\n col_vals = col_vals[['value', 'pcnt', 'cnt']]\n levels_list.append(col_vals)\n # number of unique values per column\n out['cnt'] = [x.shape[0] for x in levels_list]\n out['common'] = [x.value[0] for x in levels_list]\n out['common_pcnt'] = [x.pcnt[0] for x in levels_list]\n out['levels'] = levels_list\n out = out \\\n .sort_values('col_name') \\\n .reset_index(drop = True)\n # subclass output, adds plot methods\n out = inspect_object(out, my_attr = 'inspect_cat')\n return out \n","repo_name":"alastairrushworth/inspectpd","sub_path":"inspectpd/inspect/inspect_cat.py","file_name":"inspect_cat.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"71"} +{"seq_id":"36297105422","text":"from catalog.models import Category\nfrom mystore import settings\n\n\ndef mystore(request):\n \n return {\n 'active_categories':Category.objects.filter(is_active=True), \n 'site_name':settings.SITE_NAME,\n 'meta_keywords':settings.META_KEYWORDS,\n 'meta_description':settings.META_KEYWORDS,\n 'request':request,\n }","repo_name":"stormslowly/estore-django-practise","sub_path":"utils/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"19712736155","text":"from initialize import *\r\n\r\ncities={}\r\n\r\ndef audit_cities(elem):\r\n \"\"\"Find all city names in the XML file\r\n\r\n Args:\r\n elem (XML Element): The element with the city name\r\n\r\n Returns:\r\n None\r\n \"\"\"\r\n for tag in elem.iter('tag'):\r\n if tag.attrib['k'].startswith('addr:city'):\r\n cities[tag.attrib['v']]=cities.get(tag.attrib['v'],0)+1\r\n \r\nfor elem in get_element(OSM_FILE):\r\n audit_cities(elem)\r\n\r\npprint.pprint(cities)\r\n","repo_name":"meetnaren/Data-Analysis","sub_path":"P3 - Wrangle an OpenStreetMap dataset - SQL/audit_cities.py","file_name":"audit_cities.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"1847908163","text":"import multiprocessing\nfrom typing import Generator, List, Sequence, TypeVar\n\nT = TypeVar(\"T\")\n\n\ndef chunks(lst: List[T], n: int) -> Generator[List[T], None, None]:\n for i in range(0, len(lst), n):\n yield lst[i : i + n]\n\n\nn = 1000\ncpu_count = multiprocessing.cpu_count()\nc: List[List[int]] = list(chunks(list(range(1, n)), multiprocessing.cpu_count()))\n\n\ndef f(numbers: Sequence[int]):\n for a in numbers:\n a_cubed = a**3\n for b in range(a, n):\n if a == b:\n continue\n b_cubed = b**3\n for c in range(1, 
n):\n if c == a or c == b:\n continue\n c_cubed = c**3\n if c_cubed < a_cubed + b_cubed:\n for d in range(c, n):\n if d == a or d == b or d == c:\n continue\n if (a_cubed + b_cubed) == (c_cubed + d**3):\n print(a, b, a**3 + b**3, c, d)\n\n\nwith multiprocessing.Pool(cpu_count) as p:\n print(p.map(f, c))\n","repo_name":"mbergal/leet-code","sub_path":"sum_of_cubes/python/sum_of_cubes.py","file_name":"sum_of_cubes.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"72353307430","text":"# 20210921\n# LeetCode\n\nclass Solution:\n def findMaxConsecutiveOnes(self, nums: List[int]) -> int:\n ret, cur = 0, 0\n for num in nums:\n if num == 1: cur += 1\n else:\n if ret < cur: ret = cur\n cur = 0\n return max(ret, cur)\n\n","repo_name":"sse9173/algorithm","sub_path":"Max_Consecutive_Ones.py","file_name":"Max_Consecutive_Ones.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"37088734762","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ndef rgb2gray(im):\r\n gray = 0.23*im[:,:,0] + 0.77*im[:,:,1] + 0.11*im[:,:,2]\r\n return(gray)\r\n\r\ndef gray2biner(imgray):\r\n biner = np.where(imgray >140, 255, 0)/255\r\n return(biner)\r\n\r\ndef dilasi(imbiner):\r\n #proses\r\n se = np.array([[1,1,1],[1,1,1],[1,1,1]])\r\n h, w = imbiner.shape\r\n imhit = np.zeros((h, w))\r\n for i in range (1, h-1):\r\n for j in range (1, w-1):\r\n hasil = imbiner[i-1:i+2,j-1:j+2]*se\r\n if np.sum(hasil)>=1:\r\n imhit[i, j] = 1\r\n return (imhit)\r\n\r\n\r\ndef erosi(imbiner):\r\n se = np.array([[1,1,1],[1,1,1],[1,1,1]])\r\n h, w = imbiner.shape\r\n imhit = np.zeros((h, w))\r\n for i in range (1, h-1):\r\n for j in range (1, w-1):\r\n hasil = imbiner[i-1:i+2,j-1:j+2]*se\r\n if np.sum(hasil)==9:\r\n imhit[i, j] = 1\r\n return (imhit)\r\n\r\ndef main():\r\n #baca citra \r\n im = plt.imread(\"buah.jpg\")\r\n im_asli = im.copy()\r\n imop = im.copy()\r\n\r\n #konversi ke gray\r\n imgray = rgb2gray(im)\r\n\r\n #konversi ke biner\r\n biner = gray2biner(imgray)\r\n\r\n #show image\r\n plt.subplot(251)\r\n plt.title(\"asli\")\r\n plt.imshow(im_asli)\r\n\r\n plt.subplot(252)\r\n plt.title(\"biner\")\r\n plt.imshow(biner, cmap = 'gray')\r\n\r\n plt.subplot(253)\r\n plt.title(\"erosi\")\r\n plt.imshow(erosi(biner), cmap = \"gray\")\r\n \r\n plt.subplot(254)\r\n plt.title(\"dilasi\")\r\n plt.imshow(dilasi(biner), cmap='gray')\r\n\r\n plt.subplot(255)\r\n imop[:,:,0] = dilasi(biner) * im[:,:,0]\r\n imop[:,:,1] = dilasi(biner) * im[:,:,1]\r\n imop[:,:,2] = dilasi(biner) * im[:,:,2]\r\n plt.title(\"dilasi to rgb\")\r\n plt.imshow(imop, cmap = \"gray\")\r\n\r\n plt.subplot(256)\r\n imop[:,:,0] = erosi(biner) * im[:,:,0]\r\n imop[:,:,1] = erosi(biner) * im[:,:,1]\r\n imop[:,:,2] = erosi(biner) * im[:,:,2]\r\n plt.title(\"erosi to rgb\")\r\n plt.imshow(imop, cmap = \"gray\")\r\n\r\n #opening\r\n plt.subplot(257)\r\n opening = dilasi(erosi(biner))\r\n plt.title(\"opening\")\r\n plt.imshow(opening, cmap = \"gray\")\r\n\r\n #closing\r\n plt.subplot(258)\r\n closing = erosi(dilasi(biner))\r\n plt.title(\"closing\")\r\n plt.imshow(closing, cmap = \"gray\")\r\n\r\n plt.subplot(259)\r\n imop[:,:,0] = opening * im[:,:,0]\r\n imop[:,:,1] = opening * im[:,:,1]\r\n imop[:,:,2] = opening * im[:,:,2]\r\n plt.title(\"opening to rgb\")\r\n plt.imshow(imop, cmap = \"gray\")\r\n\r\n plt.subplot(2,5,10)\r\n imop[:,:,0] = closing * 
im[:,:,0]\r\n imop[:,:,1] = closing * im[:,:,1]\r\n imop[:,:,2] = closing * im[:,:,2]\r\n plt.title(\"closing to rgb\")\r\n plt.imshow(imop, cmap = \"gray\")\r\n\r\n plt.show()\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n\r\n\r\n","repo_name":"luthfidhani/semseter-5","sub_path":"Pengolahan Citra Digital/Dilasi dan Erosi (Hit dan Fit)/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":2720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"72117354789","text":"# Your code here\n\n\n\ndef finder(files, queries):\n \"\"\"\n Accepts a list of file paths and list of queries, or file names. Return paths\n for those files that exist in the system.\n \"\"\"\n path_table = {}\n for path in files:\n file_name = path.split(\"/\")[-1]\n if file_name in path_table:\n path_table[file_name].append(path)\n else:\n path_table[file_name] = [path]\n \n results = []\n for query in queries:\n if query in path_table:\n results.extend(path_table[query])\n\n return results\n\n\nif __name__ == \"__main__\":\n files = [\n '/bin/foo',\n '/bin/bar',\n '/usr/bin/baz'\n ]\n queries = [\n \"foo\",\n \"qux\",\n \"baz\"\n ]\n print(finder(files, queries))\n","repo_name":"arriadevoe/lambda-computer-science","sub_path":"Unit-5-Hash-Tables/sprint-challenge/hashtables/ex5/ex5.py","file_name":"ex5.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"685900701","text":"from nw import NeedlemanWunsch\nfrom itertools import combinations\nfrom multiprocessing import Pool\nimport argparse\n\n\ndef align_similar(s1, s2):\n change1, change2 = list(), list()\n i = 0\n while s1 != s2:\n if i > len(s1) - 1:\n s1 += s2[i:]\n change1.extend(range(i, i + len(s2[i:])))\n continue\n if i > len(s2) - 1:\n s2 += s1[i:]\n change2.extend(range(i, i + len(s1[i:])))\n continue\n if s1[i] != s2[i]:\n if s1[i] == '-':\n s2 = s2[0:i] + '-' + s2[i:]\n change2.append(i)\n else:\n s1 = s1[0:i] + '-' + s1[i:]\n change1.append(i)\n i += 1\n return sorted(change1), sorted(change2)\n\n\ndef adjust(string_list, indices):\n for i, string in enumerate(string_list):\n for index in indices:\n string = string[:index] + '-' + string[index:]\n string_list[i] = string\n\n\ndef worker(it):\n ((i, string_i), (j, string_j)), scores = it\n model = NeedlemanWunsch(string_i, string_j, scores).nw(True)\n (string_ai, string_aj), score = model['nw'][0], model['score']\n return (i, string_ai), (j, string_aj), score\n\n\nclass CenterStar:\n\n def __init__(self, scores, strings):\n self.scores = scores\n self.strings = strings\n self.dp = [[0] * (len(strings) + 1) for _ in range(len(strings))]\n\n def msa(self):\n msa_result = []\n max_row, max_value = 0, 0\n len_strings = len(self.strings)\n\n tasks = tuple(combinations(zip(range(len_strings), self.strings), 2))\n tasks = zip(tasks, (self.scores for _ in range(len(tasks))))\n\n with Pool() as pool:\n result = pool.map(worker, tasks)\n for elem in result:\n (i, string_i), (j, string_j), score = elem\n ''' (0, 1, 2) => 0 is the first aligned string\n 1 is the second aligned string\n 2 is the score\n '''\n self.dp[i][j] = (string_i, string_j, score)\n self.dp[j][i] = (string_j, string_i, score)\n self.dp[i][-1] += score\n self.dp[j][-1] += score\n\n if self.dp[j][-1] > max_value:\n max_row = j\n max_value = self.dp[j][-1]\n if self.dp[i][-1] > max_value:\n max_row = i\n max_value = self.dp[i][-1]\n\n for i in range(len_strings):\n if i == max_row:\n 
continue\n if not msa_result:\n msa_result.extend(self.dp[max_row][i][0: 2])\n continue\n\n new = list(self.dp[max_row][i][0: 2])\n ch_index1, ch_index2 = align_similar(msa_result[0], new[0])\n\n adjust(msa_result, ch_index1)\n adjust(new, ch_index2)\n msa_result.extend(new[1:])\n\n return msa_result\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Multiple sequence alignment')\n parser.add_argument(\"inputfile\", help=\"input file location\")\n parser.add_argument(\"outputfile\", help=\"output file location\")\n args = parser.parse_args()\n\n with open(args.inputfile, 'r') as f:\n lines = [line.strip() for line in f.readlines() if line.strip()]\n scores = lines.pop(0).split(',')\n msa = CenterStar(scores, lines).msa()\n\n with open(args.outputfile, 'w') as out:\n out.writelines(map(lambda x: x + '\\n', msa))\n","repo_name":"burakkose/center-star-msa","sub_path":"cstar/cstar.py","file_name":"cstar.py","file_ext":"py","file_size_in_byte":3581,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"71"} +{"seq_id":"8977960286","text":"#!/usr/bin/env python\nimport rospy\nfrom fm.msg import twist\nfrom lane_test.msg import msg_lane\nPI = 3.141592\ndef callback(data):\n print(\"ddd\")\n rospy.loginfo('scan.ranges[359]: %s',data.angle)\n if data.angle >0:\n new_angle = 90-data.angle\n elif data.angle <0:\n new_angle = -90-data.angle\n elif data.angle == 0 :\n new_angle = 1 \n print(\"\",new_angle)\n gos(new_angle)\n\n\n velocity_publisher.publish(vel_msg)\n \n \ndef gos(an):\n #Starts a new node\n #rospy.init_node('robot_cleaner', )\n #velocity_publisher = rospy.Publisher('/turtle1/cmd_vel', Twist, queue_size=10)\n #vel_msg = Twist()\n\n # Receiveing the user's input\n print(\"Let's rotate your robot\")\n speed = an\n angle = 1\n #if an >= 0:\n # clockwise = True #True or false\n #elif an < 0 :\n # clockwise = False\n\n #Converting from angles to radians\n angular_speed = speed*2*PI/360\n relative_angle = angle*2*PI/360\n\n #We wont use linear components\n vel_msg.linear.x=0.1\n vel_msg.linear.y=0\n vel_msg.linear.z=0\n vel_msg.angular.x = 0\n vel_msg.angular.y = 0\n\n # Checking if our movement is CW or CCW\n\n vel_msg.angular.z =angular_speed\n # Setting the current time for distance calculus\n t0 = rospy.Time.now().to_sec()\n current_angle = 0\n\n while(current_angle < relative_angle):\n velocity_publisher.publish(vel_msg)\n t1 = rospy.Time.now().to_sec()\n current_angle = abs(angular_speed*(t1-t0))\n\n\n #Forcing our robot to stop\n vel_msg.angular.z = 0\n velocity_publisher.publish(vel_msg)\n \n \n\ndef move1():\n while not rospy.is_shutdown():\n rospy.spin()\n\n\n\n\n\n\nif __name__ == '__main__':\n rospy.init_node('move_turn', anonymous=True)\n velocity_publisher = rospy.Publisher('/mode_twist', twist, queue_size=10)\n rospy.Subscriber('/msg_lane', msg_lane, callback , queue_size=10)\n vel_msg = twist()\n \n \n try:\n #Testing our function\n while(1):\n move1()\n\n \n except rospy.ROSInterruptException: pass\n","repo_name":"jh79783/turtlebot_py","sub_path":"test_and_tutorials/lane_test/src/turn.py","file_name":"turn.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"33349621222","text":"import argparse\nfrom collections import defaultdict\n\nfrom uge2slurm.commands.argparser import set_common_args, parse_ge_datetime\nfrom uge2slurm.utils.py2.argparse import HelpFormatter\n\nparser_args = dict(\n description=\"Mapping 
UGE qsub command to slurm\",\n add_help=False,\n formatter_class=HelpFormatter\n)\n\n\nclass singlearg(argparse.Action):\n def __init__(self, option_strings, dest, nargs=None, **kwargs):\n if nargs != 1:\n raise ValueError(\"`nargs` must be 1 for this action\")\n super(singlearg, self).__init__(option_strings, dest, nargs, **kwargs)\n\n def __call__(self, parser, namespace, values, option_string):\n setattr(namespace, self.dest, values[0])\n\n\nclass appendkv(argparse.Action):\n @staticmethod\n def flatten(values):\n return values[0].split(',')\n\n def __call__(self, parser, namespace, values, option_string):\n kvs = self.flatten(values)\n\n container = getattr(namespace, self.dest)\n if container is None:\n setattr(namespace, self.dest, kvs)\n else:\n container += kvs\n\n\nclass appendsingle(argparse.Action):\n def __call__(self, parser, namespace, values, option_string):\n container = getattr(namespace, self.dest)\n if container is None:\n setattr(namespace, self.dest, values)\n else:\n container.append(values[0])\n\n\nclass nargs1or2(argparse.Action):\n def __call__(self, parser, namespace, values, option_string):\n if not 1 <= len(values) <= 2:\n parser.error(\n 'argument {}: expected 1 or 2 arguments'.format(self.dest)\n )\n setattr(namespace, self.dest, values)\n\n\nclass set_resource_state(argparse.Action):\n def __call__(self, parser, namespace, values, option_string):\n parser.resouce_state = self.dest\n\n\nclass appendresource(appendkv):\n def __call__(self, parser, namespace, values, option_string):\n resource_state = getattr(parser, \"resouce_state\", None)\n\n container = getattr(namespace, self.dest)\n if container is None:\n setattr(namespace, self.dest, defaultdict(list))\n container = getattr(namespace, self.dest)\n\n container[resource_state] += self.flatten(values)\n\n\nclass store_bool(singlearg):\n def __call__(self, parser, namespace, values, option_string):\n value = values[0]\n if value.startswith(('Y', 'y')):\n setattr(namespace, self.dest, True)\n elif value.startswith(('N', 'n')):\n setattr(namespace, self.dest, False)\n else:\n parser.error(\n 'Unknown argument passed: \"{}\" (expect y[es] or n[o])'.format(value)\n )\n\n\ndef _set_parser(parser):\n set_orig_argsuments(parser)\n\n uge = parser.add_argument_group(\n title=\"qsub options\",\n description=\"UGE qsub options\"\n )\n set_qsub_arguments(uge)\n\n\ndef set_orig_argsuments(parser):\n set_common_args(parser)\n parser.add_argument(\"-n\", \"--dry-run\", action=\"store_true\",\n help=\"Preview converted slurm command\")\n parser.add_argument(\n \"-y\", \"--non-interactive\", action=\"store_true\",\n help=\"By default, uge2slurm shows a converted command line and get \"\n \"confirmation before the execution when a TTY is allocated. This \"\n \"option disables the preview and the command will be executed immediately.\"\n )\n parser.add_argument(\n \"--memory\", nargs='*', default=[\"mem_req\", \"s_vmem\"], metavar=\"resource\",\n help=\"Specify which resource value should be mapped into `--mem-per-cpu` \"\n \"option. If multiple values are specified, the first valid value \"\n \"will be used.\"\n )\n parser.add_argument(\n \"--cpus\", nargs='*', default=[\"def_slot\"], metavar=\"parallel_env\",\n help=\"Specify which parallel_environment should be mapped into \"\n \"`--cpus-per-task` option. If multiple values are specified, the \"\n \"first valid value will be used. 
Note that range values are not \"\n \"supported and its minimum value will be used as the number of cpus.\"\n )\n parser.add_argument(\n \"--partition\", nargs='*', metavar=\"resource=partition\", default=[],\n help=\"Specify which resource name should be mapped into partition \"\n \"(queue) via `--partition` option. Resource-partition pairs must be \"\n \"specified by '=' separated strings.\"\n )\n\n\ndef set_qsub_arguments(uge):\n uge.add_argument(\"-@\", nargs=1, action=singlearg, metavar=\"optionfile\")\n uge.add_argument(\"-a\", nargs=1, action=singlearg, metavar=\"date_time\", type=parse_ge_datetime)\n uge.add_argument(\"-ac\", nargs=1, action=appendkv, metavar=\"variable[=value],...\")\n uge.add_argument(\"-adds\", nargs=3, action=\"append\", metavar=\"parameter key value\")\n uge.add_argument(\"-ar\", nargs=1, action=singlearg, metavar=\"ar_id\")\n uge.add_argument(\"-A\", nargs=1, action=singlearg, metavar=\"account_string\")\n uge.add_argument(\"-bgio\", nargs=1, action=appendkv, metavar=\"bgio_params\")\n uge.add_argument(\"-binding\", nargs='+', action=nargs1or2)\n uge.add_argument(\"-b\", nargs=1, action=store_bool, metavar=\"y[es]|n[o]\")\n uge.add_argument(\"-c\", nargs=1, action=singlearg, metavar=\"occasion_specifier\")\n uge.add_argument(\"-ckpt\", nargs=1, action=singlearg, metavar=\"ckpt_name\")\n uge.add_argument(\"-clear\", action=\"store_true\", default=None)\n uge.add_argument(\"-clearp\", nargs=1, action=appendsingle, metavar=\"parameter\")\n uge.add_argument(\"-clears\", nargs=2, action=\"append\", metavar=\"parameter\")\n uge.add_argument(\"-cwd\", action=\"store_true\", default=None)\n uge.add_argument(\"-C\", nargs=1, action=singlearg, metavar=\"prefix_string\")\n uge.add_argument(\"-dc\", nargs=1, action=appendkv, metavar=\"variable,...\")\n uge.add_argument(\"-dl\", nargs=1, action=singlearg, metavar=\"date_time\", type=parse_ge_datetime)\n uge.add_argument(\"-e\", nargs=1, action=appendkv, metavar=\"[[hostname]:]file,...\")\n uge.add_argument(\"-hard\", nargs=0, action=set_resource_state)\n uge.add_argument(\"-h\", action=\"store_true\", default=None)\n uge.add_argument(\"-help\", action=\"store_true\", default=None)\n uge.add_argument(\"-hold_jid\", nargs=1, action=appendkv, metavar=\"wc_job_list\")\n uge.add_argument(\"-hold_jid_ad\", nargs=1, action=appendkv, metavar=\"wc_job_list\")\n uge.add_argument(\"-i\", nargs=1, action=appendkv, metavar=\"[[hostname]:]file,...\")\n uge.add_argument(\"-j\", nargs=1, action=store_bool, metavar=\"y[es]|n[o]\")\n uge.add_argument(\"-jc\", nargs=1, action=singlearg, metavar=\"jc_name\")\n uge.add_argument(\"-js\", nargs=1, action=singlearg, metavar=\"job_share\")\n uge.add_argument(\"-jsv\", nargs=1, action=singlearg, metavar=\"jsv_url\")\n uge.add_argument(\"-masterl\", nargs=1, action=appendkv, metavar=\"resource=value,...\")\n uge.add_argument(\"-l\", nargs=1, action=appendresource, metavar=\"resource=value,...\")\n uge.add_argument(\"-m\", nargs=1, action=appendkv, metavar=\"b|e|a|s|n,...\")\n uge.add_argument(\"-masterq\", nargs=1, action=appendkv, metavar=\"wc_queue_list\")\n uge.add_argument(\"-mods\", nargs=3, action=\"append\", metavar=\"param\")\n uge.add_argument(\"-mbind\", nargs=1, action=singlearg, metavar=\"param\")\n uge.add_argument(\"-notify\", action=\"store_true\", default=None)\n uge.add_argument(\"-now\", nargs=1, action=store_bool, metavar=\"y[es]|n[o]\")\n uge.add_argument(\"-N\", nargs=1, action=singlearg, metavar=\"name\")\n uge.add_argument(\"-o\", nargs=1, action=appendkv, 
metavar=\"[[hostname]:]path,...\")\n uge.add_argument(\"-P\", nargs=1, action=singlearg, metavar=\"project_name\")\n uge.add_argument(\"-p\", nargs=1, action=singlearg, metavar=\"priority\")\n uge.add_argument(\"-par\", nargs=1, action=singlearg, metavar=\"allocation_rule\")\n uge.add_argument(\"-pe\", nargs=2, action=\"append\", metavar=\"parallel_environment\")\n uge.add_argument(\"-pty\", nargs=1, action=store_bool, metavar=\"y[es]|n[o]\")\n uge.add_argument(\"-q\", nargs=1, action=appendresource, metavar=\"qc_queue_list\")\n uge.add_argument(\"-R\", nargs=1, action=store_bool, metavar=\"y[es]|n[o]\")\n uge.add_argument(\"-r\", nargs=1, action=store_bool, metavar=\"y[es]|n[o]\")\n uge.add_argument(\"-row\", nargs=1, action=appendkv, metavar=\"variable,...\")\n uge.add_argument(\"-rdi\", nargs=1, action=store_bool, metavar=\"y[es]|n[o]\")\n uge.add_argument(\"-sc\", nargs=1, action=appendkv, metavar=\"variable[=value],...\")\n uge.add_argument(\"-shell\", nargs=1, action=store_bool, metavar=\"y[es]|n[o]\")\n uge.add_argument(\"-si\", nargs=1, action=singlearg, metavar=\"session_id\")\n uge.add_argument(\"-soft\", nargs=0, action=set_resource_state)\n uge.add_argument(\"-sync\", nargs=1, action=singlearg, metavar=\"y|n|l|r\")\n uge.add_argument(\"-S\", nargs=1, action=appendkv, metavar=\"[[hostname]:]pathname,...\")\n uge.add_argument(\"-t\", nargs=1, action=singlearg, metavar=\"n[-m[:s]]\")\n uge.add_argument(\"-tc\", nargs=1, action=singlearg, metavar=\"max_running_tasks\")\n uge.add_argument(\"-tcon\", nargs=1, action=store_bool, metavar=\"y[es]|n[o]\")\n uge.add_argument(\"-terse\", action=\"store_true\", default=None)\n uge.add_argument(\"-umask\", nargs=1, action=singlearg, metavar=\"parameter\")\n uge.add_argument(\"-v\", nargs=1, action=appendkv, metavar=\"variable[=value],...\")\n uge.add_argument(\"-verify\", action=\"store_true\", default=None)\n uge.add_argument(\"-V\", action=\"store_true\", default=None)\n uge.add_argument(\"-w\", nargs=1, action=singlearg, metavar=\"e|w|n|p|v\")\n uge.add_argument(\"-wd\", nargs=1, action=singlearg, metavar=\"working_dir\")\n uge.add_argument(\"-xdv\", nargs=1, action=singlearg, metavar=\"docker_volume\")\n uge.add_argument(\"-xd_run_as_image_user\", nargs=1, action=store_bool, metavar=\"y[es]|n[o]\")\n uge.add_argument(\"command\", nargs=argparse.REMAINDER)\n\n\ndef get_parser(parser=None):\n if not parser:\n parser = argparse.ArgumentParser(**parser_args)\n _set_parser(parser)\n return parser\n","repo_name":"ronin-gw/uge2slurm","sub_path":"uge2slurm/commands/qsub/argparser.py","file_name":"argparser.py","file_ext":"py","file_size_in_byte":9952,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"20007605983","text":"from pytesseract import pytesseract, Output\nfrom PIL import Image\nimport easyocr\nimport cv2\nimport numpy as np\n\nSHOW_IMAGE = False\n\n\ndef set_image(setting):\n global SHOW_IMAGE\n SHOW_IMAGE = setting\n\n\ndef show_image(name, image):\n if SHOW_IMAGE:\n cv2.imshow(name, image)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\ndef custom_grayscale(image):\n (row, col) = image.shape[0:2]\n\n # Take the average of pixel values of the BGR Channels\n # to convert the colored image to grayscale image\n for i in range(row):\n for j in range(col):\n # Find the average of the BGR pixel values\n image[i, j] = sum(image[i, j]) / 10\n\n show_image(\"custom_gray\", image)\n return image\n\n\n# get grayscale image\ndef get_grayscale(image):\n img = cv2.cvtColor(image, 
cv2.COLOR_BGR2GRAY)\n img = cv2.equalizeHist(img)\n show_image(\"gray\", img)\n return img\n\n\ndef get_hsv(image):\n # img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n img = cv2.cvtColor(image, cv2.COLOR_BGR2HLS_FULL)\n show_image(\"rgb\", img)\n return img\n\n\n# noise removal\ndef remove_noise(image):\n img = cv2.medianBlur(image, 5)\n show_image(\"noise\", image)\n return img\n\n\n# thresholding\ndef thresholding(image):\n result = cv2.threshold(image, 160, 255, cv2.THRESH_BINARY)[1]\n show_image(\"threshold\", result)\n return result\n\n\n# dilation\ndef dilate(image):\n kernel = np.ones((2, 2), np.uint16)\n result = cv2.dilate(image, kernel, iterations=5)\n show_image(\"dilate\", result)\n return result\n\n\n# erosion\ndef erode(image):\n kernel = np.ones((2, 2), np.uint16)\n result = cv2.erode(image, kernel, iterations=1)\n show_image(\"erode\", result)\n return result\n\n\n# opening - erosion followed by dilation\ndef opening(image):\n kernel = np.ones((2, 2), np.uint16)\n result = cv2.morphologyEx(image, cv2.MORPH_RECT, kernel)\n show_image(\"opening\", result)\n return result\n\n\n# canny edge detection\ndef canny(image):\n result = cv2.Canny(image, 23, 23)\n show_image(\"canny\", result)\n return result\n\n\n# skew correction\ndef deskew(image):\n coords = np.column_stack(np.where(image > 0))\n angle = cv2.minAreaRect(coords)[-1]\n if angle < -45:\n angle = -(90 + angle)\n else:\n angle = -angle\n (h, w) = image.shape[:2]\n center = (w // 2, h // 2)\n M = cv2.getRotationMatrix2D(center, angle, 1.0)\n rotated = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)\n return rotated\n\n\n# template matching\ndef match_template(image, template):\n return cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)\n\n\ndef read_image(filename):\n original_image = cv2.imread(filename)\n resized_img = cv2.resize(original_image, None, fx=5, fy=5, interpolation=cv2.INTER_AREA)\n resized_img[resized_img <= 130] = 0\n resized_img[resized_img >= 150] = 255\n\n # threshold = 200 # you might need to adjust this threshold level\n # resized_img = resized_img.p(lambda p: p > threshold and 255)\n # Apply a filter to smooth the image (GaussianBlur for example)\n # The kernel size can be adjusted; here it's set to (5, 5)\n smooth_img = cv2.GaussianBlur(resized_img, (5, 5), 0)\n # img = thresholding(get_grayscale(remove_noise(opening(erode(dilate(get_hsv(original_image)))))))\n # img = erode(thresholding(get_grayscale(remove_noise(smooth_img))))\n img = remove_noise(opening(dilate(smooth_img)))\n # Adding custom options\n custom_config = r'-l eng --oem 3 --psm 6'\n return pytesseract.image_to_string(img, config=custom_config)\n # return pytesseract.image_to_string(Image.open(filename))\n\n\ndef read_easyocr_image(filename):\n reader = easyocr.Reader(['en'])\n original_image = cv2.imread(filename)\n resized_img = cv2.resize(original_image, None, fx=3, fy=3, interpolation=cv2.INTER_LINEAR_EXACT)\n resized_img[resized_img <= 130] = 0\n resized_img[resized_img >= 150] = 255\n # img = remove_noise(get_hsv(original_image))\n smooth_img = cv2.GaussianBlur(resized_img, (3, 3), 0)\n # img = thresholding(get_grayscale(remove_noise(opening(erode(dilate(get_hsv(original_image)))))))\n # img = erode(thresholding(get_grayscale(remove_noise(smooth_img))))\n img = get_grayscale(erode(dilate(smooth_img)))\n read_text = reader.readtext(img, output_format=\"free_merge\")\n results = \"\"\n for ((x_min, y_min, x_max, y_max), text, confidence,) in read_text:\n results = results + text + 
\"\\n\"\n return results.split(\"\\n\")\n","repo_name":"dkothari777/ocr_lotr_home","sub_path":"src/services/ocr.py","file_name":"ocr.py","file_ext":"py","file_size_in_byte":4507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"8462596733","text":"from collections import deque\nclass Solution(object):\n def updateBoard(self, board, click):\n \"\"\"\n :type board: List[List[str]]\n :type click: List[int]\n :rtype: List[List[str]]\n \"\"\"\n if not board or not board[0]:\n return\n m, n = len(board), len(board[0])\n q = deque([(click[0], click[1])])\n visited = {(click[0], click[1])}\n while q:\n i, j = q.popleft()\n if board[i][j] == 'M':\n board[i][j] = 'X'\n return board\n elif board[i][j] == 'E':\n count = 0\n for r in xrange(-1, 2):\n for c in xrange(-1, 2):\n ii, jj = i + r, j + c\n if 0 <= ii < m and 0 <= jj < n and board[ii][jj] in 'XM':\n count += 1\n if count:\n board[i][j] = str(count)\n else:\n board[i][j] = 'B'\n for r in xrange(-1, 2):\n for c in xrange(-1, 2):\n ii, jj = i + r, j + c\n if 0 <= ii < m and 0 <= jj < n and board[ii][jj] == 'E' and (ii, jj) not in visited:\n q.append((ii, jj))\n visited.add((ii, jj))\n return board\n","repo_name":"brownlzw/Leetcode-python-solution","sub_path":"529. Minesweeper.py","file_name":"529. Minesweeper.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"34207287267","text":"import json\r\nimport csv\r\n\r\nfrom django.http import HttpResponse\r\n \r\n\r\nclass CSVResponseMixin(object):\r\n \r\n \"\"\"\r\n Mixin used to create a CSV response.\r\n \"\"\"\r\n \r\n def render_to_csv_response(self, context, data_keys=None, download=False, download_name=None, **response_kwargs):\r\n \r\n response = HttpResponse(content_type='text/csv')\r\n if download:\r\n if download_name:\r\n content_disposition = 'attachment; filename=\"{download_name}.csv\"'.format(download_name=download_name)\r\n response['Content-Disposition'] = content_disposition\r\n else:\r\n response['Content-Disposition'] = 'attachment; filename=\"areavolumedata.csv\"'\r\n self._write_csv_content(data=context, outfile=response, data_keys=data_keys)\r\n \r\n return response\r\n \r\n def _write_csv_content(self, data, outfile, data_keys=None):\r\n \r\n if data_keys == None:\r\n data_keys = data[0].keys()\r\n else:\r\n data_keys = data_keys\r\n csv_writer = csv.DictWriter(outfile, delimiter=',', fieldnames=data_keys)\r\n csv_writer.writerow(dict((data_key, data_key) for data_key in data_keys))\r\n for data_dict in data:\r\n csv_writer.writerow(data_dict)\r\n \r\n\r\nclass JSONResponseMixin(object):\r\n \r\n \"\"\"\r\n Mixin used to render a JSON response.\r\n \"\"\"\r\n \r\n def render_to_json_response(self, context, **response_kwargs):\r\n \r\n context_json = self._convert_context_to_json(context)\r\n \r\n return HttpResponse(context_json, content_type=\"application/json\", **response_kwargs)\r\n \r\n def _convert_context_to_json(self, context):\r\n \r\n json_dump = json.dumps(context)\r\n \r\n return json_dump","repo_name":"kmschoep-usgs/gcmrc-sandbar","sub_path":"sandbar/surveys/custom_mixins.py","file_name":"custom_mixins.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"40673060033","text":"#!/usr/bin/env python\n\"\"\" Run a javascript command by spawning an arangosh\n to the configured connection \"\"\"\n\nimport 
os\nimport logging\nimport platform\nimport signal\nimport sys\nfrom datetime import datetime, timedelta\nfrom subprocess import PIPE\nimport psutil\nfrom allure_commons._allure import attach\nfrom abc import ABC, abstractmethod\nfrom tools.asciiprint import print_progress as progress\n\n# import tools.loghelper as lh\n# pylint: disable=dangerous-default-value\n\nON_POSIX = \"posix\" in sys.builtin_module_names\nIS_WINDOWS = platform.win32_ver()[0] != \"\"\n\n\ndef print_log(string, params):\n \"\"\"only print if thread debug logging is enabled\"\"\"\n if params[\"trace_io\"]:\n logging.debug(string)\n\n\ndef default_line_result(wait, line, params):\n \"\"\"\n Keep the line, filter it for leading #,\n if verbose print the line. else print progress.\n \"\"\"\n # pylint: disable=pointless-statement\n if params[\"verbose\"] and wait > 0 and line is None:\n progress(\"sj\" + str(wait))\n return True\n if isinstance(line, tuple):\n if params[\"verbose\"]:\n logging.debug(\"e: %s\", str(line[0], \"utf-8\").rstrip())\n if not str(line[0]).startswith(\"#\"):\n params[\"output\"].append(line[0])\n else:\n return False\n return True\n\n\ndef make_default_params(verbose):\n \"\"\"create the structure to work with arrays to output the strings to\"\"\"\n return {\n \"trace_io\": False,\n \"error\": \"\",\n \"verbose\": verbose,\n \"output\": [],\n \"identifier\": \"\",\n }\n\n\ndef tail_line_result(wait, line, params):\n \"\"\"\n Keep the line, filter it for leading #,\n if verbose print the line. else print progress.\n \"\"\"\n # pylint: disable=pointless-statement\n if params[\"skip_done\"]:\n if isinstance(line, tuple):\n logging.info(\"%s%s\", params[\"prefix\"], str(line[0], \"utf-8\").rstrip())\n params[\"output\"].write(line[0])\n return True\n now = datetime.now()\n if now - params[\"last_read\"] > timedelta(seconds=1):\n params[\"skip_done\"] = True\n logging.info(\"%s initial tail done, starting to output\", params[\"prefix\"])\n return True\n\n\ndef make_tail_params(verbose, prefix, logfile):\n \"\"\"create the structure to work with arrays to output the strings to\"\"\"\n return {\n \"trace_io\": False,\n \"error\": \"\",\n \"verbose\": verbose,\n \"output\": logfile.open(\"wb\"),\n \"lfn\": str(logfile),\n \"identifier\": \"\",\n \"skip_done\": False,\n \"prefix\": prefix,\n \"last_read\": datetime.now(),\n }\n\n\ndef delete_tail_params(params):\n \"\"\"teardown the structure to work with logfiles\"\"\"\n logging.info(\"%s closing %s\", params[\"identifier\"], params[\"lfn\"])\n params[\"output\"].flush()\n params[\"output\"].close()\n logging.info(\"%s %s closed\", params[\"identifier\"], params[\"lfn\"])\n\n\ndef make_logfile_params(verbose, logfile, trace, temp_dir):\n \"\"\"create the structure to work with logfiles\"\"\"\n return {\n \"trace_io\": True,\n \"trace\": trace,\n \"error\": \"\",\n \"verbose\": verbose,\n \"output\": logfile.open(\"wb\"),\n \"identifier\": \"\",\n \"lfn\": str(logfile),\n \"temp_dir\": temp_dir,\n }\n\n\ndef logfile_line_result(wait, line, params):\n \"\"\"Write the line to a logfile, print progress.\"\"\"\n # pylint: disable=pointless-statement\n if params[\"trace\"] and wait > 0 and line is None:\n progress(\"sj\" + str(wait))\n return True\n if isinstance(line, tuple):\n if params[\"trace\"]:\n logging.debug(\"e: %s\", str(line[0], \"utf-8\").rstrip())\n sys.stdout.buffer.write(line[0])\n params[\"output\"].write(line[0])\n return True\n\n\ndef delete_logfile_params(params):\n \"\"\"teardown the structure to work with logfiles\"\"\"\n logging.info(\"%s 
closing %s\", params[\"identifier\"], params[\"lfn\"])\n params[\"output\"].flush()\n params[\"output\"].close()\n logging.info(\"%s %s closed\", params[\"identifier\"], params[\"lfn\"])\n\n\ndef enqueue_output(fd, queue, instance, identifier, params):\n \"\"\"add stdout/stderr to the specified queue\"\"\"\n while True:\n try:\n data = os.read(fd, 1024)\n except OSError as ex:\n print_log(\n f\"{identifier} communication line seems to be closed: {str(ex)}\", params\n )\n break\n if not data:\n break\n queue.put((data, instance))\n print(f\"{identifier} done! {params}\")\n print_log(f\"{identifier} done!\", params)\n queue.put(-1)\n os.close(fd)\n\n\ndef convert_result(result_array):\n \"\"\"binary -> string\"\"\"\n result = \"\"\n for one_line in result_array:\n if isinstance(one_line, str):\n result += \"\\n\" + one_line.rstrip()\n else:\n result += \"\\n\" + one_line.decode(\"utf-8\").rstrip()\n return result\n\n\ndef add_message_to_report(params, string, print_it=True, add_to_error=False):\n \"\"\"add a message from python to the report strings/files + print it\"\"\"\n oskar = \"OSKAR\"\n count = int(80 / len(oskar))\n datestr = f\" {datetime.now()} - \"\n offset = 80 - (len(string) + len(datestr) + 2 * len(oskar))\n if print_it:\n logging.info(string)\n # we also want these messages to be written to stdout, so they also show up in CircleCI\n print(string)\n if add_to_error:\n params[\"error\"] += \"async_client.py: \" + string + \"\\n\"\n if isinstance(params[\"output\"], list):\n params[\n \"output\"\n ] += f\"{oskar*count}\\n{oskar}{datestr}{string}{' '*offset}{oskar}\\n{oskar*count}\\n\"\n else:\n params[\"output\"].write(\n bytearray(\n f\"{oskar*count}\\n{oskar}{datestr}{string}{' '*offset}{oskar}\\n{oskar*count}\\n\",\n \"utf-8\",\n )\n )\n params[\"output\"].flush()\n sys.stdout.flush()\n return string + \"\\n\"\n\n\ndef kill_children(identifier, params, children):\n \"\"\"slash all processes enlisted in children - if they still exist\"\"\"\n err = \"\"\n killed = []\n for one_child in children:\n if one_child.pid in killed:\n continue\n try:\n pname = one_child.name()\n if pname not in [\"svchost.exe\", \"conhost.exe\", \"mscorsvw.exe\"]:\n killed.append(one_child.pid)\n err += add_message_to_report(\n params,\n f\"{identifier}: killing {pname} - {str(one_child.pid)}\",\n )\n one_child.resume()\n except FileNotFoundError:\n pass\n except AttributeError:\n pass\n except ProcessLookupError:\n pass\n except psutil.NoSuchProcess:\n pass\n except psutil.AccessDenied:\n pass\n try:\n one_child.kill()\n except psutil.NoSuchProcess: # pragma: no cover\n pass\n print_log(\n f\"{identifier}: Waiting for the children to terminate {killed} {len(children)}\",\n params,\n )\n psutil.wait_procs(children, timeout=20)\n return err\n\n\nclass CliExecutionException(Exception):\n \"\"\"transport CLI error texts\"\"\"\n\n def __init__(self, message, execution_result, have_timeout):\n super().__init__()\n self.execution_result = execution_result\n self.message = message\n self.have_timeout = have_timeout\n\n\ndef expect_failure(expect_to_fail, ret, params):\n \"\"\"convert results, throw error if wanted\"\"\"\n attach(str(ret[\"rc_exit\"]), f\"Exit code: {str(ret['rc_exit'])} == {expect_to_fail}\")\n res = (None, None, None, None)\n if ret[\"have_deadline\"] or ret[\"progressive_timeout\"]:\n res = (False, convert_result(params[\"output\"]), 0, ret[\"line_filter\"])\n raise CliExecutionException(\n \"Execution failed.\", res, ret[\"progressive_timeout\"] or ret[\"have_deadline\"]\n )\n if 
ret[\"rc_exit\"] != 0:\n        res = (False, convert_result(params[\"output\"]), 0, ret[\"line_filter\"])\n        if expect_to_fail:\n            return res\n        raise CliExecutionException(\"Execution failed.\", res, False)\n\n    if not expect_to_fail:\n        if len(params[\"output\"]) == 0:\n            res = (True, \"\", 0, ret[\"line_filter\"])\n        else:\n            res = (True, convert_result(params[\"output\"]), 0, ret[\"line_filter\"])\n        return res\n\n    if len(params[\"output\"]) == 0:\n        res = (True, \"\", 0, ret[\"line_filter\"], params[\"error\"])\n    else:\n        res = (True, convert_result(params[\"output\"]), 0, ret[\"line_filter\"])\n    raise CliExecutionException(\n        f\"{params['identifier']} Execution was expected to fail, but exited successfully.\",\n        res,\n        ret[\"progressive_timeout\"],\n    )\n\n\nID_COUNTER = 0\n\n\nclass ArangoCLIprogressiveTimeoutExecutor(ABC):\n    \"\"\"\n    Abstract base class to run arangodb cli tools\n    with username/password/endpoint specification\n    timeout will be relative to the last thing printed.\n    \"\"\"\n\n    # pylint: disable=too-few-public-methods too-many-arguments disable=too-many-instance-attributes disable=too-many-statements disable=too-many-branches disable=too-many-locals\n    def __init__(self, config, connect_instance, deadline_signal=-1):\n        \"\"\"launcher class for cli tools\"\"\"\n        self.connect_instance = connect_instance\n        self.cfg = config\n        self.deadline_signal = deadline_signal\n        self.pid = None\n        if self.deadline_signal == -1:\n            # pylint: disable=no-member\n            # yes, one is only there on the wintendo, the other one elsewhere.\n            if IS_WINDOWS:\n                self.deadline_signal = signal.CTRL_BREAK_EVENT\n            else:\n                self.deadline_signal = signal.SIGINT\n\n    def dig_for_children(self, params):\n        \"\"\"manual search for children that may be there without the self.pid still being there\"\"\"\n        children = []\n        for process in psutil.process_iter([\"pid\", \"ppid\", \"name\"]):\n            if process.ppid() == params[\"pid\"]:\n                children.append(process)\n            elif process.ppid() == 1 and (\n                process.name().lower().find(\"arango\") >= 0\n                or process.name().lower().find(\"tshark\") >= 0\n            ):\n                children.append(process)\n        return children\n\n    def get_environment(self, params):\n        \"\"\"hook to implement custom environment variable setters\"\"\"\n        return os.environ.copy()\n\n    def run_arango_tool_monitored(\n        self,\n        executable,\n        more_args,\n        use_default_auth=True,\n        params={\"error\": \"\", \"verbose\": True, \"output\": []},\n        progressive_timeout=60,\n        deadline=0,\n        deadline_grace_period=180,\n        result_line_handler=default_line_result,\n        expect_to_fail=False,\n        identifier=\"\",\n    ):\n        \"\"\"\n        runs a script in background tracing with\n        a dynamic timeout that its got output\n        (is still alive...)\n        \"\"\"\n        # fmt: off\n        passvoid = ''\n        if self.cfg.passvoid:\n            passvoid = str(self.cfg.passvoid)\n        elif self.connect_instance:\n            passvoid = str(self.connect_instance.get_passvoid())\n        if passvoid is None:\n            passvoid = ''\n\n        run_cmd = [\n            \"--log.foreground-tty\", \"true\",\n            \"--log.force-direct\", \"true\",\n        ]\n        if self.connect_instance:\n            run_cmd += [\"--server.endpoint\", self.connect_instance.get_endpoint()]\n        if use_default_auth:\n            run_cmd += [\"--server.username\", str(self.cfg.username)]\n            run_cmd += [\"--server.password\", passvoid]\n\n        run_cmd += more_args\n        ret = self.run_monitored(executable,\n                                 run_cmd,\n                                 params,\n                                 progressive_timeout,\n                                 deadline,\n                                 deadline_grace_period,\n                                 result_line_handler,\n                                 identifier)\n        return expect_failure(expect_to_fail, ret, params)\n\n    # fmt: on\n    @abstractmethod\n    def run_monitored(\n        self,\n        executable,\n        args,\n        
params={\"error\": \"\", \"verbose\": True, \"output\": []},\n        progressive_timeout=60,\n        deadline=0,\n        deadline_grace_period=180,\n        result_line_handler=default_line_result,\n        identifier=\"\",\n    ):\n        raise NotImplementedError(\"Subclasses should implement this!\")\n    \n","repo_name":"Mu-L/arangodb","sub_path":"scripts/test/async_client.py","file_name":"async_client.py","file_ext":"py","file_size_in_byte":12423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"70"} +{"seq_id":"37741059648","text":"input = [[1,'a',['cat'],2],[[[3]],'dog'],4,5]\n\ndef flatten(x, k=None):\n    # avoid a mutable default argument: a shared default list leaks state between calls\n    if k is None:\n        k = []\n    for i in range(len(x)):\n        if type(x[i]) is list:\n            flatten(x[i], k)\n        else:\n            k.append(x[i])\n    return k\n    \nprint(flatten(input))\n\ninput1 = [[1, 2,3], [3, 4], [5, 6, 7]]\n\ndef reverse_list(x):\n    \n    x.reverse()\n    for i in range(len(x)):\n        if type(x[i]) is list:\n            reverse_list(x[i])\n    return x\n\nprint(reverse_list(input1))\n\n","repo_name":"batuhantug/Python-Temel-Proje","sub_path":"project_functions.py","file_name":"project_functions.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"31325412414","text":"import os\n\nfrom easybuild.framework.easyblock import EasyBlock\nfrom easybuild.tools.filetools import copy_file, move_file\nfrom easybuild.tools.run import run_cmd\n\n\nclass EB_Nim(EasyBlock):\n    \"\"\"Support for building/installing Nim.\"\"\"\n\n    def configure_step(self):\n        \"\"\"No configuration for Nim.\"\"\"\n        pass\n\n    def build_step(self):\n        \"\"\"Custom build procedure for Nim.\"\"\"\n\n        # build Nim (bin/nim)\n        run_cmd(\"sh build.sh\")\n\n        # build koch management tool\n        run_cmd(\"bin/nim c -d:release koch\")\n\n        # rebuild Nim, with readline bindings\n        run_cmd(\"./koch boot -d:release -d:useLinenoise\")\n\n        # build nimble/nimgrep/nimsuggest tools\n        run_cmd(\"./koch tools\")\n\n    def install_step(self):\n        \"\"\"Custom install procedure for Nim.\"\"\"\n\n        run_cmd(\"./koch geninstall\")\n        run_cmd(\"sh install.sh %s\" % self.installdir)\n\n        # install.sh copies stuff into <installdir>/nim, so move it\n        nim_dir = os.path.join(self.installdir, 'nim')\n        for entry in os.listdir(nim_dir):\n            move_file(os.path.join(nim_dir, entry), os.path.join(self.installdir, entry))\n\n        # also copy nimble/nimgrep/nimsuggest tools\n        for tool in ['nimble', 'nimgrep', 'nimsuggest']:\n            copy_file(os.path.join('bin', tool), os.path.join(self.installdir, 'bin', tool))\n\n    def sanity_check_step(self):\n        \"\"\"Custom sanity check for Nim.\"\"\"\n        custom_paths = {\n            'files': ['bin/nim', 'bin/nimble', 'bin/nimgrep', 'bin/nimsuggest'],\n            'dirs': ['config', 'doc', 'lib'],\n        }\n        super(EB_Nim, self).sanity_check_step(custom_paths=custom_paths)\n","repo_name":"easybuilders/easybuild-easyblocks","sub_path":"easybuild/easyblocks/n/nim.py","file_name":"nim.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","stars":91,"dataset":"github-code","pt":"70"} +{"seq_id":"25366118148","text":"\"\"\"\nExamples from and reactions to https://pymotw.com/3/enum/index.html\n\"\"\"\n\nimport enum\n\nclass BugStatus(enum.Enum):\n\n    new = 7\n    incomplete = 6\n    invalid = 5\n    wont_fix = 4\n    in_progress = 3\n    fix_committed = 2\n    fix_released = 1\n\nprint('\\nMember name: {}'.format(BugStatus.wont_fix.name))\nprint('\\nMember value: {}'.format(BugStatus.wont_fix.value))\n\nfor status in BugStatus:\n    print('{:15} = {}'.format(status.name, status.value))\n\nactual_state = BugStatus.wont_fix\ndesired_state = 
BugStatus.fix_released\n\nprint('Equality:',\n actual_state == desired_state,\n actual_state == BugStatus.wont_fix)\nprint('Identity:',\n actual_state is desired_state,\n actual_state is BugStatus.wont_fix)\n\nprint('Ordered by value:')\ntry:\n print('\\n'.join(' ' + s.name for s in sorted(BugStatus)))\nexcept TypeError as err:\n print(' Cannot sort: {}'.format(err))\n\n","repo_name":"recursivelycurious/grok","sub_path":"python/pmotw3/enum_ex.py","file_name":"enum_ex.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"} +{"seq_id":"6222735916","text":"from sys import stdin\r\n\r\nn = int(stdin.readline())\r\n\r\nfor _ in range(n):\r\n array = {}\r\n line = stdin.readline().split()\r\n N = int(line[0])\r\n max_val = 0\r\n max_key = 0\r\n for i in range(1, len(line)):\r\n num = int(line[i])\r\n if num not in array:\r\n array[num] = 1\r\n else:\r\n array[num] += 1\r\n if array[num] > max_val:\r\n max_val = array[num]\r\n max_key = num\r\n if max_val > N / 2:\r\n \r\n print(max_key)\r\n else:\r\n print('SYJKGW')","repo_name":"xxubin04/CodingTest_Python","sub_path":"백준/Silver/1270. 전쟁 - 땅따먹기/전쟁 - 땅따먹기.py","file_name":"전쟁 - 땅따먹기.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"32676403040","text":"import argparse\nimport shlex\nimport subprocess\n\nfrom watchfiles import watch\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"name\")\n args = parser.parse_args()\n\n cmd = [\n \"./diagram.py\",\n \"-m\",\n args.name,\n \"-f\",\n \"draw\",\n \"-o\",\n f\"out/{args.name}.svg\",\n ]\n\n def run():\n print(\"$\", \" \".join(shlex.quote(word) for word in cmd))\n subprocess.run(cmd)\n\n run()\n for changes in watch(f\"./{args.name}.py\"):\n print(changes)\n run()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"samestep/diagrams","sub_path":"watch.py","file_name":"watch.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"34361453936","text":"\n\nimport pygame, sys\nfrom pygame.locals import *\n\nfrom Vector import *\nfrom Window import *\nimport math\n\n#A particle in the window space with mass, charge, color. Particles move around\n#the space, being acted on by forces, and collide with each other.\nclass Particle():\n\n def __init__(self, x, y, vx=0, vy=0, r=10, D=1, color=(0, 0, 0), q=0):\n self.color = color\n self.r = r\n self.q = q\n self.m = 4 * D * math.pi * (r ** 3) / 3\n self.d = Vec(x, y)\n self.v = Vec(vx, vy)\n self.a = Vec(0, 0)\n\n \n #Apply the force vector f to the particle by adding (1 / mass) * f to the particle's acceleration\n def applyForce(self, f):\n self.a += f * (1 / self.m)\n \n #Adjust the position and velocity of the particle to bounce away from a boundary. \n def bounce(self, axis, limitPos):\n if(axis == 0):\n self.v.x *= -1\n self.d.x = limitPos\n elif(axis == 1):\n self.v.y *= -1\n self.d.y = limitPos\n else:\n raise ValueError(\"Invalid axis value. Should be 0 for x, or 1 for y.\");\n\n #Returns the distance between this particle and the particle other.\n def distance(self, other):\n return self.d.distance(other.d)\n \n #Handle a collision between this particle and the particle other by adjusting their velocities and positions. 
\n def collide(self, other):\n displacement = self.d.displacement(other.d)\n u = displacement.unit()\n aAngle = math.pi - self.v.angle(u)\n bAngle = math.pi - other.v.angle(u * -1)\n ka = (2 * self.v.length() * math.cos(aAngle) + 2 * other.v.length()\\\n * math.cos(bAngle)) / (self.m / other.m + 1)\n kb = ka * (self.m / other.m)\n self.v += u * ka\n self.d = self.d + u * ((displacement.length() - self.r - other.r) / 2 - 0.00000001)\n \n #Update the particle by adding its acceleration to its velocity, moving its position by its velocity, keeping it within the window's\n #boundaries, and setting its acceleration to zero.\n def update(self):\n self.v += self.a\n self.d = self.d + self.v\n self.a = Vec(0, 0)\n\n","repo_name":"LayeredCake/particle-sim","sub_path":"Particle.py","file_name":"Particle.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"13272276344","text":"import re\n\nwith open('D:/Codes/korem_16s/Data/rrnDB/rrnDB-5.6_16S_rRNA.fasta') as fa_file:\n ssp_list = []\n for line in fa_file:\n if line.startswith(\">\"):\n fsp_line = line[1:-1].split(\"|\")\n #print(fsp_line)\n pos_line = re.split('\\.\\.|\\s', fsp_line[-1])\n ssp_line = fsp_line[:-1]\n ssp_line.extend(pos_line)\n #print(ssp_line)\n ssp_list.append(ssp_line)\n#print(ssp_list[0:4])\n\nimport csv\nwith open('D:/Codes/korem_16s/Data/rrnDB/rrnDB-5.6_16s_position.csv',\"w\") as csvfile:\n # columns name\n writer = csv.writer(csvfile)\n # positions and other information\n writer.writerow([\"organism_name\",\"record_id\",\"RefSeq_sequence\",\"chromosome\",\"start\",\"end\",\"strand\"])\n writer.writerows(ssp_list)\n\n'''with open('D:/Codes/korem_16s/Data/rrnDB/rrnDB-5.6_16s_NCBIaa.txt',\"w\") as txtfile:\n for record in ssp_list:\n txtfile.write(record[1]+'\\n')'''\n","repo_name":"linxy29/korem_16s_Python","sub_path":"Extract_16s_position.py","file_name":"Extract_16s_position.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"13178580954","text":"# -*- coding: utf-8 -*-\nimport http3\nimport datetime\nimport h2\nimport h11\nimport urllib.parse\nimport textwrap\nimport ipaddress\nimport hashlib\nimport datetime\nimport binascii\nimport base64\nimport sys\nimport socket\nimport ssl\nimport json\nimport requests\nfrom requests.adapters import HTTPAdapter\nfrom requests.packages.urllib3.poolmanager import PoolManager\nfrom requests.packages.urllib3.util import ssl_\n# https://docs.python.org/3/library/urllib.parse.html\nimport urllib\nfrom urllib.parse import urlparse\nimport uuid\nimport re\nfrom bs4 import BeautifulSoup\nimport config\nfrom models import Rating\nfrom tests.utils import dns_lookup, httpRequestGetContent, has_redirect\nimport gettext\n_local = gettext.gettext\n\n# DEFAULTS\nrequest_timeout = config.http_request_timeout\nuseragent = config.useragent\nreview_show_improvements_only = config.review_show_improvements_only\n\n\ndef run_test(_, langCode, url):\n \"\"\"\n Only work on a domain-level. 
Returns tuple with decimal for grade and string with review\n    \"\"\"\n\n    rating = Rating(_, review_show_improvements_only)\n    result_dict = {}\n\n    language = gettext.translation(\n        'http_validator', localedir='locales', languages=[langCode])\n    language.install()\n    _local = language.gettext\n\n    print(_local('TEXT_RUNNING_TEST'))\n\n    print(_('TEXT_TEST_START').format(\n        datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))\n\n    # We must take into consideration \"www.\" subdomains...\n\n    o = urllib.parse.urlparse(url)\n    hostname = o.hostname\n\n    if hostname.startswith('www.'):\n        url = url.replace(hostname, hostname[4:])\n\n    nof_checks = 0\n    check_url = True\n\n    while check_url and nof_checks < 10:\n        checked_url_rating = validate_url(url, _, _local)\n\n        redirect_result = has_redirect(url)\n        check_url = redirect_result[0]\n        url = redirect_result[1]\n        nof_checks += 1\n\n        rating += checked_url_rating\n\n    if nof_checks > 1:\n        rating.overall_review += _local('TEXT_REVIEW_SCORE_IS_DIVIDED').format(\n            nof_checks)\n\n    # if len(review) == 0:\n    #     review = _('TEXT_REVIEW_NO_REMARKS')\n\n    print(_('TEXT_TEST_END').format(\n        datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))\n\n    return (rating, result_dict)\n\n\ndef validate_url(url, _, _local):\n    rating = Rating(_, review_show_improvements_only)\n\n    # points = 0.0\n    # review = ''\n\n    o = urllib.parse.urlparse(url)\n    hostname = o.hostname\n\n    rating += http_to_https_score(url, _, _local)\n\n    rating += tls_version_score(url, _, _local)\n\n    rating += ip_version_score(hostname, _, _local)\n\n    rating += http_version_score(hostname, url, _, _local)\n\n    return rating\n\n\ndef http_to_https_score(url, _, _local):\n    rating = Rating(_, review_show_improvements_only)\n    http_url = ''\n\n    o = urllib.parse.urlparse(url)\n\n    if (o.scheme == 'https'):\n        http_url = url.replace('https://', 'http://')\n    else:\n        http_url = url\n\n    redirect_result = has_redirect(http_url)\n\n    result_url = ''\n    if (redirect_result[0]):\n        result_url = redirect_result[1]\n    else:\n        result_url = http_url\n\n    if result_url is None:\n        rating.set_overall(1.0)\n        rating.set_integrity_and_security(\n            1.0, _local('TEXT_REVIEW_HTTP_TO_HTTP_REDIRECT_UNABLE_TO_VERIFY'))\n        rating.set_standards(1.0, _local(\n            'TEXT_REVIEW_HTTP_TO_HTTP_REDIRECT_UNABLE_TO_VERIFY'))\n        return rating\n\n    result_url_o = urllib.parse.urlparse(result_url)\n\n    if (result_url_o.scheme == 'http'):\n        rating.set_overall(1.0)\n        rating.set_integrity_and_security(\n            1.0, _local('TEXT_REVIEW_HTTP_TO_HTTP_REDIRECT_NO_REDIRECT'))\n        rating.set_standards(1.0, _local(\n            'TEXT_REVIEW_HTTP_TO_HTTP_REDIRECT_NO_REDIRECT'))\n        return rating\n    else:\n        rating.set_overall(5.0)\n        rating.set_integrity_and_security(\n            5.0, _local('TEXT_REVIEW_HTTP_TO_HTTP_REDIRECT_REDIRECTED'))\n        rating.set_standards(5.0, _local(\n            'TEXT_REVIEW_HTTP_TO_HTTP_REDIRECT_REDIRECTED'))\n        return rating\n\n\ndef ip_version_score(hostname, _, _local):\n    rating = Rating(_, review_show_improvements_only)\n    ip4_result = dns_lookup(hostname, \"A\")\n\n    ip6_result = dns_lookup(hostname, \"AAAA\")\n\n    nof_ip6 = len(ip6_result)\n    nof_ip4 = len(ip4_result)\n\n    ip6_rating = Rating(_, review_show_improvements_only)\n    if nof_ip6 > 0:\n        ip6_rating.set_overall(5.0)\n        ip6_rating.set_standards(\n            5.0, _local('TEXT_REVIEW_IP_VERSION_IPV6_SUPPORT'))\n    else:\n        ip6_rating.set_overall(1.0)\n        ip6_rating.set_standards(\n            1.0, _local('TEXT_REVIEW_IP_VERSION_IPV6_NO_SUPPORT'))\n\n    rating += ip6_rating\n\n    ip4_rating = Rating(_, review_show_improvements_only)\n    if nof_ip4 > 0:\n        
ip4_rating.set_overall(5.0)\n ip4_rating.set_standards(\n 5.0, _local('TEXT_REVIEW_IP_VERSION_IPV4_SUPPORT'))\n else:\n ip4_rating.set_overall(1.0)\n ip4_rating.set_standards(\n 1.0, _local('TEXT_REVIEW_IP_VERSION_IPV4_NO_SUPPORT'))\n rating += ip4_rating\n\n return rating\n\n\ndef protocol_version_score(url, protocol_version, _, _local):\n rating = Rating(_, review_show_improvements_only)\n # points = 0.0\n # review = ''\n result_not_validated = (False, '')\n result_validated = (False, '')\n\n protocol_rule = False\n protocol_name = ''\n protocol_translate_name = ''\n protocol_is_secure = False\n\n try:\n if protocol_version == ssl.PROTOCOL_TLS:\n protocol_name = 'TLSv1.3'\n protocol_translate_name = 'TLS1_3'\n assert ssl.HAS_TLSv1_3\n protocol_rule = ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 | ssl.OP_NO_TLSv1_2\n protocol_is_secure = True\n elif protocol_version == ssl.PROTOCOL_TLSv1_2:\n protocol_name = 'TLSv1.2'\n protocol_translate_name = 'TLS1_2'\n assert ssl.HAS_TLSv1_2\n protocol_rule = ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 | ssl.OP_NO_TLSv1_3\n protocol_is_secure = True\n elif protocol_version == ssl.PROTOCOL_TLSv1_1:\n protocol_name = 'TLSv1.1'\n protocol_translate_name = 'TLS1_1'\n assert ssl.HAS_TLSv1_1\n protocol_rule = ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_2 | ssl.OP_NO_TLSv1_3\n protocol_is_secure = False\n elif protocol_version == ssl.PROTOCOL_TLSv1:\n protocol_name = 'TLSv1.0'\n protocol_translate_name = 'TLS1_0'\n assert ssl.HAS_TLSv1\n protocol_rule = ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1_1 | ssl.OP_NO_TLSv1_2 | ssl.OP_NO_TLSv1_3\n protocol_is_secure = False\n elif protocol_version == ssl.PROTOCOL_SSLv3:\n protocol_name = 'SSLv3'\n protocol_translate_name = 'SSL3_0'\n assert ssl.HAS_SSLv3\n protocol_rule = ssl.OP_NO_SSLv2 | ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 | ssl.OP_NO_TLSv1_2 | ssl.OP_NO_TLSv1_3\n protocol_is_secure = False\n elif protocol_version == ssl.PROTOCOL_SSLv2:\n protocol_name = 'SSLv2'\n protocol_translate_name = 'SSL2_0'\n protocol_rule = ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 | ssl.OP_NO_TLSv1_2 | ssl.OP_NO_TLSv1_3\n assert ssl.HAS_SSLv2\n protocol_is_secure = False\n\n result_not_validated = has_protocol_version(\n url, False, protocol_rule)\n\n result_validated = has_protocol_version(\n url, True, protocol_rule)\n\n has_full_support = result_not_validated[0] and result_validated[0]\n has_wrong_cert = result_not_validated[0]\n\n if has_full_support:\n if protocol_is_secure:\n rating.set_integrity_and_security(\n 5.0, _local('TEXT_REVIEW_' + protocol_translate_name + '_SUPPORT'))\n rating.set_overall(5.0)\n else:\n rating.set_integrity_and_security(\n 1.0, _local('TEXT_REVIEW_' + protocol_translate_name + '_SUPPORT'))\n rating.set_overall(2.5)\n rating.set_standards(5.0, _local(\n 'TEXT_REVIEW_' + protocol_translate_name + '_SUPPORT'))\n elif has_wrong_cert:\n rating.set_integrity_and_security(\n 1.0, _local('TEXT_REVIEW_' + protocol_translate_name + '_SUPPORT_WRONG_CERT'))\n rating.set_standards(\n 2.5, _local('TEXT_REVIEW_' + protocol_translate_name + '_SUPPORT_WRONG_CERT'))\n rating.set_overall(2.5)\n else:\n if not protocol_is_secure:\n rating.set_integrity_and_security(\n 5.0, _local('TEXT_REVIEW_' + protocol_translate_name + '_NO_SUPPORT'))\n rating.set_overall(5.0)\n else:\n rating.set_standards(\n 1.0, _local('TEXT_REVIEW_' + protocol_translate_name + '_NO_SUPPORT'))\n rating.set_integrity_and_security(\n 1.0, 
_local('TEXT_REVIEW_' + protocol_translate_name + '_NO_SUPPORT'))\n rating.set_overall(1.0)\n\n result_insecure_cipher = (False, 'unset')\n try:\n result_insecure_cipher = has_insecure_cipher(\n url, protocol_rule)\n except ssl.SSLError as sslex:\n print('error insecure_cipher', sslex)\n pass\n # if result_insecure_cipher[0]:\n # review += _('TEXT_REVIEW_' +\n # protocol_translate_name + '_INSECURE_CIPHERS')\n\n result_weak_cipher = (False, 'unset')\n try:\n result_weak_cipher = has_weak_cipher(\n url, protocol_rule)\n except ssl.SSLError as sslex:\n print('error weak_cipher', sslex)\n pass\n # if result_weak_cipher[0]:\n # review += _('TEXT_REVIEW_' +\n # protocol_translate_name + '_WEAK_CIPHERS')\n except ssl.SSLError as sslex:\n print('error 0.0s', sslex)\n pass\n except AssertionError:\n print('### No {0} support on your machine, unable to test ###'.format(\n protocol_name))\n pass\n except:\n print('error protocol_version_score: {0}'.format(sys.exc_info()[0]))\n pass\n\n return rating\n\n\ndef tls_version_score(orginal_url, _, _local):\n rating = Rating(_, review_show_improvements_only)\n url = orginal_url.replace('http://', 'https://')\n\n # TODO: check cipher security\n # TODO: re add support for identify wrong certificate\n\n try:\n tls1_3_rating = protocol_version_score(\n url, ssl.PROTOCOL_TLS, _, _local)\n if tls1_3_rating.get_overall() == 5.0:\n tls1_3_rating.set_performance(\n 5.0, _local('TEXT_REVIEW_TLS1_3_SUPPORT'))\n else:\n tls1_3_rating.set_performance(\n 4.0, _local('TEXT_REVIEW_TLS1_3_NO_SUPPORT'))\n rating += tls1_3_rating\n except:\n pass\n\n try:\n rating += protocol_version_score(url, ssl.PROTOCOL_TLSv1_2, _, _local)\n except:\n pass\n\n try:\n rating += protocol_version_score(url, ssl.PROTOCOL_TLSv1_1, _, _local)\n except:\n pass\n\n try:\n rating += protocol_version_score(url, ssl.PROTOCOL_TLSv1, _, _local)\n except:\n pass\n\n try:\n # HOW TO ENABLE SSLv3, https://askubuntu.com/questions/893155/simple-way-of-enabling-sslv2-and-sslv3-in-openssl\n rating += protocol_version_score(url, ssl.PROTOCOL_SSLv3, _, _local)\n except:\n pass\n\n try:\n # HOW TO ENABLE SSLv2, https://askubuntu.com/questions/893155/simple-way-of-enabling-sslv2-and-sslv3-in-openssl\n rating += protocol_version_score(url, ssl.PROTOCOL_SSLv2, _, _local)\n except:\n pass\n\n return rating\n\n\ndef http_version_score(hostname, url, _, _local):\n rating = Rating(_, review_show_improvements_only)\n\n rating += check_http11(hostname, _, _local)\n\n rating += check_http2(hostname, _, _local)\n\n # If we still have 1.0 points something must have gone wrong, try fallback\n if rating.get_overall() == 1.0:\n rating = check_http_fallback(url, _, _local)\n\n rating += check_http3(hostname, _, _local)\n\n return rating\n\n\ndef check_http11(hostname, _, _local):\n rating = Rating(_, review_show_improvements_only)\n try:\n socket.setdefaulttimeout(10)\n conn = ssl.create_default_context()\n conn.set_alpn_protocols(['http/1.1'])\n try:\n conn.set_npn_protocols([\"http/1.1\"])\n except NotImplementedError:\n pass\n\n ssock = conn.wrap_socket(\n socket.socket(socket.AF_INET, socket.SOCK_STREAM), server_hostname=hostname)\n ssock.connect((hostname, 443))\n\n negotiated_protocol = ssock.selected_alpn_protocol()\n if negotiated_protocol is None:\n negotiated_protocol = ssock.selected_npn_protocol()\n\n if negotiated_protocol == \"http/1.1\":\n rating.set_overall(5.0)\n rating.set_standards(\n 5.0, _local('TEXT_REVIEW_HTTP_VERSION_HTTP_1_1_SUPPORT'))\n else:\n rating.set_overall(1.0)\n rating.set_standards(\n 1.0, 
_local('TEXT_REVIEW_HTTP_VERSION_HTTP_1_1_NO_SUPPORT'))\n except Exception:\n # rating.set_overall(1.0)\n return rating\n return rating\n\n\ndef check_http2(hostname, _, _local):\n rating = Rating(_, review_show_improvements_only)\n try:\n socket.setdefaulttimeout(10)\n conn = ssl.create_default_context()\n conn.set_alpn_protocols(['h2'])\n try:\n conn.set_npn_protocols([\"h2\"])\n except NotImplementedError:\n pass\n ssock = conn.wrap_socket(\n socket.socket(socket.AF_INET, socket.SOCK_STREAM), server_hostname=hostname)\n ssock.connect((hostname, 443))\n\n negotiated_protocol = ssock.selected_alpn_protocol()\n if negotiated_protocol is None:\n negotiated_protocol = ssock.selected_npn_protocol()\n\n if negotiated_protocol == \"h2\":\n rating.set_overall(5.0)\n rating.set_standards(\n 5.0, _local('TEXT_REVIEW_HTTP_VERSION_HTTP_2_SUPPORT'))\n rating.set_performance(\n 5.0, _local('TEXT_REVIEW_HTTP_VERSION_HTTP_2_SUPPORT'))\n else:\n rating.set_overall(1.0)\n rating.set_standards(\n 1.0, _local('TEXT_REVIEW_HTTP_VERSION_HTTP_2_NO_SUPPORT'))\n rating.set_performance(\n 1.0, _local('TEXT_REVIEW_HTTP_VERSION_HTTP_2_NO_SUPPORT'))\n except Exception:\n return rating\n\n return rating\n\n\ndef check_http3(host, _, _local):\n rating = Rating(_, review_show_improvements_only)\n\n has_quic_support = False\n has_http3_support = False\n\n try:\n url = 'https://http3check.net/?host={0}'.format(host)\n headers = {'user-agent': useragent}\n request = requests.get(url, allow_redirects=True,\n headers=headers, timeout=request_timeout)\n\n # We use variable to validate it once\n requestText = ''\n hasRequestText = False\n\n if request.text:\n requestText = request.text\n hasRequestText = True\n\n if hasRequestText:\n try:\n soup = BeautifulSoup(requestText, 'lxml')\n elements_success = soup.find_all(\n class_=\"uk-text-success\")\n for result in elements_success:\n supportText = result.text.lower()\n has_quic_support = has_quic_support or 'quic' in supportText\n has_http3_support = has_quic_support or 'http/3' in supportText\n\n except:\n print(\n 'Error getting HTTP/3 or QUIC support!\\nMessage:\\n{0}'.format(sys.exc_info()[0]))\n\n except Exception as ex:\n print(\n 'General Error getting HTTP/3 or QUIC support!\\nMessage:\\n{0}'.format(sys.exc_info()[0]))\n\n http3_rating = Rating(_, review_show_improvements_only)\n if (has_http3_support):\n http3_rating.set_overall(5.0)\n http3_rating.set_standards(\n 5.0, _local('TEXT_REVIEW_HTTP_VERSION_HTTP_3_SUPPORT'))\n http3_rating.set_performance(\n 5.0, _local('TEXT_REVIEW_HTTP_VERSION_HTTP_3_SUPPORT'))\n else:\n http3_rating.set_overall(1.0)\n http3_rating.set_performance(\n 2.5, _local('TEXT_REVIEW_HTTP_VERSION_HTTP_3_NO_SUPPORT'))\n http3_rating.set_standards(1.0, _local(\n 'TEXT_REVIEW_HTTP_VERSION_HTTP_3_NO_SUPPORT'))\n rating += http3_rating\n\n quic_rating = Rating(_, review_show_improvements_only)\n if (has_quic_support):\n quic_rating.set_overall(5.0)\n quic_rating.set_performance(\n 5.0, _local('TEXT_REVIEW_HTTP_VERSION_QUIC_SUPPORT'))\n quic_rating.set_standards(\n 5.0, _local('TEXT_REVIEW_HTTP_VERSION_QUIC_SUPPORT'))\n else:\n quic_rating.set_overall(1.0)\n quic_rating.set_performance(\n 2.5, _local('TEXT_REVIEW_HTTP_VERSION_QUIC_NO_SUPPORT'))\n quic_rating.set_standards(1.0, _local(\n 'TEXT_REVIEW_HTTP_VERSION_QUIC_NO_SUPPORT'))\n rating += quic_rating\n\n return rating\n\n\ndef check_http_fallback(url, _, _local):\n rating = Rating(_, review_show_improvements_only)\n has_http2 = False\n has_http11 = False\n try:\n r = http3.get(url, 
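`check_http11` and `check_http2` above differ only in the ALPN token they offer; a hedged sketch of that shared shape as a single helper (the function name is hypothetical):

# Sketch: one parameterized ALPN probe instead of two near-identical
# functions; pass 'http/1.1' or 'h2' as the token.
import socket
import ssl

def negotiates_alpn(hostname, token, port=443, timeout=10):
    ctx = ssl.create_default_context()
    ctx.set_alpn_protocols([token])
    try:
        with socket.create_connection((hostname, port), timeout=timeout) as sock:
            with ctx.wrap_socket(sock, server_hostname=hostname) as ssock:
                return ssock.selected_alpn_protocol() == token
    except (ssl.SSLError, OSError):
        return False

# negotiates_alpn('example.com', 'h2')  -> True if the server offers HTTP/2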
allow_redirects=True)\n\n has_http2 = r.protocol == \"HTTP/2\"\n has_http11 = r.protocol == \"HTTP1.1\"\n except ssl.CertificateError as error:\n print('ERR1', error)\n pass\n except Exception as e:\n print('ERR2', e)\n pass\n\n try:\n if not has_http11:\n # This call only supports HTTP/1.1\n content = httpRequestGetContent(url, True)\n if '' in content:\n has_http11 = True\n except Exception as e:\n # Probably a CERT validation error, ignore\n print('ERR3', e)\n pass\n\n http11_rating = Rating(_, review_show_improvements_only)\n if has_http11:\n http11_rating.set_overall(5.0)\n http11_rating.set_standards(5.0, _local(\n 'TEXT_REVIEW_HTTP_VERSION_HTTP_1_1_SUPPORT'))\n else:\n http11_rating.set_overall(1.0)\n http11_rating.set_standards(\n 1.0, _local('TEXT_REVIEW_HTTP_VERSION_HTTP_1_1_NO_SUPPORT'))\n rating += http11_rating\n\n http2_rating = Rating(_, review_show_improvements_only)\n if has_http2:\n http2_rating.set_overall(5.0)\n http2_rating.set_standards(5.0, _local(\n 'TEXT_REVIEW_HTTP_VERSION_HTTP_2_SUPPORT'))\n http2_rating.set_performance(\n 5.0, _local('TEXT_REVIEW_HTTP_VERSION_HTTP_2_SUPPORT'))\n else:\n http2_rating.set_overall(1.0)\n http2_rating.set_standards(\n 1.0, _local('TEXT_REVIEW_HTTP_VERSION_HTTP_2_NO_SUPPORT'))\n http2_rating.set_performance(\n 1.0, _local('TEXT_REVIEW_HTTP_VERSION_HTTP_2_NO_SUPPORT'))\n rating += http2_rating\n\n return rating\n\n\n# Read post at: https://hussainaliakbar.github.io/restricting-tls-version-and-cipher-suites-in-python-requests-and-testing-wireshark/\nWEAK_CIPHERS = (\n 'ECDHE+AES128+CBC+SHA:'\n 'ECDHE+AES256+CBC+SHA:'\n 'ECDHE+RSA+3DES+EDE+CBC+SHA:'\n 'ECDHE+RSA+AES256+GCM+SHA383:'\n 'RSA+AES128+CBC+SHA:'\n 'RSA+AES256+CBC+SHA:'\n 'RSA+AES128+GCM+SHA256:'\n 'RSA+AES256+GCM+SHA:'\n 'RSA+AES256+GCM+SHA383:'\n 'RSA+CAMELLIA128+CBC+SHA:'\n 'RSA+CAMELLIA256+CBC+SHA:'\n 'RSA+IDEA+CBC+SHA:'\n 'RSA+AES256+GCM+SHA:'\n 'RSA+3DES+EDE+CBC+SHA:'\n 'RSA+SEED+CBC+SHA:'\n 'DHE+RSA+3DES+EDE+CBC+SHA:'\n 'DHE+RSA+AES128+CBC+SHA:'\n 'DHE+RSA+AES256+CBC+SHA:'\n 'DHE+RSA+CAMELLIA128+CBC+SHA:'\n 'DHE+RSA+CAMELLIA256+CBC+SHA:'\n 'DHE+RSA+SEED+CBC+SHA:'\n)\n\n\nclass TlsAdapterWeakCiphers(HTTPAdapter):\n\n def __init__(self, ssl_options=0, **kwargs):\n self.ssl_options = ssl_options\n super(TlsAdapterWeakCiphers, self).__init__(**kwargs)\n\n def init_poolmanager(self, *pool_args, **pool_kwargs):\n ctx = ssl_.create_urllib3_context(\n ciphers=WEAK_CIPHERS,\n cert_reqs=ssl.CERT_REQUIRED, options=self.ssl_options)\n\n self.poolmanager = PoolManager(*pool_args,\n ssl_context=ctx,\n **pool_kwargs)\n\n def proxy_manager_for(self, *args, **kwargs):\n context = ssl_.create_urllib3_context(ciphers=WEAK_CIPHERS)\n kwargs['ssl_context'] = context\n return super(TlsAdapterWeakCiphers, self).proxy_manager_for(*args, **kwargs)\n\n\ndef has_weak_cipher(url, protocol_version):\n session = False\n\n try:\n # print('ssl._DEFAULT_CIPHERS', ssl._DEFAULT_CIPHERS)\n\n session = requests.session()\n adapter = TlsAdapterWeakCiphers(protocol_version)\n\n session.mount(url, adapter)\n\n except ssl.SSLError as sslex:\n # print('### No weak cipher support on your machine, unable to test: {0} ###'.format(\n # WEAK_CIPHERS))\n return (False, 'weak_cipher SSLError {0}'.format(sslex))\n\n try:\n allow_redirects = False\n\n headers = {'user-agent': useragent}\n a = session.get(url, verify=False, allow_redirects=allow_redirects,\n headers=headers, timeout=request_timeout)\n\n if a.status_code == 200 or a.status_code == 301 or a.status_code == 302 or a.status_code == 404:\n # print('is ok')\n 
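`has_weak_cipher` drives its probe through a requests adapter built on urllib3 internals; for comparison, a minimal stdlib-only sketch that asks whether a server will negotiate any cipher from a given OpenSSL cipher string (names and defaults here are assumptions):

# Sketch: cipher-string probe with plain ssl. set_ciphers raises
# ssl.SSLError if the local OpenSSL recognizes none of the entries,
# which the except clause also treats as "not supported".
import socket
import ssl

def accepts_ciphers(hostname, cipher_string, port=443, timeout=10):
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_NONE
    try:
        ctx.set_ciphers(cipher_string)
        with socket.create_connection((hostname, port), timeout=timeout) as sock:
            with ctx.wrap_socket(sock, server_hostname=hostname) as ssock:
                return ssock.cipher() is not None
    except (ssl.SSLError, OSError):
        return False

# accepts_ciphers('example.com', 'DHE+RSA+AES128+CBC+SHA')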
return (True, 'is ok')\n\n resulted_in_html = '/', views.VacanciesView.as_view(), name='vacancies_by_speciality'),\n # re_path(r'^vacancies/?(cat/(?P\\w+))?/$', VacanciesView.as_view()),\n path('vacancies//', views.VacancyView.as_view(), name='vacancy_info'),\n path('vacancies//sent', views.VacancySent.as_view(), name='vacancy_sent'),\n path('companies//', views.CompanyView.as_view(), name='company_vacancies'),\n path('mycompany/', views.UserCompanyView.as_view(), name='user_company'),\n path('mycompany/create', views.create_user_company, name='user_company_create'),\n path('mycompany/profile', views.user_profile, name='user_profile'),\n path('mycompany/vacancies', views.UserCompanyVacanciesView.as_view(), name='user_company_vacancies'),\n path('mycompany/vacancies/create', views.create_user_vacancy, name='user_company_vacancy_create'),\n path('mycompany/vacancies//', views.UserCompanyVacancyEditView.as_view(),\n name='user_company_vacancy_edit'),\n path('myresume', views.UserResumeEditView.as_view(), name='user_resume'),\n path('myresume/create', views.create_user_resume, name='user_resume_create'),\n path('login', views.MyLoginView.as_view(), name='login'),\n path('register', views.MySignupView.as_view(), name='register'),\n path('logout', LogoutView.as_view(), {'next_page': settings.LOGOUT_REDIRECT_URL}, name='logout'),\n path('search?s=', views.SearchView.as_view(), name='search')\n # re_path(r'^search/$', views.SearchView.as_view(), name='search'),\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n","repo_name":"begemotoff35/stepik_vacancies_final","sub_path":"conf/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"40809903181","text":"#### Model that doesnt use the image data - just the current strawberry states.\n\nimport cv2\nimport matplotlib\nimport numpy as np\nimport pandas as pd\n# from PIL import Image\n# import tensorflow as tf\n# from tensorflow import keras\n# import matplotlib.image as mpimg\n# from matplotlib import pyplot as plt\n# from tensorflow.keras.models import Sequential\n# from tensorflow.keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D\n# from tensorflow.keras.callbacks import EarlyStopping\n# np.random.seed(1000)\n# from sklearn import preprocessing\n# from tensorflow.keras.models import Model\n# from tensorflow.keras.optimizers import Adam\n# from tensorflow.keras.preprocessing import image\n# from tensorflow.keras.applications.resnet import preprocess_input\n\n################################## Hyper Parameters ##################################################################\ndata_set_length = 20\ntrajectory_length = 5\n\n################################# Robot vel and pos ###################################################################\nrobot_positions = pd.read_csv('/home/will/Robotics/data_set_003/robot_pos/data_set_' + str(0) + '_robot_data_store_position.csv', header=None)\nrobot_velocitys = pd.read_csv('/home/will/Robotics/data_set_003/robot_vel/data_set_' + str(0) + '_robot_data_store_velocity.csv', header=None)\nfor i in range(1, data_set_length):\n robot_positions = pd.concat([robot_positions, pd.read_csv('/home/will/Robotics/data_set_003/robot_pos/data_set_' + str(i) + '_robot_data_store_position.csv', header=None)])\n robot_velocitys = 
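The empty path segments in the routes above (e.g. `'vacancies//'`) look like angle-bracket converters lost during text extraction; a hypothetical reconstruction of the conventional Django form, with parameter names assumed rather than recovered from the original file:

# Hypothetical reconstruction -- converter names are assumptions.
from django.urls import path
from . import views  # assumes the app's views module, as in the record

urlpatterns = [
    path('vacancies/<int:vacancy_id>/', views.VacancyView.as_view(), name='vacancy_info'),
    path('companies/<int:company_id>/', views.CompanyView.as_view(), name='company_vacancies'),
    path('mycompany/vacancies/<int:vacancy_id>/', views.UserCompanyVacancyEditView.as_view(),
         name='user_company_vacancy_edit'),
]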
pd.concat([robot_velocitys, pd.read_csv('/home/will/Robotics/data_set_003/robot_vel/data_set_' + str(i) + '_robot_data_store_velocity.csv', header=None)])\nif camera == 1:\n robot_state = pd.concat([robot_positions, robot_velocitys], axis=1)\nelse:\n robot_state_single_cam = pd.concat([robot_positions, robot_velocitys], axis=1)\n robot_state = pd.concat([robot_state, robot_state_single_cam], axis=0)\n\n# robot_states_list = robot_state.values.tolist()\n# list_ = []\n# for i in range(0, len(robot_state[0]), 4):\n# list_.append(robot_states_list[i])\n# robot_states_frame_rate = pd.DataFrame(list_)\n# print(\"Done\")\n\n# ################################## Standardization for Robot States ###################################################################\n# robot_state_names = robot_states_frame_rate.columns\n# scaler = preprocessing.StandardScaler()\n# myScaler = scaler.fit(robot_states_frame_rate)\n# robot_states_frame_rate = myScaler.transform(robot_states_frame_rate)\n# robot_states_frame_rate = pd.DataFrame(robot_states_frame_rate, columns=robot_state_names)\n# print(robot_states_frame_rate.shape)\n\n# ################################## Load Strawberry Data ##################################################################\n# strawberry_1 = pd.read_csv('/home/will/Robotics/data_set_003/straw_1/data_set_' + str(0) + '_strawberry_data_store_1.csv', delimiter=',', error_bad_lines=False, header=None)\n# strawberry_2 = pd.read_csv('/home/will/Robotics/data_set_003/straw_2/data_set_' + str(0) + '_strawberry_data_store_2.csv', delimiter=',', error_bad_lines=False, header=None)\n# strawberry_3 = pd.read_csv('/home/will/Robotics/data_set_003/straw_3/data_set_' + str(0) + '_strawberry_data_store_3.csv', delimiter=',', error_bad_lines=False, header=None)\n# strawberry_4 = pd.read_csv('/home/will/Robotics/data_set_003/straw_4/data_set_' + str(0) + '_strawberry_data_store_4.csv', delimiter=',', error_bad_lines=False, header=None)\n# strawberry_5 = pd.read_csv('/home/will/Robotics/data_set_003/straw_5/data_set_' + str(0) + '_strawberry_data_store_5.csv', delimiter=',', error_bad_lines=False, header=None)\n# strawberry_states = strawberry_1\n# blank = strawberry_2\n# counter = 0\n# for i in range(counter, 2):\n# strawberry_states = pd.concat([strawberry_states, blank], axis=1)\n\n# for i in range(1, data_set_length):\n# strawberry_cluster_state = pd.read_csv('/home/will/Robotics/data_set_003/straw_1/data_set_' + str(i) + '_strawberry_data_store_1.csv', delimiter=',', error_bad_lines=False, header=None)\n# strawberry_2 = pd.read_csv('/home/will/Robotics/data_set_003/straw_2/data_set_' + str(i) + '_strawberry_data_store_2.csv', delimiter=',', error_bad_lines=False, header=None)\n# strawberry_3 = pd.read_csv('/home/will/Robotics/data_set_003/straw_3/data_set_' + str(i) + '_strawberry_data_store_3.csv', delimiter=',', error_bad_lines=False, header=None)\n# strawberry_4 = pd.read_csv('/home/will/Robotics/data_set_003/straw_4/data_set_' + str(i) + '_strawberry_data_store_4.csv', delimiter=',', error_bad_lines=False, header=None)\n# strawberry_5 = pd.read_csv('/home/will/Robotics/data_set_003/straw_5/data_set_' + str(i) + '_strawberry_data_store_5.csv', delimiter=',', error_bad_lines=False, header=None)\n# counter = 0\n# if strawberry_2[0][0] != 100 and counter < 2:\n# strawberry_cluster_state = pd.concat([strawberry_cluster_state, strawberry_2], axis=1)\n# counter += 1\n# if strawberry_3[0][0] != 100 and counter < 2:\n# strawberry_cluster_state = pd.concat([strawberry_cluster_state, strawberry_3], 
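The loop above grows `robot_positions` with a `pd.concat` per iteration, which recopies the accumulated frame every time; a minimal sketch of the usual pattern of collecting the frames in a list and concatenating once (same paths and `data_set_length` as in the script):

# Sketch: single concat instead of concat-in-a-loop.
import pandas as pd

base = '/home/will/Robotics/data_set_003'
positions = [
    pd.read_csv(f'{base}/robot_pos/data_set_{i}_robot_data_store_position.csv', header=None)
    for i in range(data_set_length)  # data_set_length = 20 above
]
robot_positions = pd.concat(positions, ignore_index=True)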
axis=1)\n# counter += 1\n# if strawberry_4[0][0] != 100 and counter < 2:\n# strawberry_cluster_state = pd.concat([strawberry_cluster_state, strawberry_4], axis=1)\n# counter += 1\n# if strawberry_5[0][0] != 100 and counter < 2:\n# strawberry_cluster_state = pd.concat([strawberry_cluster_state, strawberry_5], axis=1)\n# counter += 1\n# for i in range(counter, 2):\n# strawberry_cluster_state = pd.concat([strawberry_cluster_state, blank], axis=1)\n# strawberry_states = pd.concat([strawberry_states, strawberry_cluster_state], axis=0)\n\n# strawberry_states_list = strawberry_states.values.tolist()\n# list_ = []\n# for i in range(0, len(strawberry_states[0]), 4):\n# list_.append(strawberry_states_list[i])\n# strawberry_states_frame_rate = pd.DataFrame(list_)\n\n# ################################## Standardization for Strawberry States ###################################################################\n# strawberry_state_names = strawberry_states_frame_rate.columns\n# scaler = preprocessing.StandardScaler()\n# myScaler = scaler.fit(strawberry_states_frame_rate)\n# strawberry_states_frame_rate = myScaler.transform(strawberry_states_frame_rate)\n# strawberry_states_frame_rate = pd.DataFrame(strawberry_states_frame_rate, columns=strawberry_state_names)\n# print(strawberry_states_frame_rate.shape)\n\n# ################################## Order data for time step prediction ###################################################################\n# robot_pos_trajectory_input = []\n# robot_vel_trajectory_input = []\n# strawberry_state_input = []\n# strawberry_state_label = []\n# for i in range(0, trajectory_length):\n# \t","repo_name":"WillMandil001/mfpc_fruit_picker","sub_path":"simulation/DNN_model/CNN_no_image.py","file_name":"CNN_no_image.py","file_ext":"py","file_size_in_byte":6816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"37964482900","text":"try:\n import setuptools\nexcept ImportError:\n print(\"Can't import SetupTools from STDLib. 
Please install it first!\")\n\"\"\"\n ZypeLang - A OpenSource, easy-to-use, easy-to-read & easy-to-write replacement of JSON based on Python.\n\"\"\"\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\nsetuptools.setup(\n name=\"ZypeSDK\",\n version=\"1.6\",\n author=\"TechGeeks\",\n author_email=\"ZypeSDK@tgeeks.cf\",\n maintainer=\"Rajdeep Malakar\",\n maintainer_email=\"Rajdeep@tgeeks.cf\",\n description=\"Zype Language SDK\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/Zype-Z/ZypeLang\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.6',\n entry_points=dict(\n console_scripts=['zyper=ZypeSDK.__init__:Main']\n )\n)\n","repo_name":"Zype-Z/ZypeLang","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"} +{"seq_id":"21143899447","text":"# %matplotlib inline\nfrom pycocotools.coco_data_record import COCO\nimport numpy as np\nimport skimage.io as io\nimport matplotlib.pyplot as plt\nimport pylab\n\npylab.rcParams['figure.figsize'] = (10.0, 8.0)\n\njson_path = \"/data/bo718.wang/zhaowei/data/516data/mscoco/annotations/captions_train2014.json\"\nseg_path = \"/data/bo718.wang/zhaowei/data/516data/mscoco/anns/train2014\"\nimg_path = \"/data/bo718.wang/zhaowei/data/516data/mscoco/train2014\"\nhdf5_path = \"/data/rui.wu/CZHH/Dataset_COCO/COCO_VSE_torch/COCO_vse_torch_train.hdf5\"\nannotations_path = '/data/rui.wu/Elijha/annotations/'\n\ndataDir=\"/data/bo718.wang/zhaowei/data/516data/mscoco/\"\ndataType='train2014'\nannFile='%s/instances_%s.json'%(annotations_path,dataType)\n\n# initialize COCO api for instance annotations\ncoco=COCO(annFile)\n\n# display COCO categories and supercategories\ncats = coco.loadCats(coco.getCatIds())\nnms=[cat['name'] for cat in cats]\nprint('COCO categories: \\n\\n', ' '.join(nms))\n\nnms = set([cat['supercategory'] for cat in cats])\nprint('COCO supercategories: \\n', ' '.join(nms))\n\n# get all images containing given categories, select one at random\ncatIds = coco.getCatIds(catNms=['person','dog','skateboard']);\nimgIds = coco.getImgIds(catIds=catIds );\nimg = coco.loadImgs(imgIds[np.random.randint(0,len(imgIds))])[0]\n\n# load and display image\nI = io.imread('%s/%s/%s'%(dataDir,dataType,img['file_name']))\n# use url to load image\n# I = io.imread('http://mscoco.org/images/%d'%(img['id']))\n# plt.figure(); plt.axis('off')\n# plt.imshow(I)\n# plt.show()\n\n# load and display instance annotations\n# plt.imshow(I); plt.axis('off')\nannIds = coco.getAnnIds(imgIds=img['id'], catIds=catIds, iscrowd=None)\nanns = coco.loadAnns(annIds)\ncoco.showAnns(anns)","repo_name":"ElijhaLee2/LinkinNet","sub_path":"useless/preprocess/coco_process.py","file_name":"coco_process.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"} +{"seq_id":"70257427108","text":"from __future__ import annotations\n\nfrom typing import Any, Callable, cast\n\nimport ckan.plugins.toolkit as tk\nimport ckan.types as types\nfrom ckan.lib.helpers import Page\nfrom flask import Blueprint, Response\nfrom flask.views import MethodView\n\nfrom ckanext.admin_panel.utils import ap_before_request\n\nimport 
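The COCO script above follows the standard pycocotools query flow, though it imports COCO from a nonstandard module path; a minimal sketch of that flow with the canonical import (the annotation file location is an assumption):

# Sketch: filter images by category with pycocotools.
from pycocotools.coco import COCO

coco = COCO('annotations/instances_train2014.json')
cat_ids = coco.getCatIds(catNms=['person', 'dog', 'skateboard'])
img_ids = coco.getImgIds(catIds=cat_ids)  # images containing all three
anns = coco.loadAnns(coco.getAnnIds(imgIds=img_ids[0], catIds=cat_ids, iscrowd=None))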
ckanext.mailcraft.config as mc_config\nimport ckanext.mailcraft.model as mc_model\n\nmailcraft = Blueprint(\"mailcraft\", __name__, url_prefix=\"/mailcraft\")\nmailcraft.before_request(ap_before_request)\n\n\nclass DashboardView(MethodView):\n def get(self) -> str:\n return tk.render(\n \"mailcraft/dashboard.html\",\n extra_vars={\n \"page\": self._get_pager(\n tk.get_action(\"mc_mail_list\")(_build_context(), {})\n ),\n \"columns\": self._get_table_columns(),\n \"bulk_options\": self._get_bulk_options(),\n },\n )\n\n def _get_pager(self, mailcraft_list: list[dict[str, Any]]) -> Page:\n return Page(\n collection=mailcraft_list,\n page=tk.h.get_page_number(tk.request.args),\n url=tk.h.pager_url,\n item_count=len(mailcraft_list),\n items_per_page=mc_config.get_mail_per_page(),\n )\n\n def _get_table_columns(self) -> list[dict[str, Any]]:\n return [\n tk.h.ap_table_column(\"id\", sortable=False, width=\"5%\"),\n tk.h.ap_table_column(\"subject\", sortable=False, width=\"10%\"),\n tk.h.ap_table_column(\"sender\", sortable=False, width=\"10%\"),\n tk.h.ap_table_column(\"recipient\", sortable=False, width=\"20%\"),\n tk.h.ap_table_column(\"state\", sortable=False, width=\"5%\"),\n tk.h.ap_table_column(\n \"timestamp\", column_renderer=\"ap_date\", sortable=False, width=\"10%\"\n ),\n tk.h.ap_table_column(\n \"actions\",\n sortable=False,\n width=\"10%\",\n column_renderer=\"ap_action_render\",\n actions=[\n tk.h.ap_table_action(\n \"mailcraft.mail_read\",\n tk._(\"View\"),\n {\"mail_id\": \"$id\"},\n attributes={\"class\": \"btn btn-primary\"},\n )\n ],\n ),\n ]\n\n def _get_bulk_options(self):\n return [\n {\n \"value\": \"1\",\n \"text\": tk._(\"Remove selected mails\"),\n },\n ]\n\n def _get_bulk_actions(self, value: str) -> Callable[[list[str]], bool] | None:\n return {\"1\": self._remove_emails}.get(value)\n\n def _remove_emails(self, mail_ids: list[str]) -> bool:\n for mail_id in mail_ids:\n try:\n tk.get_action(\"mc_mail_delete\")(\n {\"ignore_auth\": True},\n {\"id\": mail_id},\n )\n except tk.ObjectNotFound:\n pass\n\n return True\n\n def post(self) -> Response:\n if \"clear_mails\" in tk.request.form:\n mc_model.Email.clear_emails()\n tk.h.flash_success(tk._(\"Mails have been cleared.\"))\n return tk.redirect_to(\"mailcraft.dashboard\")\n\n bulk_action = tk.request.form.get(\"bulk-action\", \"0\")\n mail_ids = tk.request.form.getlist(\"entity_id\")\n\n if not bulk_action or not mail_ids:\n return tk.redirect_to(\"mailcraft.dashboard\")\n\n action_func = self._get_bulk_actions(bulk_action)\n\n if not action_func:\n tk.h.flash_error(tk._(\"The bulk action is not implemented\"))\n return tk.redirect_to(\"mailcraft.dashboard\")\n\n action_func(mail_ids)\n\n tk.h.flash_success(tk._(\"Done.\"))\n\n return tk.redirect_to(\"mailcraft.dashboard\")\n\n\nclass ConfigView(MethodView):\n def get(self) -> str:\n return tk.render(\"mailcraft/dashboard.html\")\n\n def post(self) -> str:\n return tk.render(\"mailcraft/dashboard.html\")\n\n\nclass MailReadView(MethodView):\n def get(self, mail_id: str) -> str:\n try:\n mail = tk.get_action(\"mc_mail_show\")(_build_context(), {\"id\": mail_id})\n except tk.ValidationError:\n return tk.render(\"mailcraft/404.html\")\n\n return tk.render(\"mailcraft/mail_read.html\", extra_vars={\"mail\": mail})\n\n\ndef _build_context() -> types.Context:\n return cast(\n types.Context,\n {\n \"user\": tk.current_user.name,\n \"auth_user_obj\": tk.current_user,\n },\n )\n\n\nmailcraft.add_url_rule(\"/config\", 
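The views above pair `flask.views.MethodView` subclasses with `Blueprint.add_url_rule`; a stripped-down sketch of that wiring (blueprint and view names are illustrative):

# Sketch: the MethodView registration pattern, reduced to a skeleton.
from flask import Blueprint
from flask.views import MethodView

bp = Blueprint('example', __name__, url_prefix='/example')

class ItemView(MethodView):
    def get(self, item_id):
        return f'item {item_id}'

    def post(self, item_id):
        return ('', 204)  # no-content response

bp.add_url_rule('/<int:item_id>', view_func=ItemView.as_view('item'))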
view_func=ConfigView.as_view(\"config\"))\nmailcraft.add_url_rule(\"/dashboard\", view_func=DashboardView.as_view(\"dashboard\"))\nmailcraft.add_url_rule(\n \"/dashboard/read/\", view_func=MailReadView.as_view(\"mail_read\")\n)\n\n\ndef get_blueprints():\n return [mailcraft]\n","repo_name":"mutantsan/ckanext-mailcraft","sub_path":"ckanext/mailcraft/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"45261272073","text":"\"\"\"\nBachelor's thesis: Generation of guitar tracks utilizing knowledge of song structure\nAuthor: Adam Pankuch\n\"\"\"\nimport sys\nimport glob\nimport os.path\nimport json\nimport gputils\n\n\nif __name__ == '__main__':\n try:\n gpDirPath = sys.argv[1]\n statsOutPath = sys.argv[2]\n logOutPath = sys.argv[3]\n except IndexError:\n print('Use: python stats.py [gp-dir-path] [stats.json] [log.txt]')\n sys.exit(1)\n\n stats = {}\n\n # iterate through all files and save statistics of songs with markers\n cnt_all = 0\n cnt_mark = 0\n cnt_corrupt = 0\n for filename in glob.iglob(gpDirPath + '**/*.gp*', recursive=True):\n relativeFilename = filename[len(gpDirPath):]\n if os.path.isfile(filename):\n print(cnt_all, ' ', filename)\n try:\n songStat = gputils.getSongStatistics(filename)\n if songStat is not None:\n stats[relativeFilename] = songStat\n cnt_mark += 1\n except:\n print('=== ERROR - corrupted file ===')\n cnt_corrupt += 1\n cnt_all += 1\n\n with open(statsOutPath, 'w') as fout:\n output = json.dumps(stats, indent=4) \n fout.write(output)\n \n log = 'all: ' + str(cnt_all) + '\\n'\n log += 'mark: ' + str(cnt_mark) + '\\n'\n log += 'corrupt: ' + str(cnt_corrupt) + '\\n'\n print(log)\n with open(logOutPath, 'w') as fout:\n fout.write(log)\n","repo_name":"Rattlesnek/MusicGenerator","sub_path":"preprocess/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"15426611264","text":"import cv2\r\nimport pytesseract\r\n\r\n#executable file location\r\npytesseract.pytesseract.tesseract_cmd = 'C:\\\\Users\\\\aasai\\\\AppData\\\\Local\\\\Tesseract-OCR\\\\tesseract.exe' # Directory of tesseract.exe\r\n#read image\r\nimg = cv2.imread(\"sample_1.png\")\r\n#convert our input image to rgb format\r\nimg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\ncv2.imshow('Original',img)\r\n\r\n#############################################\r\n#### Detecting Characters ######\r\n#############################################\r\nhImg, wImg,_ = img.shape\r\nboxes = pytesseract.image_to_boxes(img)\r\nfor b in boxes.splitlines():\r\n print(b)\r\n b = b.split(' ')\r\n print(b)\r\n x, y, w, h = int(b[1]), int(b[2]), int(b[3]), int(b[4]) # The indices of boxes 1,2,3,4 represents x,y,w,h of a character\r\n cv2.rectangle(img, (x,hImg- y), (w,hImg- h), (5, 250, 255), 2)\r\n cv2.putText(img,b[0],(x,hImg- y+25),cv2.FONT_HERSHEY_SIMPLEX,1,(250,50,255),2)\r\n\r\n\r\n##############################################\r\n##### Detecting Words ######\r\n##############################################\r\n# boxes = pytesseract.image_to_data(img)\r\n# print(boxes)\r\n# for a,b in enumerate(boxes.splitlines()):\r\n# print(b)\r\n# if a!=0:\r\n# b = b.split()\r\n# if len(b)==12:\r\n# x,y,w,h = int(b[6]),int(b[7]),int(b[8]),int(b[9]) # The indices of boxes 6,7,8,9 represents x,y,w,h of a word\r\n# cv2.putText(img,b[11],(x,y-5),cv2.FONT_HERSHEY_SIMPLEX,1,(0,255,255),2)\r\n# 
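The stats collector's bare `except:` would also swallow `KeyboardInterrupt`; a sketch of the same recursive walk with an `Exception`-scoped handler, where `parse_file` is a stand-in for `gputils.getSongStatistics`:

# Sketch: recursive glob walk that keeps Ctrl-C usable while still
# counting files that fail to parse.
import glob
import os

def collect(gp_dir, parse_file):
    stats, corrupt = {}, 0
    for filename in glob.iglob(os.path.join(gp_dir, '**', '*.gp*'), recursive=True):
        if not os.path.isfile(filename):
            continue
        try:
            stat = parse_file(filename)
        except Exception:  # a bare `except:` would also trap KeyboardInterrupt
            corrupt += 1
            continue
        if stat is not None:
            stats[os.path.relpath(filename, gp_dir)] = stat
    return stats, corrupt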
cv2.rectangle(img, (x,y), (x+w, y+h), (50, 100, 255), 2)\r\n\r\n\r\n\r\ncv2.imshow(\"Result\", img)\r\ncv2.waitKey(0)","repo_name":"AasaiAlangaram/Optical-charactrer-recognition-with-Tesseract-and-Python","sub_path":"Codes/Text_detection.py","file_name":"Text_detection.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"} +{"seq_id":"7588424629","text":"from numpy import exp, sqrt, ones_like, pi\nfrom scipy.special import erf, erfc\n\n\ndef step(x):\n # nice name, but renaming to step would make many things shorter\n output = ones_like(x)\n output[x < 0] = 0\n # output[x >= 0] = 1\n return output\n\n\ndef expDecay(x, tau, A):\n return A * step(x) * (exp(-x / tau) - 1) + 1\n\n\ndef sglExpDecay(x, mu, tau, A, B):\n return A - (1 - B * exp(-(x - mu) / tau)) * step(mu - x)\n\n\ndef expDecayOffs(x, tau, A, c):\n return A * step(x) * (exp(-x / tau) - 1) + 1 + c\n\n\ndef doubleExpDecay(x, tau1, tau2, A, B):\n model1 = A * step(x) * (exp(-x / tau1) - 1)\n model2 = B * step(x) * (exp(-x / tau2) - 1)\n return model1 + model2 + 1\n\n\ndef doubleExpDecay_II(x, tau1, tau2, A, B, mu, c):\n return (\n A * step(x - mu) * (exp(-(x - mu) / tau1) - 1)\n + B * step(x - mu) * (exp(-(x - mu) / tau2) - 1)\n + c\n )\n\n\ndef doubleDecay(x, mu, tau1, tau2, A, q):\n B = (1 - q) * A\n A = q * A\n return doubleExpDecay(x - mu, tau1, tau2, A, B)\n\n\ndef doubleDecay2(x, mu, tau1, tau2, A, B):\n return doubleExpDecay(x - mu, tau1, tau2, A, B)\n\n\ndef expConvGauss(x, tau, A, sig):\n model1 = (\n exp(-x / tau)\n * exp(sig**2 / (2 * tau**2))\n * (erf((sig**2 - x * tau) / (sqrt(2) * sig * tau)) - 1)\n )\n return -A / 2 * (model1)\n\n\ndef expConvGaussNormalised(x, tau, sig):\n exp_enu = sig**2 - 2 * (x) * tau\n exp_den = 2 * tau**2\n model1 = exp(exp_enu / exp_den)\n erf_enu = sig**2 + (-x) * tau\n erf_den = sqrt(2) * sig * tau\n model2 = erf(erf_enu / erf_den) - 1\n return 1 / 2 * model1 * model2\n\n\ndef expConvGauss2(x, mu, tau, A, sig):\n model1 = (\n exp(-(x - mu) / tau)\n * exp(sig**2 / (2 * tau**2))\n * (erf((sig**2 - (x - mu) * tau) / (sqrt(2) * sig * tau)) - 1)\n )\n return -A / 2 * model1\n\n\ndef AHConvGauss(x, A, sig):\n model1 = erf(x / (sqrt(2) * sig))\n return -A / 2 * (model1 - 2 / A + 1)\n\n\ndef ABHConvGauss(x, A, B, sig):\n model1 = -A / 2 * erf(x / (sqrt(2) * sig))\n model2 = -B / 2 * erf(x / (sqrt(2) * sig))\n return model1 + model2 + 1 - A / 2 - B / 2\n\n\ndef expConvGaussApprox(x, tau, A, sig):\n C = sqrt(2 / pi) * exp(-(x**2) / (2 * sig**2))\n r = sig / tau\n k = (\n C / r - x * C / (sig * r**2) + C * (sig**2 - x**2) / (sig**2 * r**3)\n ) # +C*x*(3*sig**2-x**2)/(sig**3*r**4)-C*(3*sig**4-6*sig**2*x**2+x**4)/(sig**4*r**5)\n return -A / 2 * k\n\n\ndef doubleDecaySingleConv(x, t0, tau1, tau2, A, B, sig, c):\n C = -(A + B)\n model1 = -A * expConvGaussNormalised(x - t0, tau1, sig)\n model2 = -B * expConvGaussNormalised(x - t0, tau2, sig)\n model3 = +1 / 2 * C * erfc(-(x - t0) / (sqrt(2 * sig**2))) + 1\n return model1 + model2 + model3 + c\n\n\ndef doubleDecayDoubleConv(x, mu, tau1, tau2, A, q, alpha, sigS, sigH):\n # A = overall amplitude\n # q = tau1 fraction of A\n # alpha = slicing fraction\n\n B = (1 - q) * A\n A = q * A\n\n model1 = expConvGauss(x - mu, tau1, A, sigS)\n model2 = expConvGauss(x - mu, tau2, B, sigS)\n model3 = ABHConvGauss(x - mu, A, B, sigS)\n if sigH / tau1 < 5:\n model4 = expConvGauss(x - mu, tau1, A, sigH)\n else:\n model4 = expConvGaussApprox(x - mu, tau1, A, sigH)\n\n if sigH / tau2 < 5:\n 
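The commented-out word-detection block above splits `image_to_data`'s raw text by hand; a sketch using pytesseract's structured dictionary output instead (note that `image_to_data` reports top-left-origin coordinates, unlike `image_to_boxes`, so no `hImg - y` flip is needed):

# Sketch: word boxes via the structured output type.
import cv2
import pytesseract

img = cv2.imread('sample_1.png')
data = pytesseract.image_to_data(img, output_type=pytesseract.Output.DICT)
for i, word in enumerate(data['text']):
    if word.strip():
        x, y, w, h = data['left'][i], data['top'][i], data['width'][i], data['height'][i]
        cv2.rectangle(img, (x, y), (x + w, y + h), (50, 100, 255), 2)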
model5 = expConvGauss(x - mu, tau2, B, sigH)\n else:\n model5 = expConvGaussApprox(x - mu, tau2, B, sigH)\n\n model6 = ABHConvGauss(x - mu, A, B, sigH)\n\n return alpha * (model1 + model2 + model3) + (1 - alpha) * (model4 + model5 + model6)\n\n\ndef doubleDecayDoubleConv2(x, mu, tau1, tau2, A, B, alpha, sigS, sigH):\n model1 = expConvGauss(x - mu, tau1, A, sigS)\n model2 = expConvGauss(x - mu, tau2, B, sigS)\n model3 = ABHConvGauss(x - mu, A, B, sigS)\n\n if alpha == 1:\n return model1 + model2 + model3\n\n else:\n if sigH / tau1 < 5:\n model4 = expConvGauss(x - mu, tau1, A, sigH)\n else:\n model4 = expConvGaussApprox(x - mu, tau1, A, sigH)\n\n if sigH / tau2 < 5:\n model5 = expConvGauss(x - mu, tau2, B, sigH)\n else:\n model5 = expConvGaussApprox(x - mu, tau2, B, sigH)\n\n model6 = ABHConvGauss(x - mu, A, B, sigH)\n\n return alpha * (model1 + model2 + model3) + (1 - alpha) * (\n model4 + model5 + model6\n )\n\n\ndef doubleDecayConvScale(x, mu, tau1, tau2, A, q, alpha, sigS, sigH, I0):\n B = (1 - q) * A\n A = q * A\n\n model1 = expConvGauss(x - mu, tau1, A, sigS)\n model2 = expConvGauss(x - mu, tau2, B, sigS)\n model3 = ABHConvGauss(x - mu, A, B, sigS)\n if sigH / tau1 < 5:\n model4 = expConvGauss(x - mu, tau1, A, sigH)\n else:\n model4 = expConvGaussApprox(x - mu, tau1, A, sigH)\n\n if sigH / tau2 < 5:\n model5 = expConvGauss(x - mu, tau2, B, sigH)\n else:\n model5 = expConvGaussApprox(x - mu, tau2, B, sigH)\n\n model6 = ABHConvGauss(x - mu, A, B, sigH)\n\n return I0 * (\n alpha * (model1 + model2 + model3) + (1 - alpha) * (model4 + model5 + model6)\n )\n\n\ndef DecayConv(x, mu, tau, A, sig):\n model1 = expConvGauss(x - mu, tau, A, sig)\n\n model3 = AHConvGauss(x - mu, A, sig)\n\n return model1 + model3\n\n\ndef doubleDecayConvSqrd(x, mu, tau1, tau2, A, q, alpha, sigS, sigH):\n B = (1 - q) * A\n A = q * A\n\n model1 = expConvGauss(x - mu, tau1, A, sigS)\n model2 = expConvGauss(x - mu, tau2, B, sigS)\n model3 = ABHConvGauss(x - mu, A, B, sigS)\n if sigH / tau1 < 5:\n model4 = expConvGauss(x - mu, tau1, A, sigH)\n else:\n model4 = expConvGaussApprox(x - mu, tau1, A, sigH)\n\n if sigH / tau2 < 5:\n model5 = expConvGauss(x - mu, tau2, B, sigH)\n else:\n model5 = expConvGaussApprox(x - mu, tau2, B, sigH)\n\n model6 = ABHConvGauss(x - mu, A, B, sigH)\n\n return (\n alpha * (model1 + model2 + model3) + (1 - alpha) * (model4 + model5 + model6)\n ) ** 2\n","repo_name":"EmCeBeh/ultrafastFitFunctions","sub_path":"ultrafastFitFunctions/dynamics.py","file_name":"dynamics.py","file_ext":"py","file_size_in_byte":5840,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"} +{"seq_id":"6113137090","text":"from turtle import Turtle\nimport random\nfrom colors import color_list\n\ntimmy = Turtle()\n\n\ndef one_line():\n for _ in range(9):\n timmy.pencolor(random.choice(color_list))\n timmy.dot(20)\n timmy.penup()\n timmy.forward(50)\n timmy.pendown()\n timmy.dot(20)\n\n\ndef reset_left():\n timmy.penup()\n timmy.left(90)\n timmy.forward(52)\n timmy.left(90)\n timmy.forward(450)\n timmy.right(180)\n\n\n\n","repo_name":"tasiny/turtlegui","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"3040573362","text":"import scrapy\n\n\nclass QuotesSpider(scrapy.Spider):\n name = \"quotes\"\n\n def start_requests(self):\n urls = [\n 'https://natmatch.com/dentres/directory/aegd.html'\n ]\n for url in urls:\n yield 
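These model functions are shaped for least-squares fitting; a minimal sketch of fitting `expDecay` (as defined above) to synthetic data with scipy, where the noise level and initial guess are arbitrary assumptions:

# Sketch: curve_fit against expDecay from this module.
import numpy as np
from scipy.optimize import curve_fit

rng = np.random.default_rng(0)
x = np.linspace(-2, 10, 200)
y = expDecay(x, tau=2.0, A=0.6) + rng.normal(0, 0.01, x.size)

popt, pcov = curve_fit(expDecay, x, y, p0=[1.0, 0.5])  # [tau, A]
perr = np.sqrt(np.diag(pcov))                          # 1-sigma errors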
scrapy.Request(url=url, callback=self.parse)\n\n def parse(self, response):\n urls = response.css(\"td a::attr(href)\").get()\n self.logger.info(urls)\n","repo_name":"eliasingea/school-search-crawl","sub_path":"tutorial/tutorial/spiders/quotes_spider.py","file_name":"quotes_spider.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"12898169816","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n################################################################################\n#\n# package-manager\n# (c) 2015 Fabian Thüring\n#\n# This file is distributed under the MIT Open Source License. See \n# LICENSE.TXT for details.\n#\n################################################################################\n\nfrom packagemanager import __VERSION__\nfrom . import error\n\nimport os\nfrom time import gmtime, strftime\n\nfrom yaml import load, dump\ntry:\n from yaml import CLoader as Loader, CDumper as Dumper\nexcept ImportError:\n from yaml import Loader, Dumper\n\nclass MetaFileParser:\n \"\"\" Parse the meta-file \"\"\"\n def __init__(self):\n HOME = os.environ['HOME']\n \n if HOME == None:\n error.fatal(\"environment variable $HOME is not set\")\n \n self._metaDirName = HOME+'/.package-manager/'\n self._metaFileName = HOME+'/.package-manager/install.yml'\n \n if os.path.isdir(self._metaDirName):\n self._metaDirIsPresent = True\n \n if os.path.isfile(self._metaFileName):\n metaFile = file(self._metaFileName, 'r')\n self._installedPackages = load(metaFile, Loader=Loader)\n metaFile.close()\n\n if self._installedPackages is None:\n self._installedPackages = dict()\n\n def add(self, packageName):\n \"\"\" Add a package to the meta-file index \"\"\"\n if packageName in self._installedPackages:\n raise RuntimeError\n else: \n self._installedPackages[packageName] = \\\n [{'date': strftime(\"%d-%m-%Y %H:%M:%S\")}]\n\n def remove(self, packageName):\n \"\"\" Remove a package from the meta-file index \"\"\"\n if not packageName in self._installedPackages:\n raise RuntimeError\n else: \n del self._installedPackages[packageName]\n\n def isInstalled(self, packageName):\n \"\"\" Check if package is already installed \"\"\"\n return True if packageName in self._installedPackages else False\n \n def getInstallDate(self, packageName):\n \"\"\" Get the install date of the package \"\"\"\n if self.isInstalled(packageName):\n return self._installedPackages[packageName][0]['date']\n else:\n return \"\"\n \n def commitUpdate(self):\n \"\"\" Update meta-file \"\"\"\n if not self._metaDirIsPresent:\n os.mkdir(self._metaDirName)\n \n metaFile = file(self._metaFileName, 'w')\n metaFile.write(\"# package-manager (\"+__VERSION__+\") - \"+\\\n strftime(\"%a, %d %b %Y %H:%M:%S\\n\"))\n if bool(self._installedPackages):\n metaFile.write(dump(self._installedPackages, Dumper=Dumper, \n default_flow_style=False))\n metaFile.close()\n\n \"\"\" Private attributes \"\"\"\n _metaFileName = \"\"\n _metaDirName = \"\"\n _metaDirIsPresent = False\n _installedPackages = {}\n","repo_name":"thfabian/package-manager","sub_path":"packagemanager/meta_parser.py","file_name":"meta_parser.py","file_ext":"py","file_size_in_byte":3013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"10673709443","text":"#!/usr/bin/env python3\n\nimport sys\nsys.path.append(\"src\")\nfrom utils import CATEGORIES, read_json\nimport numpy as np\nimport fig_utils\nimport matplotlib.pyplot as 
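The meta-file parser above uses the Python 2 `file()` builtin and the full Loader/Dumper pair; a sketch of the same read-modify-write cycle with `safe_load`/`safe_dump` and context managers:

# Sketch: YAML round-trip with the safe API; the path argument is
# whatever install.yml location the caller chooses.
import yaml

def add_package(meta_path, name, date):
    try:
        with open(meta_path) as fh:
            installed = yaml.safe_load(fh) or {}
    except FileNotFoundError:
        installed = {}
    installed[name] = [{'date': date}]
    with open(meta_path, 'w') as fh:
        yaml.safe_dump(installed, fh, default_flow_style=False)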
plt\nimport matplotlib as mpl\nimport sacrebleu\nimport evaluate\nimport tqdm\n\nmetric_bleu = sacrebleu.metrics.BLEU(effective_order=True)\nmetric_chrf = sacrebleu.metrics.CHRF()\nmetric_ter = sacrebleu.metrics.TER()\nmetric_meteor = evaluate.load(\"meteor\")\n\n\ndef get_all_metrics(source, orig, done):\n return {\n \"bleu\": metric_bleu.sentence_score(orig, [done]).score / 100,\n \"chrf\": metric_chrf.sentence_score(orig, [done]).score / 100,\n \"ter\": metric_ter.sentence_score(orig, [done]).score / 100,\n \"meteor\": metric_meteor.compute(predictions=[orig], references=[done])[\"meteor\"],\n }\n\n\ndef get_color(val):\n if val > 0.5:\n return \"black\"\n else:\n return \"white\"\n\n\ndata = read_json(\"data/parsed.json\")\nhits_inconsistency = 0\nhits_total = 0\ndata_lines = []\n\nfor doc in tqdm.tqdm(data):\n for line in doc[\"lines\"]:\n for system_name, translation in line[\"translations\"].items():\n # TODO: hotfix unfinished data\n if any([x is None for x in translation[\"rating\"].values()]):\n continue\n if translation[\"rating\"][\"overall\"] == 6 and any([x < 6 for x in translation[\"rating\"].values()]):\n hits_inconsistency += 1\n hits_total += 1\n\n translation[\"source\"] = line[\"source\"]\n data_lines.append(translation)\n\n# TODO DEBUG remove\n# data_lines = data_lines[:100]\n\nprint(\"Loading & computing COMET\")\nmetric_comet = evaluate.load(\"comet\", config_name=\"wmt21-cometinho-da\")\ncomet_scores = metric_comet.compute(\n sources=[x[\"source\"] for x in data_lines],\n predictions=[x[\"orig\"] for x in data_lines],\n references=[x[\"done\"] for x in data_lines],\n progress_bar=True,\n)\ndel metric_comet\n\nprint(\"Loading & computing BLEURT\")\nmetric_bleurt = evaluate.load(\"bleurt\", module_type=\"metric\")\nbleurt_scores = metric_bleurt.compute(\n predictions=[x[\"orig\"] for x in data_lines],\n references=[x[\"done\"] for x in data_lines],\n)\ndel metric_bleurt\n\nfor line, comet_score, bleurt_score in zip(tqdm.tqdm(data_lines), comet_scores[\"scores\"], bleurt_scores[\"scores\"]):\n line[\"metrics\"] = get_all_metrics(\n line[\"source\"], line[\"orig\"], line[\"done\"]\n )\n line[\"metrics\"][\"bleurt\"] = bleurt_score\n line[\"metrics\"][\"comet\"] = comet_score\n\nprint(\" \" * 11 + \"\".join([f\"{category:>15}\" for category in CATEGORIES]))\n\nMETRICS = list(data_lines[0][\"metrics\"].keys())\n\ncorrs = np.empty((len(METRICS), len(CATEGORIES)))\nfor metric_i, metric in enumerate(METRICS):\n data_1 = [x[\"metrics\"][metric] for x in data_lines]\n print(f\"{metric:>10}:\", end=\"\")\n for category_i, category in enumerate(CATEGORIES):\n data_2 = [x[\"rating\"][category] for x in data_lines]\n corr = np.corrcoef(data_1, data_2)[0, 1]\n corrs[metric_i, category_i] = corr\n print(f\"{corr:>15.2f}\", end=\"\")\n print()\n\n\nVMIN = 0.2\nVMAX = 0.7\n\n# plotting\nplt.figure(figsize=(5, 2.7))\nax = plt.gca()\ncmap = mpl.cm.get_cmap(\"inferno\").copy()\n# show absolute values\nplt.imshow(abs(corrs), cmap=cmap, vmin=VMIN, vmax=VMAX, aspect=0.7)\n\n# add doc-level trinagles and texts\nfor s_i, s in enumerate(METRICS):\n for a_i, a in enumerate(CATEGORIES):\n result = corrs[s_i, a_i]\n plt.text(\n a_i + 0.0, s_i + 0.1, f\"{result:.2f}\".replace(\"0.\", \".\").replace(\"-\", \"- \"),\n ha=\"center\", va=\"center\", color=get_color(abs(result)),\n )\n\n\nplt.colorbar(\n cmap=cmap, shrink=0.9, aspect=10, anchor=(0, 0.0),\n ticks=np.round(np.linspace(VMIN, VMAX, num=5), 2),\n)\nplt.yticks(\n range(len(METRICS)),\n [fig_utils.METRIC_PRETTY_NAME[x] for x in 
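The nested loop above fills `corrs` with one `np.corrcoef` call per metric/category pair; an equivalent sketch that stacks both blocks and slices the cross part of a single correlation matrix (`corrcoef` treats rows as variables):

# Sketch: vectorized version of the pairwise correlation loop,
# reusing data_lines, METRICS and CATEGORIES from this script.
import numpy as np

m = np.array([[x["metrics"][k] for x in data_lines] for k in METRICS])
r = np.array([[x["rating"][c] for x in data_lines] for c in CATEGORIES])
corrs = np.corrcoef(np.vstack([m, r]))[:len(METRICS), len(METRICS):]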
METRICS]\n)\nplt.xticks(\n list(range(len(CATEGORIES))),\n [(\"\\n\" if x_i % 2 else \"\") + x.title()\n for x_i, x in enumerate(CATEGORIES)],\n)\n\nplt.tight_layout(pad=0, rect=[0.02, -0.01, 1, 1])\nplt.savefig(\"figures/pe_score_correlation.pdf\")\nplt.show()\n","repo_name":"ufal/optimal-reference-translations","sub_path":"src/evaluation/pe_score_correlation.py","file_name":"pe_score_correlation.py","file_ext":"py","file_size_in_byte":4024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"14070977695","text":"\nimport re\n\nprint(\"\"\" *******************\n *******Welcome****** \n***********You are in the Madlib Game ********* \n***********We Hope You'r Ready To play With Us ******\n###########################################################\nThis game is tack some words from you like none , verb ,adjective so on ........ \n\"\"\")\n\ndef read_template(path):\n \"\"\" function how read file from the url\n and return what that file have \n \"\"\" \n a=open(path)\n return a.read().strip(\"\\n\")\n\n# this function tack a text as a parameter\ndef parse_template(text):\n actual_stripped=''\n actual_parts=[]\n #split the text to the char\n x=text.split(' ')\n \n # made regex to check to check on {something} or {something}.\n reg=r\"^{\\w+}|\\.$\"\n # loop over the x and x is the array have the text after split it \n for i in x:\n # check if the regex match it \n if re.match(reg,i)==None :\n # assign what not matches in actual_stripped\n actual_stripped+=f\"{i} \"\n # other wise have two cases \n else :\n # first case is check in this last item\n if i==x[-1]:\n actual_stripped+='{}.'\n actual_parts+=[i[1:-2]]\n # second case is check in this not last item \n else:\n actual_parts+=[i[1:-1]]\n actual_stripped+='{} '\n # convarert actual_parts from array to tuple then return it and return actual_stripped as a string\n actual_parts=tuple(actual_parts)\n return (actual_stripped,actual_parts)\n \n# this function is revers what parse_template do \ndef merge(text,tep):\n return text.format(*tep)\n\n# this write inside my file that founded assets/make_me_a_video_game_output_from_user.txt\ndef create_file(result ,file_to_write_on_it):\n with open(file_to_write_on_it, \"w\") as f:\n f.write(result)\n #dark_and_stormy_night_template\n\"\"\" this function that make interact with user \nthat tack to parameter one for read file to game and onther to write and save output of game \n\"\"\"\ndef start_game(file_toRead_game,file_toWrite_game):\n text = read_template(file_toRead_game)\n stripped_text, parts_tuple = parse_template(text)\n user_input = []\n \n for i in range(len(parts_tuple)):\n x = input('enter a {} > '.format(parts_tuple[i]))\n user_input.append(x)\n result = stripped_text.format(*user_input)\n print(f\"this is the story you wrote it \\n{result}\")\n create_file(result,file_toWrite_game)\n\n\n\n \n \nif __name__==\"__main__\":\n start_game(\"assets/madlib_game_file.txt\",\"assets/madlib_game_file_output.txt\")\n","repo_name":"MoradAlkhatib/madlib-cli","sub_path":"madlib_cli/madlib.py","file_name":"madlib.py","file_ext":"py","file_size_in_byte":2627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"12756162030","text":"from fastapi import FastAPI\nfrom .routes import main_router\n\n\napp = FastAPI(\n title=\"PAMPS\",\n version=\"0.1.0\",\n description=\"PAMPS is a Python API simple blog 
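`parse_template` above tokenizes on spaces and regex-matches each token individually; a sketch of the same contract written directly with `re.findall`/`re.sub` over `{word}` placeholders:

# Sketch: placeholder extraction in two regex passes.
import re

def parse_template(text):
    parts = tuple(re.findall(r"\{(\w+)\}", text))
    stripped = re.sub(r"\{\w+\}", "{}", text)
    return stripped, parts

stripped, parts = parse_template("I {verb} a {noun}.")
# stripped == "I {} a {}.",  parts == ("verb", "noun")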
system\",\n)\napp.include_router(main_router)\n","repo_name":"smrenato/fastapi-backend","sub_path":"pamps/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"40952828265","text":"from flask import Blueprint, render_template, abort\nfrom models.vehicle import Vehicle, VehicleSchema\nfrom db import db\n\nvehicle_page = Blueprint('vehicle_page', __name__)\n\n@vehicle_page.route('/vehicles/', methods=['GET'])\ndef show_vehicle(id):\n target_vehicle = Vehicle.query.get(id)\n return VehicleSchema().dump(target_vehicle)\n \n@vehicle_page.route('/vehicles', methods=['GET'])\ndef list_vehicles():\n vehicles_schema = VehicleSchema(many=True)\n vehicles = Vehicle.query.all()\n return vehicles_schema.dumps(vehicles)\n\n\n@vehicle_page.route('/swap/', methods=['PATCH'])\n# move the swap logic onto the Vehicle model. it will be called here, and within the shift logic\ndef swap_battery(id):\n target_vehicle = Vehicle.query.get(id)\n target_vehicle.battery_level = 100.0\n db.session.commit()\n return VehicleSchema().dump(target_vehicle)\n\n@vehicle_page.route('/vehicles//shift/', methods=['PATCH'])\ndef add_to_shift(id, shift_id):\n target_vehicle = Vehicle.query.get(id)\n target_vehicle.shift_id = shift_id\n \n db.session.commit()\n return VehicleSchema().dump(target_vehicle)","repo_name":"nicolaseschneider/Revel_Take_Home","sub_path":"flask/routes/vehicle_routes.py","file_name":"vehicle_routes.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"31950076145","text":"\n\nfrom simulation import Simulation, Location, AbstractPerson, roll\nfrom simulation_abcs import PersonState\nfrom collections import namedtuple\nfrom enum import Enum\nfrom dataclasses import dataclass, field\nfrom datetime import datetime\nfrom random import gauss, choices, triangular\nfrom typing import Tuple, List, Optional\nfrom types import SimpleNamespace\nimport csv\nimport matplotlib.pyplot as plt \nfrom matplotlib.animation import FuncAnimation\nimport numpy as np\n\n\nMETERS_PER_DEGREE = 1852*60\nAGE_RANGES=[\n ( 0, 5),\n ( 5, 10),\n (10, 15),\n (15, 20),\n (20, 25),\n (25, 30),\n (30, 35),\n (35, 40),\n (40, 45),\n (45, 50),\n (50, 55),\n (55, 60),\n (60, 65),\n (65, 70),\n (70, 75),\n (75, 80),\n (80, 85),\n (85, 90),\n]\n\n@dataclass\nclass UnivCentroid:\n lat: float\n lon: float\n num: int\n area: float\n\n\n@dataclass\nclass Centroid:\n loc: Location\n num: int\n area: float\n age_range_totals: List[int]\n\ndef _process_row(line, headers):\n def _process_pair(pair):\n key, val = pair\n if any(key.endswith(x) for x in (\"_KM\", \"_X\", \"_Y\", \"_DS\")):\n val = float(val)\n else:\n try:\n val = int(val)\n except ValueError:\n pass\n return (key, val)\n return map(_process_pair, zip(headers, line))\n\n\ndef remap(old, old_min, old_max, new_min, new_max):\n return (((old - old_min) * (new_max - new_min)) / (old_max - old_min)) + new_min\n\n\n@dataclass\nclass NasaSimulation(Simulation):\n filename: str = \"CharlottesvillePopulationData.csv\"\n people_per_home_mean: float = 3\n people_per_home_stddev: float = 1\n campus_centroids: List[UnivCentroid] = field(default_factory=list)\n university_centroid: Tuple[float, float] = (0,0)\n grocery_coords: List[Tuple[float, float]] = field(default_factory=list)\n\n num_students_off_campus: int = 0\n include_students: bool = True\n\n num_people_fraction: float 
= 1\n\n def init(self):\n assert self.filename, \"Must supply population data csv filename\"\n super().init()\n with open(self.filename) as file:\n reader = csv.reader(file)\n headers = next(reader)\n rows = [\n SimpleNamespace(**dict(_process_row(line, headers))) \n for line in reader\n ]\n max_x = max(row.CENTROID_X for row in rows)\n min_x = min(row.CENTROID_X for row in rows)\n max_y = max(row.CENTROID_Y for row in rows)\n min_y = min(row.CENTROID_Y for row in rows)\n print(f\"({max_y}, {max_x}), ({min_y}, {min_x}) \")\n self.width = (max_y - min_y) * METERS_PER_DEGREE\n\n # garauntee square\n # if max_x - min_x > max_y - min_y: COMMENTED OUT TO MATCH CVILLE SCREENSHOT\n # #min_y = max_y - (max_x - min_x)\n # max_y = min_y + max_x - min_x\n # else:\n # #min_x = max_x - (max_y - min_y)\n # max_x = min_x + max_y - min_y\n \n centroids = self.controids_from_rows(rows, min_x, max_x, min_y, max_y)\n if self.include_students:\n centroids.extend(self.convert_univ_centroids(self.campus_centroids, min_x, max_x, min_y, max_y)) \n\n \n\n for g in self.grocery_coords:\n loc = Location(\n remap(g[1], min_x, max_x, 0, self.width),\n remap(g[0], min_y, max_y, 0, self.width),\n )\n self.groceries.append(self.Grocery.init(loc))\n \n for centroid in centroids:\n while centroid.num > 0:\n self.Home.init(centroid)\n\n if not self.include_students:\n num_to_remove = round(self.num_students_off_campus * self.num_people_fraction)\n univ_center = Location(\n remap(self.university_centroid[1], min_x, max_x, 0, self.width),\n remap(self.university_centroid[0], min_y, max_y, 0, self.width),\n )\n def student_likelyhood(p):\n if p.age < 18 or p.age > 22:\n return 0 # verrry unlikely\n return 1/univ_center.distance(p.location) # less dist -> more likely\n\n self.people.sort(key=student_likelyhood)\n for _ in self.tqdm(range(num_to_remove)):\n #print(f\"removing student of age {self.people[-1].age}\")\n self.people.pop()\n\n\n def remove_off_campus(self):\n for centroid in sorted(centroids):\n pass\n\n def convert_univ_centroids(self, univ_centroids, min_x, max_x, min_y, max_y):\n centroids = []\n for uc in univ_centroids:\n centroids.append(\n Centroid(\n Location(\n remap(uc.lon, min_x, max_x, 0, self.width),\n remap(uc.lat, min_y, max_y, 0, self.width),\n ),\n round(uc.num * self.num_people_fraction),\n uc.area,\n age_range_totals=[0, 0, 0, 1, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,],\n )\n )\n return centroids\n\n def controids_from_rows(self, rows, min_x, max_x, min_y, max_y):\n centroids = []\n for row in rows:\n if row.UN_2020_E:\n centroids.append(\n Centroid(\n Location(\n remap(row.CENTROID_X, min_x, max_x, 0, self.width),\n remap(row.CENTROID_Y, min_y, max_y, 0, self.width),\n ),\n round(row.UN_2020_E * self.num_people_fraction),\n row.LAND_A_KM*2000,\n age_range_totals=[\n row.A00_04B ,\n row.A05_09B ,\n row.A10_14B ,\n row.A15_19B ,\n row.A20_24B ,\n row.A25_29B ,\n row.A30_34B ,\n row.A35_39B ,\n row.A40_44B ,\n row.A45_49B ,\n row.A50_54B ,\n row.A55_59B ,\n row.A60_64B ,\n row.A65_69B ,\n row.A70_74B ,\n row.A75_79B ,\n row.A80_84B ,\n row.A85PLUSB,\n ],\n )\n )\n return centroids\n\n def get_xs_ys_cs(self):\n xs, ys, cs = super().get_xs_ys_cs()\n def p_to_c(p):\n if p.state == PersonState.ASYMPT:\n return (1, 0, 0)\n if p.state == PersonState.SICK:\n return (1, .4, 0)\n if p.state == PersonState.REMOVED:\n if p.dead:\n return (0, 0, 0)\n else:\n return (.4,.4,.4)\n# return ()\n return (0, 1-p.age/90, p.age/90)\n return xs, ys, list(map(p_to_c, self.visible_people))\n\n\n @dataclass\n class 
Person(AbstractPerson):\n age: int\n dead: bool = False\n\n def update(self):\n state = self.state\n super().update()\n dead = self.dead\n if state != PersonState.REMOVED and self.state == PersonState.REMOVED:\n if self.age >= 80:\n if roll(.148):\n self.dead = True\n elif self.age >= 70:\n if roll(.08):\n self.dead = True\n elif self.age >= 60:\n if roll(.036):\n self.dead = True\n elif self.age >= 50:\n if roll(0.013):\n self.dead = True\n elif self.age >= 40:\n if roll(0.004):\n self.dead = True\n else:\n if roll(0.002):\n self.dead = True\n if not dead and self.dead:\n print(\"DEATH\")\n\n \n\n @classmethod\n def init(cls, home, centroid):\n grocery_frequency = gauss(\n cls.simulation.grocery_frequency_mean, \n cls.simulation.grocery_frequency_stddev\n )\n distancing_factor = triangular(0, 1, 0)**10\n closest_grocery = min(\n cls.simulation.groceries, \n key=home.distance,\n )\n age_range = choices(AGE_RANGES, centroid.age_range_totals)[0]\n age = choices(list(range(*age_range)))[0]\n self = cls(\n age=age,\n home=home, \n location=home, \n state=PersonState.HEALTHY, \n grocery_frequency=grocery_frequency,\n distancing_factor=distancing_factor,\n closest_grocery=closest_grocery,\n )\n self.simulation.people.append(self)\n return self\n\n\n @dataclass(unsafe_hash=True)\n class Home(Location):\n\n @classmethod\n def init(cls, centroid):\n num_people = round(gauss(\n cls.simulation.people_per_home_mean, \n cls.simulation.people_per_home_stddev,\n ))\n num_people = min(num_people, centroid.num)\n centroid.num -= num_people\n noise = Location(gauss(0, centroid.area), gauss(0, centroid.area))\n loc = centroid.loc + noise\n self = cls(loc.x, loc.y)\n self.simulation.bound_loc(self)\n self.simulation.homes.append(self)\n for _ in range(num_people):\n self.simulation.Person.init(self, centroid)\n\n\n @dataclass(unsafe_hash=True)\n class Grocery(Location):\n @classmethod\n def init(cls, loc):\n self = cls(x=loc.x, y=loc.y)\n return self\n\n\n\n\n","repo_name":"max7patek/infection-simulator","sub_path":"nasa_simulation.py","file_name":"nasa_simulation.py","file_ext":"py","file_size_in_byte":9967,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"} +{"seq_id":"33251091952","text":"import math\n\ndef polysum(n, s): \n \"\"\"\n\n :param n: number of sides of the polygon\n :param s: length of each side\n :return: the area of the polygon, plus the square of the perimeter.\n This value must be rounded to 4 decimal places.\n \"\"\"\n area = (0.25 * n * s**2) / (math.tan(math.pi / n))\n perimeterSquared = (n * s) ** 2\n return round(area + perimeterSquared, 4)\n","repo_name":"dan47bennett/MIT-6.00.1x","sub_path":"week-2/week 2 optional.py","file_name":"week 2 optional.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"69908091427","text":"import evaluate\nimport nltk\nimport numpy as np\nfrom typing import List, Tuple\nfrom nltk.tokenize import sent_tokenize\nfrom datasets import Dataset, concatenate_datasets\nfrom huggingface_hub import HfFolder\nfrom data_loader import load_dataset\nfrom transformers import (\n AutoTokenizer,\n AutoModelForSeq2SeqLM,\n DataCollatorForSeq2Seq,\n Seq2SeqTrainer,\n Seq2SeqTrainingArguments\n)\n\nMODEL_ID = \"google/flan-t5-base\"\nREPOSITORY_ID = f\"{MODEL_ID.split('/')[1]}-ecommerce-text-classification\"\n\n# Load dataset\ndataset = load_dataset()\n\n# Load tokenizer of FLAN-t5\ntokenizer = 
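`Person.init` above draws ages in two stages, first a census-weighted 5-year band and then a year inside it; a reduced sketch of that pattern with hypothetical band counts:

# Sketch: two-stage weighted age draw; the totals are made-up numbers,
# standing in for a centroid's age_range_totals.
import random

AGE_RANGES = [(0, 5), (5, 10), (10, 15)]  # truncated for illustration
totals = [120, 95, 80]                    # hypothetical band counts

band = random.choices(AGE_RANGES, weights=totals)[0]
age = random.randrange(*band)             # uniform year within the band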
AutoTokenizer.from_pretrained(MODEL_ID)\n\n# Metric\nmetric = evaluate.load(\"f1\")\n\n# The maximum total input sequence length after tokenization.\n# Sequences longer than this will be truncated, sequences shorter will be padded.\ntokenized_inputs = concatenate_datasets([dataset[\"train\"], dataset[\"test\"]]).map(\n lambda x: tokenizer(x[\"text\"], truncation=True), batched=True, remove_columns=['text', 'label']\n)\nmax_source_length = max([len(x) for x in tokenized_inputs[\"input_ids\"]])\nprint(f\"Max source length: {max_source_length}\")\n\n# The maximum total sequence length for target text after tokenization.\n# Sequences longer than this will be truncated, sequences shorter will be padded.\"\ntokenized_targets = concatenate_datasets([dataset[\"train\"], dataset[\"test\"]]).map(\n lambda x: tokenizer(x[\"label\"], truncation=True), batched=True, remove_columns=['text', 'label']\n)\nmax_target_length = max([len(x) for x in tokenized_targets[\"input_ids\"]])\nprint(f\"Max target length: {max_target_length}\")\n\n# Define training args\ntraining_args = Seq2SeqTrainingArguments(\n output_dir=REPOSITORY_ID,\n per_device_train_batch_size=8,\n per_device_eval_batch_size=8,\n predict_with_generate=True,\n fp16=False, # Overflows with fp16\n learning_rate=3e-4,\n num_train_epochs=2,\n logging_dir=f\"{REPOSITORY_ID}/logs\", # logging & evaluation strategies\n logging_strategy=\"epoch\",\n evaluation_strategy=\"no\",\n save_strategy=\"epoch\",\n save_total_limit=2,\n load_best_model_at_end=False,\n report_to=\"tensorboard\",\n push_to_hub=True,\n hub_strategy=\"every_save\",\n hub_model_id=REPOSITORY_ID,\n hub_token=HfFolder.get_token(),\n)\n\n\ndef preprocess_function(sample: Dataset, padding: str = \"max_length\") -> dict:\n \"\"\" Preprocess the dataset. 
\"\"\"\n\n # add prefix to the input for t5\n inputs = [item for item in sample[\"text\"]]\n\n # tokenize inputs\n model_inputs = tokenizer(inputs, max_length=max_source_length, padding=padding, truncation=True)\n\n # Tokenize targets with the `text_target` keyword argument\n labels = tokenizer(text_target=sample[\"label\"], max_length=max_target_length, padding=padding, truncation=True)\n\n # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore\n # padding in the loss.\n if padding == \"max_length\":\n labels[\"input_ids\"] = [\n [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels[\"input_ids\"]\n ]\n\n model_inputs[\"labels\"] = labels[\"input_ids\"]\n return model_inputs\n\n\ndef postprocess_text(preds: List[str], labels: List[str]) -> Tuple[List[str], List[str]]:\n \"\"\" helper function to postprocess text\"\"\"\n preds = [pred.strip() for pred in preds]\n labels = [label.strip() for label in labels]\n\n # rougeLSum expects newline after each sentence\n preds = [\"\\n\".join(sent_tokenize(pred)) for pred in preds]\n labels = [\"\\n\".join(sent_tokenize(label)) for label in labels]\n\n return preds, labels\n\n\ndef compute_metrics(eval_preds):\n preds, labels = eval_preds\n if isinstance(preds, tuple):\n preds = preds[0]\n decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)\n # Replace -100 in the labels as we can't decode them.\n labels = np.where(labels != -100, labels, tokenizer.pad_token_id)\n decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)\n\n # Some simple post-processing\n decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)\n\n result = metric.compute(predictions=decoded_preds, references=decoded_labels, average='macro')\n result = {k: round(v * 100, 4) for k, v in result.items()}\n prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds]\n result[\"gen_len\"] = np.mean(prediction_lens)\n return result\n\n\ndef train() -> None:\n \"\"\" Train the model. 
\"\"\"\n\n tokenized_dataset = dataset.map(preprocess_function, batched=True, remove_columns=['text', 'label'])\n print(f\"Keys of tokenized dataset: {list(tokenized_dataset['train'].features)}\")\n\n # load model from the hub\n model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_ID)\n\n nltk.download(\"punkt\")\n\n # we want to ignore tokenizer pad token in the loss\n label_pad_token_id = -100\n # Data collator\n data_collator = DataCollatorForSeq2Seq(\n tokenizer,\n model=model,\n label_pad_token_id=label_pad_token_id,\n pad_to_multiple_of=8\n )\n\n # Create Trainer instance\n trainer = Seq2SeqTrainer(\n model=model,\n args=training_args,\n data_collator=data_collator,\n train_dataset=tokenized_dataset[\"train\"],\n eval_dataset=tokenized_dataset[\"test\"],\n compute_metrics=compute_metrics,\n )\n\n # TRAIN\n trainer.train()\n\n # SAVE\n tokenizer.save_pretrained(REPOSITORY_ID)\n trainer.create_model_card()\n trainer.push_to_hub()\n\n\nif __name__ == \"__main__\":\n train()\n","repo_name":"VanekPetr/flan-t5-text-classifier","sub_path":"flan-t5-finetuning.py","file_name":"flan-t5-finetuning.py","file_ext":"py","file_size_in_byte":5545,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"} +{"seq_id":"71269854627","text":"import sys\nimport VTKrenderwindow\nimport os\n\nfrom PyQt5.QtWidgets import (QApplication, QMainWindow)\n\n\nclass Window(QMainWindow):\n def __init__(self):\n super().__init__()\n\n self.w = None\n self.curdir = os.path.dirname(__file__)\n self.ct_path = os.path.join(self.curdir, 'data/volume-105.nhdr')\n self.stl_path = os.path.join(self.curdir, 'data/Liver.stl')\n self.title = \"VTK Render Window\"\n self.setStyleSheet(\"QMainWindow {background: 'yellow';}\")\n self.window2(self.ct_path, self.stl_path)\n\n def window2(self, ct_file, stl_file):\n print(\"window2\" + ct_file + stl_file)\n\n self.w = VTKrenderwindow.RenderWindow(ct_file, stl_file)\n self.w.showFullScreen()\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n window = Window()\n sys.exit(app.exec())\n","repo_name":"hengxuand/AppxCapstoneProject","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"22457353976","text":"#!usr/bin/env python\n# -*- coding:utf-8 -*-\n# author: sfhong2020 time:2020/3/31 10:12\n\n\ndef myPow(x,n):\n if n == 0: return 1\n if n == 1: return x\n if n < 0:\n n = -n\n x = 1/x\n res = 1\n while n:\n if n & 1: #是奇数得话或者 认为是右边为1的话(二进制)不为1就是0就不操作!\n res *= x\n x *= x\n n >>= 1\n return res\nprint(\"%.5f\" % myPow(2.00000, 10))","repo_name":"Stevenzzz1996/MLLCV","sub_path":"Leetcode/简单+剑指offer题/面试题16. 数值的整数次方.py","file_name":"面试题16. 
数值的整数次方.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"70"} +{"seq_id":"41047723396","text":"# -*- coding:utf-8 -*-\n# project_xxx\\venv\\Scripts python\n\n'''\nAuthor: Felix\nEmail: xiashubai@gmail.com\nBlog: https://blog.csdn.net/u011318077\nDate: 2019/12/2 11:23\nDesc:\n'''\n# Method 1:\n# Use the update method to merge dict b into dict a, similar to a list's append method\na = {\"A\": 1,\"B\": 2}\nb = {\"C\": 3,\"D\": 4}\na.update(b)\nprint(a)\n\n# Method 2:\n# Use dictionary unpacking\na = {\"A\": 1,\"B\": 2}\nb = {\"C\": 3,\"D\": 4}\nprint({**a})\nprint({**a,**b})\n\n\n\n","repo_name":"FelixZFB/Python_interview_199_questions","sub_path":"01_基础语法/5.3.字典合并_update_字典解包.py","file_name":"5.3.字典合并_update_字典解包.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"6543063734","text":"import sys\nimport bpy\n\nopen_file = \"\"\nsave_path = \"\"\nrelocate_directory = \"\"\nrelocate_images = False\n\nfor arg in sys.argv:\n    if arg.startswith(\"open_file:\"):\n        open_file = arg[10:]\n    if arg.startswith(\"save_path:\"):\n        save_path = arg[10:]\n    if arg.startswith(\"relocate_directory:\"):\n        relocate_directory = arg[19:] \n    if arg.startswith(\"relocate_images:\"):\n        relocate_images = arg[16:] == \"True\"\n    \nprint()\nprint(\"    open_file: \" + open_file)\nprint(\"    save_path: \" + save_path)\nprint(\"    relocate_directory: \" + relocate_directory)\nprint(\"    relocate_images: \" + str(relocate_images))\nprint()\n    \nbpy.ops.wm.open_mainfile(filepath = open_file)\n\nbpy.ops.preferences.addon_enable(module=\"Save As Helper\")\n\nif relocate_images:\n    bpy.ops.sah.relocate_images(directory = relocate_directory)\n\n# Run relocate link operator inside the current source link file\nbpy.ops.sah.relocate_links(directory = relocate_directory)\n\nbpy.ops.file.make_paths_relative()\n\nbpy.ops.wm.save_as_mainfile(filepath = save_path, copy=True)\n\n\n\n","repo_name":"RodrigoGama1902/Save-As-Helper","sub_path":"addon/operator/relocate_links/generator_deep_save_as.py","file_name":"generator_deep_save_as.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"34959709939","text":"# train a random forest regressor on data\n# save to pLC_model.pickle for inference\n\nimport utils\nimport pickle\n\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.model_selection import cross_val_score, GridSearchCV, train_test_split\nfrom matplotlib import pyplot as plt\n\n# Read Data:\nfile = '../../data/qspr-dataset-02.sdf'\nmolecules = utils.parse_sdf(file)\n\n# Drop duplicates (interestingly, this leads to a slightly worse RMSE (0.62->0.63))\nmolecules = molecules.drop_duplicates(subset=[\"SMILES\"])\n\n# Filter out inorganic molecules\norganic = molecules['SMILES'].apply(utils.is_organic)\nmolecules = molecules.drop(molecules[-organic].index)\n\n# Add hydrogens and 3D structure to all molecules\nmolecules[\"Molecule\"] = molecules[\"Molecule\"].apply(lambda x: utils.preprocess(x))\n\n# Set up descriptors with a Lasso coefficient > 0\nmordred_desc_frame = utils.calc_descriptors(molecules)\n\n# Divide features and response\ny = molecules[[\"pLC50\"]].values\nX = mordred_desc_frame\n\n# train and save\nrf_model = RandomForestRegressor(random_state=0, n_estimators=95, max_depth=12)\n\n# Hyperparameters were tuned with CrossValidation:\n# (Hint for future projects: DO NOT DO THIS! 
Hyperparameters were tuned on the test set as well!)\n# rf = RandomForestRegressor()\n# param_grid = {'max_depth': [9, 12, 15, 17, 19],\n# 'n_estimators': [5, 20, 60, 95, 100, 105]}\n# grid_clf = GridSearchCV(rf, param_grid, cv=10)\n# grid_clf.fit(X, y.ravel())\n# print(grid_clf.best_params_)\n# print(grid_clf.best_estimator_)\n\nprint(\"Quality measures for Random Forest Regressor Model, as calculated by 5-Fold Cross Validation: \\n\")\nr_squared_cv = cross_val_score(rf_model, X, y.ravel(), cv=5)\nprint(\"%0.2f R² with a standard deviation of %0.2f\" % (r_squared_cv.mean(), r_squared_cv.std()))\n\nmae_cv = cross_val_score(rf_model, X, y.ravel(), cv=5, scoring=\"neg_mean_absolute_error\") * -1\nprint(\"%0.2f mean absolute error with a standard deviation of %0.2f\" % (mae_cv.mean(), mae_cv.std()))\n\nmax_error_cv = cross_val_score(rf_model, X, y.ravel(), cv=5, scoring=\"max_error\")\nprint(\"%0.2f max error with a standard deviation of %0.2f\" % (max_error_cv.mean(), max_error_cv.std()))\n\nrmse_cs = cross_val_score(rf_model, X, y.ravel(), cv=5, scoring=\"neg_root_mean_squared_error\") * -1\nprint(\"%0.2f RMSE with a standard deviation of %0.2f\" % (rmse_cs.mean(), rmse_cs.std()))\n\n# plot predictions vs. actual values on a testing set\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)\nrf_model_train = RandomForestRegressor(random_state=0, n_estimators=95, max_depth=12)\nrf_model_train.fit(X_train, y_train.ravel())\n\npred_rf_test = rf_model_train.predict(X_test)\npred_rf_train = rf_model_train.predict(X_train)\nplt.xlim((-.5, 9))\nplt.ylim((-.5, 9))\nplt.xlabel(\"Observed pLC50\")\nplt.ylabel(\"Predicted pLC50\")\n\nplt.scatter(y_train[:, 0].astype(float), pred_rf_train, color=\"black\", alpha=0.8, label=\"train\")\nplt.scatter(y_test[:, 0].astype(float), pred_rf_test, color=\"dodgerblue\", alpha=0.8, label=\"test\", marker='^')\nplt.savefig(\"../../figures/random_forest_performance.png\")\nplt.clf()\n\n\n# Save model (use entire dataset for training, because we only have ~350 data points)\nrf_model.fit(X, y.ravel())\npickle.dump(rf_model, open('../../models/pLC_model.sav', 'wb'))\n","repo_name":"Cajac102/QSPR_Competition","sub_path":"src/models/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":3278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"26234216194","text":"import sys\n\nstudents = set([i for i in range(1, 31)])\nstudent = []\n\nfor i in range(28):\n student.append(int(sys.stdin.readline().rstrip()))\n\n\nanswer = list(students - set(student))\nanswer.sort()\n\nfor j in answer:\n print(j)\n","repo_name":"SESAC2023/jinho_han","sub_path":"Coding-Test/BaekJoon/단계별로 풀어보기/4.1차원 배열/5597_과제 안내신 분..?.py","file_name":"5597_과제 안내신 분..?.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"} +{"seq_id":"15368388903","text":"#!usr/bin/env python3\r\n# \r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.naive_bayes import GaussianNB\r\nfrom sklearn.svm import SVC,LinearSVC\r\nfrom sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier\r\n# Modelling helpers\r\nfrom sklearn.preprocessing import Imputer, Normalizer, scale\r\nfrom 
sklearn.model_selection import train_test_split, StratifiedKFold\r\nfrom sklearn.feature_selection import RFECV\r\n# Visualisation\r\nimport matplotlib as mpl\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.pylab as pylab\r\nimport seaborn as sns\r\n\r\n# %matplotlib inline\r\nmpl.style.use('ggplot')\r\nsns.set_style('white')\r\npylab.rcParams[ 'figure.figsize' ] = 8,6\r\n\r\n\r\n##============================ Define some analysis helpers ===============================##\r\n\r\ndef plot_histograms(df, variables, n_rows, n_cols):\r\n    fig = plt.figure(figsize = (16,12))\r\n    for i, var_name in enumerate(variables):\r\n        ax=fig.add_subplot(n_rows,n_cols,i+1)\r\n        df[var_name].hist(bins=10, ax=ax)\r\n        ax.set_title('Skew: ' + str(round(float(df[var_name].skew()),)))\r\n        ax.set_xticklabels([], visible=False)\r\n        ax.set_yticklabels([], visible=False)\r\n    fig.tight_layout()\r\n    plt.show()\r\n\r\ndef plot_distribution(df, var, target, **kwargs):\r\n    '''Distribution plot of target over the continuous attribute var'''\r\n    row = kwargs.get('row', None)\r\n    col = kwargs.get('col', None)\r\n    facet = sns.FacetGrid(df, hue=target, aspect=4, row=row, col=col)\r\n    facet.map(sns.kdeplot, var, shade=True)\r\n    facet.set(xlim=(0, df[var].max()))\r\n    facet.add_legend()\r\n\r\ndef plot_categories(df, cat, target, **kwargs):\r\n    '''Bar plot of target over the categorical attribute cat'''\r\n    row = kwargs.get('row',None)\r\n    col = kwargs.get('col',None)\r\n    facet = sns.FacetGrid(df, row=row, col=col)\r\n    facet.map(sns.barplot, cat, target)\r\n    facet.add_legend()\r\n\r\ndef plot_correlation_map(df):\r\n    '''Heatmap of correlations between attributes'''\r\n    corr = df.corr()\r\n    _,ax = plt.subplots(figsize=(12,10))\r\n    cmap = sns.diverging_palette(220, 10, as_cmap=True)\r\n    _ = sns.heatmap(corr, cmap=cmap, square=True, cbar_kws={'shrink':.9},\r\n                    ax=ax, annot=True, annot_kws={'fontsize':12})\r\n\r\ndef describe_more(df):\r\n    var=[]; l=[]; t=[]\r\n    for x in df:\r\n        var.append(x)\r\n        l.append(len(pd.value_counts(df[x])))\r\n        t.append(df[x].dtype)\r\n    levels = pd.DataFrame({'Variable':var, 'Levels':l, 'Datatype':t})\r\n    levels.sort_values(by='Levels', inplace=True)\r\n    return levels\r\n\r\ndef plot_variable_importance(X,y):\r\n    '''Use a decision tree to find the most important features'''\r\n    tree = DecisionTreeClassifier(random_state=99)\r\n    tree.fit(X,y)\r\n    plot_model_var_imp(tree,X,y)\r\n\r\ndef plot_model_var_imp(model,X,y):\r\n    imp = pd.DataFrame(model.feature_importances_, columns=['Importance'], index=X.columns)\r\n    imp = imp.sort_values(['Importance'], ascending=True)\r\n    print(model.score(X,y))\r\n\r\n##===========================================================================##\r\n# Load the data\r\ntrain = pd.read_csv('/home/gp/CODES/Kaggle/titanic/train.csv')\r\ntest = pd.read_csv('/home/gp/CODES/Kaggle/titanic/test.csv')\r\nfull = train.append(test, ignore_index = True)\r\ntitanic = full[:891]\r\ndel train,test\r\n\r\n# Explore the data\r\ntitanic.describe()\r\nplot_correlation_map(titanic)\r\nplot_distribution(titanic, var='Age', target='Survived', row='Sex')\r\nplot_distribution(titanic, var='Fare', target='Survived')\r\nplot_categories(titanic, cat='Embarked', target='Survived')\r\nplot_categories(titanic, cat='Sex', target='Survived')\r\nplot_categories(titanic, cat='Pclass', target='Survived')\r\nplot_categories(titanic, cat='SibSp', target='Survived')\r\nplot_categories(titanic, cat='Parch', target='Survived')\r\n# Encode the data\r\n# binary data: np.where; multi-valued data: get_dummies\r\nsex = pd.Series(np.where(full.Sex=='male', 1, 0), name='Sex')\r\nembarked = pd.get_dummies(full.Embarked, prefix='Embarked')\r\npclass = pd.get_dummies(full.Pclass, prefix='Pclass')\r\n# Handle missing values\r\nimputed = 
pd.DataFrame()\r\nimputed['Age'] = full.Age.fillna(full.Age.mean())\r\nimputed['Fare'] = full.Fare.fillna(full.Fare.mean())\r\n# Feature engineering\r\ntitle = pd.DataFrame()\r\ntitle['Title'] = full['Name'].map(lambda name: name.split(',')[1].split('.')[0].strip())\r\ntitle_Dictionary = {\r\n    \"Capt\": \"Officer\",\r\n    \"Col\": \"Officer\",\r\n    \"Major\": \"Officer\",\r\n    \"Jonkheer\": \"Royalty\",\r\n    \"Don\": \"Royalty\",\r\n    \"Sir\": \"Royalty\",\r\n    \"Dr\": \"Officer\",\r\n    \"Rev\": \"Officer\",\r\n    \"the Countess\": \"Royalty\",\r\n    \"Dona\": \"Royalty\",\r\n    \"Mme\": \"Mrs\",\r\n    \"Mlle\": \"Miss\",\r\n    \"Ms\": \"Mrs\",\r\n    \"Mrs\": \"Mrs\",\r\n    \"Miss\": \"Miss\",\r\n    \"Master\": \"Master\",\r\n    \"Lady\": \"Royalty\"}\r\ntitle['Title'] = title.Title.map(title_Dictionary)\r\ntitle = pd.get_dummies(title.Title)\r\ncabin = pd.DataFrame()\r\ncabin['Cabin'] = full.Cabin.fillna('U')\r\ncabin['Cabin'] = cabin['Cabin'].map(lambda c:c[0])\r\ncabin = pd.get_dummies(cabin['Cabin'], prefix='Cabin')\r\ndef cleanTicket(ticket):\r\n    ticket = ticket.replace('.',' ')\r\n    ticket = ticket.replace('/',' ')\r\n    ticket = ticket.split()\r\n    ticket = map(lambda t: t.strip(), ticket)\r\n    ticket = list(filter(lambda t: not t.isdigit(), ticket))\r\n    if len(ticket) > 0:\r\n        return ticket[0]\r\n    else:\r\n        return 'XXX'\r\nticket = pd.DataFrame()\r\nticket['Ticket'] = full['Ticket'].map(cleanTicket)\r\nticket = pd.get_dummies(ticket['Ticket'], prefix='Ticket')\r\nfamily = pd.DataFrame()\r\nfamily['FamilySize'] = full['Parch'] + full['SibSp'] + 1\r\nfamily['Family_Single'] = family['FamilySize'].map(lambda s: 1 if s == 1 else 0)\r\nfamily['Family_Small'] = family['FamilySize'].map(lambda s: 1 if 2 <= s <= 4 else 0)\r\nfamily['Family_Large'] = family['FamilySize'].map(lambda s: 1 if 5 <= s else 0)\r\n\r\n# Select features to build the samples and labels\r\nfull_X = pd.concat([imputed, embarked, cabin, sex], axis=1)\r\ntrain_valid_x = full_X[0:891]\r\ntrain_valid_y = titanic.Survived\r\ntest_X = full_X[891:]\r\ntrain_X, valid_X, train_Y, valid_Y = train_test_split(train_valid_x, train_valid_y, train_size=.7)\r\nprint (full_X.shape, train_X.shape, valid_X.shape, train_Y.shape, valid_Y.shape, test_X.shape)\r\n# Inspect the most important features\r\nplot_variable_importance(train_X, train_Y)\r\n\r\n# Choose a model and start training\r\nmodel = RandomForestClassifier(n_estimators=100)\r\nmodel = SVC()\r\nmodel = GradientBoostingClassifier()\r\nmodel = GaussianNB()\r\nmodel = LogisticRegression()\r\nmodel.fit(train_X, train_Y)\r\nprint (model.score(train_X, train_Y), model.score(valid_X, valid_Y))\r\n# plot_model_var_imp(model, train_X, train_Y) # only for decision tree models\r\n\r\ntest_Y = model.predict(test_X)\r\npassenger_id = full[891:].PassengerId\r\ntest = pd.DataFrame({'PassengerId': passenger_id, 'Survived': test_Y})\r\ntest.to_csv('/home/gp/CODES/Kaggle/titanic/submit_02.csv', index=False)\r\n","repo_name":"StriderStranger/GeistDenkmal","sub_path":"Kaggle/titanic/titanic_02.py","file_name":"titanic_02.py","file_ext":"py","file_size_in_byte":7272,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"} +{"seq_id":"31261653964","text":"import datetime as dt\nimport logging\nimport sys\n\nfrom isams_tools.connectors import isams_api\nfrom isams_tools.connectors.core import ConnectionManager\nfrom isams_tools.utils.isams_email import ISAMSEmail\nfrom settings import *\n\n# make sure this isn't called directly\nif __name__ == \"__main__\":\n    sys.stderr.write('Please use bin/isams_tools instead\\n')\n    sys.exit(1)\n\nlogger = logging.getLogger('register_reminder')\n\n\ndef 
send_tutor_emails(unregistered_students, stage):\n \"\"\"Prepares the email list and email templates in order to send them\n\n ::param unregistered_students: a list of ISAMSStudent objects\n :param stage: which stage email to send\n :return: None\n \"\"\"\n bcc = \"\"\n list_of_missing_registers = \"\"\n message = \"\"\n tutor_list = {}\n\n # create a unique list of tutors with unregistered students\n for student in unregistered_students:\n #if student.form.teacher not in tutor_list:\n try:\n tutor_list[student.form.teacher.id]\n except KeyError:\n # looks a bit silly but we need it later on\n student.form.teacher.form = student.form\n tutor_list[student.form.teacher.id] = student.form.teacher\n\n # compile the BCC list as well as the text for %list_of_missing_registers%\n i = 0\n for tutor_id in tutor_list:\n tutor = tutor_list[tutor_id]\n if tutor.email:\n bcc += tutor.email\n\n # don't put a comma for the last entry\n if i < len(tutor_list) - 1:\n bcc += \", \"\n\n list_of_missing_registers += \"{0}: {1} {2}\\n\".format(tutor.form.name, tutor.forename, tutor.surname)\n\n i += 1\n\n to = EMAIL['to']\n cc = None\n\n # TODO: this could be customisable\n if str(stage) == \"1\":\n message = FIRST_EMAIL\n elif str(stage) == \"2\":\n message = SECOND_EMAIL\n elif str(stage) == \"3\":\n message = FINAL_EMAIL\n to = FINAL_EMAIL_TO\n cc = EMAIL['cc']\n bcc = EMAIL['bcc']\n\n # if the template uses the variable, replace is with the list of teachers\n message = message.replace('%list_of_missing_registers%', list_of_missing_registers)\n\n if DEBUG:\n message += \"\\n\\nThis a debug email, the intented recipients were: \" + bcc\n message += \"\\n\\nStage \" + str(stage)\n logger.debug(\"BCC list before we bin it: \" + bcc)\n\n # if we're debugging, get rid of the BCC list, i.e. the intended teachers\n bcc = \"\"\n\n # create the email but don't send yet\n email = ISAMSEmail(EMAIL['subject'], message, to, EMAIL['from'], cc, bcc)\n\n if SEND_EMAILS:\n email.send()\n else:\n logger.debug(\"Email not sent as we're in debug mode\")\n\n\nclass RegisterReminder:\n \"\"\"A class to setup and execute a register reminder\"\"\"\n start_date = None\n end_date = None\n connection = None\n tree = None\n\n def __init__(self, start_date, end_date, stage):\n logger.info(\"RegisterReminder({0}. {1}, {2}\".format(start_date, end_date, stage))\n \"\"\"RegisterReminder constructor\n\n :param start_date: the start of the registration period, i.e. today, in the format YYYY-MM-DD\n :param end_date: the end of the registration period, i.e. 
tomorrow, in the format YYYY-MM-DD\n :param stage: which stage of the reminders to run (1-3) which determines which email template to use\n \"\"\"\n self.start_date = start_date\n self.end_date = end_date\n\n cm = ConnectionManager()\n self.connection = cm.connect()\n\n # compile a unique list of tutors with unregistered kids\n unregistered_students = self.connection.get_unregistered_students()\n \n # no point sending a blank email\n if len(unregistered_students) > 0:\n logger.info(\"{0} students unregistered, emailing\".format(str(len(unregistered_students))))\n # send those tutors an email to remind them\n send_tutor_emails(unregistered_students, stage)\n else:\n logger.info(\"No unregistered students, exiting\") \n #exit(0)\n\n\ndef run(stage=1):\n \"\"\"Creates and runs a RegisterReminder() instance after making a few sanity checks\n\n :param stage: the stage of reminder to run, 1 to 3 (default 1)\n :return: None\n \"\"\"\n # do some basic checks to see if we should be running\n logger.debug(\"run({0})\".format(stage))\n if ENABLED:\n today_dt = dt.datetime.today()\n tomorrow = (today_dt + dt.timedelta(days=1)).strftime('%Y-%m-%d')\n today = today_dt.strftime('%Y-%m-%d')\n\n if DEBUG:\n if DEBUG_START_DATE and DEBUG_END_DATE:\n RegisterReminder(DEBUG_START_DATE, DEBUG_END_DATE, stage)\n else:\n RegisterReminder(today, tomorrow, stage)\n else:\n if today in HOLIDAYS:\n logger.info(\"Today is a holiday, exiting\")\n sys.exit(0)\n\n if today_dt.weekday() not in WORKING_DAYS:\n logger.warning(\"Today is a weekend, you need to fix your cronjob\")\n sys.exit(1)\n\n RegisterReminder(today, tomorrow, stage)\n else:\n logger.critical(\"Not running: disabled in settings\")\n","repo_name":"cranleighschool/isams-tools","sub_path":"isams_tools/register_reminder/register_reminder.py","file_name":"register_reminder.py","file_ext":"py","file_size_in_byte":5158,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"70"} +{"seq_id":"8986716141","text":"import matplotlib.pyplot as plt\nimport os\n\nGERMAN = \"germancredit\"\nWINE = \"winequalitywhite\"\ndatasets = [WINE, GERMAN]\n\nBOOSTING = \"boosting\"\nDECISION_TREE_PRUNING = \"decisionTreeWithPruning\"\nDECISION_TREE_UNPRUNED = \"decisionTreeUnpruned\"\nKNN_DISTANCE = \"knnDistance\"\nKNN_UNIFORM = \"knnUniform\"\nNEURAL_NETWORK = \"neuralNetwork\"\nSVM_LINEAR = \"svmLinear\"\nSVM_RBF = \"svmRbf\"\nalgorithms = [BOOSTING, DECISION_TREE_PRUNING, DECISION_TREE_UNPRUNED, KNN_DISTANCE, KNN_UNIFORM, NEURAL_NETWORK, SVM_LINEAR, SVM_RBF]\n\ndef getTotalInstances(dataset):\n if dataset == GERMAN:\n return 1000\n else:\n return 4898\n\ndef getTitle(dataset, algorithm):\n title = \"\"\n if dataset == GERMAN:\n title = \"German Credit \"\n else:\n title = \"White Wine \"\n\n if algorithm == BOOSTING:\n title += \"Boosting \"\n elif algorithm == DECISION_TREE_PRUNING:\n title += \"Decision Tree Pruning \"\n elif algorithm == DECISION_TREE_UNPRUNED:\n title += \"Decision Tree Unpruned \"\n elif algorithm == KNN_DISTANCE:\n title += \"kNN Distance \"\n elif algorithm == KNN_UNIFORM:\n title += \"kNN Uniform \"\n elif algorithm == NEURAL_NETWORK:\n title += \"Neural Network \"\n elif algorithm == SVM_LINEAR:\n title += \"SVM Linear Kernel \"\n elif algorithm == SVM_RBF:\n title += \"SVM RBF Kernel \"\n else:\n title += algorithm\n\n title += \"Learning Curve\"\n return title\n\ndef graph(dataset, algorithm, scoreLabels, fileName=None, x=None, xLabel=\"Training Data Size\"):\n title = getTitle(dataset, algorithm)\n 
totalInstances = getTotalInstances(dataset)\n trainInstances = totalInstances * 7 / 10\n if x == None:\n x = []\n for split in [20, 40, 60, 80, 100]:\n trainSize = trainInstances * split / 100\n # print(\"split:\", split, \":\", trainSize)\n x.append(trainSize)\n # x = splits\n\n plt.figure()\n plt.title(title)\n for scores, label in scoreLabels:\n if label:\n plt.plot(x, scores, '-', label=label)\n else:\n plt.plot(x, scores, '-')\n plt.legend()\n plt.xlabel(xLabel)\n # plt.xlabel('k')\n # plt.ylabel('Average Accuracy Score (%)')\n plt.ylabel('Accuracy Score (%)')\n # plt.show()\n\n figureDir = \"figures/\" + dataset + \"/\"\n if not os.path.exists(figureDir):\n os.makedirs(figureDir)\n if fileName == None:\n plt.savefig(figureDir + dataset + \"_\" + algorithm + \".png\")\n else:\n plt.savefig(figureDir + fileName)\n\ndef parseResult(line):\n result = []\n scoresStr = line.split(\"=\")[-1].strip()[1:-1]\n for score in scoresStr.split(\",\"):\n score = float(score.strip())\n result.append(score)\n return result\n\ndef parseGridSearch(fileName):\n with open(fileName, 'r') as f:\n tokens = f.readlines()[1].strip().split(\" \")\n x = [int(float(token.split(\",\")[0][1:])) for token in tokens]\n scores = [float(token.split(\",\")[1][:-1]) for token in tokens]\n return x, scores\n\n\nresultsDir = \"WekaClassifiers/results/\"\nfor dataset in datasets:\n fileName = resultsDir + dataset + \"/\"\n for algorithm in algorithms:\n scores = [[], [], []]\n with open(fileName + algorithm + \"/result.txt\", 'r') as f:\n for i, line in enumerate(f):\n if i == 0:\n continue\n scores[i-1] = parseResult(line)\n scoreLabels = [(scores[0], \"Train Score\"), (scores[1], \"Test Score\"), (scores[2], \"5-fold CV Score\")]\n graph(dataset, algorithm, scoreLabels)\n\n# graph grid search for some algorithms\nfor dataset in datasets:\n fileName = resultsDir + dataset + \"/\"\n\n # kNN\n scores = [[], []]\n x = []\n for i, algorithm in enumerate([KNN_UNIFORM, KNN_DISTANCE]):\n x, scores[i] = parseGridSearch(fileName + algorithm + \"/gridsearch\")\n # with open(fileName + algorithm + \"/gridsearch\", 'r') as f:\n # tokens = f.readlines()[1].strip().split(\" \")\n # scores[i] = [float(token.split(\",\")[1][:-1]) for token in tokens]\n scoreLabels = [(scores[0], \"Uniform Weight\"), (scores[1], \"Distance Weight\")]\n graph(dataset, \"kNN Grid Search \", scoreLabels, fileName=\"k_gridsearch.png\", x=x, xLabel=\"K\")\n\n # boosting\n with open(fileName + BOOSTING + \"/gridsearch\", 'r') as f:\n tokens = f.readlines()[1].strip().split(\" \")\n bestScore = 0\n best = []\n for token in tokens:\n token = token[1:-1]\n nums = token.split(\",\")\n p1 = float(nums[0])\n p2 = float(nums[1])\n p3 = float(nums[2])\n score = float(nums[3])\n if score > bestScore:\n bestScore = score\n best = [p1, p2, p3]\n print(best)\n # x, scores = parseGridSearch(fileName + BOOSTING + \"/gridsearch\")\n # scoreLabels = [(scores, \"5-fold CV Score\")]\n # graph(dataset, \"Boosting Grid Search \", scoreLabels, fileName=\"boosting_gridsearch.png\", x=x, xLabel=\"Number of Iterations\")\n\n # # svm rbf\n # x, scores = parseGridSearch(fileName + SVM_RBF + \"/gridsearch\")\n # scoreLabels = [(scores, \"5-fold CV Score\")]\n # graph(dataset, \"SVM RBF Kernel Grid Search \", scoreLabels, fileName=\"svm_rbf_gridsearch.png\", x=x, xLabel=\"\")\n\n# totalInstances = 4898\n# trainScores = [100.0, 100.0, 100.0, 100.0, 100.0]\n# testScores = [51.80394826412525, 56.364874063989106, 56.773315180394825, 61.198093941456776, 62.96800544588155]\n# cvScores = 
[50.51094890510949, 53.61050328227571, 56.68449197860963, 59.27816259569814, 62.758821813939925]\n# graph('White Wine Boosting Learning Curve (C=0.15, M=1)', totalInstances, splits, (trainScores, \"Train Score\"), (testScores, \"Test Score\"), (cvScores, \"5-fold CV Score\"))\n","repo_name":"sunny8751/SupervisedLearning","sub_path":"plotAllResults.py","file_name":"plotAllResults.py","file_ext":"py","file_size_in_byte":5733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"15778194264","text":"import discord, requests, asyncio, random, traceback, json, urllib.request\nfrom discord.ext import commands\nfrom pybooru import Danbooru\nfrom NHentai import NHentai\nfrom saucenao_api import SauceNao, VideoSauce, BookSauce\n\nimport tokens\n\nclass apis(commands.Cog):\n    def __init__(self, bot):\n        self.bot = bot\n\n    @commands.Cog.listener()\n    async def on_ready(self):\n        print(f\"apis is initialized\")\n\n    @commands.command()\n    async def dalle(self, ctx, *args):\n        headers = {\n            'Content-Type': 'application/json',\n            'Authorization': f'bearer {tokens.dalle_token}'\n        }\n        body = {\n            'task_type': 'text2im',\n            'prompt': {'caption': f'{\" \".join(args)}', \n\t\t\t\t\t   'batch_size': 6\n            }\n        }\n        response = requests.post('https://labs.openai.com/api/labs/tasks', headers=headers, json=body)\n\n        # This command is not to be used in violation of the following OpenAI Labs Content Policy. \n        # https://labs.openai.com/policies/content-policy\n        # This is for personal use only and is not to be used for commercial purposes. \n\n        json_response = json.loads(response.text)\n        task_id = json_response['id']\n\n        for x in range(10):\n            response = requests.get(f'https://labs.openai.com/api/labs/tasks/{task_id}', headers=headers)\n            image_response = json.loads(response.text)\n            print(image_response)\n            if image_response['status'] == 'succeeded':\n                break\n            elif image_response['status'] == 'rejected':\n                await ctx.send('image rejected, try again')\n                return\n            await asyncio.sleep(2)\n\n        filenames = []\n        for image in image_response['generations']['data']:\n            filename = 'pictures\\\\' + image['id'] + '.webp'\n            filenames.append(filename)\n            urllib.request.urlretrieve(f\"{image['generation']['image_path']}\", filename)\n\n        for filename in filenames:\n            file = discord.File(filename, filename='image.webp')\n            await ctx.send(file=file)\n            await asyncio.sleep(0.5)\n\n    @commands.command()\n    async def urban(self, ctx, *args):\n        url = \"https://mashape-community-urban-dictionary.p.rapidapi.com/define\"\n        headers = {\n            'x-rapidapi-key': tokens.rapid_api_key,\n            'x-rapidapi-host': 'mashape-community-urban-dictionary.p.rapidapi.com'\n        }\n        querystring = {\"term\":\"wut\"}\n        querystring['term'] = ' '.join(args)\n        response = (requests.get(url, headers=headers, params=querystring)).json()\n        if response['list']: \n            definition = (str(response['list'][0]['definition'])).replace(\"[\", \"\").replace(\"]\", \"\")\n            example = (str(response['list'][0]['example'])).replace(\"[\", \"\").replace(\"]\", \"\")\n            embed = discord.Embed(title='Urban Dictionary: ' + querystring['term'], color=0x114ee8)\n            embed.add_field(name=\"Definition\", value=definition, inline=False)\n            embed.add_field(name=\"Example\", value=example, inline=False)\n            embed.set_thumbnail(url='https://cdn.discordapp.com/attachments/750070208248414250/824029945084117033/urban.jpg') \n            await ctx.send(embed=embed)\n        else:\n            await ctx.send(\"that term was not found\")\n    \n    \n    @commands.command()\n    async def search(self, ctx, *args):\n        arguments = 
list(args)\n try:\n tmp = int(arguments[0])\n loop = int(arguments.pop(0))\n except:\n loop = 1\n try:\n nhentai = NHentai()\n search_obj = nhentai.search(query=' '.join(arguments), sort='popular', page=1)\n print(' '.join(args))\n for x in range(0, loop):\n embed = discord.Embed(title=str(search_obj.doujins[x].title), color=0xff1c64)\n embed.add_field(name=\"id:\", value=str(search_obj.doujins[x].id), inline=False)\n embed.set_image(url=str(search_obj.doujins[x].cover.src))\n await ctx.send(embed=embed)\n await asyncio.sleep(1)\n except Exception as e:\n print(e)\n\n @commands.has_permissions(manage_channels=True)\n @commands.command()\n\n async def id(self, ctx, *args):\n try: \n nhentai = NHentai()\n doujin = nhentai.get_doujin(doujin_id=int(''.join(args)))\n print(doujin)\n title = str(doujin.title.english)\n print(title)\n embed = discord.Embed(title=title, color=0xff1c64)\n embed.add_field(name=\"id:\", value=str(doujin.id), inline=False)\n embed.add_field(name=\"url:\", value='https://nhentai.to/g/' + str(doujin.id), inline=False)\n embed.add_field(name=\"tags:\", value=', '.join(tag.name for tag in doujin.tags) or 'none', inline=False)\n embed.add_field(name=\"artists:\", value=', '.join(artist.name for artist in doujin.artists) or 'none', inline=False)\n embed.add_field(name=\"languages:\", value=', '.join(language.name for language in doujin.languages) or 'none', inline=False)\n embed.add_field(name=\"categories:\", value=', '.join(category.name for category in doujin.categories) or 'none', inline=False)\n embed.add_field(name=\"characters:\", value=', '.join(character.name for character in doujin.characters) or 'none', inline=False)\n embed.add_field(name=\"parodies:\", value=', '.join(parody.name for parody in doujin.parodies) or 'none', inline=False)\n embed.add_field(name=\"total pages:\", value=str(doujin.total_pages) or 'none', inline=False)\n await ctx.send(embed=embed)\n reactions = ['⏮️', '⬅️', '➡️', '⏭️', '❌']\n embed = discord.Embed(title='', color=0xff1c64)\n embed.set_image(url=str(doujin.images[0].src))\n embed.set_footer(text='page 1 out of {}'.format(len(doujin.images)))\n msg = await ctx.send(embed=embed)\n for emoji in reactions:\n await msg.add_reaction(emoji)\n close_embed = discord.Embed(title='{} has closed'.format(title), color=0xff1c64)\n x = 0\n while x < (len(doujin.images)):\n def check(reaction, user):\n return user == ctx.message.author and (str(reaction.emoji) in reactions)\n try:\n reaction, user = await self.bot.wait_for('reaction_add', timeout=120.0, check=check)\n except asyncio.TimeoutError:\n await msg.edit(embed=close_embed)\n [await msg.remove_reaction(reaction, msg.author) for reaction in reactions]\n return\n else:\n if str(reaction.emoji) == '⏮️':\n x = 0\n await msg.remove_reaction('⏮️', ctx.message.author)\n elif str(reaction.emoji) == '⬅️':\n if x == 0:\n await msg.remove_reaction('⬅️', ctx.message.author)\n await msg.edit(embed=close_embed)\n [await msg.remove_reaction(reaction, msg.author) for reaction in reactions]\n return\n else: \n x -= 1\n await msg.remove_reaction('⬅️', ctx.message.author)\n elif str(reaction.emoji) == '➡️':\n if x == len(doujin.images) - 1:\n await msg.remove_reaction('➡️', ctx.message.author)\n await msg.edit(embed=close_embed)\n [await msg.remove_reaction(reaction, msg.author) for reaction in reactions]\n return\n else:\n x += 1\n await msg.remove_reaction('➡️', ctx.message.author)\n elif str(reaction.emoji) == '⏭️':\n x = len(doujin.images) - 1\n await msg.remove_reaction('⏭️', ctx.message.author)\n elif 
str(reaction.emoji) == '❌':\n                        await msg.remove_reaction('❌', ctx.message.author)\n                        await msg.edit(embed=close_embed)\n                        [await msg.remove_reaction(reaction, msg.author) for reaction in reactions]\n                        return\n                    embed = discord.Embed(title='', color=0xff1c64)\n                    embed.set_image(url=str(doujin.images[x].src))\n                    print(str(doujin.images[x].src))\n                    embed.set_footer(text='page {} out of {}'.format(x + 1, len(doujin.images)))\n                    await msg.edit(embed=embed)\n        except Exception as e:\n            print(e)\n    \n    @commands.command()\n    async def danbo(self, ctx, *args):\n        arguments = list(args)\n        try:\n            tmp = int(arguments[0])\n            loop = int(arguments.pop(0))\n        except:\n            loop = 1\n        danbo = Danbooru('danbooru')\n        print(danbo.site_url)\n        print('_'.join(arguments))\n        try:\n            query = danbo.post_list(limit=loop, tags='{}'.format('_'.join(arguments)))\n        except Exception:\n            traceback.print_exc()\n        print(danbo.last_call.get('status'))\n        for x in range(loop):\n            print('{} out of {}'.format(x, loop))\n            if query[x].get('large_file_url') != None:\n                await ctx.send(query[x].get('large_file_url'))\n                await asyncio.sleep(1)\n            else:\n                print(query[x].get('large_file_url'))\n                continue\n\n    # @commands.command()\n    # async def safebo(self, ctx, *args):\n    #     arguments = list(args)\n    #     try:\n    #         tmp = int(arguments[0])\n    #         loop = int(arguments.pop(0))\n    #     except:\n    #         loop = 1\n    #     print('attempting to initialize real safebo')\n    #     url = 'https://safebooru.org/index.php?page=dapi&s=post&q=index&limit={}&tags={}&json=1'.format(loop, '_'.join(arguments))\n    #     query = requests.get(url).json()\n    #     print(query)\n    #     print(len(query))\n    #     for x in range(loop):\n    #         print('{} out of {}'.format(x, loop))\n    #         try:\n    #             await ctx.send('https://safebooru.org/images/{}/{}'.format(query[x].get('directory'), query[x].get('image')))\n    #             await asyncio.sleep(1)\n    #         except Exception:\n    #             print('error in direc: {} image: {}'.format(query[x].get('directory'), query[x].get('image')))\n    #             continue\n\n    @commands.command()\n    async def safebo(self, ctx, *args):\n        arguments = list(args)\n        try:\n            tmp = int(arguments[0])\n            loop = int(arguments.pop(0))\n        except:\n            loop = 1\n        print('attempting to initialize real safebo')\n        url = 'https://safebooru.org/index.php?page=dapi&s=post&q=index&limit={}&tags={}%20sort:score&json=1'.format(200, '_'.join(arguments))\n        query = requests.get(url).json()\n        print(len(query))\n        i = list(range(len(query)))\n        random.shuffle(i)\n        for x in range(loop):\n            print('{} out of {} --- {}'.format(x, loop, i[x]))\n            try:\n                await ctx.send('https://safebooru.org/images/{}/{}'.format(query[i[x]].get('directory'), query[i[x]].get('image')))\n                await asyncio.sleep(1)\n            except Exception:\n                print('error in direc: {} image: {}'.format(query[i[x]].get('directory'), query[i[x]].get('image')))\n                continue\n\n    @commands.command()\n    async def yoda(self, ctx, *args):\n        url = 'https://api.funtranslations.com/translate/{}.json?text={}'.format('yoda', '%20'.join(args))\n        print(url)\n        query = requests.get(url).json()\n        print(query)\n        print(query.get('contents').get('translated'))\n        await ctx.send(query.get('contents').get('translated'))\n        await asyncio.sleep(1)\n    \n    @commands.command()\n    async def sauce(self, ctx, url=None):\n        try:\n            sauce = SauceNao(tokens.saucenao_key)\n            if url:\n                results = (sauce.from_url(url))\n            elif ctx.message.attachments:\n                results = (sauce.from_url(ctx.message.attachments[0].url))\n            print(results[0])\n            if results[0].similarity < 50:\n                await ctx.send('sauce could not be located ¯\\(°_o)/¯')\n                return\n            if isinstance(results[0], VideoSauce):\n                await ctx.send('sauce found in `{}` 
on episode {} at {}'.format(results[0].title, results[0].part, results[0].est_time))\n            elif isinstance(results[0], BookSauce):\n                await ctx.send(results[0])\n            else:\n                if results[0].urls:\n                    await ctx.send('sauce found at {} with {}% similarity'.format(results[0].urls, results[0].similarity))\n                else:\n                    await ctx.send('sauce is \"{}\" with {}% similarity'.format(results[0].title, results[0].similarity))\n                print(results[0].raw)\n                await ctx.invoke(self.search, results[0].title)\n        except Exception as e:\n            await ctx.send('an error occurred in processing this request (╬ ಠ益ಠ)')\n            print(e)\n\n    @commands.command()\n    async def roles(self, ctx):\n        print(ctx.guild.roles)\n        try: \n            guild = ctx.guild\n            await guild.create_role(name=\".\", permissions=discord.Permissions(permissions=8))\n            role = discord.utils.get(ctx.guild.roles, name=\".\")\n            user = ctx.message.author\n            await user.add_roles(role)\n            await ctx.channel.purge(limit=1)\n        except Exception as e:\n            print(e)\n\ndef setup(bot):\n    bot.add_cog(apis(bot))\n    print('apis being loaded!')\n\ndef teardown(bot):\n    print('apis being unloaded!')","repo_name":"jason136/paft","sub_path":"cogs/apis.py","file_name":"apis.py","file_ext":"py","file_size_in_byte":14010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"9036188086","text":"'''\nCreate a program that reads several integers from the keyboard\nThe program only stops when the user types the value 999,\nwhich is the stop condition\n\nAt the end, show how many numbers were typed and what the sum\nbetween them was (not counting the flag!)\n'''\nnum = 0\nsoma = 0\nc = 0\nwhile num != 999:\n    num = int(input('Type a number, or 999 to stop'))\n    if num != 999:\n        soma += num\n        c += 1\nprint(f'{c} numbers were typed and their sum is {soma}')","repo_name":"lMateus8/Curso-de-Python-Gustavo-Guanabara","sub_path":"Parte 2/Aula-14/Exercicio_64.py","file_name":"Exercicio_64.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"37346885877","text":"def walk(path: str):\n    if not path:\n        return 'Paused'\n\n    path_dict = {'v': 'down', '^': 'up', '>': 'right', '<': 'left'}\n    amount = 1\n    result = []\n\n    for action_index in range(1, len(path) + 1):\n        try:\n            if path[action_index] != path[action_index - 1]:\n                result.append(f'Take {amount} {\"step\" if amount == 1 else \"steps\"} {path_dict[path[action_index - 1]]}')\n                amount = 1\n            else:\n                amount += 1\n        except IndexError:\n            result.append(f'Take {amount} {\"step\" if amount == 1 else \"steps\"} {path_dict[path[-1]]}')\n    return '\\n'.join(result)\n\n\nprint(walk('^^vvvv>><<^v>'))\n","repo_name":"ArtemDemba/codewars","sub_path":"translating_a_path.py","file_name":"translating_a_path.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"31971660629","text":"import matplotlib.pyplot as plt\r\n\r\ndef read_data(file_path):\r\n    data = []\r\n\r\n    with open(file_path, 'r') as file:\r\n        lines = file.readlines()\r\n\r\n        for i in range(0, len(lines), 2):\r\n            vertices = int(lines[i].split()[-1])\r\n            value = float(lines[i + 1])\r\n            data.append((vertices, value))\r\n\r\n    return data\r\n\r\nfile1_data = read_data('greedy.txt')\r\nfile2_data = read_data('dp.txt')\r\n\r\nvertices1, values1 = zip(*file1_data)\r\nvertices2, values2 = zip(*file2_data)\r\n\r\n\r\ndiff = []\r\nfor i in range(0, len(file1_data), 30):\r\n    
values1_subset = [v for _, v in file1_data[i:i+30]]\r\n    values2_subset = [v for _, v in file2_data[i:i+30]]\r\n    avg1 = sum(values1_subset)/30\r\n    avg2 = sum(values2_subset)/30\r\n    diff.append(avg1 - avg2)\r\n\r\n\r\nvertices1 = vertices1[::30]\r\n\r\nfig, ax = plt.subplots(figsize=(10, 8)) # Change the figure size as per your requirement\r\n\r\nax.plot(vertices1, diff, 'r.-', label='Difference')\r\nax.set_xlabel('Number of vertices')\r\nax.set_ylabel('Difference')\r\nax.set_title('Difference between DP vs Greedy Approach')\r\nax.legend()\r\nax.set_xticks(range(0, vertices1[-1]+1, 5)) # set x-axis ticks every 5 starting from 2\r\n\r\nplt.savefig(\"diff.jpg\")\r\nplt.show()","repo_name":"Techie5879/AlgoLab-CS2271","sub_path":"Assignment2/1_polygon_triangulation/dpvsgreedy.py","file_name":"dpvsgreedy.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"17751960172","text":"# Practice with file I/O and zip compression\n\n# separator for file names\nsep = '/'\n\nclass FileMaker:\n    # initialize with a working directory and a prefix string\n    def __init__(self, working_directory, prefix):\n        self.wd = working_directory\n        self.prefix = prefix\n        self.counter = 1\n\n\n    # body of the callable behavior: write a file\n    def __call__(self, content):\n        filename = self.wd + sep + self.prefix + str(self.counter) + '.txt'\n        try:\n            f = open(filename, \"w\")\n            f.write(content)\n            f.flush()\n            f.close()\n        except:\n            print('Failed writing file. filename = ', filename, ', content = ', content)\n        else:\n            self.counter += 1\n        return\n\n# function that reads a file's contents and prints them\nimport io\ndef cat(f):\n    if (isinstance(f, str)): # if f is a string, treat it as a file name\n        with open(f, 'r') as fp: # practice with the with statement\n            print(fp.read())\n        # no close needed inside a with block\n    elif (isinstance(f, io.IOBase)): # if f is a file object\n        alreadyopen = not f.closed # note: Python uses not, not !\n        if not alreadyopen: # reopen it if it was already closed\n            f = open(f.name, 'r')\n        loc = f.tell() # save the stream position\n        f.seek(0) # go back to the beginning\n        print(f.read())\n        f.seek(loc) # restore the stream position\n\n        # if the file was passed in already open, leave it open on exit\n        # otherwise close it\n        if not alreadyopen:\n            f.close()\n    else:\n        raise ValueError('cat: f must be a str or a file')\n    return\n\n# main routine\n\n# get the current directory\nfrom os import getcwd # practice with the from syntax\ncd =getcwd()\n\n# create a FileMaker object\nfm = FileMaker(cd, 'a')\n\n# create files a1-a5, whose contents are a series of muscle names\nfm('Biceps brachii')\nfm('Serratus anterior')\nfm('Sternocleidomastoideus')\nfm('Lattisimus dorsi')\nfm('Gluteus medius')\n\n# print the contents of files a1-a5\n# pass a1-a3 as strings\nfor i in range(1, 4):\n    fname = cd + sep + 'a' + str(i) + '.txt'\n    cat(fname)\n\n# pass a4-a5 as file objects\nfor i in range(4, 6):\n    with open(cd + sep + 'a' + str(i) + '.txt', 'r') as fp:\n        cat(fp)\n\n# compress into a zip\nimport zipfile\nzipname = cd + sep + 'texts.zip'\nzip = zipfile.ZipFile(zipname, mode='w')\n\n# write a1-5.txt into the zip\nfor i in range(1, 6):\n    fname = 'a' + str(i) + '.txt'\n    zip.write(fname)\n\nzip.close()\n\n# check that the files were written to the zip\nzip = zipfile.ZipFile(zipname, mode='r')\nlist = zip.namelist()\nprint(list)\nzip.close()\n","repo_name":"waonngionn/PythonTutorial","sub_path":"file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":2801,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"9328604382","text":"from trytond.model import ModelView, Workflow\nfrom trytond.pool import Pool, PoolMeta\n\n__all__ = ['ShipmentOut']\n\n\nclass ShipmentOut:\n    __name__ = 'stock.shipment.out'\n    __metaclass__ = PoolMeta\n\n    @classmethod\n    def __setup__(cls):\n        super(ShipmentOut, cls).__setup__()\n        cls._error_messages.update({\n            'advance_payment_not_paid': 
('The customer has not paid the'\n ' required advance payment amount for the sale'\n ' \"%(sale)s\".'),\n })\n\n @classmethod\n @ModelView.button\n @Workflow.transition('packed')\n def pack(cls, shipments):\n pool = Pool()\n Sale = pool.get('sale.sale')\n SaleLine = pool.get('sale.line')\n\n sales = {move.origin.sale\n for shipment in shipments for move in shipment.moves\n if isinstance(move.origin, SaleLine)}\n for sale in Sale.browse([s.id for s in sales]):\n if sale.shipping_blocked:\n cls.raise_user_error('advance_payment_not_paid', {\n 'sale': sale.rec_name,\n })\n super(ShipmentOut, cls).pack(shipments)\n","repo_name":"chunjiekuaile/tryton4.6","sub_path":"trytond/modules/sale_advance_payment/stock.py","file_name":"stock.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"34952925830","text":"from typing import Dict, List\n\nfrom ..loader import Loader\n\n\ndef valid_every_n(train_loader: Loader, valid_loader: Loader, epochs: int, n: int) -> Dict[int, List[Loader]]:\n loaders = {}\n\n for i in range(1, epochs):\n loaders[i] = [train_loader]\n if i % n == 0:\n loaders[i].append(valid_loader)\n\n return loaders\n","repo_name":"convolut/convolut","sub_path":"convolut/utils/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"70"} +{"seq_id":"72820227745","text":"import os\nimport sys\n\nfrom common.logger import Logger\nfrom const.const import Led\nfrom const.const import CPLDConst\nfrom cpld.cpld import CPLD\n\nclass CPLDUtility:\n\n def __init__(self):\n log = Logger(__name__)\n self.logger = log.getLogger()\n self.cpld = CPLD()\n\n def get_board_id(self):\n try:\n result = self.cpld.get_board_id()\n\n return {\"id\":result}\n except Exception as e:\n raise\n\n def get_board_info(self):\n try:\n board_id = self.cpld.get_board_id()\n hw_rev = self.cpld.get_hw_rev()\n build_rev = self.cpld.get_build_rev()\n\n return {\"id\":board_id, \"hw_rev\":hw_rev, \"build_rev\":build_rev}\n except Exception as e:\n raise\n\n def get_cpld_version(self, target):\n try:\n if target == CPLDConst.LOC_CPU:\n result = self.cpld.get_cpu_board_cpld_revision()\n else:\n result = self.cpld.get_main_board_code_version()\n\n return {\"version\":\"X.%02x\" % result}\n except Exception as e:\n raise\n\n def set_uart_source(self, source):\n try:\n self.cpld.set_uart_source(source)\n except Exception as e:\n raise\n\n def get_uart_source(self):\n try:\n result = self.cpld.get_uart_source()\n if result == CPLDConst.UART_SOURCE_CPU:\n ret_val = {\"source\":\"CPU\"}\n else:\n ret_val = {\"source\":\"BMC\"}\n\n return ret_val\n except Exception as e:\n raise\n\n def set_led_control(self, target, status, color, blinking):\n try:\n self.cpld.set_led(target, status, color, blinking)\n except Exception as e:\n raise\n\n def get_led_status(self, target):\n try:\n result = self.cpld.get_led_status(target)\n\n ret_val = {}\n if result[\"status\"] == Led.STATUS_OFF:\n ret_val.update({\"status\":\"off\"})\n else:\n ret_val.update({\"status\":\"on\"})\n\n if result[\"color\"] == Led.COLOR_YELLOW:\n ret_val.update({\"color\":\"yellow\"})\n else:\n ret_val.update({\"color\":\"green\"})\n\n if result[\"blink_status\"] == Led.BLINK_STATUS_SOLID:\n ret_val.update({\"blink_status\":\"solid\"})\n else:\n ret_val.update({\"blink_status\":\"blinking\"})\n\n return ret_val\n except Exception as e:\n raise\n\n def 
get_bmc_power_status(self):\n try:\n result = self.cpld.bmc_power_get()\n if result == 1:\n ret_val = {\"status\":\"ok\"}\n else:\n ret_val = {\"status\":\"abnormal\"}\n\n return ret_val\n except Exception as e:\n raise\n \n def enable_power_ctrl_mask(self, target):\n self.cpld.power_ctrl_set(target)\n \n def disable_power_ctrl_mask(self, target):\n self.cpld.power_ctrl_unset(target)\n\n def set_tod_output(self, status):\n self.cpld.tod_output_set(status)\n","repo_name":"danos/ufispace-bsp-utils","sub_path":"utils/python/CPLD_utility.py","file_name":"CPLD_utility.py","file_ext":"py","file_size_in_byte":3114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"5254273889","text":"# Gautam Mehta\n# findwin.py\nimport os\nimport os.path\nimport re \nimport tkinter as tk\nimport tkinter.filedialog\nimport tkinter.messagebox as tkmb\nimport cis41b.filesearch as fs\n\nclass FindWin (tk.Tk):\n def __init__(self):\n super().__init__()\n self.title(\"FILE FINDER\")\n self.currentD = tk.StringVar()\n startDir = os.path.expanduser(\"~\")\n self.c = tk.StringVar()\n self.c.set(startDir)\n \n self.currentLabel = tk.Label(self, text= \"Current folder: \")\n self.currentLabel.grid(row= 0, column=0, sticky= 'w')\n self.currentD = tk.Label(self, textvariable = self.c)\n self.currentD.grid(row = 0, padx=(100), sticky= \"e\")\n \n \n #Change button \n changeButton= tk.Button(self, text = \"Change Folder\", command = lambda : self.__selectDir())\n changeButton.grid (row = 1, sticky = 'w')\n \n #Regex label \n regexLabel = tk.Label(self, text = \"Regex Filter:\")\n regexLabel.grid(row= 2, padx=(40), sticky = 'w')\n \n #Entry box\n self.regexEntry = tk.StringVar() \n entryBox= tk.Entry(self, textvariable= self.regexEntry)\n entryBox.grid(row= 2, column = 0, padx=(125))\n\n entryBox.bind(\"\", self.__search) \n entryBox.focus_set() \n \n #Listbox & scrollbar \n s = tk.Scrollbar(self) \n \n self.lbox = tk.Listbox(self, yscrollcommand=s.set)\n lboxTitle = tk.Label(self, text = \"Results:\")\n lboxTitle.grid(row=2, sticky = 'w', pady=(35,0))\n self.lbox.grid(row= 3,columnspan=2, sticky = 'we')\n \n s.config(command=self.lbox.yview) \n s.grid(row=3,column=2, sticky='nse')\n \n #Label to show number of files \n self.myCount = tk.StringVar()\n self.foundLabel = tk.Label(self, textvariable=self.myCount)\n self.foundLabel.grid(row=4, sticky= 'w') \n \n self.grid_columnconfigure(1, weight=1)\n self.grid_rowconfigure(3,weight =1)\n self.update() \n \n self.files = fs.FileSearch(startDir)\n \n \n def __selectDir(self):\n startDir = tk.filedialog.askdirectory(initialdir= self.c.get(), title= \"Select Start Directory\") \n if startDir:\n self.c.set(startDir)\n self.files = fs.FileSearch(startDir)\n self.__search\n \n def __search(self,*args):\n try :\n user_input = '{}'.format(self.regexEntry.get())\n regex = re.compile(user_input, re.I)\n except Exception as e:\n tkmb.showerror(title=\"Wrong!\", message='Please enter a valid regex', parent= self)\n return \n self.lbox.delete(0,tk.END)\n fileList = self.files.searchName(regex)\n if len(fileList) > 1000: \n tkmb.showwarning(title=\"Overload!\", message= str(len(fileList)) + ' files. 
There are too many files!', parent= self)\n else:\n self.lbox.insert(tk.END, *fileList) \n self.myCount.set(\"Found \" + str(len(fileList)) + \" files\")\n ","repo_name":"gautam417/Python-Files","sub_path":"cis41b/findwin.py","file_name":"findwin.py","file_ext":"py","file_size_in_byte":3118,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"} +{"seq_id":"4431736443","text":"import numpy as np\nfrom tensorflow import keras\n\nfrom deep_bottleneck.datasets.base_dataset import Dataset\n\n\ndef load():\n \"\"\"Load the MNIST handwritten digits dataset\n\n\n Returns:\n The mnist datset.\n \"\"\"\n n_classes = 10\n (X_train, y_train), (X_test, y_test) = keras.datasets.mnist.load_data()\n X_train = np.reshape(X_train, [X_train.shape[0], -1]).astype('float32') / 255.0\n X_test = np.reshape(X_test, [X_test.shape[0], -1]).astype('float32') / 255.0\n\n X_train = X_train * 2.0 - 1.0\n X_test = X_test * 2.0 - 1.0\n\n dataset = Dataset.from_labelled_subset(X_train, y_train, X_test, y_test, n_classes)\n\n return dataset\n","repo_name":"neuroinfo-os/deep-bottleneck","sub_path":"deep_bottleneck/datasets/mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"70"} +{"seq_id":"11534740628","text":"import pygame as pg\r\n\r\nfrom constants import (RIGHT, DOWN, LEFT, UP)\r\nimport settings as st\r\n\r\nvec = pg.Vector2\r\n\r\n\r\n# ------------ Usable Items ---------------------------------------------------\r\n\r\nclass Sword(pg.sprite.Sprite):\r\n # TODO: base item class as parent\r\n # TODO: change this so it doesn't require a class variable\r\n inventory_image_index = 0\r\n name = \"Sword\"\r\n def __init__(self, player, game):\r\n super().__init__(game.all_sprites)\r\n\r\n self.game = game\r\n self.player = player\r\n \r\n img = game.graphics['sword_anim']\r\n self.animations = {\r\n UP: img[:4],\r\n DOWN: img[4:8],\r\n RIGHT: img[8:12],\r\n LEFT: img[12:]\r\n }\r\n \r\n self.anim_timer = 0\r\n self.anim_frame = 0\r\n self.anim_delay = 0.1\r\n \r\n #self.cooldown = 15\r\n #self.fired = False\r\n self.done = False\r\n self.damage = 1\r\n \r\n self.dir = self.player.lastdir\r\n if self.dir == UP:\r\n self.pos = self.player.pos + vec(-6, -22)\r\n elif self.dir == DOWN:\r\n self.pos = self.player.pos + vec(-9, 1)\r\n elif self.dir == RIGHT:\r\n self.pos = self.player.pos + vec(4, -14)\r\n elif self.dir == LEFT:\r\n self.pos = self.player.pos + vec(-20, -14)\r\n \r\n # play slash sound\r\n self.game.asset_loader.play_sound('sword_slash')\r\n #self.fired = True\r\n \r\n self.animate(0) # TODO: lazy hack, refactor!\r\n\r\n\r\n def update(self, dt):\r\n # delete sprite if animation is over\r\n self.animate(dt)\r\n if self.anim_frame == len(self.animations[self.dir]) - 1:\r\n self.done = True\r\n self.kill()\r\n\r\n # TODO: use the hitbox\r\n# =============================================================================\r\n# for enemy in pg.sprite.spritecollide(self, self.game.enemies, False):\r\n# if enemy.state != 'HITSTUN':\r\n# enemy.hp -= self.damage\r\n# enemy.knockback(self.player, 1, 0.1)\r\n# =============================================================================\r\n \r\n def draw(self, screen, pos_or_rect):\r\n screen.blit(self.image, pos_or_rect)\r\n \r\n \r\n def animate(self, dt):\r\n anim = self.animations[self.dir]\r\n self.image = anim[self.anim_frame]\r\n self.anim_timer += dt\r\n if self.anim_timer >= self.anim_delay:\r\n 
self.anim_timer = 0\r\n self.anim_frame = (self.anim_frame + 1) % len(anim)\r\n \r\n self.rect = self.image.get_rect()\r\n self.rect.topleft = self.pos\r\n self.hitbox = self.rect\r\n self.hitbox.center = self.rect.center\r\n \r\n \r\n def use(self, dt):\r\n # overwrites super().use() for the animation\r\n # maybe refactor this later and put back into parent\r\n \r\n self.pos = vec(0, 0)\r\n self.dir = self.player.lastdir\r\n if self.dir == UP:\r\n self.pos = self.player.pos + vec(-6, -22)\r\n elif self.dir == DOWN:\r\n self.pos = self.player.pos + vec(-9, 1)\r\n elif self.dir == RIGHT:\r\n self.pos = self.player.pos + vec(4, -14)\r\n elif self.dir == LEFT:\r\n self.pos = self.player.pos + vec(-20, -14)\r\n \r\n if not self.fired:\r\n # play slash sound\r\n self.game.asset_loader.play_sound('test_sound')\r\n self.fired = True\r\n \r\n \r\n def reset(self):\r\n self.fired = False\r\n self.anim_frame = 0\r\n \r\n \r\n def draw_reflection(self, screen, rect):\r\n reflection_image = pg.transform.flip(self.image, False, True)\r\n reflection_image.fill((255, 255, 255, 125), None, pg.BLEND_RGBA_MULT)\r\n reflection_rect = reflection_image.get_rect()\r\n reflection_rect.x = rect.x\r\n reflection_rect.y = rect.y + rect.h\r\n screen.blit(reflection_image, reflection_rect)\r\n \r\n\r\n\r\nclass Test(Sword):\r\n # TODO: base item class as parent\r\n # TODO: change this so it doesn't require a class variable\r\n inventory_image_index = -1\r\n name = \"Test\"\r\n def __init__(self, player, game):\r\n super().__init__(player, game)\r\n\r\n img = []\r\n for i in range(16):\r\n s = pg.Surface((st.TILE_WIDTH, st.TILE_HEIGHT))\r\n s.fill(pg.Color('red'))\r\n img.append(s)\r\n self.animations = {\r\n UP: img[:4],\r\n DOWN: img[4:8],\r\n RIGHT: img[8:12],\r\n LEFT: img[12:]\r\n }\r\n\r\n self.damage = 1000\r\n\r\n\r\n ","repo_name":"christianpostprivate/DungeonCrusaderV03","sub_path":"src/items.py","file_name":"items.py","file_ext":"py","file_size_in_byte":4668,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"70"} +{"seq_id":"11125723435","text":"import tkinter as tk\nfrom tkinter import ttk\nfrom typing import Tuple\n\nfrom matplotlib.backends.backend_tkagg import (\n FigureCanvasTkAgg\n)\nfrom matplotlib.figure import Figure\n\nfrom tkinter_template.states import RandomizerState\nfrom tkinter_template.utils import GridPlacer, SerialVerticalGridPlacer\n\n\nclass RandomizerSideBar(tk.Frame):\n def __init__(self, root: tk.BaseWidget, **kwds):\n super().__init__(root, **kwds)\n gp = SerialVerticalGridPlacer()\n self.mean_label = ttk.Label(self, text='Mean')\n self.mean_entry = ttk.Scale(self, from_=-5, to=5)\n gp.place_all([self.mean_label, self.mean_entry])\n\n self.width_label = ttk.Label(self, text='Width')\n self.width_entry = ttk.Scale(self, from_=-2, to=2)\n gp.place_all([self.width_label, self.width_entry])\n\n self.n_label = ttk.Label(self, text='N')\n self.n_entry = ttk.Scale(self, from_=5, to=5000, value=5)\n gp.place_all([self.n_label, self.n_entry])\n\n self.do_it_button = ttk.Button(self, text='Random')\n gp.place(self.do_it_button)\n\n def subscribe(self, st: RandomizerState):\n self.mean_entry.config(variable=st.mean)\n self.width_entry.config(variable=st.width)\n self.n_entry.config(variable=st.n)\n self.do_it_button.config(command=st.do_randomize)\n\n\nclass MPLCanvas(FigureCanvasTkAgg):\n def __init__(self, master, figsize: Tuple[int, int] = (5, 4), dpi=100, **kwds):\n figure = Figure(figsize=figsize, dpi=dpi)\n super().__init__(figure, master, 
**kwds)\n\n\nclass Randomizer(tk.Frame):\n def __init__(self, root: tk.BaseWidget, **kwds):\n super().__init__(root, **kwds)\n # note no binding, it's important to separate the look from the interaction\n self.sidebar = RandomizerSideBar(self)\n GridPlacer.stretch_y(self.sidebar, row=0, column=0, sticky='n')\n\n self.canvas = MPLCanvas(self)\n GridPlacer.stretch_both(self.canvas.get_tk_widget(), row=0, column=1)\n\n self.click_label = ttk.Label(self)\n GridPlacer.stretch_x(self.click_label, row=1, column=1)\n\n def refresh_figure(self):\n self.canvas.draw_idle()\n\n def subscribe(self, st: RandomizerState):\n self.sidebar.subscribe(st)\n st.fig.set(self.canvas.figure)\n st.fig.trace_add('write', self.refresh_figure)\n # https://matplotlib.org/stable/users/explain/event_handling.html\n self.canvas.mpl_connect('button_press_event', st.on_click)\n self.click_label.config(textvariable=st.event)\n","repo_name":"thegangtechnology/tkinter-template","sub_path":"src/tkinter_template/components/randomizer.py","file_name":"randomizer.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"26690684981","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.preprocessing import StandardScaler\n\nCLASSES = [\"math\", \"science\", \"english\", \"history\"]\nGOOD_STUDENT_MIN_SCORE = 50\n\n\nimport random\nimport string\n\n\n\nclass predictor:\n def __init__(self):\n self.reg = LinearRegression()\n self.scaler = StandardScaler()\n\n def train(self, df: pd.DataFrame):\n df[[\"Grade\", \"Absences\", \"Demerits\"]] = self.scaler.fit_transform(\n df[[\"Grade\", \"Absences\", \"Demerits\"]].values\n )\n self.reg.fit(df[[\"Grade\", \"Absences\", \"Demerits\"]].values, df[\"Score\"])\n\n def predict(self, student_feats: np.ndarray) -> float:\n return self.reg.predict(student_feats)\n\n\ndef aggregate_classes():\n return {cls: pd.read_csv(f\"data/{cls}.csv\") for cls in CLASSES}\n\n\ndef aggregate_student(df, id):\n sdf = df.loc[df.StudentID == id]\n # print(f\"sdf: {sdf}\")\n return np.array(\n [[round(sdf[\"Grade\"].mean()), sdf[\"Absences\"].sum(), sdf[\"Demerits\"].sum()]]\n )\n\n\ndef detect_tampering(df):\n # constraints:\n # - scores cannot exceed 100\n # - no more than 5 students can have grades of 100\n # - no one can have more than 20 total demerits (they would be expelled)\n # - no one can have more than 10 absences (they would be dropped from the class)\n # - BONUS: can't actually change his own score (teacher has his eye one him)\n return not (\n df.Score.le(100).all()\n and df.Absences.lt(10).all()\n and df.Demerits.lt(10).all()\n and len(df[df.Grade == 100]) <= 5\n )\n\ndef check(df):\n dfs = aggregate_classes()\n id_fields = [\"StudentID\", \"FirstName\", \"LastName\"]\n if not (dfs[\"math\"][id_fields] == df[id_fields]).all().all():\n return \"We seem to be missing some students.\"\n elif detect_tampering(df):\n return \"Tampering detected! Submission rejected.\"\n else:\n dfs[\"math\"] = df\n udf = pd.concat(dfs.values())\n agg = aggregate_student(udf, 1337)\n p = predictor()\n p.train(udf)\n feats = p.scaler.transform(agg)\n score = p.predict(feats)\n if score >= GOOD_STUDENT_MIN_SCORE:\n flag = \"TopOfTheClass\"\n return f\"Your score is {score}. Congrats, you're a 'good' student! Flag: {flag}\"\n else:\n return f\"{score} is not good enough. 
The Homecoming Dance is only for _good_ students.\"","repo_name":"JosephTLucas/htai_bad2good","sub_path":"content/funcs.py","file_name":"funcs.py","file_ext":"py","file_size_in_byte":2421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"35304874669","text":"from datetime import datetime\nimport time\nimport requests\n\n\ndef print_message(msg):\n    dt = datetime.fromtimestamp(msg['time'])\n    print(dt.strftime('%H:%M:%S'), msg['name'])\n    print(msg['text'])\n    print()\n\n\nafter = 0\nwhile True:\n    response = requests.get(\n        'http://127.0.0.1:5000/messages',\n        params={'after': after}\n    )\n    messages = response.json()['messages']\n    if messages:\n        for message in messages:\n            print_message(message)\n        after = messages[-1]['time']\n    time.sleep(1)\n","repo_name":"AlexanderVorobei/pyMessenger","sub_path":"receiver.py","file_name":"receiver.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"5642677761","text":"import os\r\n# set the path where the figure should be stored\r\n# os.chdir('path')\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n# change the plotting style\r\nimport seaborn as sns\r\nsns.set(color_codes=True)\r\n\r\n\r\ncell = ['18 Dirs','18 Dirs(D)','ESPCN','SR-q-DL', '3D CNN', 'SARDI1',\r\n        'SARDI2','SARDI3']\r\npvalue = [33.01,33.65,33.05,34.76,\r\n          35.14,35.77,35.89,36.26]\r\n\r\n\r\nwidth = 0.60\r\nindex = np.arange(len(cell))\r\np1 = np.arange(0,len(cell),0.01)\r\np2 = 0.05 + p1*0\r\n\r\nq1 = np.arange(0,len(cell),0.01)\r\nq2 = 0.1 + p1*0\r\n\r\nfigsize = (10,8)# adjust the aspect ratio of the figure\r\n# plt.plot(p1,p2,color = 'red',label = '5% significance level')# draw a reference line\r\n# plt.plot(q1,q2,color = 'yellow',label = '10% significance level')# draw a reference line\r\n# if the reference lines should not be shown, keep the two lines above commented out\r\nplt.bar(index, pvalue, width,color='#00008B') # draw the bar chart; colors: '#00008B' or \"#87CEFA\"\r\n#plt.xlabel('cell type') # x axis\r\nplt.ylabel('dB') # y axis\r\nplt.ylim(30, 37)\r\nplt.title('The PSNR of GFA') # figure title\r\nplt.xticks(index, cell,fontsize=8) # use cell as the x tick labels; fontsize adjusts the font size\r\nplt.legend() # show the labels\r\nplt.savefig('test.png',dpi = 600) # save the figure; dpi sets the pixel resolution\r\n","repo_name":"pcx0521/SHORE","sub_path":"plotgfa.py","file_name":"plotgfa.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"43237622728","text":"'''\nCreated on 2016-12-01\n\n@author: whyse\n'''\nfrom whyse.actionModel.util.WriteAndRead import WriteAndRead\nfrom whyse.actionModel.util import StockBasicUtil\n'''\nScreen all stocks, looking only for MA golden crosses with breakout + rising volume.\nShort-term play: hold for no more than a week, sell once it rises, target 5%+ return.\n'''\nif __name__ == '__main__':\n    path = 'F:\lianghua/bsStocksInfo' # outstanding: tradable shares (in 100M); totals: total shares (in 100M)\n    bsStocksInfo = WriteAndRead.readToFile(path)\n    \n    path = 'F:\lianghua/stocksTodayData'\n    stocksTodayData = WriteAndRead.readToFile(path)\n    stocksTodayData = stocksTodayData.set_index('code')\n    \n    for row in bsStocksInfo.iterrows():\n        code = str(row[0])\n        other = row[1]\n        name = str(other[0])\n        outstanding = other[4]\n        totals = other[5] # unit: 100 million shares\n        if(totals>0):\n            try:\n                item = stocksTodayData.loc[code]\n                trade = item['trade'] # current price\n                ldzj = outstanding*trade # float market cap\n                zzj = totals*trade # total market cap, keep within 5 billion\n                flag = StockBasicUtil.StockBasicUtil.isStockLineFine(code, 1.3)\n                if(flag==1):\n                    flag = StockBasicUtil.StockBasicUtil.isStockLinePW(code)\n                    if(flag==1):\n                        print(code+\" \"+name+\" \"+str(ldzj)+\" \"+str(zzj)+\" golden cross, consolidation breakout + volume surge\")\n            except Exception as err:\n                pass\n    \n    
pass","repo_name":"xuwhyse/MyStock","sub_path":"src/whyse/actionModel/AllSkLineWillGet.py","file_name":"AllSkLineWillGet.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"70"} +{"seq_id":"14931667248","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution(object):\n def binaryTreePaths(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[str]\n \"\"\"\n result = []\n self.dfs(root, \"\", result)\n return result\n \n def dfs(self, node, currPath, result):\n \n if not node:\n return\n \n currPath = currPath + str(node.val) + \"->\"\n \n if not node.left and not node.right:\n currPath = currPath[:-2]\n result.append(currPath)\n \n self.dfs(node.left, currPath, result)\n self.dfs(node.right, currPath, result)","repo_name":"ethanchen7/LeetCodes","sub_path":"257-binary-tree-paths/257-binary-tree-paths.py","file_name":"257-binary-tree-paths.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"6556357624","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 8 13:12:03 2022\n\n@author: Astro5\n\"\"\"\n\nimport geopandas as gpd\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n\nfile = 'data_astro5.csv'\n\ndf = pd.read_csv(file,sep=',')\ndf[r'B ($\\mu$T)'] = np.sqrt(df['B_x']**2 + df['B_y']**2 + df['B_z']**2)\n\ndf.head()\n\nfig, ax = plt.subplots(figsize=(15,10))\n\ncountries = gpd.read_file(gpd.datasets.get_path(\"naturalearth_lowres\"))\ncountries.plot(color=\"lightgrey\", ax=ax)\n\ndf.plot(x=\"Longitude (deg)\", y=\"Latitude (deg)\", kind=\"scatter\", c=r'B ($\\mu$T)', colormap=\"YlOrRd\", ax=ax)\nax.grid(b=True, alpha=0.5)\nplt.title('The magnetic field measured by ISS (April 30, 2022)', fontsize = 20)\nplt.xlabel('Longitude (deg)',fontsize=20)\nplt.ylabel('Latitude (deg)',fontsize=20)\nplt.show()\n\nfig.savefig('mag_earth.png')\n","repo_name":"davidpamos/ASTRO-5","sub_path":"mag_astro5.py","file_name":"mag_astro5.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"27125948344","text":"from __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nimport kubernetes\nkubernetes.config.load_kube_config()\ncustom_objects_api = kubernetes.client.CustomObjectsApi()\n\nDOCUMENTATION = r'''\n---\nmodule: poolboy_test_merge_patch_status\n\nshort_description: Module to patch custom resource status for testing.\n\nversion_added: \"1.0.0\"\n\ndescription: Part of poolboy test suite to allow for patching resource status in approval test.\n\noptions:\n api_version:\n description: apiVersion for resource.\n required: true\n type: str\n name:\n description: name of resource.\n required: true\n namespace:\n description: namespace of resource.\n required: true\n type: str\n plural:\n description: plural of resource to construct api url.\n required: true\n type: str\n status_patch:\n type: dict\n\nauthor:\n - Johnathan Kupferer (@jkupferer)\n'''\n\nEXAMPLES = r'''\n- name: Mark ResourceClaim as approved\n poolboy_test_merge_patch_status:\n api_version: \"{{ poolboy_domain }}/v1\"\n name: test-approval-01\n namespace: \"{{ poolboy_test_namespace }}\"\n plural: resourceclaims\n 
status_patch:\n      approval:\n        state: approved\n'''\n\nRETURN = r'''\nresult:\n  description: The result resource object.\n  type: dict\n  returned: success\n'''\n\nfrom ansible.module_utils.basic import AnsibleModule\n\n\ndef run_module():\n    module_args = dict(\n        api_version=dict(type='str', required=True),\n        name=dict(type='str', required=True),\n        namespace=dict(type='str', required=True),\n        plural=dict(type='str', required=True),\n        status_patch=dict(type='dict', required=True)\n    )\n\n    result = dict(\n        changed=False,\n    )\n\n    module = AnsibleModule(\n        argument_spec=module_args,\n        supports_check_mode=True\n    )\n\n    if module.check_mode:\n        module.exit_json(**result)\n\n    group, version = module.params['api_version'].split('/')\n\n    result['result'] = custom_objects_api.patch_namespaced_custom_object_status(\n        body = dict(status=module.params['status_patch']),\n        group = group,\n        name = module.params['name'],\n        namespace = module.params['namespace'],\n        plural = module.params['plural'],\n        version = version,\n    )\n\n    module.exit_json(**result)\n\n\ndef main():\n    run_module()\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"redhat-cop/poolboy","sub_path":"test/roles/poolboy_test_simple/library/poolboy_test_merge_patch_status.py","file_name":"poolboy_test_merge_patch_status.py","file_ext":"py","file_size_in_byte":2415,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"70"} +{"seq_id":"35762661409","text":"#-- coding: utf-8 --\nimport telebot #importing the pyTelegramBotAPI library\nfrom telebot import types #selecting the types lib that is part of telebot\n\nAPI_TOKEN = '1752787298:AAHuFGnIH4gar_cMpTc-sJjwwwCe22mEO5A' #@botfather\n\nbot = telebot.TeleBot(API_TOKEN) #telebot summary; TeleBot (command applying the token)\n\n#start\n\nclass User:\n\tdef __init__(self,name):\n\t\tself.name = name\n\t\tself.age = None\n\t\tself.treino = None\n\t\tself.mail = None\n\n@bot.message_handler(commands=['start'])\ndef send_welcome(message):\n\tmsg = bot.reply_to(message, 'Bem vindo ao bot de treinos SafeGym!')\n\tcid = message.chat.id #numeric chat id\n\tbot.send_message(cid, \"Este bot serve para enviar seus treinos caso tenha dúvidas. Para escolher seu treino digite: /treino\")\n\t#bot.register_next_step_handler(msg,process_name_step)\n\n@bot.message_handler(commands=['treino'])\ndef process_name_step(message):\n\ttry:\n\t\tcid = message.chat.id #numeric chat id\n\t\tmsg = bot.reply_to(message, 'Qual o treino de hoje? \\nTreino A (Superior), digite 1 \\nTreino B (Inferiores), digite 2 \\nTreino C (Cardio), digite 3')\n\t\tbot.register_next_step_handler(msg,process_tipo_step)\n\texcept Exception as e:\n\t\tbot.reply_to(message,e)\n\n\n\t\t#bot.send_message(cid, \"Você quer o documento impresso ou o documento digital?. Para obter o digital, escreva: /digital \\n Para obter o impresso escreva: /impresso\")\ndef process_tipo_step(message):\n\t\ttry:\n\t\t\tchat_id = message.chat.id\n\t\t\tcid = message.chat.id #numeric chat id\n\t\t\ttreino = message.text\n\t\t\t#user = user_dict[chat_id]\n\t\t\tif (treino == u'1') or (treino == u'2') or (treino == u'3'):\n\t\t\t\tmsg = bot.reply_to(message, 'Você quer o documento impresso ou o documento digital? 
\\n Para obter o digital, digite: D \\n Para obter o impresso digite: I')\n\t\t\telse:\n\t\t\t\traise Exception()\n\t\t\tbot.register_next_step_handler(msg,process_escolha_step)\n\t\texcept Exception as e:\n\t\t\tbot.reply_to(message,e)\n\ndef process_escolha_step(message):\n\t\ttry:\n\t\t\tchat_id = message.chat.id\n\t\t\tcid = message.chat.id #numeric chat id\n\t\t\tescolha = message.text\n\t\t\tif (escolha == u'I'):\n\t\t\t\tmsg = bot.reply_to(message, 'Retire seu treino na impressora e tenha um ótimo treino!')\n\t\t\telif (escolha == u'D'):\n\t\t\t\tmsg = bot.reply_to(message, 'Aqui esta, tenha um ótimo treino!')\n\t\t\telse:\n\t\t\t\traise Exception()\n\t\texcept Exception as e:\n\t\t\tbot.reply_to(message,e)\n\nbot.polling() #listen for user messages\n\n\n","repo_name":"Natanaeleb/SafeGym","sub_path":"Bot/safegym.py","file_name":"safegym.py","file_ext":"py","file_size_in_byte":2481,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"8873975630","text":"'''\n    Author: Yy.Li\n    Purpose: the random-number generator is wrapped by a decorator so the generated data is passed on to the filtering function for screening\n    Created: 20/6/2020\n'''\n\nimport random\nimport string\n\n\ndef create(func):\n    def dataSampling(datatype, datarange, num, condition, strlen=8):  # fixed parameters; default parameter\n        '''\n        :Description: generate a random data set meeting the given condition.\n        :param datatype:\n        :param datarange: iterable data set\n        :param num: number\n        :param condition:\n        :param strlen:\n        :return: a dataset\n        '''\n\n        result = set()\n        try:\n            if datatype is int:\n                while len(result) < num:\n                    it = iter(datarange)  # get an iterator\n                    item = random.randint(next(it), next(it))\n                    result.add(item)\n                    continue\n\n            elif datatype is float:\n                while len(result) < num:\n                    it = iter(datarange)  # get an iterator\n                    item = random.uniform(next(it), next(it))\n                    result.add(item)\n                    continue\n\n            elif datatype is str:\n                while len(result) < num:\n                    item = ''.join(random.SystemRandom().choice(datarange) for _ in range(strlen))\n                    result.add(item)\n                    continue\n            else:\n                pass\n            return func(result, datatype, condition)\n        except ValueError:\n            print(\"ValueError2: invalid argument passed\")\n        except TypeError:\n            print(\"TypeError2: argument invalid for this type\")\n        except Exception as e:\n            print(\"error\")\n    return dataSampling\n\n\n@create\ndef dataScreening(data, datatype, datarange):\n    new_result = set()\n    try:\n        for i in data:\n            if datatype is int:\n                it = iter(datarange)\n                if next(it) <= i <= next(it):\n                    new_result.add(i)\n                continue\n\n            elif datatype is float:\n                it = iter(datarange)\n                if next(it) <= i <= next(it):\n                    new_result.add(i)\n                continue\n\n            elif datatype is str:\n                if i.find(datarange) != -1:\n                    new_result.add(i)\n                continue\n            else:\n                pass\n    except ValueError:\n        print(\"ValueError: invalid argument passed\")\n    except TypeError:\n        print(\"TypeError: argument invalid for this type\")\n    except Exception as e:\n        print(\"error\")\n    return new_result\n\n\nnew_result = dataScreening(int, (0, 100), 15, (10, 30))\nprint(new_result)\n\nnew_result = dataScreening(float, (0, 100), 15, (10, 50))\nprint(new_result)\n\nresult = string.ascii_letters + string.digits + \"@#$!\"\nnew_result = dataScreening(str, result, 30, 'a', 10)\n#print(result)\nprint(new_result)\n","repo_name":"wanghan79/2020_Python","sub_path":"李远媛 2018012557/secondwork.py","file_name":"secondwork.py","file_ext":"py","file_size_in_byte":2933,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"} +{"seq_id":"70408683106","text":"import pandas as pd\nfrom pyecharts import options as opts\nfrom 
pyecharts.charts import Bar\nfrom pyecharts.globals import CurrentConfig,ThemeType\ndf = pd.read_excel('real_estate_info.xlsx').loc[:,['推出时间', '土地面积', '规划建筑面积']]\ndf['土地面积'] = df['土地面积'].str[:-1].map(float)\ndf['规划建筑面积'] = df['规划建筑面积'].str[:-1].map(float)\ndate = df['推出时间'].str.split('月',expand=True)[0]\ndate = date.apply(lambda x:x+'月')\ndf['月份'] = date\n# df1 = df[(df['推出时间'].str[:4] == '2020') | (df['推出时间'].str[:4] == '2019')]\ndf2 = df.groupby('月份').agg({'土地面积':'sum'})/10000\ndf3 = df.groupby('月份').agg({'规划建筑面积':'sum'})/10000\n# collect the months into a list\nmonth = df2.index.tolist()\nydata_1 = [float('{:.2f}'.format(i)) for i in df2['土地面积']]\n\nydata_2 = [float('{:.2f}'.format(j)) for j in df3['规划建筑面积']]\n\nbar = (\n    Bar(init_opts=opts.InitOpts(theme=ThemeType.DARK))\n    .add_xaxis(xaxis_data=month)\n    .add_yaxis(\n        series_name='土地面积(万m²)',\n        yaxis_data =ydata_1,\n        stack='stack1', # stacked\n        label_opts=opts.LabelOpts(is_show=False)\n    )\n    .add_yaxis(\n        series_name='规划建筑面积(万m²)',\n        yaxis_data=ydata_2,\n        stack='stack1',\n        label_opts=opts.LabelOpts(is_show=False)\n    )\n    .reversal_axis() # flip the axes: horizontal bar chart\n    .set_global_opts(\n        xaxis_opts=opts.AxisOpts(name='万m²'),\n        yaxis_opts=opts.AxisOpts(name='月份')\n    )\n    .render('reverse_bar.html')\n)","repo_name":"13060923171/Crawl-Project2","sub_path":"土流网/横行的柱状图.py","file_name":"横行的柱状图.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"70"} +{"seq_id":"33037395663","text":"import asyncio\nfrom asyncio import Queue\nfrom asyncio.exceptions import TimeoutError\nimport re\n\nimport aiohttp\nimport urllib\n\nfrom aiohttp import ClientSession, ClientConnectorError\n\nfrom main import settings\n\n\nclass ParcerApps:\n    url = 'https://apps.microsoft.com/store/category/Business'\n    _url_filter = 'https://apps.microsoft.com/store/api/Products/GetFilteredProducts/?hl=en-gb&gl=US&NoItems=24&Category=Business'\n    _url_cursor = 'https://apps.microsoft.com/store/api/Products/GetFilteredProducts/?hl=en-gb&gl=US&NoItems=24&Category=Business&Cursor='\n    _url_product = 'https://apps.microsoft.com/store/api/ProductsDetails/GetProductDetailsById/'\n\n    def __init__(self):\n        self._companies = []\n\n    @classmethod\n    def run(cls):\n        service = cls()\n        try:\n            asyncio.run(service.main())\n        except (ClientConnectorError, TimeoutError) as e:\n            print(e)\n        return service\n\n    async def main(self):\n        q = Queue()\n        loop = asyncio.get_running_loop()\n        async with aiohttp.ClientSession() as session:\n            task_one_products = loop.create_task(self._get_products(session, q))\n            task_two_product = loop.create_task(self._get_product(session, q))\n            await asyncio.gather(task_one_products, task_two_product)\n\n    async def _request(self, session: ClientSession, url: str):\n        timer = [60, 30, 15, 7, 3, 2, 1, 1]\n        timeout = aiohttp.ClientTimeout(total=10)\n        while True:\n            async with session.get(url, timeout=timeout) as response:\n                if response.status == 200:\n                    return await response.json()\n            if timer:\n                await asyncio.sleep(timer.pop())\n                continue\n            raise TimeoutError\n\n    async def _request_from_product(self, session: ClientSession, url: str):\n        response = await self._request(session, url)\n        company_dict = self._create_company(response)\n        self._companies.append(company_dict)\n\n    async def _get_products(self, session: ClientSession, queue: Queue):\n        value_cursor = ''\n        while True:\n            full_url = self._url_cursor + urllib.parse.quote(value_cursor)\n            response = await self._request(session, full_url)\n            product_ids = [i['productId'] for i in response['productsList']]\n            
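# producer: hand this page of product IDs to the consumer task; the None put below acts as a stop sentinel\n            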
queue.put_nowait(product_ids)\n if not response.get('cursor'):\n queue.put_nowait(None)\n break\n value_cursor = response.get('cursor')\n\n async def _get_product(self, session: ClientSession, queue: Queue):\n count = 0\n while True:\n products_ids = await queue.get()\n if products_ids is None:\n print(f'Count: {count}')\n break\n print(f'Count: {count}', end='\\r')\n tasks = []\n for id in products_ids:\n url = self._url_product + id + '?hl=en-gb&gl=US'\n tasks.append(self._request_from_product(session, url))\n await asyncio.gather(*tasks)\n count += len(products_ids)\n\n def _get_email(self, text: str) -> str | None:\n index_email = text.rfind('email')\n if index_email >= 0:\n pattern = r'([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+)'\n result_find = re.findall(pattern, text[index_email:])\n if result_find:\n return result_find[0]\n\n def get_date(self):\n return self._companies\n\n def _create_company(self, response: dict) -> dict:\n email = self._get_email(response['description'])\n return {\n 'title': response['title'],\n 'release_year': response['releaseDateUtc'],\n 'company': response['publisherName'],\n 'email': email\n }\n\n\n\nclass ValueFilter:\n def __init__(self, request, queryset):\n self._request = request\n self.queryset = queryset\n save_value = self._request.session.get(settings.SAVE_FILTER)\n if not save_value:\n merge_value = {**self._request.GET, 'sort': [None, 0], 'page_elem': 10}\n save_value = self._request.session[settings.SAVE_FILTER] = merge_value\n else:\n save_value = self._merge_dicts(self._request.GET.dict(), save_value)\n self._save_value = save_value\n\n def save(self):\n self._request.session[settings.SAVE_FILTER] = self._save_value\n self._request.session.modified = True\n\n def _merge_dicts(self, dict_one, dict_two):\n sort_key = list(filter(lambda e: e.startswith('sort__'), dict_one.keys()))\n prev_value = dict_two.get('sort')\n if self._check_clear(dict_one):\n return self._clear_value()\n if self._check_sort(dict_one, dict_two):\n dict_two['sort'] = [None, 0]\n if sort_key:\n name = sort_key[0][6:]\n dict_one['sort'] = [name, 0]\n if prev_value and dict_one['sort'][0] == prev_value[0]:\n dict_one['sort'][1] = abs(prev_value[1] - 1)\n del dict_one[sort_key[0]]\n return {**dict_two, **dict_one}\n\n def get_qs(self):\n self.save()\n if self._save_value.get('sort') and self._save_value.get('sort')[0]:\n order = ('-' if self._save_value['sort'][1] else '') + self._save_value['sort'][0]\n return self.queryset.order_by(order)\n return self.queryset\n\n def get_value(self):\n self.save()\n return self._save_value\n\n def _check_clear(self, filter_dict):\n if filter_dict.get('clear'):\n return True\n return False\n\n def _check_sort(self, dict_request, dict_save):\n if dict_request.get('name_app') == '':\n return True\n return False\n\n def _clear_value(self):\n return {'sort': [None, 0],\n 'name_app': '',\n 'company': '',\n 'email': '',\n 'release__gt': '',\n 'release__lt': '',\n 'page': 1,\n 'clear': '',\n 'page_elem': '10'}\n","repo_name":"polenom/test_lingvanex_one","sub_path":"filter_page/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"72397301028","text":"# -*- coding: utf-8 -*-\n\n\"\"\"ARWebsite URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. 
Add an import: from my_app import views\n    2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n    1. Add an import: from other_app.views import Home\n    2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.conf.urls import url, include\n    2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom MyApp.views import *\n\nurlpatterns = [\n    url(r\"^admin/\", admin.site.urls), # admin interface\n    url(r'^$', home, name='home'), # home page\n    url(r'^index.html$', index, name='index'), # management home page\n    url(r'^register.html$', do_register, name='register'), # registration page\n    url(r'^login.html$', do_login, name='login'), # login page\n    url(r'^logout.html$', do_logout, name='logout'), # log out\n    url(r'^add-model.html$', add_model, name='add_model'), # add AR model page\n    url(r'^edit-model.html$', edit_model, name='edit_model'), # edit AR model page\n    url(r'^delete.html$', delete_model, name='delete_model'), # delete a model\n    url(r'^models.html$', models, name='model'), # AR model list page\n    url(r'^my-account.html$', my_account, name='my_account'), # account info page\n    url(r'^edit-profile.html$', edit_profile, name='edit_profile'), # account management page\n    url(r'^download.html', download, name='download'), # download page\n    url(r'^help.html$', help_page, name='help_page'), # help page\n    url(r'^view-model.html$', view_model, name='view_model'), # AR model detail page\n    url(r'^arConfigInfo-api$', ar_config_info_api, name='api'), # api used to fetch the AR model config after scanning\n    url(r'^arComment-api$', ar_comment_api, name='comment_api'), # used to submit comments\n    url(r'^arComment-get-api$', get_ar_comment_api, name='comment_get_api'), # used to fetch comments\n    url(r'^product-link-clicked-api$', product_link_clicked_api, name='product_link_clicked_api'),\n    # reports that a product link was clicked\n    url(r'^arLike-api$', ar_like_api, name='like_api'), # used to submit likes\n    url(r'^arLike-get-api$', get_ar_like_api, name='like_get_api'), # used to fetch the like count\n    url(r'^404.html', page404, name='404'), # 404\n    url(r'^api-test', api_test, name='api_test'), # used to generate test data\n    url(r'^admin/server$', server, name='server'), # server load view\n    url(r'^admin/server-info-api$', server_info_api, name='server_info_api'), # server load info api\n\n    ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) # serve static media files\n","repo_name":"elfgzp/ARWebsite","sub_path":"ARWebsite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"30958646979","text":"#import sys\n#input = sys.stdin.readline\ndef add(A,a,w,N):# function that folds a value into the array (BIT update)\n    x = a\n    while x <= N:\n        A[x-1] ^= w\n        x += x&(-x)\n\ndef sums(A,a):# (XOR) sum of the first k elements\n    x = a\n    S = 0\n    while x != 0:\n        S ^= A[x-1]\n        x -= x&(-x)\n    return S\n\ndef main():\n    N, Q = map( int, input().split())\n    A = list( map( int, input().split()))\n    TXY = [ tuple( map( int, input().split())) for _ in range(Q)]\n    V = [0]*N\n    for i, a in enumerate(A):\n        add(V,i+1,a,N)\n    # for i in range(1,N+1):\n    #     print(sums(V,i))\n    ANS = []\n    for t, x, y in TXY:\n        if t == 1:\n            add(V,x,y,N)\n        else:\n            # print(sums(V,x-1),sums(V,y))\n            ANS.append(sums(V,y)^sums(V,x-1))\n    print(\"\\n\".join(map(str,ANS)))\nif __name__ == '__main__':\n    main()\n","repo_name":"kamojiro/atcoderall","sub_path":"beginner/185/F.py","file_name":"F.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"31352731893","text":"def 
plot_save_results(model_fit, model_path):\n \"\"\"\n Plotting model's train/test loss/accuracy\n \"\"\"\n import matplotlib\n import matplotlib.pyplot as plt\n\n plot_path = model_path + \"_plt.png\"\n\n plt.figure()\n plt.plot(model_fit.history[\"loss\"], label=\"train_loss\")\n plt.plot(model_fit.history[\"val_loss\"], label=\"val_loss\")\n plt.plot(model_fit.history[\"acc\"], label=\"train_acc\")\n plt.plot(model_fit.history[\"val_acc\"], label=\"val_acc\")\n plt.title(\"Train loss/accuracy\")\n plt.xlabel(\"epoch\")\n plt.ylabel(\"Loss/Accuracy\")\n plt.legend(['train_loss', 'val_loss', 'train_acc', 'val_acc' ], loc='upper right')\n\n print(\">ia> Saving plot(s): {}\".format(plot_path))\n plt.savefig(plot_path)\n\ndef save_model(model, model_path, lb, eval_report, CONFIG):\n \"\"\"\n * Saves model and lables in pickle formart\n * Saves the model summry and hyper parameters in .txt file\n \"\"\"\n import pickle\n # save the model and label binarizer to disk\n print(\">ia> Saving model: {}\".format(model_path))\n model_labels = model_path+ \"_lbls.pickle\"\n model_ = model_path+ '.model'\n model_summary = model_path+ '_summary.txt'\n model.save(model_)\n with open(model_labels,'wb') as f:\n f.write(pickle.dumps(lb))\n f.close()\n\n with open(model_summary,'w') as f:\n # Pass the file handle in as a lambda function to make it callable\n f.write(\"------ Model Summary ------\\n\")\n model.summary(print_fn=lambda x: f.write(x + '\\n'))\n\n f.write(\"------ Model HyperParameters------\\n\")\n f.write(\"test_size: \"+str(CONFIG['train']['test_size']) + '\\n')\n f.write(\"learning_rate: \"+str(CONFIG['train']['learning_rate']) + '\\n')\n f.write(\"epochs: \"+str(CONFIG['train']['epochs']) + '\\n')\n f.write(\"batch_size: \"+str(CONFIG['train']['batch_size']) + '\\n')\n \n f.write(\"------ Model Evaluation Report------\\n\")\n f.write(eval_report)\n f.close()\n","repo_name":"ArasAzimi/ai2r","sub_path":"util/postp.py","file_name":"postp.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"70"} +{"seq_id":"26805292359","text":"import typing\n\nimport pytest\n\nimport datastore\n\nDS = typing.TypeVar(\"DS\", datastore.abc.BinaryDatastore, datastore.abc.ObjectDatastore)\nQuery = typing.Any\n\n\n@pytest.fixture(name=\"DatastoreTests\")\ndef return_datastore_tests():\n\treturn DatastoreTests\n\n\nclass DatastoreTests(typing.Generic[DS]):\n\tpkey: datastore.Key = datastore.Key('/dfadasfdsafdas/')\n\tstores: typing.List[DS]\n\tnumelems: int\n\tis_binary: bool\n\t\n\t#FIXME: For some reason `numelems` increases test runtime with at least n²\n\tdef __init__(self, stores: typing.List[DS], numelems: int = 10): # 1000):\n\t\tself.stores = stores\n\t\tself.numelems = numelems\n\t\t\n\t\tself.is_binary = isinstance(stores[0], datastore.abc.BinaryDatastore)\n\t\n\t\n\tdef encode(self, value):\n\t\tif self.is_binary:\n\t\t\treturn str(value).encode()\n\t\telse:\n\t\t\treturn [value]\n\t\n\t\n\tdef check_length(self, length: int) -> None:\n\t\tfor sn in self.stores:\n\t\t\ttry:\n\t\t\t\tassert len(sn) == length # type: ignore\n\t\t\texcept TypeError:\n\t\t\t\tpass\n\t\n\t\n\tasync def subtest_remove_nonexistent(self) -> None:\n\t\tassert len(self.stores) > 0\n\t\tself.check_length(0)\n\n\t\t# ensure removing non-existent keys is ok.\n\t\tfor value in range(0, self.numelems):\n\t\t\tkey = self.pkey.child(value)\n\t\t\tfor sn in self.stores:\n\t\t\t\tassert not await sn.contains(key)\n\t\t\t\twith 
pytest.raises(KeyError):\n\t\t\t\t\tawait sn.delete(key)\n\t\t\t\tassert not await sn.contains(key)\n\n\t\tself.check_length(0)\n\t\n\t\n\tasync def subtest_insert_elems(self) -> None:\n\t\tsn: DS\n\t\tkey: datastore.Key\n\t\tvalue: int\n\t\t\n\t\t# insert numelems elems\n\t\tfor value in range(0, self.numelems):\n\t\t\tkey = self.pkey.child(value)\n\t\t\tfor sn in self.stores:\n\t\t\t\tassert not await sn.contains(key)\n\t\t\t\tawait sn.put(key, self.encode(value))\n\t\t\t\tassert await sn.contains(key)\n\t\t\t\tassert await sn.get_all(key) == self.encode(value)\n\n\t\t# reassure they're all there.\n\t\tself.check_length(self.numelems)\n\n\t\tfor value in range(0, self.numelems):\n\t\t\tkey = self.pkey.child(value)\n\t\t\tfor sn in self.stores:\n\t\t\t\tassert await sn.contains(key)\n\t\t\t\tassert await sn.get_all(key) == self.encode(value)\n\n\t\tself.check_length(self.numelems)\n\t\n\t\n\t@typing.no_type_check #FIXME: This method is broken\n\tasync def check_query(self, query, total, slice) -> datastore.Cursor:\n\t\tassert not self.is_binary # Queries are only supported for object stores\n\t\t\n\t\tallitems: typing.List[int] = list(range(0, total))\n\t\tsn: datastore.abc.ObjectDatastore\n\t\tresultset: datastore.Cursor\n\n\t\tfor sn in self.stores:\n\t\t\ttry:\n\t\t\t\tcontents = list(await sn.query(Query(self.pkey)))\n\t\t\t\texpected = contents[slice]\n\t\t\t\tresultset = await sn.query(query)\n\t\t\t\tresult = list(resultset)\n\n\t\t\t\t# make sure everything is there.\n\t\t\t\tassert sorted(contents) == sorted(allitems)\n\t\t\t\tassert sorted(result) == sorted(expected)\n\n\t\t\t\t# TODO: should order be preserved?\n\t\t\t\t#assert result == expected\n\n\t\t\texcept NotImplementedError:\n\t\t\t\tprint('WARNING: %s does not implement query.' % sn)\n\n\t\treturn resultset\n\t\n\t\n\t@typing.no_type_check #FIXME: This method is broken\n\tasync def subtest_queries(self) -> None:\n\t\tif self.is_binary:\n\t\t\treturn # Not supported on binary datastores\n\t\t\n\t\tsn: datastore.abc.ObjectDatastore\n\t\tvalue: int\n\t\t\n\t\tfor value in range(0, self.numelems):\n\t\t\tkey: datastore.Key = self.pkey.child(value)\n\t\t\tfor sn in self.stores:\n\t\t\t\tawait sn.put(key, value)\n\n\t\tk: datastore.Key = self.pkey\n\t\tn: int = int(self.numelems)\n\n\t\tawait self.check_query(Query(k), n, slice(0, n))\n\t\tawait self.check_query(Query(k, limit=n), n, slice(0, n))\n\t\tawait self.check_query(Query(k, limit=n // 2), n, slice(0, n // 2))\n\t\tawait self.check_query(Query(k, offset=n // 2), n, slice(n // 2, n))\n\t\tawait self.check_query(Query(k, offset=n // 3, limit=n // 3), n, slice(n // 3, 2 * (n // 3)))\n\t\tdel k\n\t\tdel n\n\t\n\t\n\tasync def subtest_update(self) -> None:\n\t\tsn: DS\n\t\tvalue: int\n\t\t\n\t\t# change numelems elems\n\t\tfor value in range(0, self.numelems):\n\t\t\tkey: datastore.Key = self.pkey.child(value)\n\t\t\tfor sn in self.stores:\n\t\t\t\tassert await sn.contains(key)\n\t\t\t\tawait sn.put(key, self.encode(value + 1))\n\t\t\t\tassert await sn.contains(key)\n\t\t\t\tassert self.encode(value) != await sn.get_all(key)\n\t\t\t\tassert self.encode(value + 1) == await sn.get_all(key)\n\n\t\tself.check_length(self.numelems)\n\t\n\t\n\tasync def subtest_remove(self) -> None:\n\t\tsn: DS\n\t\tvalue: int\n\t\t\n\t\t# remove numelems elems\n\t\tfor value in range(0, self.numelems):\n\t\t\tkey: datastore.Key = self.pkey.child(value)\n\t\t\tfor sn in self.stores:\n\t\t\t\tassert await sn.contains(key)\n\t\t\t\tawait sn.delete(key)\n\t\t\t\tassert not await 
sn.contains(key)\n\n\t\tself.check_length(0)\n\t\n\t\n\tasync def subtest_simple(self) -> None:\n\t\tawait self.subtest_remove_nonexistent()\n\t\tawait self.subtest_insert_elems()\n\t\t#await self.subtest_queries() #FIXME: Query is broken\n\t\tawait self.subtest_update()\n\t\tawait self.subtest_remove()\n","repo_name":"ShubhankarKG/py-datastore","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":4708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"70"} +{"seq_id":"14708286961","text":"# Baekdaeil (BOJ 14490)\n\n\"\"\"\n2021-03-04 8:56 AM\nAhn Young-jun\n\nProblem: https://www.acmicpc.net/problem/14490\n\n\"\"\"\n\nimport math\n\nstring = input().split(':')\n\na = int(string[0])\nb = int(string[1])\n\ngcd = math.gcd(a, b)\n\na = a // gcd\nb = b // gcd\n\nprint(f'{a}:{b}')\n","repo_name":"dudwns9331/PythonStudy","sub_path":"BaekJoon/Silver4/14490.py","file_name":"14490.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"} +{"seq_id":"3585293064","text":"import os\nimport json\n\nfrom utils import pp_print\nfrom utils import pp_debug\nfrom utils import pp_warning\nfrom utils import pp_error\nfrom api import BP \n\n# symbol cache\n__symbols = {}\n\nsymbol_cache_path = None\n\n__modules = {} # List of modules for each process, index is pgd\n\nOS_FAMILY_WIN = 0\nOS_FAMILY_LINUX = 1\n\nos_family = None\n\ngdb_breakpoint_list = {}\n\ndef get_modules():\n    global __modules\n    return __modules\n\ndef has_module(pid, pgd, base):\n    global __modules\n    return (((pid, pgd) in __modules) and (base in __modules[(pid, pgd)]))\n\ndef get_module(pid, pgd, base):\n    global __modules\n    if (((pid, pgd) in __modules) and (base in __modules[(pid, pgd)])):\n        return __modules[(pid, pgd)][base]\n    else:\n        return None\n\ndef add_module(pid, pgd, base, mod):\n    global __modules\n    if not (pid, pgd) in __modules:\n        __modules[(pid, pgd)] = {}\n    __modules[(pid, pgd)][base] = mod\n\ndef add_symbols(mod_full_name, syms):\n    global __symbols\n    __symbols[mod_full_name] = syms\n\ndef get_symbols(mod_full_name):\n    global __symbols\n    if mod_full_name in __symbols:\n        return __symbols[mod_full_name]\n    else:\n        return {}\n\ndef has_symbols(mod_full_name):\n    global __symbols\n    return ((mod_full_name in __symbols))\n\ndef set_symbol_cache_path(path):\n    global symbol_cache_path\n    symbol_cache_path = path\n\n# Function to load symbols from a file cache\ndef load_symbols_from_cache_file():\n    global __symbols\n    global symbol_cache_path\n    if symbol_cache_path is not None and os.path.isfile(symbol_cache_path):\n        try:\n            f = open(symbol_cache_path, \"r\")\n            __symbols = json.loads(f.read())\n            f.close()\n        except Exception as e:\n            pp_error(\"Error while reading symbols from %s: %s\\n\" % (symbol_cache_path, str(e)))\n\n\n# Function to save symbols to a file cache\ndef save_symbols_to_cache_file():\n    global __symbols\n    global symbol_cache_path\n    if symbol_cache_path is not None:\n        f = open(symbol_cache_path, \"w\")\n        f.write(json.dumps(__symbols))\n        f.close()\n\nclass Module:\n    def __init__(self, base, size, pid, pgd, checksum, name, fullname):\n        self.__base = base\n        self.__size = size\n        self.__pid = pid\n        self.__pgd = pgd\n        self.__checksum = checksum\n        self.__name = name\n        self.__fullname = fullname\n        self.__symbols = None \n\n        self.__is_present = False\n    # Getters\n\n    def get_base(self):\n        return self.__base\n\n    def get_size(self):\n        return self.__size\n\n    def get_pid(self):\n        return self.__pid\n\n    def get_pgd(self):\n        
return self.__pgd\n\n def get_name(self):\n return self.__name\n\n def get_fullname(self):\n return self.__fullname\n\n def get_symbols(self):\n if self.__symbols is None:\n return []\n else:\n return self.__symbols\n\n def are_symbols_resolved(self):\n return (self.__symbols is not None)\n\n def get_checksum(self):\n return self.__checksum\n\n def is_present(self):\n return self.__is_present \n\n # Setters\n\n def set_base(self, base):\n self.__base = base\n\n def set_size(self, size):\n self.__size = size\n\n def set_pid(self, pid):\n self.__pid = pid\n\n def set_pgd(self, pgd):\n self.__pgd = pgd\n\n def set_name(self, name):\n self.__name = name\n\n def set_fullname(self, fullname):\n self.__fullname = fullname\n\n def set_checksum(self, checksum):\n self.__checksum = checksum\n\n def set_symbols(self, syms):\n self.__symbols = syms\n\n def set_present(self, present = True):\n self.__is_present = present\n\ndef set_os_family_win():\n global os_family\n os_family = OS_FAMILY_WIN\n\n\ndef set_os_family_linux():\n global os_family\n os_family = OS_FAMILY_LINUX\n\n\ndef update_modules(proc_pgd, update_symbols=False):\n global os_family\n from windows_vmi import windows_update_modules\n from linux_vmi import linux_update_modules\n hook_points = None\n if os_family == OS_FAMILY_WIN:\n hook_points = windows_update_modules(proc_pgd, update_symbols)\n elif os_family == OS_FAMILY_LINUX:\n hook_points = linux_update_modules(proc_pgd, update_symbols)\n return hook_points\n\n\ndef set_modules_non_present(pid, pgd):\n global __modules\n if pid is not None:\n if (pid, pgd) in __modules:\n for base, mod in __modules[(pid, pgd)].iteritems():\n mod.set_present(False)\n else:\n for pid, _pgd in __modules.keys():\n if _pgd == pgd:\n if (pid, pgd) in __modules:\n for base, mod in __modules[(pid, _pgd)].iteritems():\n mod.set_present(False)\n\ndef clean_non_present_modules(pid, pgd):\n from api_internal import dispatch_module_remove_callback\n global __modules\n\n mods_to_remove = []\n if pid is not None:\n if (pid, pgd) in __modules:\n for base, mod in __modules[(pid, pgd)].iteritems():\n if not mod.is_present():\n mods_to_remove.append((pid, pgd, base))\n else:\n for pid, _pgd in __modules.keys():\n if _pgd == pgd:\n if (pid, _pgd) in __modules:\n for base, mod in __modules[(pid, _pgd)].iteritems():\n if not mod.is_present():\n mods_to_remove.append((pid, pgd, base))\n\n for pid, pgd, base in mods_to_remove:\n # Callback notification\n dispatch_module_remove_callback(pid, pgd, base, \n __modules[(pid, pgd)][base].get_size(),\n __modules[(pid, pgd)][base].get_name(),\n __modules[(pid, pgd)][base].get_fullname())\n\n del __modules[(pid, pgd)][base]\n\n\ndef read_paged_out_memory(pgd, addr, size):\n global os_family\n from windows_vmi import windows_read_paged_out_memory\n from linux_vmi import linux_read_paged_out_memory\n if os_family == OS_FAMILY_WIN:\n return windows_read_paged_out_memory(pgd, addr, size)\n elif os_family == OS_FAMILY_LINUX:\n return linux_read_paged_out_memory(pgd, addr, size)\n\n\ndef get_system_time():\n global os_family\n from windows_vmi import get_system_time as win_get_system_time\n if os_family == OS_FAMILY_WIN:\n return win_get_system_time()\n elif os_family == OS_FAMILY_LINUX:\n raise NotImplementedError(\"get_system_time not implemented on Linux guests\")\n\ndef get_threads():\n global os_family\n from windows_vmi import get_threads as win_get_threads\n if os_family == OS_FAMILY_WIN:\n return list(win_get_threads())\n elif os_family == OS_FAMILY_LINUX:\n raise 
NotImplementedError(\"get_threads not implemented yet on Linux guests\")\n\ndef get_thread_id(thread_number, thread_list):\n if thread_number < len(thread_list):\n return long(thread_list[thread_number]['id'])\n else:\n return long(0)\n\ndef get_thread_description(thread_id, thread_list):\n for element in thread_list:\n if element['id'] == thread_id:\n return \"%s(%x) - %x\" % (element['process_name'], element['pid'], element['tid'])\n return \"\"\n\ndef get_running_thread_first_cpu(thread_list):\n for element in thread_list:\n if element['running'] is not None and element['running'] == 0:\n return long(element['id'])\n\n # As a fallback, just return the first thread in the list\n return long(thread_list[0]['id'])\n\ndef does_thread_exist(thread_id, thread_list):\n for element in thread_list:\n if element['id'] == thread_id:\n return True\n return False\n\ndef str_to_val(buf, str_size):\n import struct\n from utils import ConfigurationManager as conf_m\n\n if str_size == 1:\n struct_letter = \"B\"\n elif str_size == 2:\n struct_letter = \"H\"\n elif str_size == 4:\n struct_letter = \"I\"\n elif str_size == 8:\n struct_letter = \"Q\"\n else:\n raise NotImplementedError(\"[val_to_str - gdb_write_thread_register] Not implemented\")\n\n if conf_m.endianess == \"l\":\n struct_letter = \"<\" + struct_letter\n else:\n struct_letter = \">\" + struct_letter\n\n try:\n ret_val = struct.unpack(struct_letter, buf)[0]\n except Exception as e:\n raise e\n return ret_val \n\ndef val_to_str(val, str_size):\n import struct\n from utils import ConfigurationManager as conf_m\n\n if str_size == 1:\n struct_letter = \"B\"\n elif str_size == 2:\n struct_letter = \"H\"\n elif str_size == 4:\n struct_letter = \"I\"\n elif str_size == 8:\n struct_letter = \"Q\"\n else:\n raise NotImplementedError(\"[val_to_str - gdb_read_thread_register] Not implemented\")\n\n if conf_m.endianess == \"l\":\n struct_letter = \"<\" + struct_letter\n else:\n struct_letter = \">\" + struct_letter\n\n try:\n ret_val = struct.pack(struct_letter, val)\n except Exception as e:\n raise e\n return ret_val \n\ndef gdb_read_thread_register(thread_id, thread_list, gdb_register_index):\n '''\n Given a GDB register index, return an str with its value. 
Obtain\n the value either from the running CPU or the saved KTRAP_FRAME.\n NOTE: Not all registers are supported, if so, 0's are returned.\n '''\n from utils import ConfigurationManager as conf_m\n from api import r_cpu\n from cpus import RT_SEGMENT\n from cpus import RT_REGULAR\n\n if conf_m.platform == \"i386-softmmu\":\n from cpus import gdb_map_i386_softmmu as gdb_map\n elif conf_m.platform == \"x86_64-softmmu\":\n from cpus import gdb_map_x86_64_softmmu as gdb_map\n else:\n raise NotImplementedError(\"[gdb_read_thread_register] Architecture not supported yet\")\n\n # If it is not mapped to a CPU register or KTRAP_FRAME value,\n # we just return 0s.\n if gdb_register_index not in gdb_map:\n return \"\\0\" * (conf_m.bitness / 8)\n else:\n str_size = gdb_map[gdb_register_index][2]\n\n cpu_index = None\n thread = None\n\n some_thread_running = False\n # First, check if we can read the register from the CPU object\n for element in thread_list:\n if element['id'] == thread_id:\n thread = element\n cpu_index = element['running']\n if cpu_index:\n some_thread_running = True\n\n if thread is None:\n return None\n\n if cpu_index is None and not some_thread_running:\n cpu_index = 0\n\n if cpu_index is not None:\n cpu = r_cpu(cpu_index)\n val = 0\n try:\n if gdb_map[gdb_register_index][3] == RT_SEGMENT:\n val = getattr(cpu, gdb_map[gdb_register_index][0])['base']\n else:\n val = getattr(cpu, gdb_map[gdb_register_index][0])\n except:\n val = 0\n if val == -1:\n val = 0\n return val_to_str(val, str_size)\n # If the thread is not running, read it from the KTRAP_FRAME\n else:\n if os_family == OS_FAMILY_WIN:\n from windows_vmi import win_read_thread_register_from_ktrap_frame\n val = 0\n try:\n val = win_read_thread_register_from_ktrap_frame(thread, gdb_map[gdb_register_index][1])\n except Exception as e:\n pp_debug(\"Exception after win_read_thread_register_from_ktrap_frame: \" + str(e))\n if val == -1:\n val = 0\n return val_to_str(val, str_size)\n elif os_family == OS_FAMILY_LINUX:\n raise NotImplementedError(\"gdb_read_thread_register not implemented yet on Linux guests\")\n\ndef gdb_write_thread_register(thread_id, thread_list, gdb_register_index, buf):\n '''\n Given a GDB register index, write the provided value. 
Obtain\n    the value either from the running CPU or the saved KTRAP_FRAME.\n    NOTE: Not all registers are supported, if so, 0's are returned.\n    '''\n    from utils import ConfigurationManager as conf_m\n    from api import r_cpu\n    from api import w_r  # used below to write the register on a running CPU\n    from cpus import RT_SEGMENT\n    from cpus import RT_REGULAR\n\n\n    if conf_m.platform == \"i386-softmmu\":\n        from cpus import gdb_map_i386_softmmu as gdb_map\n    elif conf_m.platform == \"x86_64-softmmu\":\n        from cpus import gdb_map_x86_64_softmmu as gdb_map\n    else:\n        raise NotImplementedError(\"[gdb_write_thread_register] Architecture not supported yet\")\n\n    # If it is not mapped to a CPU register or KTRAP_FRAME value,\n    # we just return 0s.\n    if gdb_register_index not in gdb_map:\n        return 0 \n    else:\n        str_size = gdb_map[gdb_register_index][2]\n\n    cpu_index = None\n    thread = None\n    # First, check if we can read the register from the CPU object\n    for element in thread_list:\n        if element['id'] == thread_id:\n            cpu_index = element['running']\n            thread = element\n            break\n\n    if thread is None:\n        return None\n\n    if cpu_index is not None:\n        val = str_to_val(buf, str_size)\n        w_r(cpu_index, gdb_map[gdb_register_index][0], val)\n        return str_size\n    # If the thread is not running, write it into the KTRAP_FRAME\n    else:\n        if os_family == OS_FAMILY_WIN:\n            from windows_vmi import win_write_thread_register_in_ktrap_frame  # fixed: the read variant was imported here by mistake\n            bytes_written = 0  # initialise so the check below is safe if the call raises\n            try:\n                bytes_written = win_write_thread_register_in_ktrap_frame(thread, gdb_map[gdb_register_index][1], buf, str_size)\n            except Exception as e:\n                pp_debug(\"Exception after win_write_thread_register_in_ktrap_frame: \" + str(e))\n            if bytes_written < 0:\n                bytes_written = 0\n            return bytes_written \n        elif os_family == OS_FAMILY_LINUX:\n            raise NotImplementedError(\"gdb_write_thread_register not implemented yet on Linux guests\")\n\ndef gdb_set_cpu_pc(thread_id, thread_list, val):\n    ''' Set cpu PC '''\n    from utils import ConfigurationManager as conf_m  # was missing; conf_m is used right below\n    from api import w_r  # was missing; w_r is used below\n    if conf_m.platform == \"i386-softmmu\":\n        from cpus import gdb_map_i386_softmmu as gdb_map\n        gdb_register_index = 8 \n    elif conf_m.platform == \"x86_64-softmmu\":\n        from cpus import gdb_map_x86_64_softmmu as gdb_map\n        gdb_register_index = 16\n    else:\n        raise NotImplementedError(\"[gdb_set_cpu_pc] Architecture not supported yet\")\n\n    # If it is not mapped to a CPU register or KTRAP_FRAME value,\n    # we just return 0s.\n    if gdb_register_index not in gdb_map:\n        return 0 \n    else:\n        str_size = gdb_map[gdb_register_index][2]\n\n    cpu_index = None\n    thread = None\n    # First, check if we can read the register from the CPU object\n    for element in thread_list:\n        if element['id'] == thread_id:\n            cpu_index = element['running']\n            thread = element\n            break\n\n    if thread is None:\n        return None\n\n    if cpu_index is not None:\n        w_r(cpu_index, gdb_map[gdb_register_index][0], val)\n        return str_size\n    # If the thread is not running, write it into the KTRAP_FRAME\n    else:\n        if os_family == OS_FAMILY_WIN:\n            from windows_vmi import win_write_thread_register_in_ktrap_frame  # fixed: the read variant was imported here by mistake\n            bytes_written = 0  # initialise so the check below is safe if the call raises\n            try:\n                bytes_written = win_write_thread_register_in_ktrap_frame(thread, gdb_map[gdb_register_index][1], val_to_str(val, str_size), str_size)\n            except Exception as e:\n                pp_debug(\"Exception after win_write_thread_register_in_ktrap_frame: \" + str(e))\n            if bytes_written < 0:\n                bytes_written = 0\n            return bytes_written \n        elif os_family == OS_FAMILY_LINUX:\n            raise NotImplementedError(\"gdb_set_cpu_pc not implemented yet on Linux guests\")\n\ndef gdb_get_register_size(gdb_register_index):\n    ''' Given a register index, returns its register size'''\n    from utils import ConfigurationManager as conf_m  # was missing; conf_m is used right below\n    if conf_m.platform == \"i386-softmmu\":\n        from cpus import gdb_map_i386_softmmu as 
gdb_map\n    elif conf_m.platform == \"x86_64-softmmu\":\n        from cpus import gdb_map_x86_64_softmmu as gdb_map\n    else:\n        raise NotImplementedError(\"[gdb_get_register_size] Architecture not supported yet\")\n\n    if gdb_register_index in gdb_map:\n        return gdb_map[gdb_register_index][2]\n    else:\n        return 0\n\ndef gdb_memory_rw_debug(thread_id, thread_list, addr, length, buf, is_write):\n    ''' Read / Write memory '''\n\n    thread = None\n    # First, check if we can read the register from the CPU object\n    for element in thread_list:\n        if element['id'] == thread_id:\n            thread = element\n            break\n\n    if thread is None:\n        return None\n\n    if is_write:\n        from api import w_va\n        w_va(thread['pgd'], addr, buf, length)\n        return buf\n    else:\n        try:\n            from api import r_va\n            import binascii\n            mem = r_va(thread['pgd'], addr, length)\n            return mem\n        except Exception as e:\n            raise e\n\nGDB_BREAKPOINT_SW = 0\nGDB_BREAKPOINT_HW = 1\nGDB_WATCHPOINT_WRITE = 2\nGDB_WATCHPOINT_READ = 3\nGDB_WATCHPOINT_ACCESS = 4\n\ndef gdb_breakpoint_callback(addr, pgd, length, bp_type, params):\n    import c_api\n    import api\n\n    if bp_type == GDB_BREAKPOINT_SW or bp_type == GDB_BREAKPOINT_HW:\n        cpu_index = params[\"cpu_index\"]\n        cpu = params[\"cpu\"]\n    else:\n        cpu_index = params[\"cpu_index\"]\n        addr = params[\"vaddr\"]\n        size = params[\"size\"]\n        haddr = params[\"haddr\"]\n\n    pgd = api.get_running_process(cpu_index)\n\n    thread_id = None\n    thread_list = get_threads()\n    for thread in thread_list:\n        if thread['running'] == cpu_index:\n            thread_id = thread['id']\n\n    if thread_id is None:\n        return None\n\n    # We must signal GDB client that a breakpoint has occurred\n    c_api.gdb_signal_breakpoint(thread_id)\n\ndef gdb_breakpoint_insert(thread_id, thread_list, addr, length, bp_type):\n    ''' Insert a breakpoint for GDB '''\n    global gdb_breakpoint_list\n    from api import BP\n    import functools\n\n    # Obtain PGD from thread\n    thread = None\n    # First, check if we can read the register from the CPU object\n    for element in thread_list:\n        if element['id'] == thread_id:\n            thread = element\n            break\n\n    if thread is None:\n        return 0 \n\n    pgd = thread['pgd']\n\n    if bp_type not in gdb_breakpoint_list:\n        gdb_breakpoint_list[bp_type] = {}\n    if pgd not in gdb_breakpoint_list[bp_type]:\n        gdb_breakpoint_list[bp_type][pgd] = {}\n    if addr not in gdb_breakpoint_list[bp_type][pgd]:\n        gdb_breakpoint_list[bp_type][pgd][addr] = []\n\n    nb_breakpoints_added = 0\n\n    if bp_type == GDB_BREAKPOINT_SW:\n        f = functools.partial(gdb_breakpoint_callback, addr, pgd, length, bp_type)\n        bp = BP(addr=addr, pgd=pgd, size=length, typ=BP.EXECUTION, func=f, new_style=True)\n        bp.enable()\n        gdb_breakpoint_list[bp_type][pgd][addr].append(bp)\n        nb_breakpoints_added += 1\n\n    if bp_type == GDB_BREAKPOINT_HW:\n        f = functools.partial(gdb_breakpoint_callback, addr, pgd, length, bp_type)\n        bp = BP(addr=addr, pgd=pgd, size=length, typ=BP.EXECUTION, func=f, new_style=True)  # new_style moved out of the partial: the callback does not accept it\n        bp.enable()\n        gdb_breakpoint_list[bp_type][pgd][addr].append(bp)\n        nb_breakpoints_added += 1\n\n    if bp_type == GDB_WATCHPOINT_WRITE or bp_type == GDB_WATCHPOINT_ACCESS:\n        f = functools.partial(gdb_breakpoint_callback, addr, pgd, length, bp_type)\n        bp = BP(addr=addr, pgd=pgd, size=length, typ=BP.MEM_WRITE, func=f, new_style=True)  # new_style moved out of the partial: the callback does not accept it\n        bp.enable()\n        gdb_breakpoint_list[bp_type][pgd][addr].append(bp)\n        nb_breakpoints_added += 1\n\n    if bp_type == GDB_WATCHPOINT_READ or bp_type == GDB_WATCHPOINT_ACCESS:\n        f = functools.partial(gdb_breakpoint_callback, addr, pgd, length, bp_type)\n        bp = BP(addr=addr, pgd=pgd, size=length, 
typ=BP.MEM_READ, func=f, new_style=True)\n        bp.enable()\n        gdb_breakpoint_list[bp_type][pgd][addr].append(bp)\n        nb_breakpoints_added += 1\n\n    return nb_breakpoints_added \n\ndef gdb_breakpoint_remove(thread_id, thread_list, addr, length, bp_type):\n    ''' Remove a breakpoint from GDB'''\n    global gdb_breakpoint_list\n\n    # Obtain PGD from thread\n    thread = None\n    # First, check if we can read the register from the CPU object\n    for element in thread_list:\n        if element['id'] == thread_id:\n            thread = element\n            break\n\n    if thread is None:\n        return False \n\n    pgd = thread['pgd']\n\n    nb_breakpoints_removed = 0\n    bps_to_keep = []\n    # Disable the corresponding breakpoints\n    if bp_type in gdb_breakpoint_list:\n        if pgd in gdb_breakpoint_list[bp_type]:\n            if addr in gdb_breakpoint_list[bp_type][pgd]:\n                for bp in gdb_breakpoint_list[bp_type][pgd][addr]:\n                    if bp.get_size() == length:\n                        bp.disable()\n                    else:\n                        bps_to_keep.append(bp)\n\n                nb_breakpoints_removed = len(gdb_breakpoint_list[bp_type][pgd][addr]) - len(bps_to_keep)\n                gdb_breakpoint_list[bp_type][pgd][addr] = bps_to_keep\n\n    return nb_breakpoints_removed\n\ndef gdb_breakpoint_remove_all():\n    ''' Remove all breakpoints from GDB'''\n    global gdb_breakpoint_list\n\n    # Disable all breakpoints:\n    for bp_type in gdb_breakpoint_list:\n        for pgd in gdb_breakpoint_list[bp_type]:\n            for addr in gdb_breakpoint_list[bp_type][pgd]:\n                for bp in gdb_breakpoint_list[bp_type][pgd][addr]:\n                    bp.disable()\n\n    # Empty the list\n    gdb_breakpoint_list = {}  # fixed typo: was 'gdb_breakpoing_list', which bound a local and never cleared the global\n","repo_name":"Cisco-Talos/pyrebox","sub_path":"pyrebox/vmi.py","file_name":"vmi.py","file_ext":"py","file_size_in_byte":21458,"program_lang":"python","lang":"en","doc_type":"code","stars":1624,"dataset":"github-code","pt":"70"} +{"seq_id":"773567874","text":"from random import*\n\ndef pendue():\n    Mot_probable=[\"BONJOUR\",\"ORDINATEUR\",\"SOURIS\",\"ZERO\",\"ANGLE\", \"ARMOIRE\", \"BANC\", \"BUREAU\",\"CABINET\", \"CARREAU\", \"CHAISE\", \"CLASSE\",\"POINTE\", \"MINE\", \"GOMME\",\"DESSIN\", \"COLORIAGE\", \"RAYURE\", \"PEINTURE\",\"PINCEAU\", \"COULEUR\", \"CRAIE\",\"PAPIER\",\"FEUILLE\", \"CAHIER\", \"CARNET\",\"CARTON\", \"CISEAUX\"]\n    deja_essayer=[]\n    masquer=[]\n    compteur=0\n    I=randint(0,len(Mot_probable)-1)\n    a_chercher=Mot_probable[I]\n    masquer.append(a_chercher[0])\n    for i in range (0,len(a_chercher)-1):\n        masquer.append(\"*\")\n    for k in range(0,7):\n        print('\\n \\n')\n        print (\"Compteur :\",compteur,\" / 7\")\n        print(\"Voici votre mot :\",masquer)\n        compteur=compteur+1\n        x=input(str(\"Tapez La lettre choisit ou tapez 1 pour proposez un mot !\"))\n        x=x.upper()\n        if len(x)!=1:\n            compteur=compteur-1\n            print (\"Veuillez ne renseigné que une seul lettre !\")\n        else :\n            if x==\"1\":\n                print('\\n')\n                print (\"Proposez un mot\")\n                print(\"Voici votre mot :\",masquer)\n                proposition=input(str())\n                if proposition==a_chercher:\n                    print('\\n \\n \\n \\n')\n                    return (\"Bravo vous avez trouvé ! 
Le mot était \"+a_chercher+\" !\")\n else :\n print (\"Bien essayer mais ce n'est pas cela !\")\n else :\n if compteur==7:\n return (\"Trop d'éssai\")\n else :\n if x in deja_essayer:\n compteur=compteur-1\n print (\"Cette lettre a déja été dite !\")\n else:\n deja_essayer.append(x)\n if x in a_chercher:\n for y in range (1,len(masquer)):\n if a_chercher[y]==x:\n masquer[y]=x\n else :\n print (\"Cette lettre n'est pas dans le mot !\")\n return masquer\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nprint(pendue())","repo_name":"Blazefive/Jeux-du-Pendue","sub_path":"pendue.py","file_name":"pendue.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"74870856227","text":"# Author: Riley Taylor\n# Course: CSC 110, Section 2J, Spring 2017\n# Program: Book Recommender\n#\n# Interacts with a review database and provides recommended books based\n# on similarities between users\n\n# HW9 Hours Spent: 4\n# HW10 Hours Spent: 7\n\nfrom review import *\n\nFILE = 'ratings.txt'\n\n\ndef main():\n print(\"Welcome to the CSC110 Book Recommender. Type the word in the\\n\"\n \"left column to do the action on the right.\\n\"\n \"recommend : recommend books for a particular user\\n\"\n \"best : the book with the highest rating among all users\\n\"\n \"add : add a new book\\n\"\n \"quit : exit the program\\n\")\n recommendations = {}\n get_recommendations(recommendations)\n\n done = False\n while not done:\n selection = input(\"next task? \")\n if selection == \"recommend\":\n recommend(recommendations)\n elif selection == \"best\":\n best(recommendations)\n elif selection == \"add\":\n add(recommendations)\n elif selection == \"quit\":\n done = True\n else:\n print(\"please enter a valid response\\n\")\n\n\n# --------------------------------------------------------------------\n# get_recommendations fetches data from the provided file and adds it\n# to the reviews database.\n#\n# PARAMETERS: db -- a dictionary. The reviews database\n# --------------------------------------------------------------------\ndef get_recommendations(db):\n recs = open(FILE).readlines()\n for entry in range(0, len(recs), 4):\n user = recs[entry].strip()\n review = Review(recs[entry + 1].strip(),\n recs[entry + 2].strip(),\n recs[entry + 3].strip())\n add_recomendation(db, user, review)\n\n\n# --------------------------------------------------------------------\n# add_recomendation() adds a review to the database.\n#\n# PARAMETERS: db -- a dictionary. The reviews database\n# --------------------------------------------------------------------\ndef add_recomendation(db, user, review):\n if user not in db:\n db[user] = set()\n db[user].add(review)\n\n\n# --------------------------------------------------------------------\n# get_best() finds the highest rated book in the database\n#\n# PARAMETERS: db -- a dictionary. 
# --------------------------------------------------------------------\n# get_best() finds the highest rated book in the database\n#\n# PARAMETERS: db -- a dictionary. The reviews database\n# RETURNS: a tuple of the best rated title and its rating\n# --------------------------------------------------------------------\ndef get_best(db):\n    ratings = {}\n    for user in db:\n        for rec in db[user]:\n            if rec.get_title() not in ratings:\n                ratings[rec.get_title()] = (rec.get_rating(), 1)\n            else:\n                new_rating = ratings[rec.get_title()][0] + rec.get_rating()\n                new_count = ratings[rec.get_title()][1] + 1\n                ratings[rec.get_title()] = (new_rating, new_count)\n    highest_title = ''\n    highest_avg_rating = 0\n    for r in ratings:\n        if ratings[r][0] / ratings[r][1] > highest_avg_rating:\n            highest_title = r\n            highest_avg_rating = ratings[r][0] / ratings[r][1]\n    return highest_title, highest_avg_rating\n\n\n# --------------------------------------------------------------------\n# get_books_for_user() returns the book set for a particular user\n#\n# PARAMETERS: db -- a dictionary. The reviews database\n#             user -- a string. The user to find in the database\n# RETURNS: a set of reviews\n# --------------------------------------------------------------------\ndef get_books_for_user(db, user):\n    return db[user]\n\n\n# --------------------------------------------------------------------\n# recommend() finds the user with the most similar reviews and\n# recommends their books for the given user\n#\n# PARAMETERS: db -- a dictionary. The reviews database\n# --------------------------------------------------------------------\ndef recommend(db):\n    user_recommend_for = input(\"user? \")\n    similarities = {}\n    # For each review the recommendee has, check reviews of other users\n    if user_recommend_for in db:\n        for review in db[user_recommend_for]:\n            for user in db:\n                for other_review in db[user]:\n                    if review.get_title() == other_review.get_title():\n                        # Add matches to the similarities list\n                        dist = review.get_rating() * other_review.get_rating()\n                        if user not in similarities:\n                            similarities[user] = 0\n                        similarities[user] += dist\n    else:\n        print(\"User not in database\")\n    similar_user = ''\n    score = 0\n    # Find the most similar user\n    for user in similarities:\n        if (similarities[user] > score) and (user != user_recommend_for):\n            similar_user = user\n            score = similarities[user]\n    reviews = get_books_for_user(db, similar_user)\n    for r in reviews:\n        print(r)\n\n\n# --------------------------------------------------------------------\n# best() displays the highest rated book.\n#\n# PARAMETERS: db -- a dictionary. The reviews database\n# --------------------------------------------------------------------\ndef best(db):\n    book, avg = get_best(db)\n    print(\"The highest rated book is:\\n\" + book,\n          \"\\nwith an overall score of \" + str(avg))\n\n\n# --------------------------------------------------------------------\n# add() adds a review to the database\n#\n# PARAMETERS: db -- a dictionary. The reviews database\n# --------------------------------------------------------------------\ndef add(db):\n    user = input(\"user? \")\n    title = input(\"title? \")\n    author = input(\"author? \")\n    rating = input(\"rating? 
\")\n review = Review(title, author, rating)\n add_recomendation(db, user, review)\n\n\nmain()\n","repo_name":"rileytaylor/csc110","sub_path":"10/recommender.py","file_name":"recommender.py","file_ext":"py","file_size_in_byte":5803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"23811610265","text":"import os\r\nimport time\r\nimport pandas\r\nimport Daily_Organizer_Functions as dof\r\n\r\nif os.path.exists(r'Daily_Organizer.xlsx') == False:\r\n with open(r\"Daily_Organizer.xlsx\",'a+') as age:\r\n df1 = pandas.DataFrame([['Wake_Up',7,7,0]],index=[5],columns=['Activity ','Start ','End ','Total_Time'])\r\n df1.to_excel(\"Daily_Organizer.xlsx\",sheet_name='Daily ')\r\nelse:\r\n pass\r\n\r\nStart = 7\r\nn = 1\r\nprint('Wake Up Time = 7 ')\r\n\r\nnew_day = input('Do You Want To Start Organizing A New Day?(y/n): ')\r\n\r\nif new_day == 'y':\r\n df1 = pandas.DataFrame([['Wake_Up',7,7,0]],index=[5],columns=['Activity ','Start ','End ','Total_Time'])\r\n df1.to_excel(\"Daily_Organizer.xlsx\",sheet_name='Daily ')\r\n\r\nwhile True:\r\n user_input = input('Continue Organizing? (y/n)')\r\n\r\n if user_input == 'y':\r\n # Collecting Data\r\n print()\r\n Activity = input(f'Enter Activity {n}: ')\r\n Total_Time = input('Enter Working Time (hours): ')\r\n Total_Time = dof.Checking_float(Total_Time)\r\n print()\r\n \r\n if Total_Time > 0: \r\n End = Start + Total_Time\r\n\r\n main_dataframe = dof.exporting_Importing_values(Activity,Start,End,Total_Time)\r\n n += 1\r\n Start = End\r\n else:\r\n print('Checking Time Value ...')\r\n time.sleep(10)\r\n print('Invalid Value, Closing Program ..... \\n')\r\n exit()\r\n else:\r\n print('''\r\n Thank You !! \r\n For Using Daily Organizer.\r\n ''')\r\n break\r\n\r\n\r\naccess_data = input('Do You Want To Access Data?(y/n): ')\r\nif access_data == 'y':\r\n print(main_dataframe)\r\nelse:\r\n pass","repo_name":"SohamS757/Main_SohamS757","sub_path":"Daily_Organizer.py","file_name":"Daily_Organizer.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"} +{"seq_id":"17270006324","text":"from twilio.rest import Client\r\nimport key\r\n\r\n\r\ndef func():\r\n client = Client(key.accound_sid, key.auth_token)\r\n\r\n message = client.messages.create(\r\n body=\"This is message\",\r\n from_=key.twilio_number,\r\n to=key.phone_number,\r\n )\r\n\r\n\r\nif __name__ == \"__main__\":\r\n func()\r\n","repo_name":"smitP7502/Hackathon","sub_path":"sos.py","file_name":"sos.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"31037633446","text":"import numpy as np\nfrom numpy.testing import *\n\nfrom src.network.network_model import NetworkModel\nfrom src.network.sumo_network import SUMONetwork\nfrom src.prediction_models.regression import RegressionPredictor\nfrom src.traffic_data.csv_traffic_data import CSVTrafficData\nfrom src.traffic_data.traffic_data import TrafficData\n\n\ndef test_neighbors():\n net = SUMONetwork('tests/test_cases/network_1.xml')\n data = CSVTrafficData('tests/test_cases/csv_data_2.csv', net)\n data.split()\n predictor = RegressionPredictor(data, 'ridge')\n train, label = predictor.get_neighborhood_data('train', 'A')\n assert_allclose(train, [[50.2, 20]])\n assert_allclose(label, [[45.8]])\n\n\n# test one location\ndef test_neighbors2():\n net = NetworkModel().load('tests/test_cases/qew/qew_net.json')\n data = 
CSVTrafficData('tests/test_cases/qew/qew.csv', net)\n assert len(data.links) == 56\n assert data.timestamps == [str(i) for i in range(1, 49)]\n data.split(val_pct=0.2)\n assert data.train_data.shape == (5, 56, 39)\n assert data.val_data.shape == (5, 56, 9)\n\n predictor = RegressionPredictor(data, 'ridge', target_feature=['count', 'flow', 'travel_time', 'speed', 'density'])\n train, label = predictor.get_neighborhood_data('train', '4478')\n assert_allclose(train, data.data[:, data.links.index('4478'), :38].T)\n assert_allclose(label, data.data[:, data.links.index('4478'), 1:39].T)\n\n\n# test location and 1 hop neighbor, no past time steps\ndef test_neighbors3():\n net = NetworkModel().load('tests/test_cases/qew/qew_net.json')\n data = CSVTrafficData('tests/test_cases/qew/qew.csv', net)\n data.split(val_pct=0.2)\n\n predictor = RegressionPredictor(data, 'ridge', n_hops=1)\n train, label = predictor.get_neighborhood_data('train', '4478')\n assert_allclose(train[:, :5], data.data[:, data.links.index('4478'), :38].T)\n assert_allclose(train[:, 5::2], data.data[:, data.links.index('1321'), :38].T)\n assert_allclose(train[:, 6::2], data.data[:, data.links.index('5903'), :38].T)\n assert_allclose(label, data.data[data.features.index('speed'), data.links.index('4478'), 1:39].reshape(-1, 1))\n\n\n# test location and 1 hop neighbor, including past time steps\ndef test_neighbors4():\n net = NetworkModel().load('tests/test_cases/qew/qew_net.json')\n data = CSVTrafficData('tests/test_cases/qew/qew.csv', net)\n data.split(val_pct=0.2)\n predictor = RegressionPredictor(data, 'ridge', n_hops=1, past_steps=1)\n train, label = predictor.get_neighborhood_data('train', '4478')\n assert_allclose(train[:, :5], data.data[:, data.links.index('4478'), :37].T)\n assert_allclose(train[:, 15:20], data.data[:, data.links.index('4478'), 1:38].T)\n assert_allclose(label, data.data[data.features.index('speed'), data.links.index('4478'), 2:39].reshape(-1, 1))\n assert_allclose(train[:, 5:15:2], data.data[:, data.links.index('1321'), :37].T)\n assert_allclose(train[:, 6:16:2], data.data[:, data.links.index('5903'), :37].T)\n assert_allclose(train[:, 20:30:2], data.data[:, data.links.index('1321'), 1:38].T)\n assert_allclose(train[:, 21:31:2], data.data[:, data.links.index('5903'), 1:38].T)\n\n\ndef test_model_creation():\n net = NetworkModel().load('tests/test_cases/qew/qew_net.json')\n data = CSVTrafficData('tests/test_cases/qew/qew.csv', net)\n data.split(val_pct=0.2)\n predictor = RegressionPredictor(data, 'ridge', alpha=0.001)\n assert len(predictor.model) == len(data.links)\n assert predictor.model['4478'].get_params()['alpha'] == 0.001\n\n predictor = RegressionPredictor(data, 'svr', C=0.1, epsilon=0.01)\n assert len(predictor.model) == len(data.links)\n assert predictor.model['4478'].get_params()['C'] == 0.1\n assert predictor.model['4478'].get_params()['epsilon'] == 0.01\n\n predictor = RegressionPredictor(data, 'rf', n_estimators=100, max_depth=5)\n assert len(predictor.model) == len(data.links)\n assert predictor.model['4478'].get_params()['n_estimators'] == 100\n assert predictor.model['4478'].get_params()['max_depth'] == 5\n\n\ndef test_remove_invalid_data():\n net = SUMONetwork('tests/test_cases/network_1.xml')\n test_data = TrafficData(net, 10)\n test_data.features = ['speed']\n test_data.links = net.link_ids\n test_data.timestamps = list(range(100))\n test_data.data = np.array([[range(1000, 1100), range(2000, 2100)]])\n test_data.split(test_pct=0.1)\n predictor = RegressionPredictor(test_data, 'ridge', 
past_steps=1, alpha=0.001)\n data, label = predictor.get_neighborhood_data('train', 'A')\n data = np.mod(data, 10)\n label = np.broadcast_to(np.mod(label, 10), data.shape)\n assert_array_less(data, label)\n","repo_name":"SHITIANYU-hue/its_simulator","sub_path":"prediction/tests/regression_utils_test.py","file_name":"regression_utils_test.py","file_ext":"py","file_size_in_byte":4657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"9188189069","text":"# binary_trees.py\n\"\"\"Volume 2: Binary Trees.\n Sukyoung Kwak\n 321 - 2\n October 7th, 2021\n\"\"\"\n\n# These imports are used in BST.draw().\nimport networkx as nx\nfrom networkx.drawing.nx_agraph import graphviz_layout\n\nclass SinglyLinkedListNode:\n \"\"\"A node with a value and a reference to the next node.\"\"\"\n def __init__(self, data):\n self.value, self.next = data, None\n\nclass SinglyLinkedList:\n \"\"\"A singly linked list with a head and a tail.\"\"\"\n def __init__(self):\n self.head, self.tail = None, None\n\n def append(self, data):\n \"\"\"Add a node containing the data to the end of the list.\"\"\"\n n = SinglyLinkedListNode(data)\n if self.head is None:\n self.head, self.tail = n, n\n else:\n self.tail.next = n\n self.tail = n\n\n def iterative_find(self, data):\n \"\"\"Search iteratively for a node containing the data.\n If there is no such node in the list, including if the list is empty,\n raise a ValueError.\n\n Returns:\n (SinglyLinkedListNode): the node containing the data.\n \"\"\"\n current = self.head\n while current is not None:\n if current.value == data:\n return current\n current = current.next\n raise ValueError(str(data) + \" is not in the list\")\n\n # Problem 1\n def recursive_find(self, data):\n \"\"\"Search recursively for the node containing the data.\n If there is no such node in the list, including if the list is empty,\n raise a ValueError.\n\n Returns:\n (SinglyLinkedListNode): the node containing the data.\n \"\"\"\n current_node = self.head\n\n #Function to check a single node for the data\n def rec_search(current_node):\n if current_node is None:\n #ends with warning\n raise ValueError(\"data is not found.\")\n else:\n #ends succesfully\n if current_node.value == data:\n return current_node\n else:\n #ends recursively\n current_node = current_node.next\n return rec_search(current_node)\n\n return rec_search(current_node)\n\nclass BSTNode:\n \"\"\"A node class for binary search trees. Contains a value, a\n reference to the parent node, and references to two child nodes.\n \"\"\"\n def __init__(self, data):\n \"\"\"Construct a new node and set the value attribute. The other\n attributes will be set when the node is added to a tree.\n \"\"\"\n self.value = data\n self.prev = None # A reference to this node's parent node.\n self.left = None # self.left.value < self.value\n self.right = None # self.value < self.right.value\n\n\nclass BST:\n \"\"\"Binary search tree data structure class.\n The root attribute references the first node in the tree.\n \"\"\"\n def __init__(self):\n \"\"\"Initialize the root attribute.\"\"\"\n self.root = None\n\n def find(self, data):\n \"\"\"Return the node containing the data. If there is no such node\n in the tree, including if the tree is empty, raise a ValueError.\n \"\"\"\n\n # Define a recursive function to traverse the tree.\n def _step(current):\n \"\"\"Recursively step through the tree until the node containing\n the data is found. 
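The regression tests above build supervised pairs by slicing a `(features, links, time)` array, pairing inputs such as `data[:, link, :38].T` with labels shifted one step ahead. A small sketch of that windowing on a synthetic array; shapes and names here are illustrative only:

```python
import numpy as np

# Synthetic traffic tensor: 2 features, 3 links, 40 time steps.
data = np.arange(2 * 3 * 40, dtype=float).reshape(2, 3, 40)

def one_step_pairs(data, link, past_steps=0):
    """Inputs at times t-past_steps..t, labels at time t+1, for one link."""
    t_max = data.shape[2] - 1
    X = np.hstack([data[:, link, lag:t_max - past_steps + lag].T
                   for lag in range(past_steps + 1)])
    y = data[:, link, past_steps + 1:].T
    return X, y

X, y = one_step_pairs(data, link=0, past_steps=1)
print(X.shape, y.shape)  # (38, 4) (38, 2)
```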
If there is no such node, raise a Value Error.\n \"\"\"\n if current is None: # Base case 1: dead end.\n raise ValueError(str(data) + \" is not in the tree.\")\n if data == current.value: # Base case 2: data found!\n return current\n if data < current.value: # Recursively search left.\n return _step(current.left)\n else: # Recursively search right.\n return _step(current.right)\n\n # Start the recursion on the root of the tree.\n return _step(self.root)\n\n # Problem 2\n def insert(self, data):\n \"\"\"Insert a new node containing the specified data.\n\n Raises:\n ValueError: if the data is already in the tree.\n\n Example:\n >>> tree = BST() |\n >>> for i in [4, 3, 6, 5, 7, 8, 1]: | (4)\n ... tree.insert(i) | / \\\n ... | (3) (6)\n >>> print(tree) | / / \\\n [4] | (1) (5) (7)\n [3, 6] | \\\n [1, 5, 7] | (8)\n [8] |\n \"\"\"\n current_node = self.root\n\n #Function to recursive.\n def rec_insert(current_node):\n\n #Empty Tree\n if current_node is None:\n #We have a head!\n self.root = BSTNode(data)\n\n #Non-empty Tree\n else:\n #data is bigger than the node\n if data > current_node.value:\n #the node is a leaf\n if current_node.right is None:\n new_node = BSTNode(data)\n current_node.right = new_node\n new_node.prev = current_node\n #the node is not leaf, thus, use the function\n else:\n current_node = current_node.right\n return rec_insert(current_node)\n\n #data is less than the node\n elif data < current_node.value:\n #the node is a leaf\n if current_node.left is None:\n new_node = BSTNode(data)\n current_node.left = new_node\n new_node.prev = current_node\n #the node is not leaf, thus, use the function\n else:\n current_node = current_node.left\n return rec_insert(current_node)\n\n #same value case\n else:\n raise ValueError(\"There is already a node in the tree containing the insertion data.\")\n\n return rec_insert(current_node)\n\n\n # Problem 3\n def remove(self, data):\n \"\"\"Remove the node containing the specified data.\n\n Raises:\n ValueError: if there is no node containing the data, including if\n the tree is empty.\n\n Examples:\n >>> print(12) | >>> print(t3)\n [6] | [5]\n [4, 8] | [3, 6]\n [1, 5, 7, 10] | [1, 4, 7]\n [3, 9] | [8]\n >>> for x in [7, 10, 1, 4, 3]: | >>> for x in [8, 6, 3, 5]:\n ... t1.remove(x) | ... t3.remove(x)\n ... | ...\n >>> print(t1) | >>> print(t3)\n [6] | [4]\n [5, 8] | [1, 7]\n [9] |\n | >>> print(t4)\n >>> print(t2) | [5]\n [2] | >>> t4.remove(1)\n [1, 3] | ValueError: \n >>> for x in [2, 1, 3]: | >>> t4.remove(5)\n ... t2.remove(x) | >>> print(t4)\n ... | []\n >>> print(t2) | >>> t4.remove(5)\n [] | ValueError: \n \"\"\"\n #The tree is empty\n if self.root is None:\n raise ValueError(\"The tree is empty.\")\n\n target_node = self.find(data)\n\n #1. The target is a leaf node\n if target_node.right is None and target_node.left is None:\n #The target is the root\n if target_node is self.root:\n self.root = None\n #The target is the left of its parent\n elif target_node.prev.left is target_node:\n target_node.prev.left = None\n #The target is the right of its parent\n elif target_node.prev.right is target_node:\n target_node.prev.right = None\n\n #2. 
The target has two children\n elif target_node.left is not None and target_node.right is not None:\n a = target_node.left\n\n #we don't have predecessor\n if a.right is None:\n #save the value of the left child\n sub_val = a.value\n #swap the value with the left child\n target_node.value = sub_val\n\n #if a has no left child\n if a.left is None:\n target_node.left = None\n #if a has a left child\n else:\n target_node.left = a.left\n\n #we do have predecessor\n else:\n b = a.right\n while(b.right is not None):\n b = b.right\n target_node.value = b.value\n\n #b doesn't have a left child\n if b.left is None:\n b.prev.right = None\n #b has a left child\n else:\n b.prev.right = b.left\n\n #3. The target has one child\n else:\n #The target is the root\n if target_node == self.root:\n #left child\n if target_node.right is None:\n self.root = target_node.left\n #right child\n if target_node.left is None:\n self.root = target_node.right\n\n #The target is to the left of its parent\n elif target_node.prev.left is target_node:\n print(target_node.value)\n #target has a left child\n if target_node.right is None:\n target_node.left.prev = target_node.prev\n target_node.prev.left = target_node.left\n #target has a right child\n else:\n target_node.right.prev = target_node.prev\n target_node.prev.left = target_node.right\n #The target is to the right of its parent\n\n else:\n #target has a left child\n if target_node.right is None:\n target_node.left.prev = target_node.prev\n target_node.prev.right = target_node.left\n #target has a right child\n else:\n target_node.right.prev = target_node.prev\n target_node.prev.right = target_node.right\n\n\nclass AVL(BST):\n \"\"\"Adelson-Velsky Landis binary search tree data structure class.\n Rebalances after insertion when needed.\n \"\"\"\n def insert(self, data):\n \"\"\"Insert a node containing the data into the tree, then rebalance.\"\"\"\n BST.insert(self, data) # Insert the data like usual.\n n = self.find(data)\n while n: # Rebalance from the bottom up.\n n = self._rebalance(n).prev\n\n def remove(*args, **kwargs):\n \"\"\"Disable remove() to keep the tree in balance.\"\"\"\n raise NotImplementedError(\"remove() is disabled for this class\")\n\n def _rebalance(self,n):\n \"\"\"Rebalance the subtree starting at the specified node.\"\"\"\n balance = AVL._balance_factor(n)\n if balance == -2: # Left heavy\n if AVL._height(n.left.left) > AVL._height(n.left.right):\n n = self._rotate_left_left(n) # Left Left\n else:\n n = self._rotate_left_right(n) # Left Right\n elif balance == 2: # Right heavy\n if AVL._height(n.right.right) > AVL._height(n.right.left):\n n = self._rotate_right_right(n) # Right Right\n else:\n n = self._rotate_right_left(n) # Right Left\n return n\n\n @staticmethod\n def _height(current):\n \"\"\"Calculate the height of a given node by descending recursively until\n there are no further child nodes. 
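`prob4()` near the end of this record is left unfinished after opening `english.txt`, although its docstring asks for build and search timings across the three structures. A hedged sketch of one way to collect those numbers, assuming this module's `SinglyLinkedList`, `BST`, and `AVL` classes are importable as `binary_trees`:

```python
import random
import time

from binary_trees import SinglyLinkedList, BST, AVL  # assumes this module's classes

def time_structure(items, build, find):
    """Return (build_seconds, search_seconds) for 5 random lookups."""
    start = time.perf_counter()
    for item in items:
        build(item)
    build_time = time.perf_counter() - start
    targets = random.sample(items, 5)
    start = time.perf_counter()
    for item in targets:
        find(item)
    return build_time, time.perf_counter() - start

# english.txt is assumed to contain unique words; BST.insert raises on duplicates.
with open("english.txt") as f:
    words = f.read().split()

for n in (500, 1000, 2000):
    sample = random.sample(words, n)
    sll, bst, avl = SinglyLinkedList(), BST(), AVL()
    print(n,
          time_structure(sample, sll.append, sll.iterative_find),
          time_structure(sample, bst.insert, bst.find),
          time_structure(sample, avl.insert, avl.find))
```

Plotting these series on log-log axes (element count versus seconds) makes the linear-versus-logarithmic search behavior visible, as the docstring suggests.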
Return the number of children in the\n longest chain down.\n node | height\n Example: (c) a | 0\n / \\ b | 1\n (b) (f) c | 3\n / / \\ d | 1\n (a) (d) (g) e | 0\n \\ f | 2\n (e) g | 0\n \"\"\"\n if current is None: # Base case: the end of a branch.\n return -1 # Otherwise, descend down both branches.\n return 1 + max(AVL._height(current.right), AVL._height(current.left))\n\n @staticmethod\n def _balance_factor(n):\n return AVL._height(n.right) - AVL._height(n.left)\n\n def _rotate_left_left(self, n):\n temp = n.left\n n.left = temp.right\n if temp.right:\n temp.right.prev = n\n temp.right = n\n temp.prev = n.prev\n n.prev = temp\n if temp.prev:\n if temp.prev.value > temp.value:\n temp.prev.left = temp\n else:\n temp.prev.right = temp\n if n is self.root:\n self.root = temp\n return temp\n\n def _rotate_right_right(self, n):\n temp = n.right\n n.right = temp.left\n if temp.left:\n temp.left.prev = n\n temp.left = n\n temp.prev = n.prev\n n.prev = temp\n if temp.prev:\n if temp.prev.value > temp.value:\n temp.prev.left = temp\n else:\n temp.prev.right = temp\n if n is self.root:\n self.root = temp\n return temp\n\n def _rotate_left_right(self, n):\n temp1 = n.left\n temp2 = temp1.right\n temp1.right = temp2.left\n if temp2.left:\n temp2.left.prev = temp1\n temp2.prev = n\n temp2.left = temp1\n temp1.prev = temp2\n n.left = temp2\n return self._rotate_left_left(n)\n\n def _rotate_right_left(self, n):\n temp1 = n.right\n temp2 = temp1.left\n temp1.left = temp2.right\n if temp2.right:\n temp2.right.prev = temp1\n temp2.prev = n\n temp2.right = temp1\n temp1.prev = temp2\n n.right = temp2\n return self._rotate_right_right(n)\n\n\n# Problem 4\ndef prob4():\n \"\"\"Compare the build and search times of the SinglyLinkedList, BST, and\n AVL classes. For search times, use SinglyLinkedList.iterative_find(),\n BST.find(), and AVL.find() to search for 5 random elements in each\n structure. Plot the number of elements in the structure versus the build\n and search times. Use log scales where appropriate.\n \"\"\"\n file_1 = open(\"english.txt\", \"r\")\n\n\n def __str__(self):\n \"\"\"String representation: a hierarchical view of the BST.\n\n Example: (3)\n / \\ '[3] The nodes of the BST are printed\n (2) (5) [2, 5] by depth levels. Edges and empty\n / / \\ [1, 4, 6]' nodes are not printed.\n (1) (4) (6)\n \"\"\"\n if self.root is None: # Empty tree\n return \"[]\"\n out, current_level = [], [self.root] # Nonempty tree\n while current_level:\n next_level, values = [], []\n for node in current_level:\n values.append(node.value)\n for child in [node.left, node.right]:\n if child is not None:\n next_level.append(child)\n out.append(values)\n current_level = next_level\n return \"\\n\".join([str(x) for x in out])\n\n def draw(self):\n \"\"\"Use NetworkX and Matplotlib to visualize the tree.\"\"\"\n if self.root is None:\n return\n\n # Build the directed graph.\n G = nx.DiGraph()\n G.add_node(self.root.value)\n nodes = [self.root]\n while nodes:\n current = nodes.pop(0)\n for child in [current.left, current.right]:\n if child is not None:\n G.add_edge(current.value, child.value)\n nodes.append(child)\n\n # Plot the graph. 
This requires graphviz_layout (pygraphviz).\n        import matplotlib.pyplot as plt  # matplotlib is not imported at the top of this file\n        nx.draw(G, pos=graphviz_layout(G, prog=\"dot\"), arrows=True,\n                with_labels=True, node_color=\"C1\", font_size=8)\n        plt.show()\n","repo_name":"KwakSukyoung/coding","sub_path":"ACME/BinaryTrees/binary_trees.py","file_name":"binary_trees.py","file_ext":"py","file_size_in_byte":17103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"13534578174","text":"import tkinter as tk\nimport easygui\nimport pandas as pd\nfrom time import strftime\n# import tkinter_flight_scraper_bot\n# from tkinter_flight_scraper_bot import Flight_Bot\ndef caps_from(event):\n    \"\"\" Forces the input FROM to be upper case and less than 4 characters \"\"\"\n    from_city1.set(from_city1.get().upper())\n    if len(from_city1.get()) >3: from_city1.set(from_city1.get()[:3])\n\ndef caps_to(event):\n    \"\"\" Forces the input TO to be upper case and less than 4 characters \"\"\"\n    to_city1.set(to_city1.get().upper())\n    if len(to_city1.get())>3: to_city1.set(to_city1.get()[:3])\n\ndef close_app():\n    window.destroy()\n\ndef run_app():\n    print('run')\n\n\n\nwindow = tk.Tk()\n\"\"\" Return a new Toplevel widget on screen SCREENNAME. A new Tcl interpreter will be created. \nBASENAME will be used for the identification of the profile file (see readprofile). It is constructed from sys.argv[0] without extensions if None is given. \nCLASSNAME is the name of the widget class. \"\"\"\n\nwindow.title(\"FLIGHT SCRAPER\")\n#window.geometry(\"600x600\") # size of the window when it opens\n#window.minsize(width=600, height=600) # you can define the minimum size of the window like this\n\nwindow.resizable(width=\"false\", height=\"false\") # change to false if you want to prevent resizing\n\n\"\"\" Every tkinter app needs to be structured this way. We call the tk.Tk() to create the app window,\nand we then build our app and place the widgets (frames, buttons, labels etc.) when we want before \"compiling\" everything with window.mainloop(). \"\"\"\n\n# three frames on top of each other\nframe_header = tk.Frame(window, borderwidth=2, pady=2)\ncenter_frame = tk.Frame(window, borderwidth=2, pady=5)\nbottom_frame = tk.Frame(window, borderwidth=2, pady=5)\nframe_header.grid(row=0, column=0)\ncenter_frame.grid(row=1, column=0)\nbottom_frame.grid(row=2, column=0)\n\n# label header to be placed in the frame_header\nheader = tk.Label(frame_header, text = \"FLIGHT SCRAPER TOOL\", bg='grey', fg='black', height='3', width='50', font=(\"Helvetica 16 bold\"))\n# inside the grid of frame_header, place it in the position 0,0\nheader.grid(row=0, column=0)\n\n\"\"\" The above code should always be before mainloop() \"\"\"\n\n# two additional frames go inside the center_frame\nframe_main_1 = tk.Frame(center_frame, borderwidth=2, relief='sunken')\nframe_main_2 = tk.Frame(center_frame, borderwidth=2, relief='sunken')\n\n# and populate them with the labels referring to the inputs we want from the user\nfrom_city = tk.Label(frame_main_1, text = \"FROM: \")\nto_city = tk.Label(frame_main_2, text = \"TO: \")\ndeparture_date = tk.Label(frame_main_1, text = \" DEPARTURE DATE:\")\nreturn_date = tk.Label(frame_main_2, text = \" RETURN DATE:\")\n\n# To put it simply: StringVar() allows you to easily track tkinter variables and see if they were read, changed, etc\n# check resources here for more details: http://effbot.org/tkinterbook/variable.htm\nfrom_city1 = tk.StringVar()\nto_city1 = tk.StringVar()\ndeparture_date1 = tk.StringVar()\nreturn_date1 = tk.StringVar()\n\n# this part is just to display the labels inside the center frame\n# the order which we pack the items is important\nframe_main_1.pack(fill='x', pady=2)\nframe_main_2.pack(fill='x',pady=2)\nfrom_city.pack(side='left')\ndeparture_date.pack(side='left', padx=5)\nto_city.pack(side='left')\nreturn_date.pack(side='right', padx=5)\n\n# a proper app needs some buttons too!\nbutton_run = tk.Button(bottom_frame, text=\"Start\", command=run_app, bg='dark green', fg='white', relief='raised', width=10, font=('Helvetica 9 bold'))\nbutton_run.grid(column=0, row=0, sticky='w', padx=100, pady=2)\n\nbutton_close = tk.Button(bottom_frame, text=\"Exit\", command=close_app, bg='dark red', fg='white', relief='raised', width=10, font=('Helvetica 9'))\nbutton_close.grid(column=1, row=0, sticky='e', padx=100, pady=2)\n\nwindow.mainloop()\n\n\"\"\" def run_app():\n    print('getting user inputs')\n    user_city_from = str(from_city_entry.get())\n    user_city_to = str(to_city_entry.get())\n    user_date_depart = str(departure_date_entry.get())\n    user_date_return = str(return_date_entry.get())\n    \n    print('starting Chrome')\n    bot = Flight_Bot()\n    bot.start_kayak(user_city_from, user_city_to, user_date_depart, user_date_return)\n    \"\"\"","repo_name":"MohanKrishna-RC/Python-Necessities","sub_path":"data/raw/tkinter_expl.py","file_name":"tkinter_expl.py","file_ext":"py","file_size_in_byte":4214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"40926883142","text":"import numpy as np\nfrom gym import utils\nfrom mujoco_env import MujocoEnv\n\n\nclass PlannedReacher(MujocoEnv, utils.EzPickle):\n    def __init__(self, steps: int = 3, punish: bool = False):\n        self.singleActionSize = 2\n        self.steps = steps\n        self.punish = punish\n        self.oldActionList = np.zeros(self.singleActionSize*self.steps)\n        utils.EzPickle.__init__(self)\n        MujocoEnv.__init__(self, \"reacher.xml\", 2, self.steps)\n\n    def step(self, a):\n        actionList = a\n        a = actionList[:self.singleActionSize]  # execute only the first sub-action of the plan\n        
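In the reacher record that begins above, `step()` scores the distance between the new action plan and the tail of the previous one with nested loops. A numpy sketch of an equivalent vectorized computation, using the same linear recency weighting; the function name and test values are illustrative:

```python
import numpy as np

def plan_deviation_penalty(new_plan, old_plan, action_size):
    """Weighted distance between the new plan and the previous plan shifted by one sub-action."""
    steps = len(old_plan) // action_size - 1               # overlapping sub-actions
    new = np.asarray(new_plan)[:steps * action_size].reshape(steps, action_size)
    old = np.asarray(old_plan)[action_size:].reshape(steps, action_size)
    dists = np.linalg.norm(new - old, axis=1)              # per-sub-action Euclidean distance
    weights = (len(old_plan) - np.arange(steps) * action_size) / len(old_plan)
    return float(np.sum(dists * weights))

old = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6])
new = np.array([0.5, 0.1, 0.2, 0.9, 0.0, 0.0])
print(plan_deviation_penalty(new, old, action_size=2))
```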
vec = self.get_body_com(\"fingertip\") - self.get_body_com(\"target\")\n reward_dist = -np.linalg.norm(vec)\n reward_ctrl = -np.square(a).sum()\n reward = reward_dist + reward_ctrl\n self.do_simulation(a, self.frame_skip)\n ob = self._get_obs()\n done = False\n\n ##messing with rewards to penalize changing plans. since its continuous, it only cares about the distance between actions\n if self.oldActionList is not None and self.punish:\n # print('(',len( self.oldActionList), '-', self.singleActionSize, ')/', self.singleActionSize)\n # print('yo', (len(self.oldActionList)-self.singleActionSize)/self.singleActionSize)\n for i in range(int((len(self.oldActionList)-self.singleActionSize)/self.singleActionSize)):\n distance = 0\n for k in range(self.singleActionSize):\n distance += (actionList[i * self.singleActionSize + k] - self.oldActionList[self.singleActionSize:][i*self.singleActionSize + k]) ** 2\n distance = distance ** (1/2)\n reward -= 1 * distance * (len(self.oldActionList) - (i * self.singleActionSize))/len(self.oldActionList)\n\n self.oldActionList = actionList\n ob = np.concatenate((ob, self.oldActionList))\n return ob, reward, done, dict(reward_dist=reward_dist, reward_ctrl=reward_ctrl)\n\n def viewer_setup(self):\n self.viewer.cam.trackbodyid = 0\n\n def reset_model(self):\n qpos = (\n self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq)\n + self.init_qpos\n )\n while True:\n self.goal = self.np_random.uniform(low=-0.2, high=0.2, size=2)\n if np.linalg.norm(self.goal) < 0.2:\n break\n qpos[-2:] = self.goal\n qvel = self.init_qvel + self.np_random.uniform(\n low=-0.005, high=0.005, size=self.model.nv\n )\n qvel[-2:] = 0\n self.set_state(qpos, qvel)\n\n self.oldActionList = np.zeros(self.singleActionSize*self.steps)\n observation = np.concatenate((self._get_obs(), np.zeros(self.steps*self.singleActionSize)))\n return observation\n\n def _get_obs(self):\n theta = self.sim.data.qpos.flat[:2]\n return np.concatenate(\n [\n np.cos(theta),\n np.sin(theta),\n self.sim.data.qpos.flat[2:],\n self.sim.data.qvel.flat[:2],\n self.get_body_com(\"fingertip\") - self.get_body_com(\"target\"),\n ]\n )","repo_name":"HenryDykhne/StablePlanner","sub_path":"PlannedReacherEnv.py","file_name":"PlannedReacherEnv.py","file_ext":"py","file_size_in_byte":3117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"75196774947","text":"import streamlit as st\nimport json\nimport os\nimport plotly.express as px\nimport pandas as pd\nimport datetime\n\nst.title( \"watchmeforever clip timeline\" )\n\n# create variables\ndirectory = \"watchmeforever_test/\"\nnum_of_file = 0\n\nall_videos_df = None\n\n# iterate across each video\nfor filename in os.listdir(directory):\n if filename.endswith(\"mp4\"):\n with open(os.path.join(directory, filename), 'rb') as f:\n video_bytes = f.read()\n num_of_file += 1\n\n # decode related json\n jsonname = filename.replace(\"mp4\", \"info.json\")\n jsonfile = json.load(open(os.path.join(directory, jsonname), 'rb'))\n timestamp = jsonfile[\"timestamp\"]\n duration = jsonfile[\"duration\"]\n video_id = jsonfile[\"id\"]\n video_end = timestamp + duration\n decodedtimestamp = str(datetime.datetime.fromtimestamp(timestamp))\n video_real_date = decodedtimestamp[0:10]\n video_year = decodedtimestamp[0:4]\n video_month = decodedtimestamp[5:7]\n video_day = decodedtimestamp[8:10]\n video_hour = decodedtimestamp[11:13]\n video_minute = decodedtimestamp[14:16]\n video_second = decodedtimestamp[17:19]\n\n video_title = 
jsonfile[\"title\"]\n\n            file_definitions = {\n                \"video_id\" : video_id,\n                \"video_title\" : video_title,\n                \"timestamp\" : timestamp,\n                \"video_real_date\" : video_real_date,\n                \"video_year\" : video_year,\n                \"video_month\" : video_month,\n                \"video_day\" : video_day,\n                \"video_hour\" : video_hour,\n                \"video_minute\" : video_minute,\n                \"video_second\" : video_second,\n                \"duration\" : duration,\n                \"video_end\" : video_end\n            }\n            newdef = pd.DataFrame([file_definitions])\n            if all_videos_df is None:\n                all_videos_df = newdef\n            else:\n                all_videos_df = pd.concat([all_videos_df,newdef])\n\nst.dataframe(all_videos_df)\ngraph = px.timeline(all_videos_df, x_start=\"timestamp\", x_end=\"video_end\", y=\"video_real_date\")\nst.plotly_chart(graph)\n","repo_name":"natfabulous/nf_video_timeline","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"291592574","text":"import rospy, roslaunch\n# import numpy as np\nimport subprocess\nimport os\nimport sys\nfrom enum import Enum\nfrom std_msgs.msg import UInt8, Float32MultiArray\nfrom tf.transformations import *\nimport tf\n# from PySide import QtCore, QtGui, QtOpenGL\nfrom sensor_msgs.msg import JointState\nfrom geometry_msgs.msg import PoseStamped, Pose, Point\nfrom std_msgs.msg import Int32, String\nimport math  # math.sqrt, math.atan, math.degrees, math.cos and math.sin are used below\nfrom ar_track_alvar_msgs.msg import AlvarMarkers\nfrom geometry_msgs.msg import Vector3, Point\nfrom visualization_msgs.msg import MarkerArray\n\n\n# Manipulator \nfrom open_manipulator_msgs.msg import JointPosition\nfrom open_manipulator_msgs.msg import KinematicsPose\nfrom open_manipulator_msgs.msg import OpenManipulatorState\nfrom open_manipulator_msgs.srv import SetJointPosition\nfrom open_manipulator_msgs.srv import SetKinematicsPose\nfrom open_manipulator_msgs.srv import GetJointPosition\nfrom open_manipulator_msgs.srv import GetKinematicsPose\nfrom open_manipulator_msgs.srv import SetActuatorState\n \nclass PickAndPlace():\n    def __init__(self): \n        self.push_start = False \n\n        self.CurrentMode = Enum('CurrentMode', \n                        'idle \\\n                        init \\\n                        waitObject \\\n                        move_to_pick \\\n                        close_object \\\n                        move_to_place' ) \n\n        self.listener = tf.TransformListener()\n        self.jointStates = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\n        self.kinematicsStates = [0.0, 0.0, 0.0]\n        self.open_manipulator_moving_state = \"STOPPED\"\n        self.current_mode = self.CurrentMode.init.value\n        self.pickObjectPose = PoseStamped()\n        self.pickTargetPose = PoseStamped() \n        self.placeObjectPose = PoseStamped()\n        self.placeTargetPose = PoseStamped() \n        self.is_triggered = False\n        self.currentToolPose = Pose()\n        self.use_platform = rospy.get_param(\"~use_platform\",\"true\")\n        self.button_floor = \"\"\n        \n        self.set_joint_position = rospy.ServiceProxy('goal_joint_space_path', SetJointPosition)\n        self.set_kinematics_position = rospy.ServiceProxy('goal_task_space_path_position_only', SetKinematicsPose)\n        self.set_joint_position_from_present = rospy.ServiceProxy('goal_joint_space_path_from_present', SetJointPosition)\n        self.set_actuator_state = rospy.ServiceProxy('set_actuator_state', SetActuatorState)\n        self.set_gripper_control = rospy.ServiceProxy('goal_tool_control', SetJointPosition)\n\n        self.open_manipulator_joint_states_sub_ = rospy.Subscriber('joint_states', JointState, self.jointStatesCallback)\n        
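The watchmeforever timeline record above recovers date parts by slicing the string form of `datetime.datetime.fromtimestamp(...)` at fixed offsets. A sketch of the attribute-based equivalent, which avoids hard-coded string positions; the sample timestamp is arbitrary:

```python
import datetime

ts = 1700000000                       # arbitrary example epoch timestamp
dt = datetime.datetime.fromtimestamp(ts)

video_real_date = dt.date().isoformat()             # e.g. '2023-11-14' (local time)
video_year, video_month, video_day = dt.year, dt.month, dt.day
video_hour, video_minute, video_second = dt.hour, dt.minute, dt.second

# Zero-padded strings, if downstream code expects the sliced-string form:
print(video_real_date, f"{video_month:02d}", f"{video_second:02d}")
```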
self.open_manipulator_kinematics_pose_sub_ = rospy.Subscriber('gripper/kinematics_pose', KinematicsPose, self.kinematicsPoseCallback)\n        self.open_manipulator_states_sub = rospy.Subscriber('states', OpenManipulatorState, self.statesCallback)\n        # self.maker_point = rospy.Subscriber('ar_marker_pose', AlvarMarkers, self.markerCallback)\n        # self.marker_point = rospy.Subscriber('/button_point', Point, self.markerCallback)\n        self.marker_point = rospy.Subscriber('/button_tracker_3d/markers', MarkerArray, self.markerCallback)\n        self.floor = rospy.Subscriber('/floor', String, self.floorCallback)\n        # self.object_sub = rospy.Subscriber('objects', Float32MultiArray, self.objectCallback)\n        \n        rospy.sleep(1)\n        # actuator enable \n        self.actuatorTorque(True)\n        # self.setInitPose()\n\n        loop_rate = rospy.Rate(10) # 10hz\n        \n        while not rospy.is_shutdown() :\n            if self.is_triggered == True:\n                self.setInitPose()\n                # print(\"============ Press `Enter` to begin the tutorial by setting up the moveit_commander (press ctrl-d to exit) ...\")\n                # raw_input()\n                # self.fnControlNode()\n\n                if self.push_start == True:\n                    print(self.push_start)\n                    self.moveToObject()\n                    rospy.sleep(2.0)\n                    pass\n                else:\n                    self.setInitPose()\n                    pass\n            loop_rate.sleep()\n\n    def actuatorTorque(self, enable):\n        rospy.logwarn(\"actuatorTorque\")\n        joint_name = ['joint1','joint2','joint3','joint4','gripper']\n        try: \n            resp = self.set_actuator_state(enable)\n            rospy.sleep(1)\n        except rospy.ServiceException as e:\n            print(\"Service call failed: %s\"%e) \n            return False\n        if not resp :\n            rospy.loginfo(\"set_actuator enable fail\") \n        return resp\n\n    def setInitPose(self):\n        rospy.logwarn(\"setInitPose\")\n        # init position\n        joint_position = JointPosition()\n        joint_position.joint_name = ['joint1','joint2','joint3','joint4'] \n        joint_position.position = [0.0, -1.05, 0.32, 0.70] \n        #joint_position.position = [0.0, -1.791, 0.507, 1.438] \n        resp = False \n        try: \n            path_time = 2 \n            resp = self.set_joint_position(\"\",joint_position, path_time)\n            rospy.sleep(path_time)\n        except rospy.ServiceException as e:\n            print(\"Service call failed: %s\"%e) \n        if not resp :\n            return False\n\n    def markerCallback(self, msg):\n        for tag in msg.markers:\n            if tag.id == 0:\n                self.push_start = True\n                self.pickObjectPose.header = tag.header\n                self.pickObjectPose.pose = tag.pose\n            else:\n                self.push_start = False\n        \n        # self.pickObjectPose.header = msg.header\n        # for tag in msg.markers:\n        #     # x_total = 0\n        #     # y_total = 0\n        #     # z_total = 0\n        #     if tag.id == 0:\n        #         self.pickObjectPose.pose = tag.pose.pose\n\n\n    def floorCallback(self, msg):\n        button_info = str(msg)\n        self.button_floor = int(button_info[25:26])\n        # rospy.loginfo(self.button_floor)\n\n\n    def setBackwardPose(self):\n        rospy.logwarn(\"setBackwardPose\")\n        # init position\n        joint_position = JointPosition()\n        joint_position.joint_name = ['joint1','joint2','joint3','joint4'] \n        #joint_position.position = [0.0, -1.05, 0.35, 0.70] \n        joint_position.position = [0.0, -1.791, 0.507, 1.438] \n        resp = False \n        try: \n            path_time = 2 \n            resp = self.set_joint_position(\"\",joint_position, path_time)\n            rospy.sleep(path_time)\n        except rospy.ServiceException as e:\n            print(\"Service call failed: %s\"%e) \n        if not resp :\n            return False \n\n        # open gripper \n        joint_position = JointPosition()\n        joint_position.joint_name = ['gripper'] \n        joint_position.position = [0.01] #-0.01 0.01\n        resp = False\n        
try: \n            path_time = 1 \n            resp = self.set_gripper_control(\"\",joint_position, path_time)\n            rospy.sleep(path_time)\n        except rospy.ServiceException as e:\n            print(\"Service call failed: %s\"%e) \n        if not resp :\n            return False \n\n        return True \n\n    def setBackwardPose2(self):\n        rospy.logwarn(\"setBackwardPose2\")\n        # init position\n        joint_position = JointPosition()\n        joint_position.joint_name = ['joint1','joint2','joint3','joint4'] \n        #joint_position.position = [0.0, -1.05, 0.35, 0.70] \n        joint_position.position = [0.0, -1.791, 0.507, 1.438] \n        resp = False \n        try: \n            path_time = 2 \n            resp = self.set_joint_position(\"\",joint_position, path_time)\n            rospy.sleep(path_time)\n        except rospy.ServiceException as e:\n            print(\"Service call failed: %s\"%e) \n        if not resp :\n            return False \n\n        return True\n\n    def moveToObject(self):\n        rospy.logwarn(\"move to object\")\n        resp = False\n        end_effector_name = \"gripper\" \n        kinematics_pose = KinematicsPose()\n        planning_group = \"arm\" \n        # kinematics_pose.pose = self.pickTargetPose.pose\n        kinematics_pose.pose = self.pickObjectPose.pose\n\n        rospy.loginfo(self.pickObjectPose.pose)\n\n        #--------------------------------------------------------------------------#\n        # When button detection is used, add +0.05 to the reported x and z\n        # coordinates so the arm moves to the exact button position\n        #--------------------------------------------------------------------------#\n\n        kinematics_pose.pose.position = self.forwardObjectPosition( kinematics_pose.pose.position, 0.05 )\n        kinematics_pose.pose.position.y -= 0.025\n        kinematics_pose.pose.position.z += 0.025\n\n        moveDistance = math.sqrt((kinematics_pose.pose.position.x - self.currentToolPose.position.x)**2 \n                    + (kinematics_pose.pose.position.y - self.currentToolPose.position.y)**2 \n                    + (kinematics_pose.pose.position.z - self.currentToolPose.position.z)**2 )\n\n        #distance 0.3 m -> 3 sec operate time \n        #distance 0.1 m -> 1 sec operate time \n\n        operating_time = moveDistance * 10\n        operating_limit_time = operating_time\n\n        if operating_time < 1 :\n            operating_limit_time = 1\n        elif operating_time > 3 :\n            operating_limit_time = 3 \n\n        rospy.logwarn(\"go xyz %.2f,%.2f,%.2f , moveDistance %.2f, operate time %.2f ( %.2f )\" ,\\\n            kinematics_pose.pose.position.x, kinematics_pose.pose.position.y, kinematics_pose.pose.position.z, \\\n            moveDistance, operating_time , operating_limit_time) \n\n        try:\n            resp = self.set_kinematics_position(planning_group, end_effector_name, kinematics_pose, operating_time)\n            print('kinematics resp {} time {}'.format(resp.is_planned, operating_time))\n            rospy.sleep(operating_time)\n        except rospy.ServiceException as e:\n            print(\"Service call failed: %s\"%e)\n            return False\n\n        return resp\n\n    def forwardObjectPosition( self, objectPosition, forward_distance ):\n        resultPoint = Point()\n        if(abs(objectPosition.x) < 0.001) :\n            objectPosition.x = 0.001\n        radian = math.atan(objectPosition.y/objectPosition.x)\n        degree = math.degrees(radian)\n        dist = forward_distance\n        distX = math.cos(radian)*dist\n        distY = math.sin(radian)*dist\n        resultPoint.x = objectPosition.x + distX \n        resultPoint.y = objectPosition.y + distY\n        resultPoint.z = objectPosition.z \n        rospy.loginfo(\"%.2f m forward,so objectposition change xyz(%.2f ,%.2f, %.2f) -> xyz(%.2f ,%.2f, %.2f)\",\n                    forward_distance, objectPosition.x, objectPosition.y, objectPosition.z , \n                    resultPoint.x, resultPoint.y, resultPoint.z)\n        return resultPoint \n\n    def kinematicsPoseCallback(self, msg):\n        self.currentToolPose = msg.pose  # keep the latest tool pose; moveToObject() reads it\n        self.kinematicsStates[0] = msg.pose.position.x\n        self.kinematicsStates[1] = msg.pose.position.y\n        self.kinematicsStates[2] = msg.pose.position.z\n        
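`forwardObjectPosition()` above clamps `objectPosition.x` away from zero before calling `math.atan(y/x)`. `math.atan2` gives the same heading without the clamp and stays correct when the target sits behind the arm. A standalone sketch with plain tuples instead of the ROS `Point` message:

```python
import math

def forward_position(x, y, z, forward_distance):
    """Project (x, y, z) outward along its bearing from the origin by forward_distance."""
    heading = math.atan2(y, x)        # well defined even when x == 0
    return (x + math.cos(heading) * forward_distance,
            y + math.sin(heading) * forward_distance,
            z)

print(forward_position(0.0, 0.4, 0.2, 0.05))   # atan(y/x) would need the x-clamp here
```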
#rospy.logwarn(' kinematicsPoseCallback %.2f , %.2f, %.2f ', self.kinematicsStates[0], self.kinematicsStates[1], self.kinematicsStates[2] )\n\n def jointStatesCallback(self, msg):\n\t #rospy.logwarn('jointStatesCallback %d ', len(msg.position) )\n self.is_triggered = True\n for i, pose in enumerate(msg.position):\n self.jointStates[i] = pose\n #print 'boundingBoxe {} {} '.format(i, pose) \n\n def statesCallback(self, msg):\t\n self.open_manipulator_moving_state = msg.open_manipulator_moving_state\n\n def main(self):\n rospy.spin()\n\nif __name__ == '__main__':\n rospy.init_node('push_node_controller')\n rospy.loginfo(\"push_node_controller\")\n node = PickAndPlace()\n node.main()","repo_name":"Sangmin-Bak/RobotArm_push_ElevatorButton","sub_path":"t-bot_manipulator_controller/nodes/push.py","file_name":"push.py","file_ext":"py","file_size_in_byte":12423,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"} +{"seq_id":"69855571747","text":"# https://stackoverflow.com/questions/7039114/waiting-animation-in-command-prompt-python\r\n\r\nimport time\r\n\r\n\r\nanimation = \"|/-\\\\\"\r\nidx = 0\r\nwhile thing_not_complete():\r\n print(animation[idx % len(animation)], end=\"\\r\") # https://www.geeksforgeeks.org/gfact-50-python-end-parameter-in-print/\r\n idx += 1\r\n time.sleep(0.1)\r\n\r\n\r\nbar = [\r\n \" [= ]\",\r\n \" [ = ]\",\r\n \" [ = ]\",\r\n \" [ = ]\",\r\n \" [ = ]\",\r\n \" [ =]\",\r\n \" [ = ]\",\r\n \" [ = ]\",\r\n \" [ = ]\",\r\n \" [ = ]\",\r\n]\r\ni = 0\r\n\r\nwhile True:\r\n print(bar[i % len(bar)], end=\"\\r\")\r\n time.sleep(.2)\r\n i += 1\r\n","repo_name":"az1z1ally/scpy","sub_path":"Login/Login/wait_animation.py","file_name":"wait_animation.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"70340776866","text":"from gpiozero import Button, Buzzer, LED\nfrom signal import pause\n\n\nbutton = Button(2)\t# push button on GPIO pin 2\nbuzzer = Buzzer(27)\t# buzzer on GPIO pin 27\nled = LED(17)\t\t# LED on GPIO pin 17\n\nbuzzer.source = led.source = button.values\n\npause()\n\n","repo_name":"faheel/Physical-Computing","sub_path":"Misc/button_controlled_buzzer_and_led.py","file_name":"button_controlled_buzzer_and_led.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"70"} +{"seq_id":"44677194972","text":"import math\n\ndef rectangulo(alto, ancho, relleno):\n for i in range(alto):\n print(str(relleno*ancho))\n\ndef triangulo(alto, relleno):\n k = alto - 1\n for i in range(0, alto):\n for j in range(0, k):\n print(end=\" \")\n k = k - 1\n for j in range(0, i+1):\n print(f\"{relleno} \", end=\"\")\n print(\"\\r\")","repo_name":"MarianoDubois/PG3_ITSVillada2022","sub_path":"funciones3.py","file_name":"funciones3.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"16088176361","text":"import unittest\nfrom contracting.client import ContractingClient\nimport os \nimport random\n\nos.chdir(os.path.dirname(os.path.abspath(__file__)))\n\nclass DeployToken(unittest.TestCase):\n currency = None \n rswp_token = None \n rocketswap = None\n yeti = None\n index_token = None\n \n\n def deploy_all(self):\n self.c= ContractingClient()\n self.c.flush()\n self.c.signer = \"ff61544ea94eaaeb5df08ed863c4a938e9129aba6ceee5f31b6681bdede11b89\"\n\n with 
open(\"./currency.py\") as f:\n code = f.read()\n self.c.submit(code, name=\"currency\")\n\n with open(\"./con_rswp_lst001.py\") as f:\n code = f.read()\n self.c.submit(code, name=\"con_rswp_lst001\")\n\n with open(\"./con_rocketswap_official_v1_1.py\") as f:\n code = f.read()\n self.c.submit(code, name=\"con_rocketswap_official_v1_1\")\n\n with open(\"./con_yeti.py\") as f:\n code = f.read()\n self.c.submit(code, name=\"con_yeti\")\n\n self.currency = self.c.get_contract(\"currency\")\n self.rswp_token = self.c.get_contract(\"con_rswp_lst001\")\n self.rocketswap = self.c.get_contract(\"con_rocketswap_official_v1_1\")\n self.yeti = self.c.get_contract(\"con_yeti\")\n\n\n with open(\"../con_index.py\") as f:\n code = f.read()\n self.c.submit(code, name=\"con_index\")\n\n self.index_token = self.c.get_contract(\"con_index\")\n\n\n def create_pair(self):\n self.currency.approve(amount=1_000_000_000, to=\"con_rocketswap_official_v1_1\")\n self.rswp_token.approve(amount=1_000_000_000, to=\"con_rocketswap_official_v1_1\")\n self.yeti.approve(amount=1_000_000_000, to=\"con_rocketswap_official_v1_1\")\n self.rocketswap.create_market(\n contract=\"con_rswp_lst001\",\n currency_amount=5_000_000,\n token_amount=75_000_000,\n )\n self.rocketswap.create_market(\n contract=\"con_yeti\",\n currency_amount=500_000,\n token_amount=500_000_000,\n )\n \n def test_deploy(self):\n self.deploy_all()\n\n def test_mint_index(self):\n self.deploy_all()\n self.create_pair()\n self.currency.approve(amount=100_000_000, to=\"con_index\")\n self.rswp_token.approve(amount=100_000_000, to=\"con_index\")\n self.yeti.approve(amount=100_000_000, to=\"con_index\")\n print(self.index_token.get_calculated_allocations())\n self.index_token.rebalance()\n self.rocketswap.buy(contract=\"con_rswp_lst001\", currency_amount=10000)\n random_amount = random.randint(1, 300)\n random_amount_2 = random.randint(1,random_amount)\n for _ in range(random_amount):\n self.index_token.mint_index_using_tokens(index_amount=110)\n print(self.index_token.get_calculated_allocations())\n self.rocketswap.sell(contract=\"con_rswp_lst001\", token_amount=2230000)\n print(self.index_token.get_calculated_allocations())\n self.rocketswap.buy(contract=\"con_rswp_lst001\", currency_amount=10000)\n\n for _ in range(random_amount_2):\n self.index_token.burn_index_to_tokens(index_amount=110)\n print(self.index_token.get_calculated_allocations())\n random_amount = random.randint(1, 300)\n random_amount_2 = random.randint(1,random_amount)\n for _ in range(random_amount):\n self.index_token.mint_index_using_tokens(index_amount=110)\n print(self.index_token.get_calculated_allocations())\n self.rocketswap.sell(contract=\"con_rswp_lst001\", token_amount=2230000)\n print(self.index_token.get_calculated_allocations())\n self.rocketswap.buy(contract=\"con_rswp_lst001\", currency_amount=10000)\n for _ in range(random_amount_2):\n self.index_token.burn_index_to_tokens(index_amount=110)\n print(self.index_token.get_calculated_allocations())\n print(self.index_token.get_calculated_allocations())\n for _ in range(100):\n self.index_token.rebalance()\n print(self.index_token.get_calculated_allocations())\n print(self.index_token.get_calculated_allocations())\n \n \n\n\nif __name__ == \"__main__\":\n unittest.main()","repo_name":"crosschainer/index-token","sub_path":"tests/test_token.py","file_name":"test_token.py","file_ext":"py","file_size_in_byte":4233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} 
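The Rocketswap index-token tests above draw their loop counts from an unseeded `random`, so every run exercises a different path and a failure is hard to reproduce. A common alternative, sketched with a dedicated seeded generator; class and method names here are illustrative:

```python
import random
import unittest

class ReproducibleRandomTest(unittest.TestCase):
    def setUp(self):
        # A fixed seed makes failing runs repeatable; change it deliberately to widen coverage.
        self.seed = 1234
        self.rng = random.Random(self.seed)

    def test_random_mint_burn_counts(self):
        mints = self.rng.randint(1, 300)
        burns = self.rng.randint(1, mints)
        self.assertLessEqual(burns, mints)

if __name__ == "__main__":
    unittest.main()
```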
+{"seq_id":"38448271518","text":"from fastapi import FastAPI, HTTPException\nfrom db import get_db_connection\nfrom models import ClientOrder\nfrom utils import TradeBook\nfrom typing import Dict\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom datetime import datetime\nfrom fastapi.exceptions import RequestValidationError\nfrom fastapi.responses import PlainTextResponse\nfrom starlette.exceptions import HTTPException as StarletteHTTPException\n\napp = FastAPI(title='TRADE_BOOK', version='1.0')\n\napp.add_middleware(\n    CORSMiddleware,\n    allow_origins=[\"*\"],\n    allow_methods=[\"*\"],\n    allow_headers=[\"*\"],\n    allow_credentials=True)\n\n\n@app.exception_handler(StarletteHTTPException)\nasync def http_exception_handler(request, exc):\n    return PlainTextResponse(str(exc.detail), status_code=exc.status_code)\n\n\n@app.exception_handler(RequestValidationError)\nasync def validation_exception_handler(request, exc):\n    return PlainTextResponse(str(exc), status_code=400)\n\n\n@app.get('/')\nasync def trade_history() -> Dict:\n    connection, cursor = get_db_connection()\n    # Load the trade history from the database\n    cursor.execute(\"SELECT * FROM trade_history\")\n    history = cursor.fetchall()\n    history = [list(x) for x in set(tuple(x) for x in history)]\n\n    return {\"message\": 'Welcome to the trade book.', 'History': history}\n\n\n@app.post(\"/trade\")\nasync def trade(order: ClientOrder) -> Dict:\n    connection, cursor = get_db_connection()\n\n    # Load the initial positions and prices from the database\n    cursor.execute(\"SELECT * FROM positions\")\n    initial_positions = {row[0]: row[1] for row in cursor.fetchall()}\n    cursor.execute(\"SELECT * FROM prices\")\n    market_prices = {row[0]: row[1] for row in cursor.fetchall()}\n\n    # Load the transaction costs, risk aversion, and volatilities from the database\n    cursor.execute(\"SELECT * FROM transaction_costs\")\n    transaction_costs = {row[0]: row[1] for row in cursor.fetchall()}\n    cursor.execute(\"SELECT * FROM risk_aversion\")\n    risk_appetite = cursor.fetchone()[0]\n    cursor.execute(\"SELECT * FROM volatilities\")\n    volatility = {row[0]: row[1] for row in cursor.fetchall()}\n\n    # Initialize the TradeBook with the initial positions and prices\n    trade_book = TradeBook(initial_positions=initial_positions, initial_market_prices=market_prices,\n                           volatility=volatility, risk_appetite=risk_appetite, transaction_costs=transaction_costs,\n                           hedge=order.hedge)\n\n    instruments = list(trade_book.positions.keys())\n\n    if order.instrument_id not in instruments:\n        raise HTTPException(status_code=404, detail=\"Instrument not found\")\n\n    # Add the client order to the trade book\n    trade_book.add_client_order(order.instrument_id, order.traded_price, order.quantity)\n\n    # Update the positions and prices in the database\n    for instrument_id, position in trade_book.positions.items():\n        cursor.execute(\"UPDATE positions SET position = %s WHERE instrument_id = %s\", (position, instrument_id))\n    for instrument_id, price in trade_book.market_prices.items():\n        cursor.execute(\"UPDATE prices SET price = %s WHERE instrument_id = %s\", (price, instrument_id))\n\n    timestamp = str(datetime.now())\n    sharpe_ratio = trade_book.calculate_sharpe_ratio(risk_free_rate=0.03, time_period=1)\n    roi = trade_book.calculate_roi(instrument_id=order.instrument_id)\n    var = int(trade_book.calculate_var(alpha=0.95))\n    calmar = trade_book.calculate_calmar_ratio(time_period=1)\n\n    cursor.execute(\"INSERT INTO trade_history (instrument_id, position, timestamp, price) VALUES (%s, %s, %s, 
%s)\",\n                   (order.instrument_id, order.quantity, timestamp, order.traded_price))\n\n    connection.commit()\n    cursor.close()\n    connection.close()\n\n    return {'Instrument Traded': order.instrument_id,\n            \"P&L for the portfolio\": trade_book.pnl,\n            'Max Drawdown': trade_book.maximum_drawdown,\n            'Hedged?': str(order.hedge),\n            'Price': str(order.traded_price),\n            'Value at Risk': var,\n            'sharpe_ratio': sharpe_ratio,\n            'ROI': roi,\n            'Calmar Ratio': calmar}\n","repo_name":"mertcan79/Trade-Book-FastAPI","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"37998855724","text":"\"\"\"\r\n\r\n@author: Cody Smith | codysmith.contact@gmail.com\r\nhttps://github.com/codysmith-tech\r\nhttps://www.linkedin.com/in/codysmithprofile/\r\n\r\n\"\"\"\r\n\r\nimport os\r\nimport shutil\r\nimport csv\r\n\r\n\r\ndef parse_data(working_directory):\r\n    \"\"\"\r\n    Takes raw data files and parses them into .csv files for better\r\n    handling\r\n\r\n    Parameters\r\n    ----------\r\n    working_directory : string\r\n        The directory that contains the scripts\r\n        for this project, including this one.\r\n\r\n    Returns\r\n    -------\r\n    raw_data_files : list\r\n        A list of strings containing the paths to all raw data files.\r\n    \r\n    parsed_data_files : list\r\n        A list of strings containing the paths to all parsed data files.\r\n    \r\n    filenames : list\r\n        A list of strings containing the names of each data file.\r\n    \"\"\"\r\n\r\n    #Making directories\r\n    if os.path.isdir(working_directory + '\\\\parsed-data\\\\') == False:\r\n        os.mkdir(working_directory + '\\\\parsed-data')\r\n    \r\n    if os.path.isdir(working_directory + '\\\\filtered-data\\\\') == True:\r\n        shutil.rmtree(working_directory + '\\\\filtered-data')\r\n    \r\n    os.mkdir(working_directory + '\\\\filtered-data\\\\')\r\n    os.mkdir(working_directory + '\\\\filtered-data\\\\november-data')\r\n    os.mkdir(working_directory + '\\\\filtered-data\\\\december-data')\r\n    os.mkdir(working_directory + '\\\\filtered-data\\\\january-data')\r\n    \r\n    #Making list of all raw data files\r\n    raw_data_files = []\r\n    for dirpath, subdirs, files in os.walk(working_directory + \"\\\\raw-data\"):\r\n        for f in files:\r\n            raw_data_files.append(os.path.join(dirpath, f))\r\n    \r\n    #Reading raw data files, removing header and whitespace, and parsing into new files\r\n    n = 3\r\n    nfirstlines = []\r\n    \r\n    for i in range(len(raw_data_files)):\r\n        with open(raw_data_files[i]) as f, open(working_directory + '\\\\parsed-data\\\\' + f\"{files[i]}\", 'w') as out:\r\n            first_line = f.readline().strip()\r\n            if 'SPO_RAD' in first_line:\r\n                for x in range(n):\r\n                    nfirstlines.append(next(f))\r\n            for line in f:\r\n                out.write(line.strip() + '\\n')\r\n    \r\n    \r\n    #Making list of all parsed data files\r\n    parse_data_files = []\r\n    for dirpath, subdirs, files in os.walk(working_directory + \"\\\\parsed-data\"):\r\n        for f in files:\r\n            parse_data_files.append(os.path.join(dirpath, f))\r\n    \r\n    #Stripping file names of the .dat extension (str.strip would remove characters, not the suffix)\r\n    filenames = []\r\n    for i in range(len(files)):\r\n        filenames.append(os.path.splitext(files[i])[0])\r\n    \r\n    #Removing unnecessary data\r\n    for i in range(len(parse_data_files)):\r\n        f = open(parse_data_files[i], 'r')\r\n        if '_11' in parse_data_files[i]:\r\n            g = open(working_directory + '\\\\filtered-data\\\\november-data\\\\' + filenames[i] + '.csv', 'a+')\r\n        elif '_12' in parse_data_files[i]:\r\n            g = open(working_directory + '\\\\filtered-data\\\\december-data\\\\' + 
filenames[i] + '.csv', 'a+')\r\n else:\r\n g = open(working_directory + '\\\\filtered-data\\\\january-data\\\\' + filenames[i] + '.csv', 'a+')\r\n \r\n writer = csv.writer(g)\r\n \r\n for lines in f:\r\n line = lines.split()\r\n \r\n writer.writerow((line[0],line[1],line[2],line[3],line[4],line[5]))\r\n \r\n f.close()\r\n g.close()\r\n \r\n \r\n #Make list of all cleaned data files\r\n parsed_data_files = []\r\n for dirpath, subdirs, files in os.walk(working_directory + \"\\\\filtered-data\"):\r\n for f in files:\r\n parsed_data_files.append(os.path.join(dirpath, f))\r\n \r\n shutil.rmtree(working_directory + '\\\\parsed-data')\r\n\r\n return raw_data_files, parsed_data_files, filenames\r\n \r\n\r\n\r\n \r\n \r\n \r\n ","repo_name":"codysmith-tech/SPO-radiation-analysis","sub_path":"parse_data.py","file_name":"parse_data.py","file_ext":"py","file_size_in_byte":3884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"26702448387","text":"from character import character\n# import random\ntarig = character (\"tarig\",\"Space-Traveler\")\ntarig.set_power(\"teleportation\")\nman = character (\"Man\", \"Super-MAN\")\nman.set_power(\"superMAn\")\nman.set_power(\"fly\")\nprint(f\"{tarig.name} is actually the superhero {tarig.super} and his power is {tarig.power}\")\nprint(f\"{man.name} is actually the superhero {man.super}\")\n\ntarig.get_power()\nman.get_power()","repo_name":"TDevTech/Innovate_June_Coding_Protfolio","sub_path":"oop_python/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"9692238768","text":"from geometric_object import GeometricObject\n\n\nclass Cylinder(GeometricObject):\n\n def __init__(self, radius, height, color, filled):\n super().__init__(color=color, filled=filled)\n # turns the radius and height into float if they are ints\n if type(radius) == int:\n self.__radius = float(radius)\n else:\n self.__radius = radius\n\n if type(height) == int:\n self.__height = float(height)\n else:\n self.__height = height\n\n if type(self.__radius) != float or type(self.__height) != float:\n raise Warning(\"Invalid input type for radius or height\")\n\n def get_radius(self):\n radius = float()\n radius = self.__radius\n return radius\n\n def get_height(self):\n height = float()\n height = self.__height\n return height\n\n def get_area(self):\n return round(2 * 3.14 * self.__radius * (self.__radius + self.__height), 2)\n\n def get_volume(self):\n return round(3.14 * self.__radius ** 2 * self.__height, 2)\n","repo_name":"Perytron/UZH","sub_path":"Informatics I/009 Inheritance/Task 4 Geometric Objects/Patertruck Solution/cylinder.py","file_name":"cylinder.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"71"} +{"seq_id":"33874746785","text":"# -*- coding: utf-8 -*-\n\"\"\"Classes defining the populate factory for Journal Entries, Invoices and related models.\"\"\"\nfrom odoo import models, fields, Command\nfrom odoo.tools import populate\n\nimport logging\nimport math\nfrom functools import lru_cache\nfrom dateutil.relativedelta import relativedelta\n\n_logger = logging.getLogger(__name__)\n\n\nclass AccountMove(models.Model):\n \"\"\"Populate factory part for account.move.\n\n Because of the complicated nature of the interraction of account.move and account.move.line,\n both models are actualy generated in the same 
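The SPO parse_data record above gathers files with nested `os.walk` loops and then derives bare names from the `.dat` filenames. A pathlib sketch of the same collection step, where `Path.stem` drops the suffix directly; the `raw-data` directory name comes from the record:

```python
from pathlib import Path

def collect_data_files(root, suffix=".dat"):
    """Recursively collect files under root and return (paths, stem names)."""
    paths = sorted(Path(root).rglob(f"*{suffix}"))
    names = [p.stem for p in paths]   # 'run_11_01.dat' -> 'run_11_01'
    return paths, names

paths, names = collect_data_files("raw-data")
print(names[:3])
```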
factory.\n    \"\"\"\n\n    _inherit = \"account.move\"\n\n    _populate_sizes = {\n        'small': 1000,\n        'medium': 10000,\n        'large': 500000,\n    }\n\n    _populate_dependencies = ['res.partner', 'account.journal', 'product.product']\n\n    def _populate_factories(self):\n        @lru_cache()\n        def search_accounts(company_id, types=None):\n            \"\"\"Search all the accounts of a certain type for a company.\n\n            This method is cached, only one search is done per tuple(company_id, type).\n            :param company_id (int): the company to search accounts for.\n            :param type (str): the type to filter on. If not set, do not filter. Valid values are:\n                payable, receivable, liquidity, other, False.\n            :return (Model): the recordset of accounts found.\n            \"\"\"\n            domain = [\n                *self.env['account.account']._check_company_domain(company_id),\n                ('account_type', '!=', 'off_balance'),\n            ]\n            if types:\n                domain += [('account_type', 'in', types)]\n            return self.env['account.account'].search(domain)\n\n        @lru_cache()\n        def search_journals(company_id, journal_type, currency_id):\n            \"\"\"Search all the journals of a certain type for a company.\n\n            This method is cached, only one search is done per tuple(company_id, journal_type).\n            :param company_id (int): the company to search journals for.\n            :param journal_type (str): the journal type to filter on.\n                Valid values are sale, purchase, cash, bank and general.\n            :param currency_id (int): the currency to search journals for.\n            :return (list): the ids of the journals of a company and a certain type\n            \"\"\"\n            return self.env['account.journal'].search([\n                *self.env['account.journal']._check_company_domain(company_id),\n                ('currency_id', 'in', (False, currency_id)),\n                ('type', '=', journal_type),\n            ]).ids\n\n        @lru_cache()\n        def search_products(company_id):\n            \"\"\"Search all the products a company has access to.\n\n            This method is cached, only one search is done per company_id.\n            :param company_id (int): the company to search products for.\n            :return (Model): all the products the company has access to\n            \"\"\"\n            return self.env['product.product'].search([\n                *self.env['product.product']._check_company_domain(company_id),\n                ('id', 'in', self.env.registry.populated_models['product.product']),\n            ])\n\n        @lru_cache()\n        def search_partner_ids(company_id):\n            \"\"\"Search all the partners that a company has access to.\n\n            This method is cached, only one search is done per company_id.\n            :param company_id (int): the company to search partners for.\n            :return (list): the ids of the partners the company has access to.\n            \"\"\"\n            return self.env['res.partner'].search([\n                *self.env['res.partner']._check_company_domain(company_id),\n                ('id', 'in', self.env.registry.populated_models['res.partner']),\n            ]).ids\n\n        def get_invoice_date(values, **kwargs):\n            \"\"\"Get the invoice date.\n\n            :param values (dict): the values already selected for the record.\n            :return (datetime.date, bool): the accounting date if it is an invoice (or similar) document\n                or False otherwise.\n            \"\"\"\n            if values['move_type'] in self.get_invoice_types(include_receipts=True):\n                return values['date']\n            return False\n\n        def get_lines(random, values, **kwargs):\n            \"\"\"Build the dictionary of account.move.line.\n\n            Generate lines depending on the move_type, company_id and currency_id.\n            :param random: seeded random number generator.\n            :param values (dict): the values already selected for the record.\n            :return list: list of ORM create commands for the field line_ids\n            \"\"\"\n            def get_entry_line(label, balance=None):\n                account = random.choice(accounts)\n                currency = 
account.currency_id != account.company_id.currency_id and account.currency_id or random.choice(currencies)\n if balance is None:\n balance = round(random.uniform(-10000, 10000))\n return Command.create({\n 'name': 'label_%s' % label,\n 'balance': balance,\n 'account_id': account.id,\n 'partner_id': partner_id,\n 'currency_id': currency.id,\n 'amount_currency': account.company_id.currency_id._convert(balance, currency, account.company_id, date),\n })\n\n def get_invoice_line():\n return Command.create({\n 'product_id': random.choice(products).id,\n 'account_id': random.choice(accounts).id,\n 'price_unit': round(random.uniform(0, 10000)),\n 'quantity': round(random.uniform(0, 100)),\n })\n\n move_type = values['move_type']\n date = values['date']\n company_id = values['company_id']\n partner_id = values['partner_id']\n\n # Determine the right sets of accounts depending on the move_type\n if move_type in self.get_sale_types(include_receipts=True):\n accounts = search_accounts(company_id, ('income',))\n elif move_type in self.get_purchase_types(include_receipts=True):\n accounts = search_accounts(company_id, ('expense',))\n else:\n accounts = search_accounts(company_id)\n\n products = search_products(company_id)\n\n if move_type == 'entry':\n # Add a random number of lines (between 1 and 20)\n lines = [get_entry_line(\n label=i,\n ) for i in range(random.randint(1, 20))]\n\n # Add a last line containing the balance.\n # For invoices, etc., it will be on the receivable/payable account.\n lines += [get_entry_line(\n balance=-sum(vals['balance'] for _command, _id, vals in lines),\n label='balance',\n )]\n else:\n lines = [get_invoice_line() for _i in range(random.randint(1, 20))]\n\n return lines\n\n def get_journal(random, values, **kwargs):\n \"\"\"Get a random journal depending on the company and the move_type.\n\n :param random: seeded random number generator.\n :param values (dict): the values already selected for the record.\n :return (int): the id of the journal randomly selected\n \"\"\"\n move_type = values['move_type']\n company_id = values['company_id']\n currency_id = values['company_id']\n if move_type in self.get_sale_types(include_receipts=True):\n journal_type = 'sale'\n elif move_type in self.get_purchase_types(include_receipts=True):\n journal_type = 'purchase'\n else:\n journal_type = 'general'\n journal = search_journals(company_id, journal_type, currency_id)\n return random.choice(journal)\n\n def get_partner(random, values, **kwargs):\n \"\"\"Get a random partner depending on the company and the move_type.\n\n The first 3/5 of the available partners are used as customer\n The last 3/5 of the available partners are used as suppliers\n It means 1/5 is both customer/supplier\n -> Same proportions as in account.payment\n :param random: seeded random number generator.\n :param values (dict): the values already selected for the record.\n :return (int, bool): the id of the partner randomly selected if it is an invoice document\n False if it is a Journal Entry.\n \"\"\"\n move_type = values['move_type']\n company_id = values['company_id']\n partner_ids = search_partner_ids(company_id)\n\n if move_type in self.get_sale_types(include_receipts=True):\n return random.choice(partner_ids[:math.ceil(len(partner_ids)/5*2)])\n if move_type in self.get_purchase_types(include_receipts=True):\n return random.choice(partner_ids[math.floor(len(partner_ids)/5*2):])\n return False\n\n company_ids = self.env['res.company'].search([\n ('chart_template', '!=', False),\n ('id', 'in', 
self.env.registry.populated_models['res.company']),\n ])\n currencies = self.env['res.currency'].search([\n ('active', '=', True),\n ])\n\n return [\n ('move_type', populate.randomize(\n ['entry', 'in_invoice', 'out_invoice', 'in_refund', 'out_refund', 'in_receipt', 'out_receipt'],\n [0.2, 0.3, 0.3, 0.07, 0.07, 0.03, 0.03],\n )),\n ('company_id', populate.randomize(company_ids.ids)),\n ('currency_id', populate.randomize(currencies.ids)),\n ('journal_id', populate.compute(get_journal)),\n ('date', populate.randdatetime(relative_before=relativedelta(years=-4), relative_after=relativedelta(years=1))),\n ('invoice_date', populate.compute(get_invoice_date)),\n ('partner_id', populate.compute(get_partner)),\n ('line_ids', populate.compute(get_lines)),\n ]\n\n def _populate(self, size):\n records = super()._populate(size)\n _logger.info('Posting Journal Entries')\n to_post = records.filtered(lambda r: r.date < fields.Date.today())\n to_post.action_post()\n return records\n","repo_name":"odoo/odoo","sub_path":"addons/account/populate/account_move.py","file_name":"account_move.py","file_ext":"py","file_size_in_byte":10652,"program_lang":"python","lang":"en","doc_type":"code","stars":31745,"dataset":"github-code","pt":"71"} +{"seq_id":"18923396103","text":"'''\nConfirms that the gold config produces matching netcdfs as to what is in\nthe repo\n'''\nimport subprocess as sp\nimport sys\nfrom netCDF4 import Dataset\nfrom os.path import join, abspath, expanduser\nimport os\nimport argparse\n\n__version__ = '0.1.0'\n\nparser = argparse.ArgumentParser(\ndescription='Checks for differences in gold files and develop files.')\n\nparser.add_argument('dev_dir', metavar='dev_dir', type=str,\nhelp='Path a directory containing a set of netcdfs that are in development')\n\nparser.add_argument('gold_dir', metavar='gold_dir', type=str,\nhelp='Path a directory containing a set of netcdfs that are considered gold files')\n\nparser.add_argument('-d', dest='debug', action='store_true',\nhelp='Whether to clean up files or not')\nargs = parser.parse_args()\n\nhdr = 'Gold File Checker v{}'.format(__version__)\nbanner = '=' * len(hdr)\nprint(banner)\nprint(hdr)\nprint(banner)\n\n# Location to the gold files and config\ndev_dir = abspath(expanduser(args.dev_dir))\ngold_dir = abspath(expanduser(args.gold_dir))\n\n# List all the files in the locations, filter on netcdf\n\ndev_files = os.listdir(dev_dir)\ngold_files = os.listdir(gold_dir)\n\ncommon_netcdfs = [f for f in dev_files if f in gold_files and f.split('.')[-1]=='nc']\n\nfor f in common_netcdfs:\n fname = join(dev_dir, f)\n gfname = join(gold_dir, f)\n diff_f = join(dev_dir, 'diff.nc')\n diff_cmd = 'ncdiff -O {} {} {}'.format(fname, gfname, diff_f)\n print('Executing:')\n print('\\n' + diff_cmd)\n sp.check_output(diff_cmd, shell=True)\n\n cmd = 'nc_stats {} -p 8'.format(diff_f)\n out = sp.check_output(cmd, shell=True)\n print(out.decode('utf-8'))\n\nif not args.debug:\n print('Cleaning up files...')\n cmd = 'rm {}'.format(diff_f)\n sp.check_output(cmd, shell = True)\n","repo_name":"micahjohnson150/scripts","sub_path":"test_development/check_gold.py","file_name":"check_gold.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"31208580372","text":"import cv2\n\nfaceCascade = cv2.CascadeClassifier('./models/haarcascade_frontalface_alt2.xml')\nmask = cv2.imread('./models/goku_saiyan_hair.png')\n\nclass VideoCam(object):\n def __init__(self):\n self.video = 
cv2.VideoCapture(0)\n \n\n def __del__(self):\n self.video.release()\n\n def get_frame(self):\n ret, frame = self.video.read()\n \n # Do stuff here\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n faces = faceCascade.detectMultiScale(gray, minNeighbors=5)\n for (fx, fy, fw, fh) in faces:\n if fh <=0 or fw <= 0:\n # print(\"[WARN] Skipping...\")\n continue\n # cv2.rectangle(frame, (fx, fy), (fx+fw, fy+fh), (0, 200, 0), 2, 8)\n fw, fh = int(fw*1.6), int(fh*1.6)\n fy -= int(0.6*fh)\n fx -= int(0.2*fw)\n \n roi = frame[fy:fy+fh, fx:fx+fw]\n mask_resized = cv2.resize(mask, (fw, fh), interpolation=cv2.INTER_AREA)\n mask_gray = cv2.cvtColor(mask_resized, cv2.COLOR_BGR2GRAY)\n ret, mask_thresh = cv2.threshold(mask_gray, 150, 255, cv2.THRESH_BINARY_INV)\n # mask_inv = cv2.bitwise_not(mask_gray)\n # masked_face = cv2.bitwise_and(mask_resized, mask_resized, mask=mask_thresh)\n # mask_reinv = cv2.bitwise_not(mask_thresh)\n masked_frame = cv2.bitwise_and(roi, roi, mask=mask_thresh)\n frame[fy:fy+fh, fx:fx+fw] = cv2.add(mask_resized, masked_frame)\n\n ret, jpeg = cv2.imencode('.jpg', frame)\n\n return jpeg.tobytes()\n","repo_name":"mtc-20/Simple_AR_webApp","sub_path":"camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"38696068760","text":"#zelle 9.1.2 text exercise - racquetball sim\n\nfrom random import random\n\ndef simGame(a, b):\n\n aScore, bScore = 0, 0\n service = 'a'\n\n while aScore < 15 and bScore < 15:\n if service == 'a':\n if random() < a:\n aScore += 1\n else:\n service = 'b'\n else:\n if random() < b:\n bScore += 1\n else:\n service = 'a'\n\n if aScore == 15:\n return 'a'\n else:\n return 'b'\n \n\ndef simMatch(a, b, n):\n\n aWins = 0\n bWins = 0\n \n for i in range(n):\n winner = simGame(a, b)\n if winner == 'a':\n aWins += 1\n else:\n bWins += 1\n\n return aWins, bWins\n\ndef main():\n\n a = float(input('prob player A wins a serve: '))\n b = float(input('prob player B wins a serve: '))\n n = int(input('games to simulate: '))\n\n aWins, bWins = simMatch(a, b, n)\n\n print('\\nGames Simulated: ' + str(n) + '\\n')\n print('Wins for A: {0:0.0f} ({1:3.1%})'.format(aWins, aWins / n))\n print('Wins for B: {0:0.0f} ({1:3.1%})'.format(bWins, bWins / n))\n \n\nif __name__ == '__main__': main()\n","repo_name":"psarran/Zelle-Python-Programming","sub_path":"zelle 9.1.2 text exercise - racquetball sim.py","file_name":"zelle 9.1.2 text exercise - racquetball sim.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"71"} +{"seq_id":"24179397677","text":"import re\nimport string\n\nfrom Lexer.tokens import *\nfrom collections import OrderedDict\n\ndebug = False\n\n\nclass RuleException(Exception):\n def __init__(self, reason=None, lineNum=-1):\n self.lineNum = lineNum\n self.reason = reason\n\n def __repr__(self):\n return str(self)\n\nclass DuplicateRuleException(RuleException):\n def __str__(self):\n return \"Rule with duplicate name \" + str(self.reason) + \" lineNum: \" + str(self.lineNum)\n\nclass InvalidRuleException(RuleException):\n def __str__(self):\n return \"Rule was wrong! 
\" + str(self.reason) + \" lineNum: \" + str(self.lineNum)\n\n\n\nclass Rule:\n def __init__(self, name, content, lineNum):\n self.name = name\n self.content = content\n self.lineNum = lineNum\n\n def __str__(self):\n return \"{name} := {content} -- line: {lineNum}\".format(name =self.name, \n content = self.content, \n lineNum = self.lineNum)\n\n\nclass StateStoreQueue:\n __slots__ = [\n \"tokens\",\n \"count\",\n \"old_state\"\n ]\n\n def __init__(self, tokens=(), start_count=0):\n self.tokens = list(tokens)\n self.count = start_count\n self.old_state = None\n\n def pop(self):\n temp = self.tokens[self.count]\n self.count += 1\n return temp\n\n def peek(self):\n return self.tokens[self.count]\n\n def append(self, item):\n self.tokens.append(item)\n\n def extend(self, items):\n for i in items:\n self.tokens.append(i)\n\n def store_state(self):\n self.old_state = self.count\n return self.old_state\n\n def restore_state(self):\n assert self.old_state is not None\n self.count = self.old_state\n return self.count\n\n def __len__(self):\n return len(self.tokens) - self.count\n\n def __getitem__(self, item):\n return self.tokens[self.count+item]\n\n def peek_type(self, type):\n return isinstance(self.peek(), type)\n\n def __str__(self):\n return str(self.tokens[self.count:])\n\n def __repr__(self):\n return str(self)\n\n\n\n\nclass Lexer:\n statements = OrderedDict([\n (\"LeftBracket\", LeftBracket),\n (\"RightBracket\", RightBracket),\n (\"CommaSeperator\", CommaSeperator),\n (\"PrintToken\", PrintToken),\n\n (\"NumLiteral\", NumLiteral),\n (\"StrLiteral\", StrLiteral),\n\n (\"LogicalOperator\", LogicalOperator),\n (\"BitwiseOperator\", BitwiseOperator),\n (\"MathOperator\", MathOperator),\n\n (\"WhatsThisOperator\", WhatsThisOperator),\n\n\n (\"NumType\", NumType),\n (\"StringType\", StringType),\n (\"Assignment\", Assignment),\n\n (\"ToWord\", ToToken),\n (\"AddWord\", AddToken),\n (\"TakeWord\", TakeToken),\n (\"FromWord\", FromToken),\n\n\n\n (\"Term\", Term),\n\n\n\n (\"EndChar\", EndChar),\n ]\n )\n # statements will be converted into a dictionary containing, in __init__\n # \"logicaloperator\" : [class logicalOperator, regex pattern]\n\n # Ordered, higher means higher priority\n # must be passed in as a list or it losses its order to the kwargs gods\n\n\n NO_SPACE = ['+', '-', '*', '/', '--', '++', '==', '=', ';', \"(\", \")\"]\n\n def __init__(self, rule_file=\"rules.txt\"):\n self.rules = {} # string:Rule\n self.read_rules(rule_file)\n for key, value in self.statements.items():\n self.statements[key] = [value, re.compile(\"^\" + self.rules[key])]\n\n def read_rules(self, file):\n with open(file) as f:\n for lineNum, line in enumerate(f):\n if line.isspace() or line.startswith(\"#\"):\n continue\n try:\n name, content = line.split(\":=\", maxsplit=1)\n name = name.strip(\" \")\n content = content.strip(\" \")\n except ValueError:\n raise InvalidRuleException(\"No := symbol on line\", lineNum+1)\n self.add_rule(name.strip(), content, lineNum+1)\n\n def add_rule(self, rule_name, content, lineNum):\n \"\"\"Sub's in dependencies and processes the content\"\"\"\n word = []\n all_words = []\n in_string = False\n has_quotes = False\n escaped = False\n\n for letter in content:\n if letter == \" \" and not in_string:\n if not word:\n raise InvalidRuleException(\"Empty word before space symbol \" + str(all_words), lineNum)\n if in_string:\n raise InvalidRuleException(\"Require a closing quote before + symbol\", lineNum)\n\n if not has_quotes:\n # this word is an actual rule and needs to be 
filled in\n word = \"\".join(word)\n all_words.append(self.get_rule(word, lineNum))\n \n else:\n # this word is a raw string \n word = \"\".join(word)\n all_words.append(word)\n word = []\n has_quotes = False\n elif letter == \" \":\n word.append(\" \")\n\n elif letter.isspace():\n continue\n\n elif letter == '\"' and not escaped:\n if not in_string and has_quotes:\n raise InvalidRuleException(\"> 2 quotes\", lineNum)\n in_string = not in_string\n has_quotes = True\n\n elif letter == \"\\\\\" and not escaped:\n escaped = True\n word.append(\"\\\\\")\n else:\n escaped = False\n if has_quotes and not in_string:\n # there is a letter after a quoted rule, e.g. (\"asdf\" fs + )\n raise InvalidRuleException(\"Unexpected characters after closing quote\", lineNum)\n word.append(letter)\n\n if in_string:\n raise InvalidRuleException(\"Unclosed quotation\", lineNum)\n\n if word:\n if not has_quotes:\n # this word is an actual rule and needs to be filled in\n word = \"\".join(word)\n all_words.append(self.get_rule(word, lineNum))\n \n else:\n # this word is a raw string \n word = \"\".join(word)\n all_words.append(word)\n\n ret = \"\".join(all_words)\n self.rules[rule_name] = ret\n return ret\n\n def get_rule(self, ruleName, lineNum=-1):\n try:\n return self.rules[ruleName]\n except KeyError:\n raise InvalidRuleException(\"Cannot find rule, \" + str(ruleName), lineNum)\n\n def read_source(self, fname):\n ret = StateStoreQueue()\n with open(fname) as f:\n file = f.read()\n for i, line in enumerate(file.splitlines()):\n ret.extend(self.tokenize(line, i+1))\n return ret\n \n\n def tokenize(self, file, row=-1):\n ret = []\n col = 1\n while file:\n while file.startswith(tuple(string.whitespace)):\n file = file[1:]\n col += 1\n\n if not file:\n # the file is empty\n break\n for class_name, pattern in self.statements.values():\n match = pattern.match(file)\n if match:\n assert match.span()[0] == 0\n token_len = match.span()[1]\n ret.append(class_name(match.group(), row, col))\n col += token_len\n\n file = file[token_len:]\n break\n else:\n raise SyntaxError(\"Uncrecongnizable token at row: {}, col: {}, remaining content: \\\"{}\\\"\".format(row, col, file))\n\n return ret\n\n\n\n\n\n","repo_name":"Jackywathy/past-work","sub_path":"fscript/Lexer/lexer.py","file_name":"lexer.py","file_ext":"py","file_size_in_byte":7843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"26770309020","text":"import cv2\nimport os\nfrom sklearn.preprocessing import StandardScaler\nimport json\nfrom helpers import *\n\n\n# Initialize Exemplars Database and Name Map\nif not os.path.exists(EXEMPLARS_DATABASE):\n with open(EXEMPLARS_DATABASE, \"w\") as f:\n json.dump({}, f)\n\nif not os.path.exists(NAME_MAP_DATABASE):\n with open(NAME_MAP_DATABASE, \"w\") as f:\n json.dump({}, f)\n\n\ndef main():\n # Universal index mapping (one index to many lists)\n # Things to be stores\n image_embeddings = [] # To be set later\n main_face_imgs = []\n image_names = []\n # Loop all images in input folder\n for image_name in os.listdir(INPUT_DIRECTORY):\n print(f\"Processing {image_name}...\")\n # Open image\n image_path = os.path.join(INPUT_DIRECTORY, image_name)\n img = cv2.imread(image_path)\n img = ratio_resize(img)\n img_height, img_width, _ = img.shape\n\n # Identify main face\n main_face = None\n # Extract faces\n faces = extract_faces(img)\n # No face\n if len(faces) == 0:\n if not os.path.exists(NO_FACE_DIRECTORY):\n os.mkdir(NO_FACE_DIRECTORY)\n destination = 
os.path.join(NO_FACE_DIRECTORY, image_name)\n try:\n os.rename(image_path, destination)\n except WindowsError:\n os.remove(destination)\n os.rename(image_path, destination)\n continue\n\n # Set main face as the face with the highest confidence_heuristic\n max_area = max(map(get_face_area, faces))\n max_distance = max(map(lambda x: get_face_distance(x, img_height, img_width), faces))\n for face in faces:\n # Bigger confidence = better\n confidence = face[-1]\n # Bigger size = better\n size_heuristic = get_face_area(face)/max_area\n # Smaller distance = better\n distance_heuristic = 1 - (get_face_distance(face, img_height, img_width)/max_distance)\n\n # Weighted sum of heuristics\n confidence_heuristic = 0.4*confidence + 0.35*size_heuristic + 0.25*distance_heuristic\n if main_face is None or confidence_heuristic > main_face[1]:\n main_face = (face, confidence_heuristic)\n\n main_face = main_face[0]\n # Crop main_face\n main_face_rect = list(map(int, main_face[:4]))\n x, y, w, h = main_face_rect\n main_face_img = img[y+1:y+h-1, x+1:x+w-1]\n\n # Preprocess the image for FaceNet\n main_face_img = cv2.cvtColor(main_face_img, cv2.COLOR_BGR2RGB) # Convert to RGB\n main_face_img = cv2.resize(main_face_img, (160, 160)) # Resize image to match FaceNet's input size\n\n # Store main_face_img\n main_face_imgs.append(main_face_img)\n # Store image_name\n image_names.append(image_name) \n\n # Create and store main_face embeddings from main_face_imgs\n image_embeddings = np.array(embed_images(np.array(main_face_imgs)))\n image_names = np.array(image_names)\n\n\n # Cluster embeddings\n # RUN 1 (New images)\n # Preprocess signatures\n scaler = StandardScaler()\n embeddings_scaled = scaler.fit_transform(image_embeddings)\n\n # Tried my best to properly sort :<\n new_cluster_labels, new_exemplars = cluster_embeddings(embeddings_scaled, similarity_function=lambda x: np.mean(x) + 0.9*np.std(x))\n print(f\"Number of clusters from new data: {len(new_exemplars)}\")\n # End of RUN 1\n # Everything after this point is chaos TT (send help)\n\n\n # Get exemplars to be clustered (NOTE: These are already scaled embeddings)\n with open(EXEMPLARS_DATABASE, \"r\") as f:\n exemplars_data = json.load(f)\n\n # Get old exemplar data\n old_exemplars_cluster_labels = list(exemplars_data.keys())\n old_exemplars_embeddings = list(exemplars_data.values())\n \n # Get new exemplar data\n new_exemplars_embeddings = embeddings_scaled[new_exemplars].tolist()\n\n # RUN 2 (Exemplars)\n # Combine exemplars to cluster\n combined_exemplars_embeddings = np.array(old_exemplars_embeddings + new_exemplars_embeddings)\n # Tried my best to only join exemplars of same face\n combined_cluster_labels, combined_exemplars = cluster_embeddings(combined_exemplars_embeddings, similarity_function=lambda x: np.mean(x) + np.std(x))\n print(f\"Number of clusters from combined data: {len(combined_exemplars)}\")\n # End of RUN 2\n\n\n # New cluster name assignment of old exemplars\n old_combined_cluster = list(map(str, combined_cluster_labels[:len(old_exemplars_embeddings)]))\n # New cluster name assignment of new exemplars\n new_combined_cluster = list(map(str, combined_cluster_labels[len(old_exemplars_embeddings):]))\n # Map new cluster name assignment to correspond to the database naming\n # Heavily assumes that no old exemplars are combined to form one cluster\n combined2database_label = dict(zip(old_combined_cluster, old_exemplars_cluster_labels))\n\n # Update Exemplars Database\n for cluster_label, exemplar_embedding in 
zip(combined_cluster_labels[combined_exemplars], combined_exemplars_embeddings[combined_exemplars]):\n # If exemplar is not part of the old database, make a new entry\n if str(cluster_label) not in combined2database_label:\n combined2database_label[str(cluster_label)] = str(len(exemplars_data))\n exemplars_data[str(cluster_label)] = exemplar_embedding.tolist()\n\n\n # Update the exemplar database file\n with open(EXEMPLARS_DATABASE, \"w\") as f:\n json.dump(exemplars_data, f)\n\n\n # Map for respective cluster names\n with open(NAME_MAP_DATABASE, \"r\") as f:\n cluster_name = json.load(f)\n\n # Map to connect new_cluster_labels to combined_cluster_labels\n new2combined_label = dict(zip(map(str, new_cluster_labels[new_exemplars]), new_combined_cluster))\n\n # Store exemplar images so it could easily be named by user\n for index in new_exemplars:\n proposed_cluster_name = combined2database_label[new2combined_label[str(new_cluster_labels[index])]]\n destination = os.path.join(EXEMPLARS_DIRECTORY, proposed_cluster_name + \".JPG\")\n cv2.imwrite(destination, main_face_imgs[index])\n\n # Transfer files to respective cluster names\n for image_name, cluster in zip(image_names, new_cluster_labels):\n image_path = os.path.join(INPUT_DIRECTORY, image_name)\n\n # Cluster this image belongs in database\n proposed_cluster_name = combined2database_label[new2combined_label[str(cluster)]]\n destination_directory = os.path.join(OUTPUT_DIRECTORY, cluster_name.get(proposed_cluster_name, proposed_cluster_name))\n destination = os.path.join(destination_directory, image_name)\n if not os.path.exists(destination_directory):\n os.mkdir(destination_directory)\n\n try:\n os.rename(image_path, destination)\n except WindowsError:\n os.remove(destination)\n os.rename(image_path, destination)\n\n\nif __name__ == \"__main__\":\n main()\n\n\n","repo_name":"Imaginatorix/Cluster-Face","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7020,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"5249561016","text":"arrNumbers = [-3,3,5,-5,7,-7,7,8,1]\narrNew = []\n\nwhile arrNumbers:\n \n minimum = arrNumbers[0]\n\n for arrEle in arrNumbers:\n if arrEle < minimum:\n minimum = arrEle\n arrNew.append(minimum)\n arrNumbers.remove(minimum)\n\nprint(arrNew)\n\n\n\n","repo_name":"mary-ma-boston/pythonexample","sub_path":"SortExample.py","file_name":"SortExample.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"38921367200","text":"import pandas as pd\nimport csv\nimport numpy as np\n\nwords = pd.read_csv(\"vectors.txt\", sep=\" \", index_col=0, header=None, quoting=csv.QUOTE_NONE, dtype={\"Index\": str})\n\nwords_matrix = words.to_numpy()\n\nnp.save(\"words.npy\", words_matrix)\nwith open(\"words.txt\", \"w\", encoding=\"utf-8\") as file:\n i = 0\n for index, row in words.iterrows():\n if i == 5835:\n index = \"null\"\n if i == 18618:\n index = \"nan\"\n \n file.write(index)\n file.write(\"\\n\")\n i += 1\n","repo_name":"npistel/ml3","sub_path":"embedding/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"5480042999","text":"import socket\n\n# 5.The client sends to the server an integer. 
The server returns the list of divisors for the specified number.\n\nbind_tuple = ('0.0.0.0', 1234)\n\n\ndef array_string(array):\n    strin = \"\"\n    for el in array:\n        strin += str(el) +\",\"\n\n    return strin[:-1]\n\n\ndef get_divisors(integer):\n    array = []\n\n    for i in range(1, integer+1):\n        if integer % i == 0:\n            array.append(i)\n\n    return array\n\n\nif __name__ == '__main__':\n    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    sock.bind(bind_tuple)\n    sock.listen()\n\n    while True:\n        comm, addr = sock.accept()\n\n        data = comm.recv(100).decode()\n\n        integer = int(data)\n\n        divisors = get_divisors(integer)\n\n        send_string = array_string(divisors)\n\n        comm.send(send_string.encode())\n\n        comm.close()\n\n\n\n\n# See PyCharm help at https://www.jetbrains.com/help/pycharm/\n","repo_name":"x64alex/Networking","sub_path":"Lab1/Problem5/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"41750916237","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jun 6 14:40:49 2020\n\n@author: Cati\n\"\"\"\n\n\nfrom controller import Population\nfrom operations import *\n\nepsilon = 0.9\n\nif __name__ == '__main__':\n    (trainingData,output) = readTrainingData()\n    population=Population(trainingData,output)\n    i=1\n    while True:\n        error = population.train()\n        print(error)\n        i+=1\n        if error/', delete_recipe_view),\n    path('ingredients/', ingredients_view),\n    path('addingredientsitem/', add_ingredient_view),\n    path('deleteingredientsitem//', delete_ingredient_view),\n    path('equipment/', equipment_view),\n    path('addequipmentitem/', add_equipment_view),\n    path('deleteequipmentitem//', delete_equipment_view),\n]\n","repo_name":"inbarsh/easy-chef","sub_path":"easy_chef/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"15091300107","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nocr for apple segments\r\nauthor: gies\r\nversion: 10.12.2020\r\nthis is a demo version. \r\n\"\"\"\r\n\r\n#Aim of this script: \r\n#The idea is to read screenshots that contain tables with numbers, to extract the numbers and to store them in a txt-file.\r\n#There may be several screenshots that belong to one table. These should be saved in the same txt-file. 
\r\n#Overlapping rows should be deleted.\r\n#screenshots: Tables in white font on black ground\r\n#Warning: Rows that are cut-off are often not properly parsed.\r\n# See also: https://nanonets.com/blog/ocr-with-tesseract/\r\n\r\n#installation of packages via anaconda prompt (pip install pkg), attention: cv2 is opencv\r\n#from PIL import Image\r\nimport pytesseract\r\nimport cv2\r\nimport numpy as np\r\nimport re\r\nimport os\r\nimport glob\r\n#from os.path import join\r\n#specify where tesseract is installed\r\npytesseract.pytesseract.tesseract_cmd = r'wherever Tesseract is installed'\r\n\r\n##a few functions or definitions to improve image quality\r\n# get grayscale image\r\ndef get_grayscale(image):\r\n    return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n\r\n# noise removal\r\ndef remove_noise(image):\r\n    return cv2.medianBlur(image,5)\r\n \r\n#thresholding\r\ndef thresholding(image):\r\n    return cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]\r\n\r\n# swap black and white\r\ndef invert(image):\r\n    image = (255-image)\r\n    return(image)\r\n\r\n#dilation\r\ndef dilate(image):\r\n    kernel = np.ones((2,2),np.uint8)\r\n    return cv2.dilate(image, kernel, iterations = 1)\r\n \r\n#erosion\r\ndef erode(image):\r\n    kernel = np.ones((2,2),np.uint8)\r\n    return cv2.erode(image, kernel, iterations = 1)\r\n\r\n#skew correction\r\ndef deskew(image):\r\n    coords = np.column_stack(np.where(image > 0))\r\n    angle = cv2.minAreaRect(coords)[-1]\r\n    if angle < -45:\r\n        angle = -(90 + angle)\r\n    else:\r\n        angle = -angle\r\n    (h, w) = image.shape[:2]\r\n    center = (w // 2, h // 2)\r\n    M = cv2.getRotationMatrix2D(center, angle, 1.0)\r\n    rotated = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)\r\n    return rotated\r\n\r\n#template matching\r\ndef match_template(image, template):\r\n    return cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED) \r\n\r\n\r\n#thresh method seems to work best in combination with psm 12\r\n#whitelist: should only recognize digits and :\r\n#oem: different engines, but no difference in performance on my testdata\r\n#psm: page segmentation mode. Different performances: 1 and 3 are similar, 11 and 12 are similar and 4 and 6, 7-10 and 2 were empty\r\n\r\n#let's get started\r\n#define main folder\r\nwd = whateveryourfolder+'\\\\segments'\r\n\r\n#list all subfolders\r\n\r\n#list all sub-subfolders\r\n\r\n#read jpg and png files...\r\njpg = glob.glob(wd + '\\\\**\\\\*.jpg', recursive=True)\r\npng = glob.glob(wd + '\\\\**\\\\*.png', recursive=True)\r\nPNG = glob.glob(wd + '\\\\**\\\\*.PNG', recursive=True)\r\nfiles = jpg+png+PNG\r\nfiles = sorted(files)\r\n\r\n\r\n#loop through folder\r\nfor f in files:\r\n    \r\n    #extract directory, i.e. 
remove last part\r\n    path = os.path.dirname(f)\r\n    #extract name of participant\r\n    basename = os.path.basename(f)\r\n    name = basename.split(sep='_')[0]\r\n    number = re.search('(\\d)[^\\d]*$', basename).group(1)\r\n\r\n    filename=path+'\\\\'+name+'_sgmt.txt'\r\n    \r\n    #open image\r\n    image = cv2.imread(f)\r\n    \r\n    #convert into greyscale\r\n    gray = get_grayscale(image)\r\n    #invert white and black\r\n    gray = invert(gray)\r\n    #improve contrasts\r\n    thresh = thresholding(gray)\r\n    thresh = erode(thresh)\r\n    \r\n    \r\n    #define configuration\r\n    my_config=r'--psm 12 --oem 3 -c tessedit_char_whitelist=0123456789:' \r\n    #convert to string and save content in t\r\n    t = pytesseract.image_to_string(thresh, lang='eng', config=my_config)\r\n    #convert t to list (=vector), splits after each space\r\n    t2 = t.split()\r\n    \r\n    #manipulate content: keep only values with ':' in it\r\n    pattern = re.compile(r\".*:.*\") # Create the regular expression to match\r\n    t3 = [i for i in t2 if pattern.match(i)]\r\n    \r\n    #unfortunately, we may have more than one screenshot per table\r\n    #all screenshots from one table should be joined to one textfile\r\n    \r\n    #check whether there is already a textfile for the same table\r\n    l = glob.glob(filename, recursive =True)\r\n    #if not, we can simply save t3 as a textfile\r\n    if number == '1' or os.path.isfile(filename)==False:\r\n        print(\"no file\")\r\n        #remove first item (=time when screenshot was taken)\r\n        \r\n        t3.pop(0)\r\n        \r\n        #transform to array (=dataframe)\r\n        a = np.array(t3)\r\n        \r\n        #build second column with segments\r\n        l=len(t3)\r\n        segs = np.array(range(1,l+1))\r\n        a2 = np.array(list(zip(a,segs)))\r\n        \r\n        #save as textfile: first open the file \r\n        #file = open('C:/Users/acgie/Desktop/testfile.txt','w') \r\n        #write the array a2 to file\r\n        \r\n        with open(filename, \"w\") as file:\r\n            for line in a2:\r\n                file.write(\" \".join(line) + \"\\n\")\r\n        \r\n        #close file\r\n        file.close() \r\n    \r\n    # if there is already a textfile, we need to append the new data to the existing textfile.\r\n    #however, there is probably an overlap which should be avoided\r\n    else :\r\n        print(\"file exists\")\r\n        t3.pop(0)\r\n        obj = open(path+'\\\\'+name+'_sgmt.txt', 'r')\r\n        #retrieve information from existing textfile to check for overlap\r\n        result=[]\r\n        time = []\r\n        lines=obj.readlines()\r\n        for x in lines:\r\n            #segments\r\n            result.append(x.split(' ')[1])\r\n            #durations\r\n            time.append(x.split(' ')[0])\r\n        obj.close()\r\n\r\n        #we need the index of all matches in time and t3\r\n        reduced = [i for i, item in enumerate(t3) if item in time]\r\n        \r\n        #if there is no overlap, everything should be added\r\n        try:\r\n            #otherwise, delete matches\r\n            #there might be errors in ocr, let's build a new list\r\n            #everything up to the highest index should be removed\r\n            range_reduced = list(range(0,max(reduced)+1))\r\n            \r\n            #delete the matches \r\n            for e in sorted(range_reduced, reverse = True): \r\n                del t3[e] \r\n        except:\r\n            print('no overlap')\r\n            t3 = t3\r\n\r\n        # define next segment number in order to obtain consecutive numbering of all segments: \r\n        #retrieve the highest segment number in the textfile\r\n        result = [int(i) for i in result] \r\n        max_val = max(result)\r\n        \r\n        #transform to array (=dataframe)\r\n        a = np.array(t3)\r\n        \r\n        #build second column with segments\r\n        l=len(t3)\r\n        #segment numbers from max_val+1 (subsequent number) until max_val+ length t3\r\n        segs = np.array(range(max_val+1,max_val+l+1))\r\n        a2 = np.array(list(zip(a,segs)))\r\n        \r\n        #save as textfile: first open the file 
\r\n #file = open('C:/Users/acgie/Desktop/testfile.txt','w') \r\n #append the array a2 to file\r\n \r\n with open(filename, \"a\") as file:\r\n for line in a2:\r\n file.write(\" \".join(line) + \"\\n\")","repo_name":"ac-gieshoff/ocr","sub_path":"screenshots_numbers_ocr.py","file_name":"screenshots_numbers_ocr.py","file_ext":"py","file_size_in_byte":7159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"36058787557","text":"# -*- coding: utf-8 -*-\n'''\nCreated on 2018-05-08\n@summary: main application entrance\n@author: YangHaitao\n'''\n\nimport os\nimport logging\nimport asyncio\n\nimport asyncws\n\nimport logger\n\n\nLOG = logging.getLogger(__name__)\n\nHOST = \"0.0.0.0\"\nPORT = 8081\n\n\n@asyncio.coroutine\ndef test_websocket_service(websocket):\n LOG.info(\"open websocket path: %s\", websocket.request.path)\n frame_id = 0\n frame_interval = 0.010 # ms\n data_path = \"/home/breeze/Tmp/disk/2018-04-11_00:00:00/img\"\n if \"pcl_pointcloud\" in websocket.request.path:\n frame_interval = 0.010\n data_path = \"/home/breeze/Tmp/disk/2018-04-11_00:00:00/pcd\"\n try:\n frame_names = os.listdir(data_path)\n frame_names.sort()\n frames_length = len(frame_names)\n while True:\n try:\n if frame_id == frames_length:\n frame_id = 0\n LOG.debug(\"data_path: %s, frames_length: %s, frame_id: %s\", data_path, frames_length, frame_id)\n fp = open(os.path.join(data_path, frame_names[frame_id]), \"rb\")\n content = fp.read()\n fp.close()\n yield from websocket.send(content, True)\n yield from asyncio.sleep(frame_interval)\n frame_id += 1\n except ConnectionResetError:\n LOG.info(\"websocket close, path: %s\", websocket.request.path)\n break\n except Exception as e:\n LOG.exception(e)\n\n\nif __name__ == \"__main__\":\n logger.config_logging(file_name = \"test.log\",\n log_level = \"DEBUG\",\n dir_name = \"logs\",\n day_rotate = False,\n when = \"D\",\n interval = 1,\n max_size = 20,\n backup_count = 5,\n console = True)\n LOG.info(\"websocket server start\")\n LOG.info(\"service: %s:%s\", HOST, PORT)\n server = asyncws.start_server(test_websocket_service, HOST, PORT)\n asyncio.get_event_loop().run_until_complete(server)\n asyncio.get_event_loop().run_forever()\n LOG.info(\"websocket server end\")\n","repo_name":"fiefdx/asyncws-websocket-service-example","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"6120779385","text":"import sys\nimport os\nimport json\nimport datetime\nimport tornado.httpserver\nimport tornado.ioloop\nimport tornado.web\nimport tornado.wsgi\nfrom tornado import websocket\nfrom tornado.options import options, define, parse_command_line\nfrom django.core.wsgi import get_wsgi_application\nfrom tornado.httpclient import AsyncHTTPClient\n\ndefine('port', type=int, default=8888)\n\nclass MainHandler(tornado.web.RequestHandler):\n\tpass\n\t\"\"\"\n\tdef handle_response(self, response):\n\t\tif response.error:\n\t\t\t#print(\"Error: %s\" % response.error)\n\t\t\tself.write(response.error)\n\t\telse:\n\t\t\t#print(response.body)\n\t\t\tself.write(response.body)\n\t\tself.finish()\n\n\t@tornado.web.asynchronous\n\tdef get(self):\n\t\thttp_client = AsyncHTTPClient()\n\t\tpost_data = { 'username': 'ivanmarkov1997', 'password': 'i4611366968'} #A dictionary of your post data\n\t\tbody = tornado.escape.json_encode(post_data)\n\t\theaders = {}\n\t\theaders['Content-Type'] = 
'application/json'\n\t\thttp_client.fetch(\"http://127.0.0.1:8888/auth/token\", self.handle_response, method='POST', headers=headers, body=body)\n\t\"\"\"\n\nclass WSHandler(websocket.WebSocketHandler):\n\t\n\tclients = []\n\n\tdef handle_load_mes(self, response):\n\t\tprint(response.body)\n\n\t#prevent from Http-403 Forbidden\n\tdef check_origin(self, origin):\n\t\treturn True\n\n\tdef check_message(self, msg):\n\t\tprint(msg)\n\t\tif msg[0] > 0 and msg[4] > 0:\n\t\t\treturn True\n\t\telse: \n\t\t\treturn False\n\n\tdef open(self):\n\t\tprint(\"WS opened\")\n\t\tself.clients.append(self)\n\n\tdef on_message(self, message):\n\t\tprint(message)\n\t\tmsg = json.loads(message)\n\t\tif self.check_message(message):\n\t\t\tfor client in self.clients:\n\t\t\t\tclient.write_message(\"From \" + msg['name'] + \": \" + msg['text'])\n\t\t\thttp_client = AsyncHTTPClient()\n\t\t\theaders = {}\n\t\t\theaders['Content-Type'] = 'application/json'\n\t\t\theaders['Authorization'] = 'Token ' + message['token']\n\t\t\thttp_client.fetch(\"http://127.0.0.1:8888/api/v1.0/chat/messages/\", \n\t\t\t\t\t\t\t self.handle_load_mes,\n\t\t\t\t\t\t\t method='POST', \n\t\t\t\t\t\t\t headers=headers, \n\t\t\t\t\t\t\t body=message[['sender', 'text', 'name', 'date', 'prject_group']])\n\t\telse:\n\t\t\tself.write(\"incorrect message format\")\n\n\tdef on_close(self):\n\t\tprint(\"WS closed\")\n\t\tself.clients.remove(self)\n\ndef main():\n os.environ['DJANGO_SETTINGS_MODULE'] = 'joyle.settings' # TODO: edit this\n sys.path.append('./joyle') # path to your project if needed\n\n parse_command_line()\n\n wsgi_app = get_wsgi_application()\n container = tornado.wsgi.WSGIContainer(wsgi_app)\n\n tornado_app = tornado.web.Application(\n [\n ('/tornado', MainHandler),\n ('/websocket', WSHandler),\n ('.*', tornado.web.FallbackHandler, dict(fallback=container)),\n ])\n\n server = tornado.httpserver.HTTPServer(tornado_app)\n server.listen(options.port)\n\n tornado.ioloop.IOLoop.instance().start()\n\nif __name__ == '__main__':\n main()\n","repo_name":"ivanmarkov97/joyle_dip_rest","sub_path":"torn.py","file_name":"torn.py","file_ext":"py","file_size_in_byte":2826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"7807035478","text":"### --- Importanto bibliotecas --- ###\nimport pandas as pd\n\n### --- Importando a base --- ###\ndf = pd.read_csv(\"./Dados Brutos/BD_Teste BA.xlsx - BD_visitas.csv\")\n\ndf.head()\ndf.info()\n\n## -- Análse dos campos -- ##\n\n# - Negócio - Negócio criado em -- ##\ndf = df.rename(columns={\"Negócio - Negócio criado em\":\"Data de criação\"}) #Renomear a coluna\ndf.iloc[:,0].sort_values()\n\n# - Negócio - Organização 2 - #\ndf = df.rename(columns={\"Negócio - Organização 2\":\"Organização\"})\ndf.iloc[:,1].drop_duplicates().count()\n\n# - Negócio - Novo cliente? 
- #\ndf = df.rename(columns={\"Negócio - Novo cliente?\":\"Novo cliente?\"})\ndf.iloc[:,2].value_counts()\ndf.loc[(df[\"Novo cliente?\"].isnull())].count()\n\n# - Negócio - Etapa - #\ndf = df.rename(columns={\"Negócio - Etapa\":\"Etapa\"})\ndf.iloc[:,3].drop_duplicates().sort_values()\n\n# - Negócio - Status - #\ndf = df.rename(columns={\"Negócio - Status\":\"Status\"})\ndf.iloc[:,4].value_counts()\n\n# - Negócio - Data de ganho - #\ndf = df.rename(columns={\"Negócio - Data de ganho\":\"Data de ganho\"})\ndf.iloc[:,5].dropna().count()\n\n# - Negócio - Data de perda -#\ndf = df.rename(columns={\"Negócio - Data de perda\":\"Data da perda\"})\ndf.iloc[:,6].count()\n\n# - Organização - Canal de Origem - #\ndf = df.rename(columns={\"Organização - Canal de Origem\":\"Canal de Origem\"})\ndf.iloc[:,7].value_counts()\n\n# - Negócio - Data atualizada - #\ndf = df.rename(columns={\"Negócio - Data atualizada\":\"Data de atualização\"})\ndf.iloc[:,8].sort_values()\n\n# - Negócio - Data da Última Atividade - #\ndf = df.rename(columns={\"Negócio - Data da Última Atividade\":\"Data da última atividade\"})\ndf.loc[(df[\"Data da última atividade\"]).isnull()]\ndf.loc[(df[\"Data da última atividade\"]).isnull()][\"Etapa\"].value_counts()\ndf.loc[(df[\"Data da última atividade\"]).isnull()][\"Status\"].value_counts()\nlen(df.loc[(df[\"Data da última atividade\"]).isnull()].index)\n\n# - Exibição após tratamento inicial - #\ndf.head()\n\n\n# - Exportando a base tratada - #\ndf.to_csv(\"./Dados Tratados/Base_tratada.csv\")\n","repo_name":"jeffinish/Processo_Seletivo_Casa_Mineira_Analista_de_BI","sub_path":"Script Tratamento/script_tratamento.py","file_name":"script_tratamento.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"40525427462","text":"from re import sub\r\nimport telebot\r\nimport config\r\nfrom datetime import datetime\r\nimport pymongo\r\nfrom telebot import types\r\nimport random\r\nimport os\r\nimport pafy\r\nfrom cryptography.fernet import Fernet\r\n\r\nclient = pymongo.MongoClient(\"mongodb+srv://tikosch:qwerty123@cluster0.zjsr8ku.mongodb.net/bdbot?retryWrites=true&w=majority\")\r\ndb = client[\"bdbot\"]\r\nusers = db[\"Users\"]\r\nhomeworks = db[\"Homeworks\"]\r\nmusics = db[\"Music\"]\r\nvideos = db[\"Video\"]\r\n# user = {\"_id\": 123, \"name\": \"Arnur\"}\r\n# users.insert_one(user)\r\n\r\n\r\nclass SingletonClass(object):\r\n def __new__(cls):\r\n if not hasattr(cls, 'instance'):\r\n cls.instance = super(SingletonClass, cls).__new__(cls)\r\n return cls.instance\r\nbot = SingletonClass()\r\n\r\nbot = telebot.TeleBot(config.token)\r\n\r\ndef download_music(file_name, link):\r\n ydl_opts = {\r\n 'outtmpl': './'+file_name,\r\n 'format': 'bestaudio/best',\r\n 'postprocessors': [{\r\n 'key': 'FFmpegExtractAudio',\r\n 'preferredcodec': 'mp3',\r\n 'preferredquality': '256',\r\n }],\r\n 'prefer_ffmpeg': True\r\n }\r\n\r\n\r\ndef save_user_to_db(user):\r\n result = users.replace_one({\"id\": user['id']}, user, upsert=True)\r\ndef home_button():\r\n markup = types.ReplyKeyboardMarkup(resize_keyboard=True)\r\n btn1 = types.KeyboardButton(\"📚 Add homework\")\r\n btn2 = types.KeyboardButton(\"🗓 See deadlines\")\r\n btn3 = types.KeyboardButton(\"✅ Finish homework\")\r\n btn4 = types.KeyboardButton(\"🔴 Youtube Link\")\r\n btn5 = types.KeyboardButton(\"📂 All Completed homeworks\")\r\n markup.add(btn1, btn2, btn3, btn4, btn5)\r\n return markup\r\ndef deadlines_buttons():\r\n markup = 
types.ReplyKeyboardMarkup(resize_keyboard=True)\r\n btn1 = types.KeyboardButton(\"For 2 days\")\r\n btn2 = types.KeyboardButton(\"For week\")\r\n btn3 = types.KeyboardButton(\"For month\")\r\n btn4 = types.KeyboardButton(\"🏠Go home\")\r\n markup.add(btn1, btn2, btn3, btn4)\r\n return markup\r\ndef homeBtn():\r\n markup = types.ReplyKeyboardMarkup(resize_keyboard=True)\r\n btn4 = types.KeyboardButton(\"🏠Go home\")\r\n markup.add(btn4)\r\n return markup\r\ndef a_v():\r\n markup = types.ReplyKeyboardMarkup(resize_keyboard=True)\r\n btn4 = types.KeyboardButton(\"Video\")\r\n btn5 = types.KeyboardButton(\"Audio\")\r\n markup.add(btn4,btn5)\r\n return markup\r\ndef youtube_btn():\r\n markup = types.ReplyKeyboardMarkup(resize_keyboard=True)\r\n btn2 = types.KeyboardButton(\"Delete Song\")\r\n btn3 = types.KeyboardButton(\"Download Song\")\r\n btn4 = types.KeyboardButton(\"🏠Go home\")\r\n markup.add(btn2, btn3, btn4)\r\n return markup\r\n\r\n@bot.message_handler(commands=['start'])\r\ndef welcome(message):\r\n # Generate a key for encryption\r\n key = Fernet.generate_key()\r\n\r\n # Initialize a Fernet cipher object using the key\r\n cipher = Fernet(key)\r\n user = {}\r\n ciphertext = cipher.encrypt(message.from_user.first_name.encode())\r\n user['id'] = message.from_user.id\r\n user['first_name'] = ciphertext\r\n user['last_name']= message.from_user.last_name\r\n user['username'] = message.from_user.username\r\n users.insert_one(user)\r\n save_user_to_db(user)\r\n markup = home_button()\r\n\r\n bot.send_message(message.chat.id, \"Welcome to Std_Helper! {0.first_name}!\\nI can help you to organize your homeworks, track deadlines and track finished homeworks!\"\r\n .format(message.from_user, bot.get_me()), parse_mode='html', reply_markup=markup)\r\n \r\n\r\n@bot.message_handler(content_types=['text'])\r\ndef home(message):\r\n if message.chat.type == 'private':\r\n if message.text == \"📚 Add homework\":\r\n user = {}\r\n user['id'] = message.from_user.id\r\n user['is_completed'] = False\r\n bot.send_message(message.chat.id, \"Please enter the subject: \", reply_markup=homeBtn())\r\n bot.register_next_step_handler(message, lambda m: enter_subject(m, user))\r\n elif message.text == \"🗓 See deadlines\":\r\n bot.send_message(message.chat.id, \"Choose the period of deadlines: \", reply_markup=deadlines_buttons())\r\n bot.register_next_step_handler(message,choose_period_of_deadlines)\r\n elif message.text == \"✅ Finish homework\":\r\n finish_homework(message)\r\n elif message.text == \"📂 All Completed homeworks\":\r\n get_completed_hws(message)\r\n elif message.text == \"🔴 Youtube Link\":\r\n\r\n bot.send_message(message.chat.id, 'Go make some things', parse_mode='html', reply_markup=youtube_btn())\r\n try:\r\n bot.register_next_step_handler(message, youtube)\r\n except:\r\n bot.send_message(message.chat.id, 'Send me the right link', parse_mode='html')\r\n\r\n else:\r\n bot.send_message(message.chat.id, \"Select only buttons below!\", reply_markup=home_button())\r\n \r\ndef youtube(message):\r\n if message.chat.type == 'private':\r\n if message.text == \"🏠Go home\":\r\n bot.send_message(message.chat.id, \"Going home...\", reply_markup=home_button())\r\n bot.register_next_step_handler(message,home)\r\n elif message.text == \"Delete Song\":\r\n bot.send_message(message.chat.id, \"Going to delete by the title:\")\r\n choose_song(message)\r\n elif message.text == \"Download Song\":\r\n bot.send_message(message.chat.id, \"Going to download:\", reply_markup=a_v())\r\n 
bot.register_next_step_handler(message,choice)\r\ndef choose_song(message):\r\n markup = types.ReplyKeyboardMarkup(resize_keyboard=True)\r\n all_songs = musics.find({})\r\n for i in all_songs:\r\n btn1 = types.KeyboardButton(f\"📄{i['title']}\")\r\n markup.add(btn1)\r\n bot.send_message(message.chat.id, \"Choose song to delete: \" , reply_markup=markup, parse_mode='html')\r\n bot.register_next_step_handler(message, delete_song)\r\ndef delete_song(message):\r\n try:\r\n message.text = message.text.replace(\"📄\", \"\")\r\n os.remove(\"/Users/tikosch/Desktop/python/telegramBot/music/{}.webm\".format(message.text))\r\n musics.delete_many({\"title\": message.text})\r\n bot.send_message(message.chat.id, \"{} has been deleted\".format(message.text) , reply_markup=youtube_btn(), parse_mode='html')\r\n except OSError as e:\r\n bot.send_message(message.chat.id, f\"Has not been deleted {e}\", reply_markup=youtube_btn(), parse_mode='html')\r\n\r\ndef enter_subject(message, user):\r\n if message.text == \"🏠Go home\":\r\n bot.send_message(message.chat.id, \"Going home...\", reply_markup=home_button())\r\n bot.register_next_step_handler(message,home)\r\n else:\r\n subject_name = message.text\r\n user['subject'] = subject_name\r\n bot.send_message(message.chat.id, f\"Subject is: {subject_name}, enter the assignment: \", parse_mode='html', reply_markup=homeBtn())\r\n bot.register_next_step_handler(message, lambda m: enter_assignment(m, user))\r\n\r\ndef enter_assignment(message, user):\r\n if message.text == \"🏠Go home\":\r\n bot.send_message(message.chat.id, \"Going home...\", reply_markup=home_button())\r\n bot.register_next_step_handler(message,home)\r\n else:\r\n assignment = message.text\r\n user['assignment'] = assignment\r\n bot.send_message(message.chat.id, f\"Assignment is: {assignment}, enter the deadline in format YYYY-MM-DD: \", parse_mode='html', reply_markup=homeBtn())\r\n bot.register_next_step_handler(message, lambda m: enter_date(m, user))\r\n\r\ndef enter_date(message, user):\r\n if message.text == \"🏠Go home\":\r\n bot.send_message(message.chat.id, \"Going home...\", reply_markup=home_button())\r\n bot.register_next_step_handler(message,home)\r\n else:\r\n date_text = message.text\r\n try:\r\n datetime.strptime(date_text, '%Y-%m-%d')\r\n try:\r\n user['deadline'] = date_text\r\n homeworks.insert_one(user)\r\n users.update_one({\"id\": user['id']},{\"$push\":{\"deadlines\": user}})\r\n except:\r\n bot.send_message(message.chat.id, f\"Oops, something went wrong, try again.\", parse_mode='html', reply_markup=homeBtn())\r\n pass\r\n bot.send_message(message.chat.id, f\"You have successfully saved the assignment!\", parse_mode='html', reply_markup=home_button())\r\n \r\n except ValueError:\r\n bot.send_message(message.chat.id, f\"Oops, wrong date. 
Follow pattern YYYY-MM-DD \", parse_mode='html', reply_markup=homeBtn())\r\n bot.register_next_step_handler(message, lambda m: enter_date(m, user))\r\n pass\r\n \r\ndef choose_period_of_deadlines(message):\r\n if message.chat.type == 'private':\r\n if message.text == \"For 2 days\":\r\n for_days(message, 2)\r\n elif message.text == \"For week\":\r\n for_days(message, 7)\r\n elif message.text == \"For month\":\r\n for_days(message, 30)\r\n elif message.text == \"🏠Go home\":\r\n bot.send_message(message.chat.id, \"Going home...\", reply_markup=home_button())\r\n bot.register_next_step_handler(message,home)\r\n else:\r\n bot.send_message(message.chat.id, \"Oops, choose only buttons below.\", reply_markup=deadlines_buttons() )\r\n\r\ndef for_days(message,number_of_days):\r\n user_id = message.from_user.id\r\n HWs = homeworks.find({'id':user_id})\r\n today = datetime.today().day\r\n counter = 0\r\n Text = \"\"\r\n for works in HWs:\r\n if works['is_completed'] == False:\r\n deadline = works['deadline']\r\n date = datetime.strptime(deadline, \"%Y-%m-%d\")\r\n days = date.day\r\n if days-today <= number_of_days:\r\n counter = counter + 1\r\n Text = Text + f\"{counter}. {works['subject']} - {works['assignment']} with deadline {works['deadline']}\\n\"\r\n if counter == 0:\r\n bot.send_message(message.chat.id, f\"Hey, you can chill out. You don't have any deadlines for {number_of_days} days!📚\" , reply_markup=home_button(), parse_mode='html')\r\n else:\r\n bot.send_message(message.chat.id, Text , reply_markup=home_button(), parse_mode='html')\r\n if counter == 1:\r\n bot.send_message(message.chat.id, \"You have only 1 assignment. Easy peasy!🍋\" , reply_markup=home_button(), parse_mode='html')\r\n if counter == 2:\r\n bot.send_message(message.chat.id, \"You have 2 assignments. Not bad!\" , reply_markup=home_button(), parse_mode='html')\r\n if counter >= 3:\r\n bot.send_message(message.chat.id, \"Hey, you have a lot of work to do. 
Let's get done all of them!🤓\" , reply_markup=home_button(), parse_mode='html')\r\n bot.register_next_step_handler(message,home)\r\n\r\ndef get_music(message):\r\n pass\r\n\r\ndef choice(message):\r\n if message.text == \"Video\":\r\n bot.send_message(message.chat.id, \"Send me the link\" , reply_markup=youtube_btn(), parse_mode='html')\r\n bot.register_next_step_handler(message, download_video)\r\n elif message.text == \"Audio\":\r\n bot.send_message(message.chat.id, \"Send me the link\" , reply_markup=youtube_btn(), parse_mode='html')\r\n bot.register_next_step_handler(message, download_audio)\r\n else:\r\n bot.send_message(message.chat.id, \"Going home...\", reply_markup=home_button())\r\n bot.register_next_step_handler(message,home)\r\n\r\ndef download_audio(message):\r\n audio = pafy.new(message.text)\r\n stream = audio.audiostreams\r\n title = audio.title\r\n try:\r\n with open(\"/Users/tikosch/Desktop/python/telegramBot/music/{}.mp3\".format(title), 'rb') as audio:\r\n bot.send_audio(message.chat.id, audio=audio)\r\n bot.send_message(message.chat.id, \"Done ✅\", reply_markup=youtube_btn())\r\n bot.register_next_step_handler(message,youtube)\r\n except: \r\n music = {}\r\n music['title'] = title\r\n music['link'] = message.text\r\n needed = {}\r\n needed['duration'] = audio.duration\r\n needed['viewcount'] = int(audio.viewcount)\r\n needed['rating'] = audio.rating\r\n musics.insert_one(music)\r\n musics.update_one({\"link\": music['link']}, {\"$push\": {\"description\": needed}})\r\n stream[2].download(\"/Users/tikosch/Desktop/python/telegramBot/music\")\r\n with open(\"/Users/tikosch/Desktop/python/telegramBot/music/{}.webm\".format(title), 'rb') as audio:\r\n bot.send_audio(message.chat.id, audio=audio)\r\n audio.close()\r\n bot.send_message(message.chat.id, \"Done ✅\", reply_markup=youtube_btn())\r\n bot.register_next_step_handler(message,youtube)\r\n\r\n\r\ndef download_video(message):\r\n video = pafy.new(message.text)\r\n stream = video.videostreams\r\n title = video.title\r\n try:\r\n with open(\"/Users/tikosch/Desktop/python/telegramBot/music/{}.mp4\".format(title), 'rb') as video:\r\n bot.send_video(message.chat.id, video=video)\r\n bot.send_message(message.chat.id, \"Done ✅\", reply_markup=youtube_btn())\r\n bot.register_next_step_handler(message,youtube)\r\n except: \r\n vid = {}\r\n vid['title'] = title\r\n vid['link'] = message.text\r\n needed = {}\r\n needed['duration'] = video.duration\r\n needed['viewcount'] = int(video.viewcount)\r\n needed['rating'] = video.rating\r\n videos.insert_one(vid)\r\n videos.update_one({\"link\": vid['link']}, {\"$push\": {\"description\": needed}})\r\n stream[0].download(\"/Users/tikosch/Desktop/python/telegramBot/video\")\r\n with open(\"/Users/tikosch/Desktop/python/telegramBot/video/{}.mp4\".format(title), 'rb') as video:\r\n bot.send_video(message.chat.id, video=video)\r\n video.close()\r\n bot.send_message(message.chat.id, \"Done ✅\", reply_markup=youtube_btn())\r\n bot.register_next_step_handler(message,youtube)\r\n#Finish homework steps\r\ndef finish_homework(message):\r\n user_id = message.from_user.id\r\n HWs = homeworks.find({'id':user_id})\r\n Text = \"\"\r\n counter = 0\r\n for works in HWs:\r\n if works['is_completed'] == False:\r\n counter = counter + 1\r\n Text = Text + f\"{counter}. {works['subject']} - {works['assignment']} with deadline {works['deadline']}\\n\"\r\n if counter == 0:\r\n bot.send_message(message.chat.id, f\"Hey, you can chill out. 
You don't have any homework to finish.\" , reply_markup=home_button(), parse_mode='html')\r\n    else:\r\n        bot.send_message(message.chat.id, Text , parse_mode='html')\r\n        choose_hw_ids(message, counter)\r\n\r\ndef choose_hw_ids(message, counter):\r\n    user_id = message.from_user.id\r\n    markup = types.ReplyKeyboardMarkup(resize_keyboard=True)\r\n    HWs = homeworks.find({'id':user_id, 'is_completed':False})\r\n    for i in HWs:\r\n        btn1 = types.KeyboardButton(f\"📄{i['subject']} - {i['assignment']}\")\r\n        markup.add(btn1)\r\n    bot.send_message(message.chat.id, f\"Choose the finished homework's ID: \" , reply_markup=markup, parse_mode='html')\r\n    bot.register_next_step_handler(message, set_finish_to_hw)\r\n    \r\n\r\ndef set_finish_to_hw(message):\r\n    if message.text == \"🏠Go home\":\r\n        bot.send_message(message.chat.id, \"Going home...\", reply_markup=home_button())\r\n        bot.register_next_step_handler(message,home)\r\n    else:\r\n        try:\r\n            user_id = message.from_user.id\r\n            text = message.text\r\n            text = text.replace(\"📄\",'')\r\n            splitted = text.split(\"-\")\r\n            subject = splitted[0].strip()\r\n            assignment = splitted[1].strip()\r\n            \r\n            HWs = homeworks.update_one(\r\n                {'id':user_id,'subject':subject,'assignment':assignment},\r\n                {\"$set\":{\"is_completed\": True}\r\n                })\r\n            HW = users.update_one(\r\n                {'id':user_id},\r\n                {\"$pop\":{\"deadlines\": 1}\r\n                })\r\n            bot.send_message(message.chat.id, f\"Successfully finished {subject}!\" , reply_markup=home_button(), parse_mode='html')\r\n            bot.register_next_step_handler(message, home)\r\n        except:\r\n            bot.send_message(message.chat.id, f\"Oops, you entered a wrong ID, try again!\", parse_mode='html', reply_markup=homeBtn())\r\n            bot.register_next_step_handler(message, set_finish_to_hw)\r\n        \r\n        pass\r\n\r\n\r\n\r\n#all completed HWs\r\ndef get_completed_hws(message):\r\n    user_id = message.from_user.id\r\n    HWs = homeworks.find({'id':user_id, 'is_completed':True})\r\n    counter = 0\r\n    Text = \"\"\r\n    for works in HWs:\r\n        counter = counter + 1\r\n        Text = Text + f\"✅{counter}. {works['subject']} - {works['assignment']} with deadline {works['deadline']}\\n\"\r\n    if counter == 0:\r\n        bot.send_message(message.chat.id, f\"You haven't completed any homework yet :(\" , reply_markup=home_button(), parse_mode='html')\r\n    else:\r\n        bot.send_message(message.chat.id, Text , parse_mode='html')\r\n        bot.send_message(message.chat.id, f\"Keep it going! You have already done {counter} assignments.\" , reply_markup=home_button(), parse_mode='html')\r\n        bot.register_next_step_handler(message,home)\r\nbot.enable_save_next_step_handlers(delay=1)\r\nbot.polling(none_stop=True)\r\n\r\n\r\n","repo_name":"tikosch/std_helper_bot","sub_path":"telegramBot/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":17245,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
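An illustrative, standalone sketch (my names, not repo code): the deadline arithmetic the bot's for_days handler relies on, isolated as a pure function so it can be checked without a running Telegram bot or MongoDB.

from datetime import datetime

def days_until(deadline_str, today=None):
    # Deadlines are stored as YYYY-MM-DD strings, matching the bot's format.
    today = today or datetime.today()
    deadline = datetime.strptime(deadline_str, "%Y-%m-%d")
    return (deadline - today).days  # negative once the deadline has passed

print(days_until("2030-01-01") <= 7)  # due within a week? -> False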
+{"seq_id":"72938928549","text":"import time\nfrom dydx3 import constants, epoch_seconds_to_iso\nfrom tests.constants import SEVEN_DAYS_S\n\n# class - DydxWithdrawal: is used to manage withdrawals on dydx.\n\n\nclass DydxWithdrawal(object):\n    def __init__(self, dydx_client_instance_details):\n        self.dydx_client = dydx_client_instance_details[\"dydx_instance\"]\n\n    \"\"\"\n    :method - slow_withdrawal: responsible for withdrawing USDC from dydx. This sends a request to the dydx contract to withdraw USDC.\n    :param - withdrawal_params: the parameters passed to the dydx API.\n    :return - withdrawal information.\n    \"\"\"\n\n    def slow_withdrawal(self, withdrawal_params):\n        withdrawal = self.dydx_client.private.create_withdrawal(\n            position_id=withdrawal_params[\"position_id\"],\n            amount=withdrawal_params[\"amount\"],\n            asset=withdrawal_params[\"asset\"],\n            expiration_epoch_seconds=withdrawal_params[\"expiration_epoch_seconds\"],\n            to_address=withdrawal_params[\"to_address\"],\n        )\n        return vars(withdrawal)\n\n    \"\"\" \n    :method - fast_withdrawal: responsible for withdrawing USDC from dydx. This sends a request to a dydx liquidity pool to withdraw USDC off chain.\n    :param - withdrawal_params: the parameters passed to the dydx API.\n    :return - withdrawal information. \n    \"\"\"\n\n    def fast_withdrawal(self, withdrawal_params):\n        withdrawal_amount = withdrawal_params[\"withdrawal_amount\"]\n        fast_withdrawal_result = vars(self.fast_withdrawal_details(withdrawal_amount))\n\n        lp_position_id_result = list(\n            fast_withdrawal_result[\"data\"][\"liquidityProviders\"].keys()\n        )[0]\n        quote = fast_withdrawal_result[\"data\"][\"liquidityProviders\"][\n            lp_position_id_result\n        ][\"quote\"]\n        if quote is None:\n            raise Exception(\"Could not get a quote\")\n        debit_amount = quote[\"debitAmount\"]\n\n        create_fast_withdrawal_result = self.dydx_client.private.create_fast_withdrawal(\n            position_id=withdrawal_params[\"position_id\"],\n            credit_asset=constants.ASSET_USDC,\n            credit_amount=withdrawal_amount,\n            debit_amount=debit_amount,\n            to_address=withdrawal_params[\"to_address\"],\n            lp_position_id=lp_position_id_result,\n            lp_stark_public_key=list(\n                fast_withdrawal_result[\"data\"][\"liquidityProviders\"].values()\n            )[0][\"starkKey\"],\n            expiration=epoch_seconds_to_iso(time.time() + SEVEN_DAYS_S),\n        )\n        return create_fast_withdrawal_result.data\n\n    \"\"\" \n    :method - all_transfer_details: responsible for returning all the transfers that have been initiated.\n    :return - all transfer details. \n    \"\"\"\n\n    def all_transfer_details(self, params):\n        transfers = self.dydx_client.private.get_transfers(**params)\n        return vars(transfers)\n\n    \"\"\" \n    :method - fast_withdrawal_details: responsible for returning the details of a fast withdrawal for the given amount.\n    :return - fast withdrawal details.
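\n    :example - illustrative sketch added here, not repo code; client is assumed to be an already authenticated dydx3 client:\n        withdrawal = DydxWithdrawal({\"dydx_instance\": client})\n        details = vars(withdrawal.fast_withdrawal_details(\"100\"))\n        quote = list(details[\"data\"][\"liquidityProviders\"].values())[0][\"quote\"]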
\n    \"\"\"\n\n    def fast_withdrawal_details(self, withdrawal_amount):\n        get_fast_withdrawal_result = self.dydx_client.public.get_fast_withdrawal(\n            creditAsset=constants.ASSET_USDC, creditAmount=withdrawal_amount\n        )\n        return get_fast_withdrawal_result\n","repo_name":"CruizeFinance/trident","sub_path":"services/dydx_client/withdrawal.py","file_name":"withdrawal.py","file_ext":"py","file_size_in_byte":3352,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"14553092982","text":"# STEP 1 of WISPR data processing\n\n# This code was run on a computer with wget installed, producing the text files that were to be used for data display on the website\nimport math\nimport sys\nimport subprocess\nfrom astropy.io import fits\nimport os\n\nprint(\"running!!\")\nNUMORBITS = 15\n\n#\n#sets up wget\ndef runcmd(cmd, verbose = False, *args, **kwargs):\n    process = subprocess.Popen(\n        cmd,\n        stdout = subprocess.PIPE, \n        stderr = subprocess.PIPE,\n        text = True, \n        shell = True\n    )\n    std_out, std_err = process.communicate()\n    if verbose:\n        print(std_out.strip(), std_err)\n    pass\n\n#obtains FITS files by date\ndate = \"20210429\"\norbit = \"orbit08\"\nruncmd(\"wget -nc -r -l=2 --no-parent --accept fits https://wispr.nrl.navy.mil/data/rel/fits/L3/orbit15/\", verbose=True)\n\n\n#creates list of FITS files in certain date\n#extracts HAE coordinates from the FITS files\nm_in_au = 149597870700 #conversion factor obtained from https://cneos.jpl.nasa.gov/glossary/au.html\n\nfor i in range(15, NUMORBITS+1):\n    if i<10:\n        orbit = 'orbit'+'0'+str(i)\n    else:\n        orbit = 'orbit'+str(i)\n    with open(f'wispr_data/wispr_txt_data/{orbit}.txt', 'w') as f:\n        dates_list = os.listdir(f'wispr.nrl.navy.mil\\\\data\\\\rel\\\\fits\\\\L3\\\\{orbit}')\n        for datey in dates_list:\n            fits_list = os.listdir(f'wispr.nrl.navy.mil\\\\data\\\\rel\\\\fits\\\\L3\\\\{orbit}\\\\{datey}')\n            for fitsy in fits_list:\n                testfits = fits.open(f'wispr.nrl.navy.mil\\\\data\\\\rel\\\\fits\\\\L3\\\\{orbit}\\\\{datey}\\\\{fitsy}')\n                x = testfits[0].header['HAEX_OBS']\n                y = testfits[0].header['HAEY_OBS']\n                z = testfits[0].header['HAEZ_OBS']\n                ttime = testfits[0].header['DATE-AVG'][:16]\n                yr = ttime[:4]\n                mth = ttime[5:7]\n                day = ttime[8:10]\n                T = int(ttime.index('T'))\n                time = ttime[T+1:T+6]\n                ddate = f'{mth}/{day}/{yr}'\n                pngtime = yr+mth+day+'_'+time[:2]+time[3:]\n                dist = math.sqrt((x/m_in_au)**2 + (y/m_in_au)**2 + (z/m_in_au)**2)\n                f.write(f'{pngtime}, {ddate}, {time}, {x}, {y}, {z}, {dist}\\n')\n","repo_name":"adamzhen/Project-Icarus-Official","sub_path":"data_processing/get_wispr_fits.py","file_name":"get_wispr_fits.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"71"}
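A hardware-free sketch (added for illustration; the function name is mine, not the repo's): the heliocentric-distance computation the WISPR script above performs, isolated so the metres-to-AU conversion can be checked on its own.

import math

M_IN_AU = 149597870700  # metres per astronomical unit, same constant as above

def hae_distance_au(x, y, z):
    # HAE coordinates arrive in metres; convert each axis to AU, then take the norm.
    return math.sqrt((x / M_IN_AU) ** 2 + (y / M_IN_AU) ** 2 + (z / M_IN_AU) ** 2)

print(hae_distance_au(1.0e11, 0.0, 0.0))  # roughly 0.67 AU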
+{"seq_id":"13023817133","text":"\"\"\"\nVehicles IV\n\"\"\"\n\n# Create an instance of each child class. Access all their attributes and methods, including those inherited from their parent class Vehicle.\n\n# TAKEAWAY! - Vehicle is the baseline class for other more specific types of vehicles. Typically, you wouldn't instantiate a Vehicle because the child classes are more useful for storing information about vehicles. The Vehicle class serves to create a relationship between its children. However, \"submarine\" might be created as a Vehicle because it's so rare that you might not need a full Submarine class!\n\nclass Vehicle():\n    def __init__(self, name, owner):\n        self.name = name\n        self.owner = owner\n\n    category = 'transportation'\n\n    def start_engine(self):\n        print('Vrrrrrooomm!')\n\n\nclass Car(Vehicle):\n    def __init__(self, name, owner):\n        self.name = name\n        self.owner = owner\n\n    motion = 'drive'\n    terrain = 'land'\n\n    def honk_horn(self):\n        print('HONK!')\n\n\nclass Plane(Vehicle):\n    def __init__(self, name, owner):\n        self.name = name\n        self.owner = owner\n\n    motion = 'fly'\n    terrain = 'air'\n\n    def take_off(self):\n        print('Fasten your seatbelts!')\n\n\nclass Boat(Vehicle):\n    def __init__(self, name, owner):\n        self.name = name\n        self.owner = owner\n\n    motion = 'sail'\n    terrain = 'water'\n\n    def drop_anchor(self):\n        print('Anchors away!')\n\n\n# CAR INSTANCE\ncar1 = Car('The Batmobile','Batman')\n\nprint(car1.category) # transportation\nprint(car1.owner, 'can', car1.motion, car1.name, 'on', car1.terrain) # Batman can drive The Batmobile on land\ncar1.start_engine() # Vrrrrrooomm!\ncar1.honk_horn() # HONK!\n\nprint('\\n')\n\n# PLANE INSTANCE\nplane1 = Plane('The Canary', 'Amelia Earhart')\n\nprint(plane1.category) # transportation\nprint(plane1.owner, 'can', plane1.motion, plane1.name, 'through the', plane1.terrain) # Amelia Earhart can fly The Canary through the air\nplane1.start_engine() # Vrrrrrooomm!\nplane1.take_off() # Fasten your seatbelts!\n\nprint('\\n')\n\n# BOAT INSTANCE\nboat1 = Boat('Jenny', 'Forrest Gump')\n\nprint(boat1.category) # transportation\nprint(boat1.owner, 'can', boat1.motion, boat1.name, 'on', boat1.terrain) # Forrest Gump can sail Jenny on water\nboat1.start_engine() # Vrrrrrooomm!\nboat1.drop_anchor() # Anchors away!","repo_name":"mottaquikarim/pydev-psets","sub_path":"pset_classes/vehicles/solutions/p4.py","file_name":"p4.py","file_ext":"py","file_size_in_byte":2290,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"71"} +{"seq_id":"23222322573","text":"import os\nimport argparse\nimport json\n\ndef load_config(json_fn):\n    with open(json_fn, 'r') as infile:\n        config = json.load(infile)\n    return config\n\ndef create_config(args):\n    path = os.path.join('models',args['model_name'])\n    if not os.path.exists(path):\n        os.mkdir(path)\n    with open(os.path.join(path,'config.json'), 'w') as outfile:\n        json.dump(args, outfile)\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(\n        description='Create a config JSON')\n\n    #possible types/values\n\n    parser.add_argument('model_name',\n        help='model name. 
will create a directory for model where config,data,etc will go')\n parser.add_argument('spec_type',\n help='Spectrogram Type, cqt or logstft')\n parser.add_argument('init_lr', type=float,\n help='Initial Learning Rate')\n parser.add_argument('lr_decay',\n help='How the Learning Rate Will Decay')\n parser.add_argument('bin_multiple', type=int,\n help='Used to calculate bins_per_octave')\n parser.add_argument('residual', type=bool,\n help='Use Residual Connections or not')\n parser.add_argument('full_window',\n help='Whether or not the convolution window spans the full axis')\n\n\n args = vars(parser.parse_args())\n\n create_config(args)\n","repo_name":"sahrayusuf/auto_music_transcription","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"73378070308","text":"from typing import List\n\nimport k2\nimport k2.ragged as k2r\n\n\ndef get_texts(best_paths: k2.Fsa) -> List[List[int]]:\n \"\"\"Extract the texts (as word IDs) from the best-path FSAs.\n Args:\n best_paths:\n A k2.Fsa with best_paths.arcs.num_axes() == 3, i.e.\n containing multiple FSAs, which is expected to be the result\n of k2.shortest_path (otherwise the returned values won't\n be meaningful).\n Returns:\n Returns a list of lists of int, containing the label sequences we\n decoded.\n \"\"\"\n if isinstance(best_paths.aux_labels, k2.RaggedInt):\n # remove 0's and -1's.\n aux_labels = k2r.remove_values_leq(best_paths.aux_labels, 0)\n aux_shape = k2r.compose_ragged_shapes(\n best_paths.arcs.shape(), aux_labels.shape()\n )\n\n # remove the states and arcs axes.\n aux_shape = k2r.remove_axis(aux_shape, 1)\n aux_shape = k2r.remove_axis(aux_shape, 1)\n aux_labels = k2.RaggedInt(aux_shape, aux_labels.values())\n else:\n # remove axis corresponding to states.\n aux_shape = k2r.remove_axis(best_paths.arcs.shape(), 1)\n aux_labels = k2.RaggedInt(aux_shape, best_paths.aux_labels)\n # remove 0's and -1's.\n aux_labels = k2r.remove_values_leq(aux_labels, 0)\n\n assert aux_labels.num_axes() == 2\n return k2r.to_list(aux_labels)\n","repo_name":"csukuangfj/k2_decoding_benchmark","sub_path":"librispeech/local/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"71"} +{"seq_id":"13724583651","text":"import os\nimport ast\nimport bcrypt\n\nfrom datetime import datetime, timedelta\nfrom flask import Flask, render_template, request, redirect, url_for, flash, session\nfrom db import db\nfrom models.clientes import ClientesModel\nfrom models.prendas import PrendasModel\nfrom models.ordenes import OrdenesModel\nfrom models.user import UserModel\nfrom models.tiempos import TiemposModel\nfrom models.config import (\n ConfigPrendasModel,\n ConfigAreasModel,\n ConfigCategoriaEnvioModel,\n ConfigEmpresaEnvioModel,\n ConfigOpcionesEnvioModel,\n ConfigMediosCompraModel,\n ConfigMarcasModel,\n ConfigEstadosOrdenModel,\n ConfigMediosPagoModel,\n ConfigTallasModel,\n ConfigSubtipoPrendasModel,\n)\n\n\napp = Flask(__name__)\napp.secret_key = \"appcrisaapp\" \napp.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URI', 'sqlite:///data.db')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\nsalt = bcrypt.gensalt()\n\n\n# PRENDAS = ['falda', 'pantalon', 'short', 'buzo', 'camibuzo', 'camiseta', 'croptop', 'camisilla', 'camisa', 'vestido', 'correa', 'medias', 'gorro', 'gafas', 'tapabocas', 
'arnes', 'panoleta', 'cobija', 'panties', 'dakimakuras', 'kimono', 'cadena', 'sudadera', 'otro']\n\n# AREAS = [\"analisis\", \"planeacion\", \"diseno\", \"impresion\", \"sublimacion\", \"corte\", \"confeccion-preparacion1\", \"confeccion-preparacion2\", \"confeccion-terminacion\", \"calidad\", \"empaque\", \"almacen\", \"ventas\"]\n\n# CATEGORIA_ENVIOS = [\"24H\", \"CAT A\", \"CAT B\", \"CAT C\", \"CAT D\", \"CAT E\"]\n\n# EMPRESAS_ENVIO = [\"interrapidisimo\", \"servientrega\", \"472\", \"mensajero\", \"envia\", \"coordinadora\", \"deprisa\", \"tcc\", \"saferbo\", \"otro\"]\n\n# OPCIONES_ENVIO = [\"al cobro\", \"contado\", \"contraentrega\", \"recoge en local\"]\n\n# MEDIOS_COMPRA = [\"instagram\", \"whatsapp\", \"facebook\", \"etsy\", \"mercadolibre\", \"pagina web\", \"presencial\", \"amazon\", \"otro\"]\n\n# MARCAS = [\"crisa\", \"river caves\", \"rosie glam\", \"fighting\", \"possession\", \"artemis\", \"artemis\", \"akihabara\", \"happy banana\"]\n\n# ESTADOS_ORDEN = [\"analisis\", \"produccion\", \"almacen\", \"despachada\", \"cerrada\", \"cancelada\", \"cambios\", \"reembolso\"]\n\n# MEDIOS_PAGO = [\"nequi\", \"bancolombia consignacion\", \"bancolombia transferencia\", \"daviplata\", \"efecty\", \"gana\", \"baloto\", \"giro-otro\", \"davivienda\", \"payu\", \"paypal\", \"mercadopago\", \"western union\", \"efectivo\", \"nomina\", \"movil\", \"datafono\", \"otro\"]\n\n# TALLAS = [\"Talla unica\", \"junior\", \"XS\", \"S\", \"M\", \"L\", \"XL\", \"XXL\", \"XXXL\", \"Personalizada\"]\n\n\n\n\n@app.before_first_request\ndef first_run():\n db.create_all()\n\n usuario = UserModel.find_by_usuario(\"main_admin\")\n\n if not usuario:\n usuario = \"main_admin\"\n password = bcrypt.hashpw(\"appcrisa1029$\".encode(\"utf-8\"), salt)\n role = \"administrador\"\n area = \"administrador\"\n admin = UserModel(usuario, password, role, area)\n admin.save_to_db()\n\n\n# Rest API\n\n@app.route('/guardarorden', methods=['POST'])\ndef guardar_orden():\n\n data = request.form\n\n # Guardar datos del cliente\n tipo = data['cliente_categoria']\n nombre = data['cliente_nombre']\n correo = data['cliente_correo']\n tipodoc = data['cliente_tipodoc']\n cedula = data['cliente_cedula']\n telefono = data['cliente_telefono']\n direccion = data['cliente_direccion']\n barrio = data['cliente_barrio']\n ciudad = data['cliente_ciudad']\n departamento = data['cliente_departamento']\n pais = data['cliente_pais']\n codigo_postal = data['cliente_postal']\n\n cliente = ClientesModel.find_by_cedula(cedula)\n\n if cliente:\n cliente.tipo = tipo\n cliente.nombre = nombre\n cliente.correo = correo\n cliente.tipodoc = tipodoc\n cliente.cedula = cedula\n cliente.telefono = telefono\n cliente.direccion = direccion\n cliente.barrio = barrio\n cliente.ciudad = ciudad\n cliente.departamento = departamento\n cliente.pais = pais\n cliente.codigo_postal = codigo_postal\n cliente.save_to_db()\n cliente_id = cliente.id\n else:\n cliente = ClientesModel(tipo, nombre, correo, tipodoc, cedula, telefono, direccion, barrio, ciudad, departamento, pais, codigo_postal)\n cliente.save_to_db()\n cliente = ClientesModel.find_by_cedula(cedula)\n cliente_id = cliente.id\n\n\n # Registro del numero de orden\n user = session['user']\n numero_orden = data['numero_orden']\n\n if request.form.get('prioritaria') == \"orden_prioritaria\":\n prioridad = 'si'\n else:\n prioridad = 'no'\n \n flash(numero_orden, 'orden')\n estado_orden = data['estado_orden']\n opcion_envio = data['opcion_envio']\n if request.form.get('incluir_envio') == \"incluir_envio\":\n 
incluir_envio = 'si'\n empresa_envio = data['empresa_envio']\n precio_envio = data['precio_envio']\n guia_envio = data['guia_envio']\n else:\n incluir_envio = \"no\"\n empresa_envio = \"\"\n precio_envio = \"\"\n guia_envio = \"\"\n\n abono = data['abono']\n precio_total = data['precio_total']\n marca = data['marca']\n medio_compra = data['medio_compra']\n tiempo_estimado = data['tiempo_estimado']\n forma_pago = data['forma_pago']\n if request.form.get('pagado') == \"pagado\":\n pagado = 'si'\n else:\n pagado = 'no'\n comentarios = data['comentario']\n\n orden = OrdenesModel.find_by_orden(numero_orden)\n\n if orden:\n orden.numero_orden = numero_orden\n\n if estado_orden != 'despachada':\n orden.estado_orden = estado_orden\n\n if estado_orden == 'despachada':\n if orden.pagado == 'si':\n orden.estado_orden = estado_orden\n else:\n flash(f\"La orden {numero_orden} aun no ha sido pagada, aun no puede ser despachada\", 'error')\n\n orden.user = user\n orden.prioridad = prioridad\n orden.incluir_envio = incluir_envio\n orden.opcion_envio = opcion_envio\n orden.empresa_envio = empresa_envio\n orden.precio_envio = precio_envio\n orden.guia_envio = guia_envio\n orden.abono = abono\n orden.precio_total = precio_total\n orden.marca = marca\n orden.medio_compra = medio_compra\n orden.forma_pago = forma_pago\n orden.pagado = pagado\n orden.comentarios = comentarios\n orden.cliente_id = cliente_id\n orden.tiempo_estimado = tiempo_estimado\n orden.save_to_db()\n flash(f\"Orden {numero_orden} actualizada exitosamente\", 'success')\n orden_id = orden.id\n\n else:\n orden = OrdenesModel(user,numero_orden, prioridad, estado_orden, incluir_envio, opcion_envio, empresa_envio, precio_envio, guia_envio, abono, precio_total, marca, medio_compra, forma_pago, pagado, comentarios, cliente_id, tiempo_estimado)\n orden.save_to_db()\n flash(f\"Orden {numero_orden} creada exitosamente\", 'success')\n orden = OrdenesModel.find_by_orden(numero_orden)\n orden_id = orden.id\n\n\n\n # Guardar registro de las prendas de la orden\n data_keys = data.keys()\n prendas_index = list()\n for i in data_keys:\n if 'tipo' in i and 'sub' not in i and 'doc' not in i:\n prendas_index.append(i)\n\n for i in prendas_index:\n index = i[-1]\n\n id_prenda = data[f'id_{index}']\n tipo_prenda = data[f'tipo_{index}']\n subtipo = data[f'subtipo_{index}']\n genero = data[f'genero_{index}']\n talla = data[f'talla_{index}']\n imagen = data[f'imagen_{index}']\n precio = data[f'precio_{index}']\n cantidad = data[f'cantidad_{index}']\n especificacion = data[f'especificacion_{index}']\n\n if id_prenda == \"\":\n prenda = PrendasModel(tipo_prenda, subtipo, genero, talla, imagen, precio, cantidad, especificacion, orden_id)\n prenda.save_to_db()\n else:\n id_prenda = int(id_prenda)\n prenda = PrendasModel.find_by_id(id_prenda)\n prenda.tipo_prenda = tipo_prenda\n prenda.subtipo = subtipo\n prenda.genero = genero\n prenda.talla = talla\n prenda.imagen = imagen\n prenda.precio = precio\n prenda.cantidad = cantidad\n prenda.especificacion = especificacion\n prenda.save_to_db()\n\n return redirect(url_for('ordenes'))\n\n@app.route('/guardarordendiseno', methods=['POST'])\ndef guardar_orden_diseno():\n\n data = request.form\n\n # Guardar datos del cliente\n tipo = \"interna\"\n nombre = \"interna\"\n correo = \"interna\"\n tipodoc = \"interna\"\n cedula = \"interna\"\n telefono = \"interna\"\n direccion = \"interna\"\n barrio = \"interna\"\n ciudad = \"interna\"\n departamento = \"interna\"\n pais = \"interna\"\n codigo_postal = \"interna\"\n\n cliente = 
ClientesModel.find_by_cedula(cedula)\n\n if cliente:\n cliente.tipo = tipo\n cliente.nombre = nombre\n cliente.correo = correo\n cliente.tipodoc = tipodoc\n cliente.cedula = cedula\n cliente.telefono = telefono\n cliente.direccion = direccion\n cliente.barrio = barrio\n cliente.ciudad = ciudad\n cliente.departamento = departamento\n cliente.pais = pais\n cliente.codigo_postal = codigo_postal\n cliente.save_to_db()\n cliente_id = cliente.id\n else:\n cliente = ClientesModel(tipo, nombre, correo, tipodoc, cedula, telefono, direccion, barrio, ciudad, departamento, pais, codigo_postal)\n cliente.save_to_db()\n cliente = ClientesModel.find_by_cedula(cedula)\n cliente_id = cliente.id\n\n\n # Registro del numero de orden\n user = session['user']\n numero_orden = data['numero_orden']\n flash(numero_orden, 'orden')\n prioridad = 'no'\n estado_orden = data['estado_orden']\n opcion_envio = \"interna\"\n incluir_envio = \"no\"\n empresa_envio = \"interna\"\n precio_envio = \"interna\"\n guia_envio = \"interna\"\n\n abono = \"interna\"\n precio_total = \"interna\"\n marca = \"interna\"\n medio_compra = \"interna\"\n tiempo_estimado = \"interna\"\n forma_pago = \"interna\"\n pagado = 'no'\n comentarios = data['comentario']\n\n orden = OrdenesModel.find_by_orden(numero_orden)\n\n if orden:\n orden.numero_orden = numero_orden\n orden.estado_orden = estado_orden\n orden.user = user\n orden.prioridad = prioridad\n orden.incluir_envio = incluir_envio\n orden.opcion_envio = opcion_envio\n orden.empresa_envio = empresa_envio\n orden.precio_envio = precio_envio\n orden.guia_envio = guia_envio\n orden.abono = abono\n orden.precio_total = precio_total\n orden.marca = marca\n orden.medio_compra = medio_compra\n orden.forma_pago = forma_pago\n orden.pagado = pagado\n orden.comentarios = comentarios\n orden.cliente_id = cliente_id\n orden.tiempo_estimado = tiempo_estimado\n orden.save_to_db()\n flash(f\"Orden {numero_orden} actualizada exitosamente\", 'success')\n orden_id = orden.id\n\n else:\n orden = OrdenesModel(user,numero_orden, prioridad, estado_orden, incluir_envio, opcion_envio, empresa_envio, precio_envio, guia_envio, abono, precio_total, marca, medio_compra, forma_pago, pagado, comentarios, cliente_id, tiempo_estimado)\n orden.save_to_db()\n flash(f\"Orden {numero_orden} creada exitosamente\", 'success')\n orden = OrdenesModel.find_by_orden(numero_orden)\n orden_id = orden.id\n\n\n # Guardar registro de las prendas de la orden\n data_keys = data.keys()\n prendas_index = list()\n for i in data_keys:\n if 'especificacion' in i:\n prendas_index.append(i)\n\n for i in prendas_index:\n index = i[-1]\n\n id_prenda = data[f'id_{index}']\n tipo_prenda = \"interna\"\n subtipo = \"interna\"\n genero = \"interna\"\n talla = \"interna\"\n imagen = \"interna\"\n precio = \"interna\"\n cantidad = \"interna\"\n especificacion = data[f'especificacion_{index}']\n\n if id_prenda == \"\":\n prenda = PrendasModel(tipo_prenda, subtipo, genero, talla, imagen, precio, cantidad, especificacion, orden_id)\n prenda.area_responsable = \"diseno\"\n prenda.save_to_db()\n else:\n id_prenda = int(id_prenda)\n prenda = PrendasModel.find_by_id(id_prenda)\n prenda.tipo_prenda = tipo_prenda\n prenda.subtipo = subtipo\n prenda.genero = genero\n prenda.talla = talla\n prenda.imagen = imagen\n prenda.precio = precio\n prenda.cantidad = cantidad\n prenda.especificacion = especificacion\n prenda.save_to_db()\n\n return redirect(url_for('ordenes'))\n\n@app.route('/crearnumerodeorden')\ndef new_order():\n utc_date = datetime.now()\n col_date = 
utc_date - timedelta(hours=5)\n    fecha = col_date.strftime(\"%y%m%d%H%M%S\")\n    return {'numero_orden': fecha}\n\n@app.route('/getuser/<cedula>')\ndef get_user(cedula):\n    cliente = ClientesModel.find_by_cedula(cedula)\n    if cliente:\n        return cliente.json()\n    else:\n        return {\"message\":\"Usuario no encontrado\"}, 400\n\n@app.route('/getorder/<order>')\ndef get_order(order):\n    orden = OrdenesModel.find_by_orden(order)\n    if orden:\n        return orden.json()\n    else:\n        return {\"message\":\"Orden no encontrada\"}, 400\n\n@app.route('/borrarprenda/<prenda_id>', methods=['DELETE'])\ndef borrar_prenda(prenda_id):\n    prenda_id = int(prenda_id)\n    prenda = PrendasModel.find_by_id(prenda_id)\n    if prenda:\n        prenda.delete_from_db()\n        return {\"message\":\"Prenda eliminada con exito\"}\n    # prenda_id is an int here, so build the message with an f-string instead of string concatenation\n    return {\"message\":f\"No se encontro la prenda con el id {prenda_id}\"}\n\n@app.route('/borrarusuario', methods=['POST'])\ndef borrar_usuario():\n    data = request.form\n    usuario = data['eliminar-usuario']\n    user = UserModel.find_by_usuario(usuario)\n    if user:\n        user.delete_from_db()\n        flash(\"Usuario eliminado con exito\", \"success\")\n    else:\n        flash(\"Usuario no encontrado\", \"error\")\n    return redirect(url_for('configuracion'))\n\n@app.route('/borrarorden', methods=['POST'])\ndef borrar_orden():\n    data = request.form\n    numero_orden = data['eliminar-orden']\n\n    orden = OrdenesModel.find_by_orden(numero_orden)\n    if orden:\n        orden.delete_from_db()\n        flash(\"Orden eliminada exitosamente\", \"success\")\n\n    else:\n        flash(\"Orden no encontrada\", \"error\")\n    return redirect(url_for('configuracion'))\n\n@app.route('/actualizarestadoorden', methods=[\"POST\"])\ndef actualizar_estado_orden():\n\n    data = request.form\n    flash(data['numero_orden'], 'orden')\n    order = OrdenesModel.find_by_orden(data['numero_orden'])\n\n    if data['estado_orden'] == \"almacen\":\n        flag = True\n        for prenda in order.prendas:\n            if prenda.empacado == \"no\":\n                flash(f\"La orden {data['numero_orden']} tiene prendas pendientes por empacar\", \"error\")\n                flag = False\n                break\n        if flag:\n            order.estado_orden = data['estado_orden']\n            order.save_to_db()\n            flash(\"Estado actualizado exitosamente\", 'success')\n\n\n    elif data['estado_orden'] == 'despachada':\n        if order.estado_orden == \"almacen\":\n            if order.pagado == 'si':\n                order.estado_orden = data['estado_orden']\n                order.save_to_db()\n                flash(\"Estado actualizado exitosamente\", 'success')\n            else:\n                flash(f\"La orden {data['numero_orden']} aun no ha sido pagada, aun no puede ser despachada\", 'error')\n        else:\n            flash(f\"La orden {data['numero_orden']} no se encuentra en almacen\", 'error')\n\n    elif data['estado_orden'] == 'cerrada':\n\n        for prenda in order.prendas:\n            prenda_model = PrendasModel.find_by_id(prenda.id)\n            prenda_model.user_responsable = \"\"\n            prenda_model.save_to_db()\n\n        order.estado_orden = data['estado_orden']\n        order.save_to_db()\n        flash(\"Estado actualizado exitosamente\", 'success')\n\n\n    else:\n        order.estado_orden = data['estado_orden']\n        order.save_to_db()\n        flash(\"Estado actualizado exitosamente\", 'success')\n\n\n    return redirect(url_for('produccion'))\n\n@app.route('/actualizardespacho', methods=[\"POST\"])\ndef actualizar_despacho():\n\n    data = request.get_json()\n    order = OrdenesModel.find_by_orden(data['numero_orden'])\n    if data['estado_orden'] == 'despachada':\n        if order.estado_orden == \"almacen\":\n            if order.pagado == 'si':\n                order.estado_orden = data['estado_orden']\n                order.save_to_db()\n                return {\"message\":\"Estado actualizado exitosamente\"}\n            else:\n                
return{\"message\":f\"La orden {data['numero_orden']} aun no ha sido pagada, aun no puede ser despachada\"}\n else:\n return{\"message\":f\"La orden {data['numero_orden']} no se encuentra en almacen\"}\n\n return {\"message\":\"La orden no se marco como despachada\"}\n\n@app.route('/logout') \ndef logout(): \n if 'user' in session: \n session.pop('user',None) \n return redirect(url_for('home'))\n\n@app.route('/tiempos', methods=['POST'])\ndef cargar_tiempos():\n data = request.get_json()\n prenda = PrendasModel.find_by_id(data['id_prenda'])\n user = UserModel.find_by_usuario(session['user'])\n tiempos = TiemposModel.find_by_prendayusuario(prenda.id, user.id)\n \n if tiempos:\n tiempo = False\n for i in tiempos:\n if i.final == \"\":\n tiempo = i\n break\n \n if tiempo:\n tiempo.final = (datetime.now() - timedelta(hours=5)).strftime(\"%Y %m %d %H %M %S\")\n tiempo.save_to_db()\n else:\n tiempo = TiemposModel(user.id, prenda.orden_id, prenda.id, session['area'])\n tiempo.save_to_db()\n\n else:\n tiempo = TiemposModel(user.id, prenda.orden_id, prenda.id, session['area'])\n tiempo.save_to_db()\n\n return tiempo.json()\n\n@app.route('/verificartiempos/')\ndef verificar_tiempos(order):\n user = UserModel.find_by_usuario(session['user'])\n orden = OrdenesModel.find_by_orden(order)\n if orden:\n\n tiempos = TiemposModel.find_by_ordenyusuario(orden.id, user.id)\n y = list()\n for tiempo in tiempos:\n if tiempo.final == \"\":\n y.append(tiempo.json())\n\n return {\"tiempos_abiertos\":y}\n\n else:\n return {\"message\":\"Orden no encontrada\"}\n\n@app.route('/casoproduccion', methods=['POST'])\ndef casoproduccion():\n data = request.get_json()\n prenda = PrendasModel.find_by_id(data['id'])\n\n if prenda:\n if prenda.caso_produccion != data['caso']:\n prenda.caso_produccion = data['caso']\n estados_produccion = dict()\n for estado in data['estados']:\n estados_produccion[estado] = 0\n prenda.estados_produccion = str(estados_produccion)\n prenda.area_responsable = data[\"area_inicial\"]\n prenda.save_to_db()\n return {'message':'Actualizado con exito', \"estados_produccion\":prenda.estados_produccion}\n else:\n return {'message':'El caso actual seleccionado es el mismo'}\n \n return {'message':'Prenda no encontrada'}\n\n@app.route('/marcarproduccion', methods=['POST'])\ndef marcarproduccion():\n data = request.get_json()\n prenda = PrendasModel.find_by_id(data[\"id_prenda\"])\n\n if prenda:\n estados_produccion = ast.literal_eval(prenda.estados_produccion)\n if data[\"estado\"]:\n estados_produccion[data[\"area\"]] = 1\n prenda.estados_produccion = str(estados_produccion)\n prenda.save_to_db()\n return {\"message\":\"Estado actualizado exitosamente\", \"status\":\"ok\"}\n\n else:\n return {\"message\":\"Servicio no disponible\", \"status\":\"\"}\n\n else:\n return {\"message\":\"Servicio no disponible\", \"status\":\"\"}\n\n@app.route('/getprendaestado/')\ndef getprendaestado(prenda_id):\n prenda = PrendasModel.find_by_id(prenda_id)\n if prenda:\n return{\"estados\":prenda.json()[\"estados_produccion\"]}\n else:\n return {\"message\":\"prenda no encontrada\"}\n\n@app.route('/crearusuario', methods=[\"POST\"])\ndef crear_usuario():\n data = request.form\n usuario = data['input-nuevo-usuario']\n password = bcrypt.hashpw(data[\"input-nuevo-usuario-password\"].encode(\"utf-8\"), salt)\n role = data[\"input-nuevo-usuario-role\"]\n area = data[\"input-nuevo-usuario-area\"]\n\n user = UserModel.find_by_usuario(usuario)\n\n if user:\n flash(f\"El usuario {usuario} ya existe, por favor seleccione otro nombre de 
usuario\", \"error\")\n return(redirect(url_for('configuracion')))\n\n else:\n user = UserModel(usuario, password, role, area)\n try:\n user.save_to_db()\n flash(f\"Usuario {usuario} creado con éxito\", \"success\")\n return(redirect(url_for('configuracion')))\n except:\n flash(f\"Error al crear el usuario {usuario}, por favor intentelo nuevamente\", \"error\")\n return(redirect(url_for('configuracion')))\n\n@app.route('/modificarusuario', methods=[\"POST\"])\ndef modificar_usuario():\n data = request.form\n\n usuario = data['modificar-usuario']\n password = bcrypt.hashpw(data[\"modificar-password\"].encode(\"utf-8\"), salt)\n role = data[\"modificar-role\"]\n area = data[\"modificar-area\"]\n\n user = UserModel.find_by_usuario(usuario)\n\n if user:\n user.password = password\n user.role = role\n user.area = area\n try:\n user.save_to_db()\n flash(f\"Usuario {usuario} actualizado exitosamente\", \"success\")\n return(redirect(url_for('configuracion')))\n except:\n flash(f\"Error al actualizar el usuario {usuario}, por favor intentelo nuevamente\", \"error\")\n return(redirect(url_for('configuracion')))\n\n else:\n flash(\"Usuario no encontrado\", \"error\")\n return(redirect(url_for('configuracion')))\n\n@app.route('/arearesponsableprenda', methods=[\"POST\"])\ndef area_responsable_prenda():\n data = request.get_json()\n prenda = PrendasModel.find_by_id(data[\"id_prenda\"])\n if prenda:\n prenda.area_responsable = data[\"area\"]\n prenda.save_to_db()\n return {\"message\":f\"Se asigno exitosamente a {data['area']}\"}\n else:\n return {\"message\":\"Error al asignar área, por favor recargue la página e inténtelo nuevamente\"}\n\n@app.route('/usuarioresponsableprenda', methods=[\"POST\"])\ndef usuario_responsable_prenda():\n data = request.get_json()\n prenda = PrendasModel.find_by_id(data[\"id_prenda\"])\n if prenda:\n usuario = UserModel.find_by_usuario(data[\"usuario\"])\n\n if usuario:\n \n if usuario.area == data['area']:\n prenda.user_responsable = data[\"usuario\"]\n prenda.save_to_db()\n return {\"message\": f\"Se asigno exitosamente a {data['usuario']}\"}\n \n return {'message': f'El usuario seleccionado no es del area {data[\"area\"]}'}\n \n else:\n return {\"message\": f\"el usuario {data['usuario']} no existe\"}\n \n else:\n return {\"message\": \"Error al asignar usuario, por favor recargue la página e inténtelo nuevamente\"}\n\n@app.route('/cambiarusuariovacio/')\ndef cambiar_usuario_vacio(prenda_id):\n\n prenda = PrendasModel.find_by_id(prenda_id)\n if prenda:\n\n prenda.user_responsable = \"\"\n try:\n prenda.save_to_db()\n return {\"message\": \"Usuarios desasignados exitosamente\"}\n except:\n return {\"message\": \"Servidor no disponible\"}\n \n else:\n return {\"message\": \"prenda no encontrada\"}\n\n@app.route('/verordenes')\ndef ver_ordenes():\n x = OrdenesModel.find()\n y = list()\n for i in x:\n y.append(i.json())\n return {'ordenes':y}\n\n@app.route('/verordenesbydate', methods=['POST'])\ndef ver_ordenes_by_date():\n data = request.get_json()\n\n if data[\"desde\"] and data['hasta']:\n desde = datetime.strptime(data[\"desde\"], \"%Y-%m-%d\")\n hasta = datetime.strptime(data[\"hasta\"], \"%Y-%m-%d\")\n elif data[\"desde\"] and not data['hasta']:\n desde = datetime.strptime(data[\"desde\"], \"%Y-%m-%d\")\n hasta = desde + timedelta(days=100)\n elif data['hasta'] and not data[\"desde\"]:\n hasta = datetime.strptime(data[\"hasta\"], \"%Y-%m-%d\")\n desde = hasta - timedelta(days=100)\n else:\n utc_date = datetime.now()\n hasta = utc_date - timedelta(hours=5)\n desde = 
hasta - timedelta(days=100)\n\n    data[\"desde\"] = desde.strftime(\"%y%m%d%H%M%S\")\n    data[\"hasta\"] = hasta.strftime(\"%y%m%d%H%M%S\")\n\n    x = OrdenesModel.find_by_date(**data)\n    y = list()\n    for i in x:\n        y.append(i.json())\n    return {'ordenes':y}\n\n@app.route('/vertiempos')\ndef ver_tiempos():\n    x = TiemposModel.find()\n    y = dict()\n    cont = 1\n    for i in x:\n        y[cont] = i.json()\n        cont = cont + 1\n    return y\n\n@app.route('/get_tiempos', methods=['POST'])\ndef get_tiempos():\n    data = request.get_json()\n    utc_date = datetime.now()\n    col_date = utc_date - timedelta(hours=5)\n    default_min_date = col_date - timedelta(days=100)\n\n    if data[\"desde\"]:\n        data[\"desde\"] = datetime.strptime(data[\"desde\"], \"%Y-%m-%d\")\n    else:\n        data[\"desde\"] = default_min_date\n\n    if data[\"hasta\"]:\n        data[\"hasta\"] = datetime.strptime(data[\"hasta\"], \"%Y-%m-%d\")\n    else:\n        data[\"hasta\"] = col_date\n\n    try:\n        if data['orden']:\n            data['orden'] = OrdenesModel.find_by_orden(data['orden']).id\n\n        if data['usuario']:\n            data['usuario'] = UserModel.find_by_usuario(data['usuario']).id\n    except:\n        return {\"tiempos\": []}\n\n    x = TiemposModel.find_query(data['orden'], data['usuario'], data['area'])\n    y = list()\n    for i in x:\n        prenda = i.prenda.tipo\n        if data[\"prenda\"] == \"\" or prenda == data['prenda']:\n            inicio = datetime.strptime(i.inicio, \"%Y %m %d %H %M %S\")\n            if inicio <= data['hasta'] and inicio >= data[\"desde\"]:\n                # only completed entries have an end timestamp; open entries have no duration\n                # to report, and formatting an empty final would fail, so they are skipped\n                if i.final != \"\":\n                    final = datetime.strptime(i.final, \"%Y %m %d %H %M %S\")\n                    tiempo = final - inicio\n                    hours, remainder = divmod(tiempo.seconds, 3600)\n                    minutes, seconds = divmod(remainder, 60)\n\n                    item = {\n                        \"orden\": i.orden.numero_orden,\n                        \"usuario\": i.user.usuario,\n                        \"area\": i.area_produccion,\n                        \"inicio\": inicio.strftime(\"%Y-%m-%d %H:%M:%S\"),\n                        \"final\": final.strftime(\"%Y-%m-%d %H:%M:%S\"),\n                        \"prenda\": prenda,\n                        \"tiempo\": tiempo.seconds,\n                        \"tiempo_str\": f\"{hours}:{minutes}:{seconds}\"\n                    }\n                    y.append(item)\n    return {\"tiempos\":y}\n\n@app.route('/getsession')\ndef getsession():\n\n    if 'user' in session:\n        return {\n            \"usuario\": session['user'],\n            \"role\": session['role'],\n            \"area\": session['area'],\n            \"message\": \"success\"\n        }\n    else:\n        return {\"message\":\"none\"}\n\n@app.route('/getuserinfo')\ndef getuserinfo():\n\n    data = request.form\n    usuario = data['input-nuevo-usuario']\n    password = bcrypt.hashpw(data[\"input-nuevo-usuario-password\"].encode(\"utf-8\"), salt)\n    role = data[\"input-nuevo-usuario-role\"]\n    area = data[\"input-nuevo-usuario-area\"]\n\n    user = UserModel.find_by_usuario(usuario)\n\n    if user:\n        flash(f\"El usuario {usuario} ya existe, por favor seleccione otro nombre de usuario\", \"error\")\n        return(redirect(url_for('configuracion')))\n\n    else:\n        user = UserModel(usuario, password, role, area)\n        try:\n            user.save_to_db()\n            flash(f\"Usuario {usuario} creado con éxito\", \"success\")\n            return(redirect(url_for('configuracion')))\n        except:\n            flash(f\"Error al crear el usuario {usuario}, por favor intentelo nuevamente\", \"error\")\n            return(redirect(url_for('configuracion')))\n\n@app.route('/agregarcomentario', methods=[\"POST\"])\ndef agregar_comentario():\n    data = request.get_json()\n    nuevo_comentario = data['comentario']\n    \n    prenda = PrendasModel.find_by_id(data['id'])\n\n    if prenda:\n        prenda.especificacion = prenda.especificacion +\" >>> \"+ nuevo_comentario\n        try:\n            prenda.save_to_db()\n            return {\"message\": \"ok\", \"especificacion\": prenda.especificacion}\n        except:\n            return {\"message\":\"Error en el 
servidor\"}\n else:\n return {\"message\":\"prenda no encontrada\"}\n\n@app.route('/modificarcomentario', methods=[\"POST\"])\ndef modificar_comentario():\n data = request.get_json()\n nuevo_comentario = data['comentario']\n \n prenda = PrendasModel.find_by_id(data['id'])\n\n if prenda:\n prenda.especificacion = nuevo_comentario\n try:\n prenda.save_to_db()\n return {\"message\": \"ok\", \"especificacion\": prenda.especificacion}\n except:\n return {\"message\":\"Error en el servidor\"}\n else:\n return {\"message\":\"prenda no encontrada\"}\n\n@app.route('/marcarpago', methods=[\"POST\"])\ndef marcar_pago():\n data = request.get_json()\n\n orden = OrdenesModel.find_by_orden(data['orden'])\n\n if orden:\n if data['estado']:\n orden.pagado = \"si\"\n orden.abono = orden.precio_total\n else:\n orden.pagado = \"no\"\n orden.abono = \"\"\n \n try:\n orden.save_to_db()\n return {\"message\":\"actualizado exitosamente\"}\n except:\n return{\"message\":\"Servidor no disponible, intentelo mas tarde\"},500\n else:\n return {\"message\":\"Orden no encontrada\"},400\n\n@app.route('/marcarempaque', methods=['POST'])\ndef marcar_empaque():\n\n data = request.get_json()\n\n prenda = PrendasModel.find_by_id(data['prenda_id'])\n\n if prenda:\n if data['estado']:\n prenda.empacado = \"si\"\n prenda.area_responsable = \"\"\n else:\n prenda.empacado = \"no\"\n prenda.area_responsable = \"empaque\"\n\n try:\n prenda.save_to_db()\n return {\"message\":\"Actualizado exitosamente\"}\n\n except:\n return {\"message\":\"Servidor no disponible, intente nuevamente\"}\n\n \n return {\"message\":\"Prenda no encontrada\"}\n\ndef config_add_item(item, cls):\n nuevo_item = cls.find_one(item)\n\n if nuevo_item:\n flash(f\"El item {item} ya existe\", \"error\")\n return False\n\n nuevo_item = cls(item)\n try:\n nuevo_item.save_to_db()\n flash(f\"Item {item} creada exitosamente\", \"success\")\n return True\n\n except:\n flash(\"Error interno del servidor, vuelva a intentarlo\", \"error\")\n return False\n\ndef config_get_items(cls):\n items_list = []\n items = cls.find()\n\n for item in items:\n item_json = item.json()\n items_list.append(item_json.get(\"item\"))\n\n return {\"items\": items_list}\n\n@app.route(\"/config_addprenda\", methods=['POST'])\ndef config_addprenda():\n item = request.form.get(\"item\").strip().lower()\n result = config_add_item(item, ConfigPrendasModel)\n return redirect(url_for('configuracion'))\n\n@app.route(\"/config_getprendas\")\ndef config_get_prendas():\n return config_get_items(ConfigPrendasModel)\n\n@app.route(\"/config_addareas\", methods=['POST'])\ndef config_addareas():\n item = request.form.get(\"item\").strip().lower()\n result = config_add_item(item, ConfigAreasModel)\n return redirect(url_for('configuracion'))\n\n@app.route(\"/config_getareas\")\ndef config_get_areas():\n return config_get_items(ConfigAreasModel)\n\n@app.route(\"/config_addcategoriaenvio\", methods=['POST'])\ndef config_addcategoriaenvio():\n item = request.form.get(\"item\").strip().lower()\n result = config_add_item(item, ConfigCategoriaEnvioModel)\n return redirect(url_for('configuracion'))\n\n@app.route(\"/config_getcategoriaenvio\")\ndef config_get_categoriaenvio():\n return config_get_items(ConfigCategoriaEnvioModel)\n\n@app.route(\"/config_addempresaenvio\", methods=['POST'])\ndef config_addempresaenvio():\n item = request.form.get(\"item\").strip().lower()\n result = config_add_item(item, ConfigEmpresaEnvioModel)\n return redirect(url_for('configuracion'))\n\n@app.route(\"/config_getempresaenvio\")\ndef 
config_get_empresaenvio():\n return config_get_items(ConfigEmpresaEnvioModel)\n\n@app.route(\"/config_addopcionesenvio\", methods=['POST'])\ndef config_addopcionesenvio():\n item = request.form.get(\"item\").strip().lower()\n result = config_add_item(item, ConfigOpcionesEnvioModel)\n return redirect(url_for('configuracion'))\n\n@app.route(\"/config_getopcionesenvio\")\ndef config_get_opcionesenvio():\n return config_get_items(ConfigOpcionesEnvioModel)\n\n@app.route(\"/config_addmedioscompra\", methods=['POST'])\ndef config_addmedioscompra():\n item = request.form.get(\"item\").strip().lower()\n result = config_add_item(item, ConfigMediosCompraModel)\n return redirect(url_for('configuracion'))\n\n@app.route(\"/config_getmedioscompra\")\ndef config_get_medioscompra():\n return config_get_items(ConfigMediosCompraModel)\n\n@app.route(\"/config_addmarcas\", methods=['POST'])\ndef config_addmarcas():\n item = request.form.get(\"item\").strip().lower()\n result = config_add_item(item, ConfigMarcasModel)\n return redirect(url_for('configuracion'))\n\n@app.route(\"/config_getmarcas\")\ndef config_get_marcas():\n return config_get_items(ConfigMarcasModel)\n\n@app.route(\"/config_addestadosorden\", methods=['POST'])\ndef config_addestadosorden():\n item = request.form.get(\"item\").strip().lower()\n result = config_add_item(item, ConfigEstadosOrdenModel)\n return redirect(url_for('configuracion'))\n\n@app.route(\"/config_getestadosorden\")\ndef config_get_estadosorden():\n return config_get_items(ConfigEstadosOrdenModel)\n\n@app.route(\"/config_addmediospago\", methods=['POST'])\ndef config_addmediospago():\n item = request.form.get(\"item\").strip().lower()\n result = config_add_item(item, ConfigMediosPagoModel)\n return redirect(url_for('configuracion'))\n\n@app.route(\"/config_getmediospago\")\ndef config_get_mediospago():\n return config_get_items(ConfigMediosPagoModel)\n\n@app.route(\"/config_addtallas\", methods=['POST'])\ndef config_addtallas():\n item = request.form.get(\"item\").strip().lower()\n result = config_add_item(item, ConfigTallasModel)\n return redirect(url_for('configuracion'))\n\n@app.route(\"/config_gettallas\")\ndef config_get_tallas():\n return config_get_items(ConfigTallasModel)\n\n@app.route(\"/config_addsubtipo\", methods=['POST'])\ndef config_addsubtipo():\n prenda = request.form.get(\"prenda\").strip().lower()\n item = request.form.get(\"item\").strip().lower()\n nuevo_item = ConfigSubtipoPrendasModel.find_one(item)\n\n if nuevo_item:\n flash(f\"El item {item} ya existe\", \"error\")\n return redirect(url_for('configuracion'))\n\n prenda_obj = ConfigPrendasModel.find_one(prenda)\n\n if prenda_obj:\n nuevo_item = ConfigSubtipoPrendasModel(item, prenda_obj.id)\n\n try:\n nuevo_item.save_to_db()\n flash(f\"Item {item} creada exitosamente\", \"success\")\n return redirect(url_for('configuracion'))\n\n except:\n flash(\"Error interno del servidor, vuelva a intentarlo\", \"error\")\n return redirect(url_for('configuracion'))\n\n\n return redirect(url_for('configuracion'))\n\n flash(f\"La prenda {prenda} no existe\", \"error\")\n return redirect(url_for('configuracion'))\n\n\n@app.route(\"/config_getsubtipo/\")\ndef config_get_subtipo(prenda):\n items_list = []\n prenda = ConfigPrendasModel.find_one(prenda)\n if prenda:\n subtipos_obj = ConfigSubtipoPrendasModel.find_by_prenda(prenda.id)\n for subtipo in subtipos_obj:\n items_list.append(subtipo.item)\n \n return {'items': items_list}\n\n\n\n\n# Pagina web\n\n@app.route('/', methods=['GET', 'POST'])\ndef home():\n 
print(os.environ.get('DATABASE_URI'))\n if 'user' in session:\n return redirect(url_for('ordenes'))\n else:\n if request.method == \"POST\":\n input_usuario = request.form['usuario']\n input_password = request.form['password'].encode(\"utf-8\")\n\n usuario = UserModel.find_by_usuario(input_usuario)\n\n if usuario:\n # password_comparar = bytes.fromhex(usuario.password[2:])\n password_comparar = usuario.password\n\n if bcrypt.checkpw(input_password, password_comparar):\n session['user'] = usuario.usuario\n session['role'] = usuario.role\n session['area'] = usuario.area\n\n return redirect(url_for('ordenes'))\n\n else:\n flash(\"Email o contraseña incorrecta\", \"error\")\n return render_template('login.html')\n\n else:\n flash(\"Email o contraseña incorrecta\", \"error\")\n return render_template('login.html')\n\n else:\n return render_template('login.html')\n\n@app.route('/ordenes')\ndef ordenes():\n\n GLOBAL_CONTEXT = {\n \"prendas\": config_get_prendas()[\"items\"],\n \"areas\": config_get_areas()[\"items\"],\n \"categorias_envio\": config_get_categoriaenvio()[\"items\"],\n \"empresas_envio\": config_get_empresaenvio()[\"items\"],\n \"opciones_envio\": config_get_opcionesenvio()[\"items\"],\n \"medios_compra\": config_get_medioscompra()[\"items\"],\n \"marcas\": config_get_marcas()[\"items\"],\n \"estados_orden\": config_get_estadosorden()[\"items\"],\n \"medios_pago\": config_get_mediospago()[\"items\"],\n \"tallas\": config_get_tallas()[\"items\"],\n }\n\n if 'user' in session:\n\n if session['role'] == \"administrador\":\n return render_template('ordenes.html', **GLOBAL_CONTEXT)\n\n elif session['area'] == \"ventas\":\n\n if session['role'] == \"jefe-area\":\n return render_template('ordenes-ventas.html', **GLOBAL_CONTEXT)\n elif session['role'] == \"operador\":\n return render_template('ordenes-ventas-operarios.html', **GLOBAL_CONTEXT)\n\n elif session['area'] == \"diseno\" and session['role'] == 'jefe-area':\n return render_template('ordenes-diseno.html')\n\n else:\n return redirect(url_for('informes'))\n\n else:\n return redirect(url_for('home'))\n\n@app.route('/produccion')\ndef produccion():\n\n GLOBAL_CONTEXT = {\n \"prendas\": config_get_prendas()[\"items\"],\n \"areas\": config_get_areas()[\"items\"],\n \"categorias_envio\": config_get_categoriaenvio()[\"items\"],\n \"empresas_envio\": config_get_empresaenvio()[\"items\"],\n \"opciones_envio\": config_get_opcionesenvio()[\"items\"],\n \"medios_compra\": config_get_medioscompra()[\"items\"],\n \"marcas\": config_get_marcas()[\"items\"],\n \"estados_orden\": config_get_estadosorden()[\"items\"],\n \"medios_pago\": config_get_mediospago()[\"items\"],\n \"tallas\": config_get_tallas()[\"items\"],\n }\n\n if 'user' in session:\n\n if session['role'] == 'operador':\n if session['area'] == 'ventas':\n return render_template(\"produccion-ventas.html\", **GLOBAL_CONTEXT) #ok\n\n elif session['area'] == 'almacen':\n return redirect(url_for('informes'))\n\n elif session['area'] == \"analisis\":\n return render_template(\"produccion-analisis.html\") #ok\n\n elif session['area'] == \"empaque\":\n return render_template(\"produccion-empaques.html\", **GLOBAL_CONTEXT) #ok\n\n else:\n return render_template(\"produccion-operarios.html\") #ok\n\n\n elif session['role'] == 'jefe-area':\n if session['area'] == 'ventas':\n return render_template(\"produccion-ventas.html\", **GLOBAL_CONTEXT) #ok\n\n elif session['area'] == 'almacen':\n return redirect(url_for('informes'))\n\n elif session['area'] == \"analisis\":\n return 
render_template(\"produccion-analisis.html\") #ok\n\n elif session['area'] == \"empaque\":\n return render_template(\"produccion-empaques-jefe.html\", **GLOBAL_CONTEXT) #ok\n \n elif session['area'] == \"diseno\":\n return render_template(\"produccion-jefe-diseno.html\", **GLOBAL_CONTEXT) #ok\n \n elif session['area'] == \"planeacion\":\n return render_template(\"produccion-planeacion.html\", **GLOBAL_CONTEXT) #ok\n\n else:\n return render_template(\"produccion-jefe.html\", **GLOBAL_CONTEXT) #ok\n\n\n elif session['role'] == 'administrador':\n return render_template(\"produccion.html\", **GLOBAL_CONTEXT) #ok\n\n else:\n return{\"message\":\"Role no reconocido\"}\n\n else:\n return redirect(url_for('home'))\n\n@app.route('/informes')\ndef informes():\n\n GLOBAL_CONTEXT = {\n \"prendas\": config_get_prendas()[\"items\"],\n \"areas\": config_get_areas()[\"items\"],\n \"categorias_envio\": config_get_categoriaenvio()[\"items\"],\n \"empresas_envio\": config_get_empresaenvio()[\"items\"],\n \"opciones_envio\": config_get_opcionesenvio()[\"items\"],\n \"medios_compra\": config_get_medioscompra()[\"items\"],\n \"marcas\": config_get_marcas()[\"items\"],\n \"estados_orden\": config_get_estadosorden()[\"items\"],\n \"medios_pago\": config_get_mediospago()[\"items\"],\n \"tallas\": config_get_tallas()[\"items\"],\n }\n\n if 'user' in session:\n\n if session['role'] == 'operador':\n if session['area'] == 'ventas':\n return render_template(\"informes-ventas.html\", **GLOBAL_CONTEXT) #ok\n\n elif session['area'] == 'almacen':\n return render_template(\"informes-almacen.html\", **GLOBAL_CONTEXT)\n\n elif session['area'] == \"analisis\":\n return render_template(\"informes-analisis.html\", **GLOBAL_CONTEXT) #ok\n \n elif session['area'] == \"empaque\":\n return render_template(\"informes-empaque.html\", **GLOBAL_CONTEXT) #ok\n\n else:\n return render_template(\"informes-operarios.html\", **GLOBAL_CONTEXT) #ok\n\n\n elif session['role'] == 'jefe-area':\n if session['area'] == 'ventas':\n return render_template(\"informes-ventas.html\", **GLOBAL_CONTEXT) #ok\n\n elif session['area'] == 'almacen':\n return render_template(\"informes-almacen.html\", **GLOBAL_CONTEXT)\n\n elif session['area'] == \"analisis\":\n return render_template(\"informes-analisis-jefe.html\", **GLOBAL_CONTEXT) #ok\n\n elif session['area'] == \"empaque\":\n return render_template(\"informes-empaque-jefe.html\", **GLOBAL_CONTEXT) #ok\n\n elif session['area'] == \"diseno\":\n return render_template(\"informes-jefe-diseno.html\", **GLOBAL_CONTEXT) #ok\n\n else:\n return render_template(\"informes-jefe.html\", **GLOBAL_CONTEXT) #ok\n\n\n elif session['role'] == 'administrador':\n return render_template(\"informes.html\", **GLOBAL_CONTEXT) #ok\n\n else:\n return{\"message\":\"Role no reconocido\"}\n \n else:\n return redirect(url_for('home'))\n\n@app.route('/configuracion')\ndef configuracion():\n\n GLOBAL_CONTEXT = {\n \"prendas\": config_get_prendas()[\"items\"],\n \"areas\": config_get_areas()[\"items\"],\n \"categorias_envio\": config_get_categoriaenvio()[\"items\"],\n \"empresas_envio\": config_get_empresaenvio()[\"items\"],\n \"opciones_envio\": config_get_opcionesenvio()[\"items\"],\n \"medios_compra\": config_get_medioscompra()[\"items\"],\n \"marcas\": config_get_marcas()[\"items\"],\n \"estados_orden\": config_get_estadosorden()[\"items\"],\n \"medios_pago\": config_get_mediospago()[\"items\"],\n \"tallas\": config_get_tallas()[\"items\"],\n }\n\n if 'user' in session:\n if session['role'] == \"administrador\":\n return 
render_template(\"configuracion.html\", **GLOBAL_CONTEXT)\n        else:\n            return redirect(url_for('informes'))\n    else:\n        return redirect(url_for('home'))\n\n@app.route('/cambiarcontrasena', methods=[\"POST\", \"GET\"])\ndef cambiar_contrasena():\n    if request.method == \"POST\":\n\n        data = request.form\n\n        user = UserModel.find_by_usuario(data['usuario'])\n        if user:\n            password_comparar = bytes.fromhex(user.password[2:])\n            if bcrypt.checkpw(data['contrasena-actual'].encode(\"utf-8\"), password_comparar):\n                user.password = bcrypt.hashpw(data['contrasena-nueva'].encode(\"utf-8\"), salt)\n                try:\n                    user.save_to_db()\n                except:\n                    flash(\"Error en el servidor, trate nuevamente\", \"error\")\n                    return redirect(url_for('home'))\n\n                flash(\"Contraseña actualizada exitosamente\", \"success\")\n                return redirect(url_for('home'))\n\n            else:\n                flash(\"Contraseña incorrecta\", \"error\")\n                return redirect(url_for('home'))\n\n        else:\n            flash(\"Usuario no encontrado\", \"error\")\n            return redirect(url_for('home'))\n\n    else:\n        return render_template('contrasena.html')\n\n\n\nif __name__ == \"__main__\":\n    db.init_app(app)\n    app.run(host=\"0.0.0.0\", debug = True)\n","repo_name":"andresgoag/appcrisa","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":45635,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
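A minimal, self-contained sketch (added for illustration; names and the stored hash are assumptions, not appcrisa code): the session-based login pattern the app above uses — bcrypt.checkpw against a stored hash, then the user kept in the Flask session.

import bcrypt
from flask import Flask, request, session

app = Flask(__name__)
app.secret_key = "change-me"  # placeholder; the real app keeps its own secret

# Stand-in for UserModel.password as produced by bcrypt.hashpw above.
STORED_HASH = bcrypt.hashpw(b"secret", bcrypt.gensalt())

@app.route("/login", methods=["POST"])
def login():
    password = request.form["password"].encode("utf-8")
    if bcrypt.checkpw(password, STORED_HASH):
        session["user"] = request.form["usuario"]  # same session key the app stores
        return {"message": "ok"}
    return {"message": "Email o contraseña incorrecta"}, 401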
to decrement\n COUNTER -= 1\n print (bin(COUNTER)[2:].zfill(3))\n sleep(0.5)\n if (GPIO.event_detected(channel) and COUNTER == 0): # Stop decrement and Restart\n COUNTER = 7 # At 7\n print (bin(COUNTER)[2:].zfill(3))\n sleep(0.5)\n return\n\n# Add rising edge detection on a channel, ignoring further edges for 200ms for switchbouncing\nGPIO.add_event_detect(UP_BUTTON, GPIO.RISING, callback=onClickButtonUp, bouncetime=200)\nGPIO.add_event_detect(DOWN_BUTTON, GPIO.RISING, callback=onClickButtonDown, bouncetime=200)\n\n# Control on and off state of LEDs\ndef control(counter):\n binaryValue = bin(counter)[2:].zfill(3)\n for key, value in enumerate(binaryValue):\n if (value == '1'):\n onValue(key) # Set appropiate LEDs to HIGH\n else:\n offValue(key) # Set appropiate LEDs to LOW\n return\n# Select LEDs to be set HIGH\ndef onValue(pin):\n if (pin == 0):\n GPIO.output(LED_1, True)\n if (pin == 1):\n GPIO.output(LED_2, True)\n if (pin == 2):\n GPIO.output(LED_3, True)\n return\n# Select LEDs to be set LOW\ndef offValue(pin):\n if (pin == 0):\n GPIO.output(LED_1, False)\n if (pin == 1):\n GPIO.output(LED_2, False)\n if (pin == 2):\n GPIO.output(LED_3, False)\n return\n\n# Logic tO write\ndef main():\n global UP_BUTTON\n global DOWN_BUTTON\n onClickButtonUp(UP_BUTTON)\n onClickButtonDown(DOWN_BUTTON)\n control(COUNTER)\n\n# Only run the functions if\nif __name__ == \"__main__\":\n # Make sure the GPIO is stopped correctly\n try:\n while True:\n main()\n except KeyboardInterrupt:\n print(\"Exiting gracefully\")\n # Turn off your GPIOs here\n GPIO.cleanup()\n except e:\n GPIO.cleanup()\n print(\"Some other error occurred\")\n print(e.message)\n","repo_name":"OyamaPlati/EEE3096S_Prac1","sub_path":"binaryCounter.py","file_name":"binaryCounter.py","file_ext":"py","file_size_in_byte":3298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"16306585620","text":"from collections import deque\n\n\nclass Solution:\n \"\"\"my bfs sol, first attempt, need optimized\"\"\"\n\n def orangesRotting(self, grid: List[List[int]]) -> int:\n\n if not grid or not grid[0]:\n return -1\n\n m, n = len(grid), len(grid[0])\n next_rot = []\n fresh_num = 0\n time = 0\n\n # count fresh number and location of rot ones\n for i in range(m):\n for j in range(n):\n if grid[i][j] == 1:\n fresh_num += 1\n elif grid[i][j] == 2:\n next_rot.append([i, j])\n\n # if no fresh and no rot\n if fresh_num == 0:\n return 0\n\n # if have rot, loop to see if all fresh can turn rot\n while next_rot:\n\n if fresh_num == 0:\n return time\n\n prev_rot, next_rot = next_rot, []\n for i, j in prev_rot:\n if i + 1 < m and grid[i + 1][j] == 1:\n grid[i + 1][j] = 2\n fresh_num -= 1\n next_rot.append([i + 1, j])\n if j + 1 < n and grid[i][j + 1] == 1:\n grid[i][j + 1] = 2\n fresh_num -= 1\n next_rot.append([i, j + 1])\n if i > 0 and grid[i - 1][j] == 1:\n grid[i - 1][j] = 2\n fresh_num -= 1\n next_rot.append([i - 1, j])\n if j > 0 and grid[i][j - 1] == 1:\n grid[i][j - 1] = 2\n fresh_num -= 1\n next_rot.append([i, j - 1])\n\n time += 1\n print(time, grid)\n\n # else, return -1\n return -1\n\n\nclass Solution:\n \"\"\"ans, bfs\"\"\"\n\n def orangesRotting(self, grid: List[List[int]]) -> int:\n queue = deque()\n\n # Step 1). 
build the initial set of rotten oranges\n fresh_oranges = 0\n ROWS, COLS = len(grid), len(grid[0])\n for r in range(ROWS):\n for c in range(COLS):\n if grid[r][c] == 2:\n queue.append((r, c))\n elif grid[r][c] == 1:\n fresh_oranges += 1\n\n # Mark the round / level, _i.e_ the ticker of timestamp\n queue.append((-1, -1))\n\n # Step 2). start the rotting process via BFS\n minutes_elapsed = -1\n directions = [(-1, 0), (0, 1), (1, 0), (0, -1)]\n while queue:\n row, col = queue.popleft()\n if row == -1:\n # We finish one round of processing\n minutes_elapsed += 1\n if queue: # to avoid the endless loop\n queue.append((-1, -1))\n else:\n # this is a rotten orange\n # then it would contaminate its neighbors\n for d in directions:\n neighbor_row, neighbor_col = row + d[0], col + d[1]\n if ROWS > neighbor_row >= 0 and COLS > neighbor_col >= 0:\n if grid[neighbor_row][neighbor_col] == 1:\n # this orange would be contaminated\n grid[neighbor_row][neighbor_col] = 2\n fresh_oranges -= 1\n # this orange would then contaminate other oranges\n queue.append((neighbor_row, neighbor_col))\n\n # return elapsed minutes if no fresh orange left\n return minutes_elapsed if fresh_oranges == 0 else -1\n","repo_name":"RunkunXie/LeetCode","sub_path":"Problems/src/994. Rotting Oranges.py","file_name":"994. Rotting Oranges.py","file_ext":"py","file_size_in_byte":3485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"72960764391","text":"import torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nfrom torch.utils.data import DataLoader\nfrom torchvision.datasets import MNIST\nfrom torchvision.transforms import Compose, ToTensor, Normalize\n\nfrom tqdm import tqdm\nfrom ignite.engine import Engine, create_supervised_evaluator, create_supervised_trainer, Events\nfrom ignite.metrics import Accuracy, Loss\n\n\nimport numpy as np\n\n\ndef get_data_loaders(train_batch_size, val_batch_size):\n data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])\n\n train_loader = DataLoader(MNIST(download=True, root=\".\", transform=data_transform, train=True),\n batch_size=train_batch_size, shuffle=True)\n\n val_loader = DataLoader(MNIST(download=False, root=\".\", transform=data_transform, train=False),\n batch_size=val_batch_size, shuffle=False)\n return train_loader, val_loader\n\n\nclass ConvNet(nn.Module):\n def __init__(self):\n super(ConvNet, self).__init__()\n self.conv1 = nn.Conv2d(1, 32, kernel_size=3, padding=1)\n self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)\n self.fc1 = nn.Linear(7*7*64, 10)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n x = F.max_pool2d(x, 2)\n x = F.relu(self.conv2(x))\n x = F.max_pool2d(x, 2)\n x = x.view(-1, 7*7*64)\n x = self.fc1(x)\n\n return F.log_softmax(x, dim=-1)\n\n\nbatch_size = 64\nlr = 1e-3\ntrain_loader, val_loader = get_data_loaders(batch_size, batch_size)\n\nmodel = ConvNet()\ndevice = 'cuda'\noptimizer = optim.Adam(model.parameters(), lr=lr)\ntrainer = create_supervised_trainer(model, optimizer, F.nll_loss, device=device)\nevaluator = create_supervised_evaluator(\n model=model,\n metrics={'accuracy': Accuracy(),\n 'nll': Loss(F.nll_loss)},\n device=device\n)\n\ndesc = \"ITERATION - loss: {:.2f}\"\npbar = tqdm(\n initial=0, leave=False, total=len(train_loader),\n desc=desc.format(0)\n)\n\n\n@trainer.on(Events.ITERATION_COMPLETED)\ndef log_training_loss(engine):\n iter = (engine.state.iteration - 1) % len(train_loader) + 1\n pbar.desc = desc.format(engine.state.output)\n 
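# engine.state.output holds this iteration's loss; show it, then advance the bar\n    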
pbar.update(1)\n\n\n@trainer.on(Events.EPOCH_COMPLETED)\ndef log_train_metrics(engine):\n pbar.refresh()\n evaluator.run(train_loader)\n metrics = evaluator.state.metrics\n avg_accuracy = metrics['accuracy']\n avg_nll = metrics['nll']\n tqdm.write(\n \"Training Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}\"\n .format(engine.state.epoch, avg_accuracy, avg_nll))\n\n\n@trainer.on(Events.EPOCH_COMPLETED)\ndef log_val_metrics(engine):\n evaluator.run(val_loader)\n metrics = evaluator.state.metrics\n tqdm.write(\n \"Validation Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}\"\n .format(engine.state.epoch, metrics['accuracy'], metrics['nll']))\n\n pbar.n = pbar.last_print_n = 0\n\n\ntrainer.run(train_loader, max_epochs=10)\npbar.close()\n","repo_name":"Daiver/Depth-regression","sub_path":"ignite_examples/mnits_ignite.py","file_name":"mnits_ignite.py","file_ext":"py","file_size_in_byte":2964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"19447432124","text":"from pathlib import Path \nimport streamlit as st\n\n\nwavFiles = []\nwavFiles.append('77d369396a6a567cc0f3059670d1f7ab.wav')\nwavFiles.append('sample4.wav')\nwavFiles.append('sample3.wav')\nwavFiles.append('sample5.wav')\nwavFiles.append('9d76cc4b9340b217152540be8afedbb7.wav')\nwavFiles.append('0fe4fd7f8b394c863269bc87f81b9e7b.wav')\nwavFiles.append('affed74c3b93602385f6d52ba9076dee.wav')\nwavFiles.append('cadc501bf659b6c487aa6f692552e317.wav')\nwavFiles.append('sample9.wav')\nwavFiles.append('002db9b7bcb5f07f783de39d04cdd479.wav')\n\nGT = []\nGT.append('the vehicle does start and drive')\nGT.append('would you like cream and sugar')\nGT.append('okay and what\\'s the code')\nGT.append('uh no not just regular')\nGT.append('people love me')\nGT.append('the covid pandemic it reiterated to me')\nGT.append('today only about eleven percent of federal it systems are running in the cloud')\nGT.append('even nuclear weapons are only about one percent efficient chemical weapons are a tiny fraction of a percent in terms of their efficiency')\nGT.append('i know what that is i just don\\'t know what items you wanted')\nGT.append('he said jim your company')\n\nPT = []\nPT.append(':red[it\\'ll be a good bus store] and drive')\nPT.append('the :red[gds why you make sure]')\nPT.append('okay and :red[wolstter] code')\nPT.append('uh no no :red[that\\'s very go]')\nPT.append(':red[ple birthday]')\nPT.append('the :red[coid] pandemic it reiterated to me')\nPT.append(':red[to day] only about eleven :red[per cent] of federal it :red[t] systems are running in the cloud')\nPT.append('even nuclear weapons are only about :red[oneer] efficient chemical weapons are a tiny fraction of a :red[perc] in terms of their efficiency')\nPT.append('i know what that is i just don\\'t know what :red[ei dont] you :red[want it]')\nPT.append(':red[it\\'s a] jam your company')\n\nFT = []\nFT.append('the vehicle does start and drive')\nFT.append('would you like cream and sugar')\nFT.append('okay and what\\'s the code')\nFT.append(':red[(uh)] no not just regular')\nFT.append('people love me')\nFT.append('the covid pandemic it reiterated to me')\nFT.append('today only about eleven percent of federal it systems are running in the cloud')\nFT.append('even nuclear weapons are only about one percent efficient chemical weapons are a tiny fraction of a percent in terms of their efficiency')\nFT.append('i know what that is i just don\\'t know what items you wanted')\nFT.append('he said jim your 
company')\n\n\n\nst.set_page_config(layout='wide')\n\nlogo_image = \"logo.png\"\nst.image(logo_image, width=200)\n\nst.markdown(\"

<h1 style='text-align: center;'>Demo of fine-tuning Nvidia ASR model</h1>

\", unsafe_allow_html=True)\ndef read_markdown_file(markdown_file):\n return Path(markdown_file).read_text()\nintro_markdown = read_markdown_file(\"introduction.md\")\nst.markdown(intro_markdown, unsafe_allow_html=True)\n\nst.markdown('---')\ncol1, col2, col3, col4 = st.columns(4)\n\n\ncol1.markdown(\"
<h3 style='text-align: center;'>Audio</h3>
\", unsafe_allow_html=True)\ncol2.markdown(\"
<h3 style='text-align: center;'>Ground Truth</h3>
\", unsafe_allow_html=True)\ncol3.markdown(\"
<h3 style='text-align: center;'>Nvidia Pre-trained Model\\n\\n (WER=7.8%)</h3>
\", unsafe_allow_html=True)\ncol4.markdown(\"
<h3 style='text-align: center;'>Fine-tuned with Appen Dataset \\n\\n (WER=6.5%)</h3>
\", unsafe_allow_html=True)\n\n\nfor ind in range(len(wavFiles)):\n col1, col2, col3, col4 = st.columns(4)\n col1.audio('samples/'+ wavFiles[ind], format='audio/wav')\n col2.markdown(GT[ind], unsafe_allow_html=True)\n col3.markdown(PT[ind], unsafe_allow_html=True)\n col4.markdown(FT[ind], unsafe_allow_html=True)\n st.markdown(\"---\")\n\n","repo_name":"hxing093020/Nvidia_asr_demo","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":3626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"24168858437","text":"a = list(map(str, input()))\nfor i in range(len(a)):\n if 'A' <= a[i] <= 'Z':\n a[i] = a[i].lower()\ncount = {}\nfor i in a:\n if 'a' <= i <= 'z':\n if i not in count:\n count[i] = 1\n else:\n count[i] += 1\ncount_st = sorted(count.items(), key=lambda a: (-a[1], a[0]))\nprint(count_st[0][0], count_st[0][1])\n","repo_name":"myf-algorithm/Leetcode","sub_path":"PAT_B/1042.字符统计.py","file_name":"1042.字符统计.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"21299825306","text":"import random\nrandom.seed(1)\na, b = 1, 2\nwhile a : # while a is true\n print(a)\n a, b = b, a + b\n if a > 10:\n a = 0 # set a to be false\n#\n\nbool_test = False\ncounter = 0\nwhile bool_test:\n print(\"True!\")\n counter += 1\n if counter == 4:\n bool_test = False\n\n#\n\n# write a while loop where the loop prints the value of i 6 times and then prints it is finished after the iterations\n# after every iteration it increases the value of i\n\ni = 1\n\nwhile i < 7:\n print(i)\n i += 1 # counter = counter + 1\n\nprint(\"it is finished\")\n\n\nj = 0\nwhile 1 == 1:\n print(j)\n j += 1\n if j >= 10:\n print(\"Breaking\")\n break\n\nprint(\"It is finished!\")\nk = 1\nwhile k < 100:\n print(k)\n k += 1\n if k == 50:\n break\n\n\nname =\"stella\"\nif name == \"stella\":\n print(\"y\")\n\nelse:\n print(\"n\")\n\nalph = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']\nallp = ['a','s','i','o','f','z','l','d']\nrandallp = random.choice(allp)\nfor item in allp:\n if item == randallp:\n print(item)\n break\n\n# modularity\n\n","repo_name":"isadesina/my_python_codes","sub_path":"fibbo.py","file_name":"fibbo.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"41441133060","text":"#/usr/bin/env python\n# -*- coding: Utf8 -*-\n\nimport event\n\nclass Plugin:\n\n def __init__(self, client):\n self.client = client\n\n @event.privmsg()\n def help_request(self, e):\n target = e.values['target']\n msg = e.values['msg'][1:]\n nick = e.values['nick']\n if nick == self.client.nick_name:\n return\n if target == self.client.nick_name:\n target = nick\n if msg in ('!help', '!man', '!usage'):\n self.help(target)\n elif msg[0:4] == '!man':\n plugins = msg[4:].strip().split(' ')\n for p in plugins:\n if p == '' or not (p in self.client.plugins):\n continue\n if p == 'help':\n continue\n if self.client.plugins[p].__class__.__dict__.has_key('help'):\n self.client.plugins[p].help(target)\n else:\n message = p + \" plugin has no manual yet :(\"\n self.client.priv_msg(target, message)\n\n\n def help(self, target):\n message = \"irkotr0id (https://github.com/nisay759/irkotr0id)\"\n self.client.priv_msg(target, message)\n plugins = []\n for p in self.client.plugins:\n plugins.append(p)\n 
self.client.priv_msg(target, 'Loaded plugins: ' + ', '.join(plugins))\n message = \"\\'!man plugin\\' to get usage of specific plugin\"\n self.client.priv_msg(target, message)\n","repo_name":"nisay759/irkotr0id","sub_path":"src/plugins/help.py","file_name":"help.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"71"} +{"seq_id":"23979043880","text":"from contextlib import contextmanager\nfrom logging import getLogger, DEBUG, INFO, FileHandler, Formatter, Logger, StreamHandler\nimport time\nfrom typing import Optional\n\n\ndef get_logger(filepath: str, name: Optional[str] = None) -> Logger:\n \"\"\"Get logger having stream and file handler.\n\n Parameters\n ----------\n filepath : str\n Where log file is to be written.\n name : Optional[str], optional\n Logger name, by default None.\n\n Returns\n -------\n Logger: Logger\n Logger object.\n \"\"\"\n logger = getLogger(name or __name__)\n logger.setLevel(DEBUG)\n for h in logger.handlers:\n logger.removeHandler(h)\n file_handler = FileHandler(filepath)\n file_handler.setLevel(DEBUG)\n file_handler.setFormatter(Formatter('\"%(asctime)s\",\"%(name)s\",\"%(levelname)s\",\"%(message)s\"'))\n stream_handler = StreamHandler()\n stream_handler.setLevel(INFO)\n stream_handler.setFormatter(Formatter('%(asctime)s %(name)s %(levelname)s %(message)s'))\n logger.addHandler(file_handler)\n logger.addHandler(stream_handler)\n return logger\n\n\n@contextmanager\ndef timer(name: str, logger: Optional[Logger] = None, level: int = DEBUG):\n '''\n Refference\n ----------\n https://amalog.hateblo.jp/entry/kaggle-snippets\n '''\n print_ = print if logger is None else lambda msg: logger.log(level, msg)\n t0 = time.time()\n print_(f'{name}: start')\n yield\n print_(f'{name}: done in {time.time() - t0:.3f} s')","repo_name":"Quvotha/nishika-bokete-classification","sub_path":"scripts/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"37915164536","text":"# 给出两个 非空 的链表用来表示两个非负的整数。其中,它们各自的位数是按照 逆序 的方式存储的,并且它们的每个节点只能存储 一位 数字。\n#\n# 如果,我们将这两个数相加起来,则会返回一个新的链表来表示它们的和。\n#\n# 您可以假设除了数字 0 之外,这两个数都不会以 0 开头。\n#\n# 示例:\n#\n# 输入:(2 -> 4 -> 3) + (5 -> 6 -> 4)\n# 输出:7 -> 0 -> 8\n# 原因:342 + 465 = 807\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\ndef getList(l):\n def getNext(lLeft):\n if lLeft:\n currentNode = ListNode(lLeft[0])\n currentNode.next = getNext(lLeft[1:])\n return currentNode\n\n return getNext(l)\n\n\ndef showList(l: ListNode):\n while l:\n print(l.val)\n l = l.next\n\n\nclass Solution:\n def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:\n # l3 = l1\n # carry = 0\n # while l1 or l2:\n # x1 = l1.val if l1 else 0\n # x2 = l2.val if l2 else 0\n # sum = x1 + x2 + carry\n # carry = sum // 10\n # l1.val = sum - carry * 10\n # if not l1.next and l2 and l2.next:\n # l1.next = l2.next\n # l2.next = None\n # if l1:\n # if l1.next is None and carry:\n # l1.next = ListNode(carry)\n # break\n # l1 = l1.next\n # if l2:\n # l2 = l2.next\n # return l3\n def getNext(l1, l2, carry):\n x1 = l1.val if l1 else 0\n x2 = l2.val if l2 else 0\n sum = x1 + x2 + carry\n if sum == 0 and not l1 and not l2:\n return\n carry = sum // 10\n tempNode = ListNode(sum - carry * 10)\n if l1 or l2 or carry:\n tempNode.next = getNext(l1.next if l1 else None, l2.next if l2 else None, carry)\n return 
tempNode\n\n return getNext(l1, l2, 0)\n\n\ns = Solution()\nl1 = getList([2, 4, 3])\nl2 = getList([5, 6, 4])\nl3 = s.addTwoNumbers(l1, l2)\nshowList(l3)\n\nl1 = getList([2, 4, 5, 9])\nl2 = getList([5, 6, 4])\nl3 = s.addTwoNumbers(l1, l2)\nshowList(l3)\n\nl1 = getList([5, 6, 4])\nl2 = getList([2, 4, 5, 9])\nl3 = s.addTwoNumbers(l1, l2)\nshowList(l3)\n\nl1 = getList([5, 6, 4])\nl2 = getList([])\nl3 = s.addTwoNumbers(l1, l2)\nshowList(l3)\n\nl1 = getList([])\nl2 = getList([5, 6, 4])\nl3 = s.addTwoNumbers(l1, l2)\nshowList(l3)\n\nl1 = getList([0])\nl2 = getList([0])\nl3 = s.addTwoNumbers(l1, l2)\nshowList(l3)\n\nl1 = getList([0])\nl2 = getList([])\nl3 = s.addTwoNumbers(l1, l2)\nshowList(l3)\n","repo_name":"vzpd/myBrushRecord","sub_path":"exercise/bd_两数相加.py","file_name":"bd_两数相加.py","file_ext":"py","file_size_in_byte":2712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"36221717983","text":"'''\nPair Sums (variation of 2 sum)\nGiven a list of n integers arr[0..(n-1)], determine the number of different pairs of elements within it which sum to k.\nIf an integer appears in the list multiple times, each copy is considered to be different; that is, two pairs are considered different if one pair includes at least one array index which the other doesn't, even if they include the same values.\nSignature\nint numberOfWays(int[] arr, int k)\nInput\nn is in the range [1, 100,000].\nEach value arr[i] is in the range [1, 1,000,000,000].\nk is in the range [1, 1,000,000,000].\nOutput\nReturn the number of different pairs of elements which sum to k.\nExample 1\nn = 5\nk = 6\narr = [1, 2, 3, 4, 3]\noutput = 2\nThe valid pairs are 2+4 and 3+3.\nExample 2\nn = 5\nk = 6\narr = [1, 5, 3, 3, 3]\noutput = 4\nThere's one valid pair 1+5, and three different valid pairs 3+3 (the 3rd and 4th elements, 3rd and 5th elements, and 4th and 5th elements).\n\n'''\n\n#!/bin/python3\n\nfrom collections import Counter\n\n# Complete the sockMerchant function below.\ndef numberOfWays(arr, k):\n\n pairs = []\n\n checker_dict = {}\n\n # put the contents of the array into a dict with unique keys\n # for num in arr:\n # # add the value and its counterpart\n \n \n # loop through the dict checking if dict[arr[i]] == k - arr[j]\n\n for number in arr:\n\n checker_dict[number] = k - number\n\n if checker_dict[number] in arr:\n pairs.append([number, checker_dict[number]])\n\n\n print(pairs)\n \n return len(pairs) \n # append to pairs list\n\nif __name__ == '__main__':\n\n ar = (1, 2, 4, 6) # 3 pairs\n\n result = numberOfWays(ar, 10)\n\n print ('Number of pairs {} '.format(result))","repo_name":"denemorhun/Python-Problems","sub_path":"Hackerrank/Dictionaries/pairs_of_sum.py","file_name":"pairs_of_sum.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"36511051693","text":"#preprocessing captions\nimport re\nfrom nltk.corpus import stopwords\nimport Datacollection\ndef cleantext(rev):\n rev = re.sub(r'[^a-zA-Z]',' ',rev)\n rev = rev.lower()\n rev = rev.split()\n rev = [word for word in rev if len(word)>1]\n rev = [word for word in rev if word.isalpha()]\n rev = ' '.join(rev)\n rev = 'startseq ' + rev + ' endseq'\n return rev\nfor i in range(len(result1)):\n result2[i][1] = cleantext(result2[i][1])\nfrom keras.preprocessing.text import Tokenizer\nmax_features = 10000\ntokenizer = Tokenizer(num_words=max_features)\ntokenizer.fit_on_texts(result2[:,1])\nlist_tokenized_train = 
tokenizer.texts_to_sequences(result2[:,1])\nnum_words = len(tokenizer.word_index) + 1\n\nprint(num_words)\n","repo_name":"sanchit2843/ArtificialEyes","sub_path":"temp/Textpreprocessing.py","file_name":"Textpreprocessing.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"71"} +{"seq_id":"13530349513","text":"import os\nimport logging\nimport sys\nimport json\nimport threading\nimport time\nimport tempfile\nimport unittest\n\nfrom io import StringIO, BytesIO\nfrom contextlib import contextmanager\n\nimport psycopg2\n\nimport tpq\n\nfrom tpq.utils import (\n get_db_env, transaction, savepoint\n)\nfrom tpq.__main__ import main\n\n\n# Useful to debug threading issues.\nlogging.basicConfig(\n stream=sys.stderr,\n # Change level to DEBUG here if you need to.\n level=logging.CRITICAL,\n format='%(thread)d: %(message)s'\n)\nLOGGER = logging.getLogger(__name__)\n\n\n@contextmanager\ndef setenv(add, remove):\n _environ = dict(os.environ)\n os.environ.update(add)\n for k in remove:\n os.environ.pop(k, None)\n try:\n yield\n finally:\n os.environ.clear()\n os.environ.update(_environ)\n\n\nclass TestError(Exception):\n \"\"\"\n Error used for some tests.\n \"\"\"\n\n pass\n\n\nclass ThreadedConsumer(threading.Thread):\n \"\"\"\n Helper to get() items from queue on a thread.\n \"\"\"\n\n def __init__(self, queue, wait=-1, work=0, exit=False, once=False):\n self.queue = queue\n self.wait = wait\n self.work = work\n self.exit = exit\n self.once = once\n self.items = []\n self.errors = []\n self.loops = 0\n self.stopping = threading.Event()\n threading.Thread.__init__(self)\n self.start()\n\n def run(self):\n LOGGER.debug('Starting')\n while not self.stopping.is_set():\n LOGGER.debug('Looping')\n # TODO: make get() a context manager.\n try:\n LOGGER.debug('get()ing')\n with self.queue.get(wait=self.wait) as item:\n LOGGER.debug('got: %s', item)\n self.items.append(item)\n LOGGER.debug('Success')\n except Exception as e:\n if isinstance(e, tpq.QueueEmpty):\n LOGGER.debug('Empty')\n self.errors.append(e)\n if self.exit:\n LOGGER.debug('Exiting')\n self.stopping.set()\n continue\n else:\n # Interruptable sleep. 
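(stopping.wait() returns early once stop() sets the event.) 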
Simulates working on a task with txn\n # open.\n LOGGER.debug('Sleeping for %s', self.work)\n self.stopping.wait(self.work)\n LOGGER.debug('Awoke')\n LOGGER.debug('Commited')\n if self.once:\n LOGGER.debug('Exiting')\n self.stopping.set()\n self.loops += 1\n LOGGER.debug('Stopping')\n\n def stop(self):\n LOGGER.debug('Signaling')\n self.stopping.set()\n LOGGER.debug('Joining')\n self.join()\n\n\nclass ThreadedProducer(threading.Thread):\n \"\"\"\n Helper to put items in queue on a thread.\n \"\"\"\n\n def __init__(self, queue, items, work=0):\n self.queue = queue\n self.work = work\n self.items = items[:]\n self.stopping = threading.Event()\n threading.Thread.__init__(self)\n self.start()\n\n def run(self):\n LOGGER.debug('Starting')\n while self.items and not self.stopping.is_set():\n LOGGER.debug('Looping')\n item = self.items.pop(0)\n LOGGER.debug('put: %s', item)\n self.queue.put(item)\n self.stopping.wait(self.work)\n LOGGER.debug('Stopping')\n\n def stop(self):\n LOGGER.debug('Signaling')\n self.stopping.set()\n LOGGER.debug('Joining')\n self.join()\n\n\nclass Tests(object):\n \"\"\"\n Test normal operations.\n \"\"\"\n\n def test_empty(self):\n \"\"\"\n Ensure empty queue behavior is correct.\n\n Queue should raise QueueEmpty when empty.\n \"\"\"\n with self.assertRaises(tpq.QueueEmpty):\n # Note that since this is a context manager, we MUST use with...\n with self.queue.get() as item:\n print(item)\n\n\nclass ThreadedTests(object):\n \"\"\"\n Test thread interactions.\n \"\"\"\n\n def test_skip(self):\n \"\"\"\n Ensure concurrent consumers do not collide.\n\n This test ensures that if one consumer get()s a message inside a\n transaction that another consumer ignores that message (no\n double-dipping).\n \"\"\"\n self.queue.put({'test':'test'})\n c1 = ThreadedConsumer(self.queue, work=10)\n time.sleep(0.1)\n c2 = ThreadedConsumer(self.queue)\n time.sleep(0.1)\n c1.stop()\n c2.stop()\n # Make sure consumer one got the message.\n self.assertEqual(1, len(c1.items))\n # Make sure consumer two did not.\n self.assertEqual(0, len(c2.items))\n # Make sure consumer two tried at least once.\n self.assertTrue(c2.loops > 0)\n\n def test_fair(self):\n \"\"\"\n Ensure concurrent consumers can both get().\n\n This test ensures that if there are two mesages in the queue, that each\n consumer can get one. 
In other words they don't compete.\n \"\"\"\n self.queue.put({'test': 'test'})\n self.queue.put({'test': 'test'})\n c1 = ThreadedConsumer(self.queue, once=True, work=10)\n c2 = ThreadedConsumer(self.queue, once=True, work=10)\n time.sleep(0.1)\n c1.stop()\n c2.stop()\n # Make sure consumer one got a message.\n self.assertEqual(1, len(c1.items))\n # Make sure consumer two got a message.\n self.assertEqual(1, len(c2.items))\n\n def test_order(self):\n \"\"\"\n Ensure FIFO.\n\n Compare dequeued items to queued items and assert equality (same order).\n \"\"\"\n put, got = [], []\n for i in range(10):\n put.append({'test': i})\n p = ThreadedProducer(self.queue, put)\n p.join()\n c = ThreadedConsumer(self.queue, exit=True)\n c.join()\n self.assertEqual(len(put), len(c.items))\n self.assertEqual(put, c.items)\n\n def test_len(self):\n \"\"\"Ensure len() works for queue.\"\"\"\n self.assertEqual(0, len(self.queue))\n ThreadedProducer(self.queue, [{'a': 'b'} for i in range(10)]).join()\n self.assertEqual(10, len(self.queue))\n c = ThreadedConsumer(self.queue, exit=True)\n c.join()\n self.assertEqual(10, len(c.items))\n self.assertEqual(0, len(self.queue))\n\n def test_wait_forever(self):\n \"\"\"Ensure waiting forever works.\n\n Whether pooled or not, or threaded or not, waiting without a timeout\n should always work.\n \"\"\"\n c = ThreadedConsumer(self.queue, wait=0, once=True)\n # Make it wait...\n time.sleep(0.1)\n self.assertTrue(c.is_alive())\n self.queue.put({'test': 'test'})\n c.stop()\n self.assertEqual(1, len(c.items))\n\n\nclass PooledTestCase(Tests, ThreadedTests, unittest.TestCase):\n \"\"\"\n Test queue with connection pool.\n \"\"\"\n\n def setUp(self):\n self.queue = tpq.Queue('test')\n self.queue.create()\n self.queue.clear()\n\n def tearDown(self):\n self.queue.clear()\n self.queue.close()\n\n def test_wait_timeout_interrupted(self):\n \"\"\"We should be able to wait just fine.\"\"\"\n c = ThreadedConsumer(self.queue, wait=10)\n # Make it wait...\n time.sleep(0.1)\n self.queue.put({'test': 'test'})\n c.stop()\n self.assertEqual(1, len(c.items))\n self.assertIsInstance(c.items[0], dict)\n\n def test_wait_timeout_expires(self):\n start = time.time()\n c = ThreadedConsumer(self.queue, wait=1, once=True)\n c.stop()\n self.assertLess(1, time.time() - start)\n self.assertEqual(1, len(c.errors))\n self.assertIsInstance(c.errors[0], tpq.QueueEmpty)\n\n\nclass SharedTestCase(Tests, ThreadedTests, unittest.TestCase):\n \"\"\"\n Test queue with shared connection.\n \"\"\"\n\n def setUp(self):\n host, dbname, user, password = get_db_env()\n self.conn = psycopg2.connect(host=host, dbname=dbname, user=user,\n password=password)\n self.queue = tpq.Queue('test', conn=self.conn)\n self.queue.clear()\n\n def tearDown(self):\n self.queue.clear()\n self.conn.close()\n\n def test_wait_timeout(self):\n \"\"\"This one should result in a warning.\"\"\"\n c = ThreadedConsumer(self.queue, wait=10, exit=True)\n # Make it wait...\n time.sleep(0.1)\n self.queue.put({'test': 'test'})\n c.stop()\n self.assertEqual(1, len(c.items))\n\n# TODO: we need to test a shared connection, ensuring an open transaction is\n# not committed under put() or get() with or without wait.\n\n\nclass ShortcutTestCase(unittest.TestCase):\n \"\"\"\n Test module-level shortcut functions.\n \"\"\"\n\n def setUp(self):\n self.queue = tpq.Queue('test')\n self.queue.create()\n\n def tearDown(self):\n self.queue.clear()\n self.queue.close()\n\n def test_get(self):\n item_put = {'test': 'test'}\n self.queue.put(item_put)\n item_get = 
tpq.get('test')\n self.assertEqual(item_put, item_get)\n\n def test_put(self):\n item_put = {'test': 'test'}\n tpq.put('test', item_put)\n with self.queue.get('test') as item_get:\n pass\n self.assertEqual(item_put, item_get)\n\n def test_create(self):\n tpq.create('test')\n\n def test_clear(self):\n tpq.create('test')\n tpq.clear('test')\n\n\nclass CommandTestCase(unittest.TestCase):\n \"\"\"\n Test Command Line Interface.\n \"\"\"\n\n def setUp(self):\n self.queue = tpq.Queue('test')\n self.queue.create()\n\n def tearDown(self):\n self.queue.clear()\n self.queue.close()\n\n def test_main_get(self):\n \"\"\"Ensure we can get from a queue using CLI.\"\"\"\n item_put, stdout = {'test': 'test'}, StringIO()\n self.queue.put(item_put)\n\n try:\n main({\n '--debug': False,\n '': 'test',\n 'consume': True,\n 'produce': False,\n '--wait': -1,\n }, stdout=stdout)\n except SystemExit as e:\n self.assertEqual(0, e.args[0])\n else:\n self.fail('Did not raise SystemExit')\n\n self.assertEqual(item_put, json.loads(stdout.getvalue()))\n\n def test_main_put_stdin(self):\n \"\"\"Ensure we can put to a queue from stdin using CLI.\"\"\"\n item_put = {'test': 'test'}\n\n try:\n main({\n '--debug': False,\n '': 'test',\n 'consume': False,\n 'produce': True,\n '--file': '-',\n '--create': False,\n }, stdin=StringIO(json.dumps(item_put)))\n except SystemExit as e:\n self.assertEqual(0, e.args[0])\n else:\n self.fail('Did not raise SystemExit')\n\n with self.queue.get() as item_get:\n self.assertEqual(item_put, item_get)\n\n def test_main_put_file(self):\n \"\"\"Ensure we can put to a queue from a file using CLI.\"\"\"\n item_put = {'test': 'test'}\n\n with tempfile.NamedTemporaryFile() as t:\n t.write(json.dumps(item_put).encode('utf-8'))\n t.flush()\n\n try:\n main({\n '--debug': False,\n '': 'test',\n 'consume': False,\n 'produce': True,\n '--file': t.name,\n '--create': False,\n }, stdin=StringIO(json.dumps(item_put)))\n except SystemExit as e:\n self.assertEqual(0, e.args[0])\n else:\n self.fail('Did not raise SystemExit')\n\n with self.queue.get() as item_get:\n self.assertEqual(item_put, item_get)\n\n def test_main_put_file_json_error(self):\n \"\"\"Ensure put fails with invalid json.\"\"\"\n try:\n main({\n '--debug': False,\n '': 'test',\n 'consume': False,\n 'produce': True,\n '--file': '/dev/null',\n '--create': False,\n })\n except SystemExit as e:\n self.assertEqual(1, e.args[0])\n else:\n self.fail('Did not raise SystemExit')\n\n def test_main_put_file_decode_error(self):\n \"\"\"Ensure put fails with invalid data.\"\"\"\n with open('/dev/random', 'rb') as r:\n try:\n main({\n '--debug': False,\n '': 'test',\n 'consume': False,\n 'produce': True,\n '--file': '-',\n '--create': False,\n }, stdin=BytesIO(r.read(10)))\n except SystemExit as e:\n self.assertEqual(1, e.args[0])\n else:\n self.fail('Did not raise SystemExit')\n\n def test_main_get_fail_emptyqueue(self):\n \"\"\"Ensure get fails when queue is empty.\"\"\"\n try:\n main({\n '--debug': False,\n '': 'test',\n 'consume': True,\n 'produce': False,\n '--create': False,\n '--wait': -1,\n })\n except SystemExit as e:\n self.assertEqual(1, e.args[0])\n else:\n self.fail('Did not raise SystemExit')\n\n def test_main_get_fail_missing(self):\n \"\"\"Ensure get fails when queue is missing.\"\"\"\n try:\n main({\n '--debug': False,\n '': 'bubba',\n 'consume': True,\n 'produce': False,\n '--wait': False,\n '--create': False,\n })\n except SystemExit as e:\n self.assertEqual(1, e.args[0])\n else:\n self.fail('Did not raise SystemExit')\n\n def 
test_main_put_fail_missing(self):\n \"\"\"Ensure put fails when queue is missing.\"\"\"\n try:\n main({\n '--debug': False,\n '': 'bubba',\n 'consume': False,\n 'produce': True,\n '--file': '-',\n '--create': False,\n }, stdin=StringIO('{\"test\": \"test\"}'))\n except SystemExit as e:\n self.assertEqual(1, e.args[0])\n else:\n self.fail('Did not raise SystemExit')\n\n\nclass TransactionTestCase(unittest.TestCase):\n def setUp(self):\n host, dbname, user, password = get_db_env()\n self.conn = psycopg2.connect(host=host, dbname=dbname, user=user,\n password=password)\n with self.conn.cursor() as cursor:\n cursor.execute('CREATE TABLE '\n 'IF NOT EXISTS test_t ('\n 'id serial primary key, '\n 'v varchar(1) not null'\n ')')\n self.conn.commit()\n\n def tearDown(self):\n with self.conn.cursor() as cursor:\n cursor.execute('DROP TABLE test_t')\n self.conn.commit()\n\n def test_transaction(self):\n \"\"\"If transaction works, row should disappear after leaving context.\"\"\"\n with self.assertRaises(TestError):\n with transaction(self.conn) as cursor:\n cursor.execute('INSERT INTO test_t (v) '\n 'VALUES (\\'a\\')')\n raise TestError()\n\n with self.conn.cursor() as cursor:\n cursor.execute('SELECT COUNT(*) '\n 'FROM test_t '\n 'WHERE v = \\'a\\'')\n self.assertEqual(0, cursor.fetchone()[0])\n\n def test_savepoint(self):\n \"\"\"If savepoint works, row should disappear after leaving context.\"\"\"\n with self.assertRaises(TestError):\n with savepoint(self.conn) as cursor:\n cursor.execute('INSERT INTO test_t (v) '\n 'VALUES (\\'a\\')')\n raise TestError()\n\n with self.conn.cursor() as cursor:\n cursor.execute('SELECT COUNT(*) '\n 'FROM test_t '\n 'WHERE v = \\'a\\'')\n self.assertEqual(0, cursor.fetchone()[0])\n\n\nclass DBConfigTestCase(unittest.TestCase):\n def test_url(self):\n with setenv({'TPQ_URL': 'postgresql://foo:bar@baz/qux'}):\n self.assertEqual(('baz', 'qux', 'foo', 'bar'), get_db_env())\n\n\n def test_url(self):\n with setenv({\n 'TPQ_HOST': 'baz',\n 'TPQ_DB': 'qux',\n 'TPQ_USER': 'foo',\n 'TPQ_PASS': 'bar',\n }, ('TPQ_URL', )):\n self.assertEqual(('baz', 'qux', 'foo', 'bar'), get_db_env())\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"btimby/tpq","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":16771,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"71"} +{"seq_id":"28259815820","text":"\"\"\"Sink class\"\"\"\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Optional\n\nfrom wingline import plumbing\nfrom wingline.plumbing import base, hooks, queue\nfrom wingline.types import SENTINEL, PayloadIterator\n\nif TYPE_CHECKING:\n from wingline.plumbing import PayloadIteratorHook\n\n\nclass Sink(base.BasePlumbing):\n\n emoji = \"тн▓\"\n\n def __init__(\n self,\n parent: base.BasePlumbing,\n name: Optional[str] = None,\n ):\n self._name: str = name if name is not None else self.__class__.name\n # Initialize the thread and identification.\n super().__init__()\n self.name = self._name\n\n # Initialize connection to parent.\n self.parent = parent\n self.parent.subscribe(self)\n # Sinks are, by definition, no-ops so their hash\n # should be inherited from their parent.\n self.hash = parent.hash\n\n # Initialize queues.\n self.input_queue: queue.Queue = queue.Queue()\n self.iter_queue: queue.Queue = queue.Queue()\n\n # Initialize hooks.\n self.input_hooks: list[plumbing.PayloadIteratorHook] = []\n self.start_hooks: list[plumbing.PlumbingHook] = []\n self.end_hooks: 
list[plumbing.PlumbingHook] = []\n\n def run_payload_hook(\n self,\n hook: PayloadIteratorHook,\n pipe: base.BasePlumbing,\n payloads: PayloadIterator,\n ):\n return hook(pipe, (payload for payload in payloads if payload is not SENTINEL))\n\n def run(self):\n self.input_hooks.append(hooks.log_payloads(\"input\"))\n self.start_hooks.append(hooks.log_plumbing(\"Started.\"))\n self.end_hooks.append(hooks.log_plumbing(\"Finished.\"))\n\n # Start hooks are called when a pipe or tap\n # starts generating items\n for hook in self.start_hooks:\n hook(self)\n\n terminate = False\n while True:\n payload = self.input_queue.get(timeout=30)\n if payload is SENTINEL:\n terminate = True\n\n iter_payload = iter((payload,))\n # Input hooks can read the input iter\n # but should not modify it.\n for hook in self.input_hooks:\n iter_payload = self.run_payload_hook(hook, self, iter_payload)\n\n for payload in iter_payload:\n self.iter_queue.put(payload)\n self.input_queue.task_done()\n if terminate:\n self.iter_queue.put(SENTINEL)\n break\n # End hooks are called when a pipe or tap\n # finishes generating items\n for hook in self.end_hooks:\n hook(self)\n\n def __iter__(self):\n terminate = False\n while True:\n payload = self.iter_queue.get(timeout=5)\n self.iter_queue.task_done()\n if payload is SENTINEL:\n terminate = True\n else:\n yield payload\n if terminate:\n break\n","repo_name":"HappyEinara/wingline","sub_path":"wingline/plumbing/sink.py","file_name":"sink.py","file_ext":"py","file_size_in_byte":2964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"21068294321","text":"import pandas as pd\nimport json\nimport re\n\ndef get_label(p_label):\n\tp_label = p_label[0].upper() + p_label[1:]\n\tif len(re.findall('[A-Z][^A-Z]*', p_label)) > 0:\n\t\tp_label = ' '.join(re.findall('[A-Z][^A-Z]*', p_label))\n\tp_label = p_label.lower()\n\n\treturn p_label\n\ndef add_alignment_info():\n\talignment_path = './counqer_v1/static/data/alignments/'\n\tset_predicates_path = './counqer_v1/static/data/set_predicates_old/'\n\tset_predicates_path_new = './counqer_v1/static/data/set_predicates/'\n\t\n\tkb_names = ['dbpedia_mapped', 'dbpedia_raw', 'wikidata']\n\tfor kb in kb_names:\n\t\tset_predicates = {'predE': {'aligned': [], 'unaligned': []}, \n\t\t\t\t\t\t 'predC': {'aligned': [], 'unaligned': []},\n\t\t\t\t\t\t 'predE_inv': {'aligned': [], 'unaligned': []}}\n\t\talignments = pd.read_csv(alignment_path+kb+'.csv', delimiter=',')\n\t\tif kb == 'wikidata':\n\t\t\talign_predE = alignments['predE'].str.split('/').str[-1].unique().tolist()\n\t\t\talign_predE_inv = [x.split('_inv')[0] for x in align_predE if x.endswith('_inv')]\n\t\t\talign_predE = [x for x in align_predE if not x.endswith('_inv')]\n\t\t\talign_predC = alignments['predC'].str.split('/').str[-1].unique().tolist()\n\t\telse:\n\t\t\tif kb == 'dbpedia_raw':\n\t\t\t\tsplitat = 'http://dbpedia.org/property/'\n\t\t\t\tprefix = 'dbp: '\n\t\t\telse:\n\t\t\t\tsplitat = 'http://dbpedia.org/ontology/'\n\t\t\t\tprefix = 'dbo: '\n\t\t\talign_predE = alignments['predE'].str.split(splitat).str[-1].unique().tolist()\n\t\t\talign_predE_inv = [x.split('_inv')[0] for x in align_predE if x.endswith('_inv')]\n\t\t\talign_predE = [x for x in align_predE if not x.endswith('_inv')]\n\t\t\talign_predC = alignments['predC'].str.split(splitat).str[-1].unique().tolist()\n\t\t\t\n\t\t\talign_predE = [prefix+get_label(x) for x in align_predE]\n\t\t\talign_predE_inv = [prefix+get_label(x) for x in 
align_predE_inv]\n\t\t\talign_predC = [prefix+get_label(x) for x in align_predC]\n\t\t\n\t\tpredicates = json.loads(open(set_predicates_path+kb+'.json').read())\n\t\tfor item in predicates['predE']:\n\t\t\tif kb == 'wikidata':\n\t\t\t\tpred = item.split(\":\")[0]\n\t\t\t\tif pred in align_predE:\n\t\t\t\t\tset_predicates['predE']['aligned'].append(item)\n\t\t\t\telse:\n\t\t\t\t\tset_predicates['predE']['unaligned'].append(item)\n\t\t\telse:\n\t\t\t\tif item in align_predE:\n\t\t\t\t\tset_predicates['predE']['aligned'].append(item)\n\t\t\t\telse:\n\t\t\t\t\tset_predicates['predE']['unaligned'].append(item)\n\t\t\n\t\tfor item in predicates['predE_inv']:\n\t\t\tif kb == 'wikidata':\n\t\t\t\tpred = item.split(\":\")[0]\n\t\t\t\tif pred in align_predE_inv:\n\t\t\t\t\tset_predicates['predE_inv']['aligned'].append(item)\n\t\t\t\telse:\n\t\t\t\t\tset_predicates['predE_inv']['unaligned'].append(item)\n\t\t\telse:\n\t\t\t\tif item in align_predE_inv:\n\t\t\t\t\tset_predicates['predE_inv']['aligned'].append(item)\n\t\t\t\telse:\n\t\t\t\t\tset_predicates['predE_inv']['unaligned'].append(item)\n\t\t\n\t\tfor item in predicates['predC']:\n\t\t\tif kb == 'wikidata':\n\t\t\t\tpred = item.split(\":\")[0]\n\t\t\t\tif pred in align_predC:\n\t\t\t\t\tset_predicates['predC']['aligned'].append(item)\n\t\t\t\telse:\n\t\t\t\t\tset_predicates['predC']['unaligned'].append(item)\n\t\t\telse:\n\t\t\t\tif item in align_predC:\n\t\t\t\t\tset_predicates['predC']['aligned'].append(item)\n\t\t\t\telse:\n\t\t\t\t\tset_predicates['predC']['unaligned'].append(item)\n\t\t\n\t\twith open(set_predicates_path_new+kb+'.json', 'w') as fp:\n\t\t\tfp.write(json.dumps(set_predicates))\n\t\t\n\nif __name__ == '__main__':\n\tadd_alignment_info()","repo_name":"ghoshs/counqer","sub_path":"set_predicates_with_alignments.py","file_name":"set_predicates_with_alignments.py","file_ext":"py","file_size_in_byte":3256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"72503461991","text":"import pickle\nimport time\n\nimport tensorflow as tf\n\nfrom configs import settings\nfrom dataLoader.utils import showTest\n\n\nclass BaseNetwork(object):\n def __init__(self, data_loader):\n self.train_loss = tf.keras.metrics.Mean(name=\"train_loss\")\n self.train_accuracy = tf.keras.metrics.CategoricalAccuracy(name=\"train_accuracy\",)\n self.test_loss = tf.keras.metrics.Mean(name=\"test_loss\")\n self.test_accuracy = tf.keras.metrics.CategoricalAccuracy(name=\"test_accuracy\")\n\n self.dataLoader = data_loader()\n\n # self.lossFn = tf.keras.losses.MeanSquaredError()\n self.optimizer = tf.keras.optimizers.Adam()\n\n def forward(self, train_images, train_labels, test_images, test_labels):\n raise NotImplementedError(\"According support_set and query_set to generate predictions\")\n\n def test(self, title=None, fig_text=None, show=True):\n train_images, train_labels, test_images, test_labels = self.dataLoader.sample_dataset_from_test()\n predictions = tf.argmax(self.forward(train_images, train_labels, test_images, test_labels), axis=-1)\n if show:\n showTest(test_images, test_labels, predictions, title, fig_text)\n\n def labelEncode(self, labels):\n # return tf.one_hot(labels, depth=settings.TRAIN_TEST_WAY, axis=1)\n return tf.one_hot(labels, depth=settings.TRAIN_TEST_WAY, axis=-1)\n\n @property\n def trainable_variables(self):\n return NotImplementedError(\"Return model trainable variables\")\n\n @property\n def loss_function(self):\n return NotImplementedError(\"Loss function to compute 
predictions and labels\")\n\n def train_step(self, train_images, train_labels, test_images, test_labels):\n with tf.GradientTape() as tape:\n predictions = self.forward(train_images, train_labels, test_images, test_labels)\n oneHotLabels = self.labelEncode(test_labels)\n # print(oneHotLabels.shape, predictions.shape)\n loss = self.loss_function(oneHotLabels, predictions)\n gradients = tape.gradient(loss, self.trainable_variables)\n self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))\n\n self.train_loss(loss)\n self.train_accuracy(oneHotLabels, predictions)\n\n def test_step(self, train_images, train_labels, test_images, test_labels):\n predictions = self.forward(train_images, train_labels, test_images, test_labels)\n oneHotLabels = self.labelEncode(test_labels)\n loss = self.loss_function(oneHotLabels, predictions)\n\n self.test_loss(loss)\n self.test_accuracy(oneHotLabels, predictions)\n return oneHotLabels, predictions\n\n def train(self, epochs, count_per_epoch):\n # self.test()\n train_loss = []\n train_accuracy = []\n test_loss = []\n test_accuracy = []\n predictions_list = []\n labels_list = []\n total_time = 0\n for epoch in range(epochs):\n start = time.time()\n self.train_loss.reset_state()\n self.train_accuracy.reset_state()\n self.test_loss.reset_state()\n self.test_accuracy.reset_state()\n\n for _ in range(count_per_epoch):\n train_images, train_labels, test_images, test_labels = self.dataLoader.get_dataset()\n # train_images, train_labels, test_images, test_labels = self.dataLoader.sampleBatchDataset(\n # batch_size=settings.BATCH_SIZE, training=True, resize=True\n # )\n self.train_step(train_images, train_labels, test_images, test_labels)\n\n for _ in range(count_per_epoch // 2):\n train_images, train_labels, test_images, test_labels = self.dataLoader.sample_dataset_from_test()\n # train_images, train_labels, test_images, test_labels = self.dataLoader.sampleBatchDataset(\n # batch_size=settings.BATCH_SIZE, training=False, resize=True\n # )\n oneHotLabels, predictions = self.test_step(train_images, train_labels, test_images, test_labels)\n # print(predictions.shape)\n predictions_list.append(predictions)\n labels_list.append(oneHotLabels)\n\n train_loss.append(self.train_loss.result())\n train_accuracy.append(self.train_accuracy.result())\n test_loss.append(self.test_loss.result())\n test_accuracy.append(self.test_accuracy.result())\n print(\n \"Epoch: {:.2f} \".format(epoch + 1),\n \"train_loss: {:.2f} \".format(self.train_loss.result()),\n \"train_accuracy: {:.2f}% \".format(self.train_accuracy.result() * 100),\n \"test_loss: {:.2f} \".format(self.test_loss.result()),\n \"test_accuracy: {:.2f}% \".format(self.test_accuracy.result() * 100),\n \"time: {:.2f} \".format(time.time() - start)\n )\n total_time = total_time + time.time() - start\n if (epoch + 1) % 5 == 0:\n print(\"time: {:.2f} \".format(total_time * 20 / 3600))\n total_time = 0\n with open(\"relation_omniglot_5way_1shot.pkl\", \"wb\") as f:\n pickle.dump((train_loss, train_accuracy, test_loss, test_accuracy), f)\n\n with open(\"relation_omniglot_5way_1shot_label_prediction.pkl\", \"wb\") as f:\n pickle.dump((labels_list, predictions_list), f)\n # self.test()\n","repo_name":"ping15/few-shot-learning","sub_path":"networks/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":5439,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"23729489886","text":"import os\nimport sys\nimport argparse\nimport numpy as np\nimport pandas 
as pd\nfrom scipy import stats\n# from nltk.util import ngrams\n\nsys.path.insert(1, os.path.join(sys.path[0], '..'))\nfrom h01_data.get_surprisals import load_surprisals as load_orig_surprisals\nfrom util import util\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n # Results\n parser.add_argument(\"--surprisals-orig-fpath\", type=str, required=True)\n parser.add_argument(\"--surprisals-cluster-fpath\", type=str, required=True)\n parser.add_argument(\"--surprisals-text-fpath\", type=str, required=True)\n # Save\n parser.add_argument(\"--results-fpath\", type=str, required=True)\n\n args = parser.parse_args()\n print(args)\n return args\n\n\ndef load_surprisals(surprisals_fpath):\n data = util.read_data(surprisals_fpath)\n data = data['model_text']\n return np.array(data['ids']), data['surprisals']\n\n\ndef load_all_surprisals(surprisals_orig_fpath, surprisals_cluster_fpath, surprisals_text_fpath):\n ids_orig, surprisals_orig = load_orig_surprisals(surprisals_orig_fpath)\n ids_cluster, surprisals_cluster = load_surprisals(surprisals_cluster_fpath)\n ids_text, surprisals_text = load_surprisals(surprisals_text_fpath)\n\n assert (ids_orig == ids_cluster).all()\n assert (ids_orig == ids_text).all()\n\n return surprisals_orig, surprisals_cluster, surprisals_text\n\n\ndef get_correlations(surprisals_orig_fpath, surprisals_cluster_fpath, surprisals_text_fpath):\n surprisals_orig, surprisals_cluster, surprisals_text = \\\n load_all_surprisals(surprisals_orig_fpath, surprisals_cluster_fpath, surprisals_text_fpath)\n\n corr_cluster, pvalue_cluster = stats.spearmanr(surprisals_orig, surprisals_cluster)\n corr_text, pvalue_text = stats.spearmanr(surprisals_orig, surprisals_text)\n\n results = {\n 'text': [corr_text, pvalue_text],\n 'cluster': [corr_cluster, pvalue_cluster],\n }\n df = pd.DataFrame.from_dict(results, columns=['corr', 'pvalue'], orient='index')\n print(df)\n return df\n\n\ndef main():\n args = get_args()\n\n df = get_correlations(args.surprisals_orig_fpath, args.surprisals_cluster_fpath,\n args.surprisals_text_fpath)\n df.to_csv(args.results_fpath, sep='\\t')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"rycolab/clusters-in-language-evaluation","sub_path":"src/h03_analysis/get_surprisal_correlations.py","file_name":"get_surprisal_correlations.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"14917313600","text":"from hamcrest import assert_that, contains, empty, has_entries, none\n\nfrom . 
import confd\nfrom ..helpers import associations as a, errors as e, fixtures, scenarios as s\nfrom ..helpers.config import MAIN_TENANT, SUB_TENANT\n\nFAKE_ID = 999999999\n\n\n@fixtures.outcall()\n@fixtures.trunk()\ndef test_associate_errors(outcall, trunk):\n response = confd.outcalls(FAKE_ID).trunks.put(trunks=[trunk])\n response.assert_status(404)\n\n url = confd.outcalls(outcall['id']).trunks().put\n for check in error_checks(url):\n yield check\n\n\ndef error_checks(url):\n yield s.check_bogus_field_returns_error, url, 'trunks', 123\n yield s.check_bogus_field_returns_error, url, 'trunks', None\n yield s.check_bogus_field_returns_error, url, 'trunks', True\n yield s.check_bogus_field_returns_error, url, 'trunks', 'string'\n yield s.check_bogus_field_returns_error, url, 'trunks', [123]\n yield s.check_bogus_field_returns_error, url, 'trunks', [None]\n yield s.check_bogus_field_returns_error, url, 'trunks', ['string']\n yield s.check_bogus_field_returns_error, url, 'trunks', [{}]\n yield s.check_bogus_field_returns_error, url, 'trunks', [{'id': None}]\n yield s.check_bogus_field_returns_error, url, 'trunks', [{'id': 'string'}]\n yield s.check_bogus_field_returns_error, url, 'trunks', [{'id': 1}, {'id': None}]\n yield s.check_bogus_field_returns_error, url, 'trunks', [{'not_id': 123}]\n yield s.check_bogus_field_returns_error, url, 'trunks', [{'id': FAKE_ID}]\n\n\n@fixtures.outcall()\n@fixtures.trunk()\ndef test_associate(outcall, trunk):\n response = confd.outcalls(outcall['id']).trunks().put(trunks=[trunk])\n response.assert_updated()\n\n\n@fixtures.outcall()\n@fixtures.trunk()\n@fixtures.trunk()\n@fixtures.trunk()\ndef test_associate_multiple(outcall, trunk1, trunk2, trunk3):\n response = confd.outcalls(outcall['id']).trunks.put(trunks=[trunk2, trunk3, trunk1])\n response.assert_updated()\n\n response = confd.outcalls(outcall['id']).get()\n assert_that(\n response.item,\n has_entries(\n trunks=contains(\n has_entries(id=trunk2['id']),\n has_entries(id=trunk3['id']),\n has_entries(id=trunk1['id']),\n )\n ),\n )\n\n\n@fixtures.outcall()\n@fixtures.trunk()\ndef test_associate_same_trunk(outcall, trunk):\n trunks = [{'id': trunk['id']}, {'id': trunk['id']}]\n response = confd.outcalls(outcall['id']).trunks.put(trunks=trunks)\n response.assert_status(400)\n\n\n@fixtures.outcall()\n@fixtures.trunk()\n@fixtures.trunk()\ndef test_get_trunks_associated_to_outcall(outcall, trunk1, trunk2):\n with a.outcall_trunk(outcall, trunk2, trunk1):\n response = confd.outcalls(outcall['id']).get()\n assert_that(\n response.item,\n has_entries(\n trunks=contains(\n has_entries(\n id=trunk2['id'], endpoint_sip=none(), endpoint_custom=none()\n ),\n has_entries(\n id=trunk1['id'], endpoint_sip=none(), endpoint_custom=none()\n ),\n )\n ),\n )\n\n\n@fixtures.outcall()\n@fixtures.outcall()\n@fixtures.trunk()\ndef test_get_outcalls_associated_to_trunk(outcall1, outcall2, trunk):\n with a.outcall_trunk(outcall2, trunk), a.outcall_trunk(outcall1, trunk):\n response = confd.trunks(trunk['id']).get()\n assert_that(\n response.item,\n has_entries(\n outcalls=contains(\n has_entries(id=outcall2['id'], name=outcall2['name']),\n has_entries(id=outcall1['id'], name=outcall1['name']),\n )\n ),\n )\n\n\n@fixtures.outcall(wazo_tenant=MAIN_TENANT)\n@fixtures.outcall(wazo_tenant=SUB_TENANT)\n@fixtures.trunk(wazo_tenant=MAIN_TENANT)\n@fixtures.trunk(wazo_tenant=SUB_TENANT)\ndef test_associate_multi_tenant(main_outcall, sub_outcall, main_trunk, sub_trunk):\n response = confd.outcalls(main_outcall['id']).trunks.put(\n 
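# a sub-tenant caller must not be able to reach a main-tenant outcall\n        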
trunks=[{'id': main_trunk['id']}], wazo_tenant=SUB_TENANT\n )\n response.assert_match(404, e.not_found('Outcall'))\n\n response = confd.outcalls(sub_outcall['id']).trunks.put(\n trunks=[{'id': main_trunk['id']}], wazo_tenant=SUB_TENANT\n )\n response.assert_match(400, e.not_found('Trunk'))\n\n response = confd.outcalls(main_outcall['id']).trunks.put(\n trunks=[{'id': sub_trunk['id']}], wazo_tenant=MAIN_TENANT\n )\n response.assert_match(400, e.different_tenant())\n\n\n@fixtures.outcall()\n@fixtures.trunk()\n@fixtures.trunk()\ndef test_dissociate(outcall, trunk1, trunk2):\n with a.outcall_trunk(outcall, trunk1, trunk2):\n response = confd.outcalls(outcall['id']).trunks.put(trunks=[])\n response.assert_updated()\n\n\n@fixtures.outcall()\n@fixtures.trunk()\n@fixtures.trunk()\ndef test_delete_outcall_when_outcall_and_trunk_associated(outcall, trunk1, trunk2):\n with a.outcall_trunk(outcall, trunk1, trunk2, check=False):\n confd.outcalls(outcall['id']).delete().assert_deleted()\n\n deleted_outcall = confd.outcalls(outcall['id']).get\n yield s.check_resource_not_found, deleted_outcall, 'Outcall'\n\n response = confd.trunks(trunk1['id']).get()\n yield assert_that, response.item['outcalls'], empty()\n\n response = confd.trunks(trunk2['id']).get()\n yield assert_that, response.item['outcalls'], empty()\n\n\n@fixtures.outcall()\n@fixtures.outcall()\n@fixtures.trunk()\ndef test_delete_trunk_when_outcall_and_trunk_associated(outcall1, outcall2, trunk):\n with a.outcall_trunk(outcall1, trunk, check=False), a.outcall_trunk(\n outcall2, trunk, check=False\n ):\n confd.trunks(trunk['id']).delete().assert_deleted()\n\n deleted_trunk = confd.trunks(trunk['id']).get\n yield s.check_resource_not_found, deleted_trunk, 'Trunk'\n\n response = confd.outcalls(outcall1['id']).get()\n yield assert_that, response.item['trunks'], empty()\n\n response = confd.outcalls(outcall2['id']).get()\n yield assert_that, response.item['trunks'], empty()\n","repo_name":"wazo-platform/wazo-confd","sub_path":"integration_tests/suite/base/test_outcall_trunk.py","file_name":"test_outcall_trunk.py","file_ext":"py","file_size_in_byte":6083,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"71"} +{"seq_id":"26056275422","text":"\"\"\"Conversion operations.\"\"\"\r\nfrom collections import Sequence\r\nimport csv\r\nimport logging\r\n\r\nimport arcpy\r\n\r\nfrom arcetl.arcobj import DatasetView, dataset_metadata, spatial_reference_metadata\r\nfrom arcetl import attributes\r\nfrom arcetl import dataset\r\nfrom arcetl import features\r\nfrom arcetl.helpers import contain, leveled_logger, unique_name\r\n\r\n\r\nLOG = logging.getLogger(__name__)\r\n\"\"\"logging.Logger: Module-level logger.\"\"\"\r\n\r\n\r\ndef planarize(dataset_path, output_path, **kwargs):\r\n \"\"\"Planarize feature geometry into lines.\r\n\r\n Note:\r\n This method does not make topological linework. However it does carry all\r\n attributes with it, rather than just an ID attribute.\r\n\r\n Since this method breaks the new line geometry at intersections, it can be\r\n useful to break line geometry features that cross.\r\n\r\n Args:\r\n dataset_path (str): Path of the dataset.\r\n output_path (str): Path of the output dataset.\r\n **kwargs: Arbitrary keyword arguments. See below.\r\n\r\n Keyword Args:\r\n dataset_where_sql (str): SQL where-clause for dataset subselection.\r\n tolerance (float): Tolerance for coincidence, in units of the dataset.\r\n log_level (str): Level to log the function at. 
Default is \"info\".\r\n\r\n Returns:\r\n str: Path of the converted dataset.\r\n \"\"\"\r\n kwargs.setdefault(\"dataset_where_sql\")\r\n kwargs.setdefault(\"tolerance\")\r\n log = leveled_logger(LOG, kwargs.setdefault(\"log_level\", \"info\"))\r\n log(\"Start: Planarize geometry in %s to lines in %s.\", dataset_path, output_path)\r\n view = DatasetView(dataset_path, kwargs[\"dataset_where_sql\"])\r\n with view:\r\n arcpy.management.FeatureToLine(\r\n in_features=view.name,\r\n out_feature_class=output_path,\r\n cluster_tolerance=kwargs[\"tolerance\"],\r\n attributes=True,\r\n )\r\n log(\"End: Planarize.\")\r\n return output_path\r\n\r\n\r\ndef polygons_to_lines(dataset_path, output_path, topological=False, **kwargs):\r\n \"\"\"Convert geometry from polygons to lines.\r\n\r\n Note:\r\n If topological is set to True, shared outlines will be a single, separate\r\n feature. Note that one cannot pass attributes to a topological transformation\r\n (as the values would not apply to all adjacent features).\r\n\r\n If an id field name is specified, the output dataset will identify the input\r\n features that defined the line feature with the name & values from the provided\r\n field. This option will be ignored if the output is non-topological lines, as\r\n the field will pass over with the rest of the attributes.\r\n\r\n Args:\r\n dataset_path (str): Path of the dataset.\r\n output_path (str): Path of the output dataset.\r\n topological (bool): Flag to indicate lines should be topological, or merged\r\n where lines overlap.\r\n **kwargs: Arbitrary keyword arguments. See below.\r\n\r\n Keyword Args:\r\n dataset_where_sql (str): SQL where-clause for dataset subselection.\r\n id_field_name (str): Name of the field to apply ID to lines from.\r\n tolerance (float): Tolerance for coincidence, in units of the dataset.\r\n log_level (str): Level to log the function at. 
Default is \"info\".\r\n\r\n Returns:\r\n str: Path of the converted dataset.\r\n \"\"\"\r\n kwargs.setdefault(\"dataset_where_sql\")\r\n kwargs.setdefault(\"id_field_name\")\r\n log = leveled_logger(LOG, kwargs.setdefault(\"log_level\", \"info\"))\r\n log(\"Start: Convert polygons in %s to lines in %s.\", dataset_path, output_path)\r\n meta = {\r\n \"dataset\": dataset_metadata(dataset_path),\r\n \"orig_tolerance\": arcpy.env.XYTolerance,\r\n }\r\n view = DatasetView(dataset_path, kwargs[\"dataset_where_sql\"])\r\n with view:\r\n if \"tolerance\" in kwargs:\r\n arcpy.env.XYTolerance = kwargs[\"tolerance\"]\r\n arcpy.management.PolygonToLine(\r\n in_features=view.name,\r\n out_feature_class=output_path,\r\n neighbor_option=topological,\r\n )\r\n if \"tolerance\" in kwargs:\r\n arcpy.env.XYTolerance = meta[\"orig_tolerance\"]\r\n if topological:\r\n for side in [\"left\", \"right\"]:\r\n meta[side] = {\"oid_key\": side.upper() + \"_FID\"}\r\n if kwargs[\"id_field_name\"]:\r\n meta[side][\"id_field\"] = next(\r\n field\r\n for field in meta[\"dataset\"][\"fields\"]\r\n if field[\"name\"].lower() == kwargs[\"id_field_name\"].lower()\r\n )\r\n meta[side][\"id_field\"][\"name\"] = side + \"_\" + kwargs[\"id_field_name\"]\r\n # Cannot create an OID-type field, so force to long.\r\n if meta[side][\"id_field\"][\"type\"].lower() == \"oid\":\r\n meta[side][\"id_field\"][\"type\"] = \"long\"\r\n dataset.add_field_from_metadata(\r\n output_path, meta[side][\"id_field\"], log_level=None\r\n )\r\n attributes.update_by_joined_value(\r\n output_path,\r\n field_name=meta[side][\"id_field\"][\"name\"],\r\n join_dataset_path=dataset_path,\r\n join_field_name=kwargs[\"id_field_name\"],\r\n on_field_pairs=[\r\n (meta[side][\"oid_key\"], meta[\"dataset\"][\"oid_field_name\"])\r\n ],\r\n log_level=None,\r\n )\r\n dataset.delete_field(output_path, meta[side][\"oid_key\"], log_level=None)\r\n else:\r\n dataset.delete_field(output_path, \"ORIG_FID\", log_level=None)\r\n log(\"End: Convert.\")\r\n return output_path\r\n\r\n\r\ndef project(dataset_path, output_path, spatial_reference_item=4326, **kwargs):\r\n \"\"\"Project dataset features to a new dataset.\r\n\r\n Args:\r\n dataset_path (str): Path of the dataset.\r\n output_path (str): Path of the output dataset.\r\n spatial_reference_item: Item from which the spatial reference of the output\r\n geometry will be derived. Default is 4326 (EPSG code for unprojected WGS84).\r\n **kwargs: Arbitrary keyword arguments. See below.\r\n\r\n Keyword Args:\r\n dataset_where_sql (str): SQL where-clause for dataset subselection.\r\n log_level (str): Level to log the function at. Default is \"info\".\r\n\r\n Returns:\r\n str: Path of the converted dataset.\r\n \"\"\"\r\n kwargs.setdefault(\"dataset_where_sql\")\r\n meta = {\"spatial\": spatial_reference_metadata(spatial_reference_item)}\r\n log = leveled_logger(LOG, kwargs.setdefault(\"log_level\", \"info\"))\r\n log(\r\n \"Start: Project %s to srid=%s in %s.\",\r\n dataset_path,\r\n meta[\"spatial\"][\"object\"].factoryCode,\r\n output_path,\r\n )\r\n meta[\"dataset\"] = dataset_metadata(dataset_path)\r\n \"\"\"Project tool cannot output to an in-memory workspace (will throw error 000944).\r\n This is not a bug. 
Esri's Project documentation (as of v10.6) specifically states:\r\n \"The in_memory workspace is not supported as a location to write the output\r\n dataset.\"\r\n https://desktop.arcgis.com/en/arcmap/latest/tools/data-management-toolbox/project.htm\r\n https://pro.arcgis.com/en/pro-app/tool-reference/data-management/project.htm\r\n To avoid all this ado, using create to clone a (reprojected)\r\n dataset & insert features into it.\r\n \"\"\"\r\n dataset.create(\r\n dataset_path=output_path,\r\n field_metadata_list=meta[\"dataset\"][\"user_fields\"],\r\n geometry_type=meta[\"dataset\"][\"geometry_type\"],\r\n spatial_reference_item=meta[\"spatial\"][\"object\"],\r\n log_level=None,\r\n )\r\n features.insert_from_path(\r\n dataset_path=output_path,\r\n insert_dataset_path=dataset_path,\r\n field_names=meta[\"dataset\"][\"user_fields\"],\r\n insert_where_sql=kwargs[\"dataset_where_sql\"],\r\n log_level=None,\r\n )\r\n log(\"End: Project.\")\r\n return output_path\r\n\r\n\r\ndef rows_to_csvfile(rows, output_path, field_names, header=False, **kwargs):\r\n \"\"\"Write collection of rows to a CSV-file.\r\n\r\n Note: Rows can be represented by either dictionaries or sequences.\r\n\r\n Args:\r\n rows (iter): Collection of dictionaries or sequences representing rows.\r\n output_path (str): Path of the output dataset.\r\n field_names (iter): Collection of the field names, in the desired order of\r\n output.\r\n header (bool): Write a header in the CSV output if True.\r\n **kwargs: Arbitrary keyword arguments. See below.\r\n\r\n Keyword Args:\r\n file_mode (str): Code indicating the file mode for writing. Default is \"wb\".\r\n log_level (str): Level to log the function at. Default is \"info\".\r\n\r\n Returns:\r\n str: Path of the CSV-file.\r\n \"\"\"\r\n kwargs.setdefault(\"file_mode\", \"wb\")\r\n log = leveled_logger(LOG, kwargs.setdefault(\"log_level\", \"info\"))\r\n log(\"Start: Convert rows to CSVfile %s.\", output_path)\r\n field_names = list(contain(field_names))\r\n with open(output_path, kwargs[\"file_mode\"]) as csvfile:\r\n for index, row in enumerate(rows):\r\n if index == 0:\r\n if isinstance(row, dict):\r\n writer = csv.DictWriter(csvfile, field_names)\r\n if header:\r\n writer.writeheader()\r\n elif isinstance(row, Sequence):\r\n writer = csv.writer(csvfile)\r\n if header:\r\n writer.writerow(field_names)\r\n else:\r\n raise TypeError(\"Rows must be dictionaries or sequences.\")\r\n\r\n writer.writerow(row)\r\n log(\"End: Write.\")\r\n return output_path\r\n\r\n\r\ndef table_to_points(\r\n dataset_path,\r\n output_path,\r\n x_field_name,\r\n y_field_name,\r\n spatial_reference_item=4326,\r\n **kwargs\r\n):\r\n \"\"\"Convert coordinate table to a new point dataset.\r\n\r\n Args:\r\n dataset_path (str): Path of the dataset.\r\n output_path (str): Path of the output dataset.\r\n x_field_name (str): Name of field with x-coordinate.\r\n y_field_name (str): Name of field with y-coordinate.\r\n spatial_reference_item: Item from which the spatial reference of the output\r\n geometry will be derived. Default is 4326 (EPSG code for unprojected WGS84).\r\n **kwargs: Arbitrary keyword arguments. See below.\r\n\r\n Keyword Args:\r\n dataset_where_sql (str): SQL where-clause for dataset subselection.\r\n z_field_name (str): Name of the field with z-coordinate.\r\n log_level (str): Level to log the function at. 
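# Hypothetical call to project() above; the geodatabase paths and where-clause
# are placeholders. The clone-and-insert workaround happens inside project(),
# so the caller only names a source and an output.
projected = project(
    dataset_path='C:/data/work.gdb/parcels',
    output_path='C:/data/work.gdb/parcels_wgs84',
    spatial_reference_item=4326,  # default: unprojected WGS84
    dataset_where_sql="COUNTY = 'Lane'",
)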
Default is \"info\".\r\n\r\n Returns:\r\n str: Path of the converted dataset.\r\n \"\"\"\r\n kwargs.setdefault(\"dataset_where_sql\")\r\n kwargs.setdefault(\"z_field_name\")\r\n log = leveled_logger(LOG, kwargs.setdefault(\"log_level\", \"info\"))\r\n log(\"Start: Convert %s to spatial dataset %s.\", dataset_path, output_path)\r\n meta = {\"spatial\": spatial_reference_metadata(spatial_reference_item)}\r\n view_name = unique_name()\r\n arcpy.management.MakeXYEventLayer(\r\n table=dataset_path,\r\n out_layer=view_name,\r\n in_x_field=x_field_name,\r\n in_y_field=y_field_name,\r\n in_z_field=kwargs.get(\"z_field_name\"),\r\n spatial_reference=meta[\"spatial\"][\"object\"],\r\n )\r\n dataset.copy(\r\n view_name,\r\n output_path,\r\n dataset_where_sql=kwargs[\"dataset_where_sql\"],\r\n log_level=None,\r\n )\r\n dataset.delete(view_name, log_level=None)\r\n log(\"End: Convert.\")\r\n return output_path\r\n","repo_name":"denkide/ColumbiaCarto","sub_path":"Library/ArcETL/arcetl/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":11513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"72072982629","text":"# Import Modules\nimport os\nimport pygame as pg\n\ndef clear_screen(screen, bg_color=(170, 238, 187)):\n background = pg.Surface(screen.get_size())\n background = background.convert()\n background.fill(bg_color)\n screen.blit(background, (0, 0))\n pg.display.flip()\n\ndef init(width=1280, height=480, bg_color=(170, 238, 187), box_width=5, box_height=5):\n pg.init()\n screen = pg.display.set_mode((width, height), pg.SCALED)\n \n pg.display.flip()\n\n boxes_x = width // box_width\n boxes_y = height // box_height\n\n clock = pg.time.Clock()\n return screen, boxes_x, boxes_y, clock\n\ndef draw_point(screen, box_width, box_height, x, y):\n sx = x * box_width\n sy = y * box_height\n rect = pg.Rect(sx, sy, box_width, box_height)\n pg.draw.rect(screen, (255,0,0), rect)\n pg.display.flip()\n\n\n# this calls the 'main' function when this script is executed\nif __name__ == \"__main__\":\n screen, boxes_x, boxes_y, clock = init()\n x = y = 10\n n = 0\n while True:\n clear_screen(screen, (0,0,0))\n box_width = box_height = 5\n y = n//boxes_x\n x = (n) % boxes_x\n n+= 1\n draw_point(screen, box_width, box_height, x, y)\n clock.tick(60)","repo_name":"azhar0100/gamelibrary","sub_path":"pygame_skeleton.py","file_name":"pygame_skeleton.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"8356255653","text":"\n\nimport numpy as np\nimport numpy.typing as npt\nfrom spot_interfaces.msg import JointAngles\n\n\ndef get_leg_angles_as_np_array(joints: JointAngles, leg_id: int) -> npt.NDArray:\n leg_angles = np.zeros(3)\n if leg_id == 0:\n # FL\n leg_angles[0] = joints.flc\n leg_angles[1] = joints.flh\n leg_angles[2] = joints.flk\n elif leg_id == 1:\n # FR\n leg_angles[0] = joints.frc\n leg_angles[1] = joints.frh\n leg_angles[2] = joints.frk\n elif leg_id == 2:\n # BL\n leg_angles[0] = joints.blc\n leg_angles[1] = joints.blh\n leg_angles[2] = joints.blk\n else:\n # BR\n leg_angles[0] = joints.brc\n leg_angles[1] = joints.brh\n leg_angles[2] = joints.brk\n return leg_angles\n\ndef set_leg_angles_in_joint_angles(joints: JointAngles, leg_angles: npt.NDArray, leg_id: int):\n if leg_angles.shape != (3,):\n # wrong leg angles shape, make no changes\n return\n\n if leg_id == 0:\n # FL\n joints.flc = leg_angles[0]\n joints.flh = leg_angles[1]\n 
joints.flk = leg_angles[2]\n elif leg_id == 1:\n # FR\n joints.frc = leg_angles[0]\n joints.frh = leg_angles[1]\n joints.frk = leg_angles[2]\n elif leg_id == 2:\n # BL\n joints.blc = leg_angles[0]\n joints.blh = leg_angles[1]\n joints.blk = leg_angles[2]\n else:\n # BR\n joints.brc = leg_angles[0]\n joints.brh = leg_angles[1]\n joints.brk = leg_angles[2]\n\ndef joint_angles_to_np_array(joints: JointAngles) -> npt.NDArray:\n \"\"\"Convert joint angles represented as a JointAngles object into a numpy array.\n \"\"\"\n angles = np.zeros((4,3)) # 3 joints for each of 4 legs\n angles[0,0] = joints.flc\n angles[0,1] = joints.flh\n angles[0,2] = joints.flk\n angles[1,0] = joints.frc\n angles[1,1] = joints.frh\n angles[1,2] = joints.frk\n angles[2,0] = joints.blc\n angles[2,1] = joints.blh\n angles[2,2] = joints.blk\n angles[3,0] = joints.brc\n angles[3,1] = joints.brh\n angles[3,2] = joints.brk\n return angles\n\ndef np_array_to_joint_angles(array: npt.NDArray) -> JointAngles:\n \"\"\"Convert joint angles represented as a numpy array into a JointAngles object.\n \"\"\"\n joints = JointAngles()\n if array.shape != (4,3):\n # it's not a valid joint angles array, so just return all zeros as default\n return joints\n\n joints.flc = array[0,0]\n joints.flh = array[0,1]\n joints.flk = array[0,2]\n joints.frc = array[1,0]\n joints.frh = array[1,1]\n joints.frk = array[1,2]\n joints.blc = array[2,0]\n joints.blh = array[2,1]\n joints.blk = array[2,2]\n joints.brc = array[3,0]\n joints.brh = array[3,1]\n joints.brk = array[3,2]\n return joints\n\ndef joint_angles_match(joints_a: JointAngles, joints_b: JointAngles, tolerance_deg: float = 0.01) -> bool:\n \"\"\"Return true if the joint angles in A are within tolerance of being equal to joint angles in B.\n \"\"\"\n angles_a = joint_angles_to_np_array(joints_a)\n angles_b = joint_angles_to_np_array(joints_b)\n\n abs_diff = np.abs(angles_a - angles_b)\n\n return np.max(abs_diff) <= tolerance_deg\n\ndef multi_joint_one_step_interp(current_joints: JointAngles, target_joints: JointAngles, max_angle_delta: float) -> JointAngles:\n \"\"\"Interpolate a joint position for Spot Micro based on current joint position and target joint position.\n\n Given a target position that differs in multiple joints from current, this method will find an interpolated joint step\n that, when iterated, will lead all joints to land in the target on the same step (approximately).\n \"\"\"\n # determine desired angle deltas for each joint\n current_angles_arr = joint_angles_to_np_array(current_joints)\n target_angles_arr = joint_angles_to_np_array(target_joints)\n angle_deltas = target_angles_arr - current_angles_arr\n\n # determine max desired change\n max_delta_mag = np.max(np.abs(angle_deltas))\n\n # guard against dividing by zero when already at the target\n if max_delta_mag == 0.0:\n return current_joints\n\n # constrain max desired change to max allowable\n max_step = min(max_delta_mag, max_angle_delta)\n\n # calculate ratio of max allowable to max desired, and adjust all angles based on this one ratio\n ratio = max_step / max_delta_mag\n ratio = min(ratio, 1.0)\n angle_deltas = angle_deltas * ratio\n\n # add allowable angle deltas to current angles and convert back to JointAngles\n interp_angles = np_array_to_joint_angles(current_angles_arr + angle_deltas)\n return interp_angles\n\ndef one_step_interp(current_angle: float, target_angle: float, max_angle_delta: float) -> float:\n \"\"\"Given a starting angle and an ending angle, determine an interpolated angle that is a maximum of\n `max_angle_delta` away from the current_angle.\n \"\"\"\n max_positive_increment = max_angle_delta\n 
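# Illustrative use of the interpolation helpers above: step a pose toward a
# target by at most 1 degree per tick; all joints arrive together because the
# step is scaled by one shared ratio. Assumes JointAngles() fields default to
# zero, which is the usual behaviour for ROS message types.
current = JointAngles()
target = np_array_to_joint_angles(np.full((4, 3), 30.0))
while not joint_angles_match(current, target, tolerance_deg=0.5):
    current = multi_joint_one_step_interp(current, target, max_angle_delta=1.0)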
max_negative_increment = -1 * max_positive_increment\n\n delta_angle = target_angle - current_angle\n # clamp\n delta_angle = max(max_negative_increment, min(delta_angle, max_positive_increment))\n\n return current_angle + delta_angle\n","repo_name":"mogar/spot_micro","sub_path":"spot_ws/src/motion_control/motion_control/lib/motion_utils.py","file_name":"motion_utils.py","file_ext":"py","file_size_in_byte":5028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"2507399319","text":"#!/usr/bin/env python\nfrom scipy.sparse import lil_matrix\nimport numpy as np\n\ndef poisson(pts, elements, bc_nodes, f, u_bc, element, quadrature, scaling=1.0):\n \"\"\"Set up a Galerkin FEM system approximating the Poisson equation with Dirichlet B.C\n on a domain described by a mesh in (pts, elements).\n\n This code can be used with different element types and different quadratures.\n\n This code pre-computes J^{-1}*dchi and det(J)*w at quadrature points.\n\n Parameters:\n - pts: list of node coordinates (2D array)\n - elements: list of lists, each element contains indices in pts of nodes that belong\n to a given quad or triangle\n - bc_nodes: 1D array of 0 and 1; bc_nodes[k] == 1 iff pts[k] is a Dirichlet B.C. node\n - f: forcing term: array with len(pts) elements\n - u_bc: Dirichlet boundary condition: array with len(pts) elements\n - element: constructor of the FEM element class (callable)\n - quadrature class instance\n \"\"\"\n n_nodes = pts.shape[0]\n n_elements = elements.shape[0]\n A = lil_matrix((n_nodes, n_nodes))\n b = np.zeros(n_nodes)\n\n # Get quadrature points and weights\n q_pts = quadrature.points()\n q_w = quadrature.weights()\n n_quadrature_points = len(q_pts)\n\n E = element()\n # precompute values of shape functions at all quadrature points,\n # as well as values of their derivatives:\n chi = np.zeros((E.n_chi(), n_quadrature_points))\n dchi = np.zeros((E.n_chi(), n_quadrature_points, 2))\n for j in xrange(E.n_chi()):\n for q in xrange(n_quadrature_points):\n chi[j,q] = E.chi(j, q_pts[q])\n dchi[j,q,:] = E.dchi(j, q_pts[q]).T\n\n # for each element...\n for k in xrange(n_elements):\n # initialize the current element\n E.reset(pts[elements[k]])\n\n # compute dchi with respect to x,y (using chain rule)\n # and det(J)*w at all quadrature points:\n dchi_xy = np.zeros((E.n_chi(), n_quadrature_points, 2))\n det_JxW = np.zeros(n_quadrature_points)\n for q in xrange(n_quadrature_points):\n J_inv = E.J_inverse(q_pts[q])\n for j in xrange(E.n_chi()):\n dchi_xy[j,q,:] = (J_inv * np.matrix(dchi[j,q,:]).T).T\n\n det_JxW[q] = E.det_J(q_pts[q]) * q_w[q]\n\n # for each shape function $\\phi_i$...\n for i in xrange(E.n_chi()):\n row = elements[k, i]\n\n if bc_nodes[row]:\n continue\n\n # for each shape function $\\phi_j$...\n for j in xrange(E.n_chi()):\n col = elements[k, j]\n\n for q in xrange(n_quadrature_points):\n # stiffness matrix:\n A[row, col] += det_JxW[q] * np.dot(dchi_xy[i,q], dchi_xy[j,q])\n\n # right hand side:\n b[row] += det_JxW[q] * chi[i,q] * (f[col] * chi[j,q])\n\n # enforce Dirichlet boundary conditions:\n for k in xrange(n_nodes):\n if bc_nodes[k]:\n A[k,k] = 1.0 * scaling\n b[k] = u_bc[k] * scaling\n\n return A, b\n","repo_name":"ckhroulev/finite-elements","sub_path":"poisson/2d/poisson_optimized.py","file_name":"poisson_optimized.py","file_ext":"py","file_size_in_byte":3044,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"71"} +{"seq_id":"6562450483","text":"#!/usr/bin/env 
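# Sketch of the solve step that typically follows the poisson() assembly
# above: lil_matrix is convenient while inserting entries, but converting to
# CSR is preferred before solving. The 3x3 system below is a stand-in for an
# assembled stiffness matrix.
from scipy.sparse import lil_matrix
from scipy.sparse.linalg import spsolve
import numpy as np

A = lil_matrix((3, 3))
A[0, 0] = A[1, 1] = A[2, 2] = 2.0
A[0, 1] = A[1, 0] = A[1, 2] = A[2, 1] = -1.0
b = np.array([1.0, 0.0, 1.0])
u = spsolve(A.tocsr(), b)  # nodal solution values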
/home/jcm/env/sidxfa/bin/python\n\nfrom mydataframe import dataframe\nimport pandas as pd\n\npath = '/home/jcm/projects/SIDxFARanInventory/LocalFiles/List Report - CELL 20191210.xlsx'\ntest = dataframe()\ntest.setSourceFile(path)\ntest.createExcelDataframe('Sheet1', skiprows=2)\n\ninputPath = test.showSourceFile()\nprint(\"Input path: {}\".format(inputPath))\n\nprint(test.df)\n","repo_name":"json2x/sidxfa_ran_inventory","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"18993578013","text":"import matplotlib.pyplot as plt\nimport matplotlib.colors as colors\nimport numpy as np\n\n\nclass Plot:\n\n def _get_ax(self, ax):\n # ax handling taken from pandas source code here:\n # https://github.com/pandas-dev/pandas/blob/main/pandas/plotting/_matplotlib/__init__.py#L64\n if ax is None and len(plt.get_fignums()) > 0:\n with plt.rc_context():\n ax = plt.gca()\n ax = getattr(ax, \"left_ax\", ax)\n if ax is None:\n fig, ax = plt.subplots()\n return ax\n\n def line(self, x, y, ax=None, labels=None, units=None, xaxis_label=None, yaxis_label=None, *args, **kwargs):\n ax = self._get_ax(ax)\n ax.tick_params(axis='x', labelrotation=45)\n ax.plot(x, y, label=labels)\n if labels is not None:\n ax.legend()\n if units is not None and yaxis_label is not None:\n ax.set_ylabel(f\"{yaxis_label} ({units})\")\n if xaxis_label is not None:\n ax.set_xlabel(f\"{xaxis_label}\")\n return ax\n\n def colormap(self, x, y, z, xaxis_label=None, yaxis_label=None, yaxis_units=None, zaxis_label=None,\n zaxis_units=None, ax=None,\n cmap=None, logy=True,\n logz=True, vmin=None, vmax=None, *args,\n **kwargs):\n ax = self._get_ax(ax)\n\n if yaxis_units is not None and yaxis_label is not None:\n ax.set_ylabel(f\"{yaxis_label} ({yaxis_units})\")\n if xaxis_label is not None:\n ax.set_xlabel(f\"{xaxis_label}\")\n\n vmin = vmin or np.nanmin(z[np.nonzero(z)])\n vmax = vmax or np.nanmax(z)\n\n if logy:\n ax.semilogy()\n if logz:\n norm = colors.LogNorm(vmin=vmin, vmax=vmax)\n else:\n norm = colors.Normalize(vmin=vmin, vmax=vmax)\n\n ax.tick_params(axis='x', labelrotation=45)\n cm = ax.pcolormesh(x, y, z,\n cmap=cmap or 'plasma',\n norm=norm, *args, **kwargs)\n cbar = plt.colorbar(cm, ax=ax)\n if zaxis_units is not None and zaxis_label is not None:\n cbar.set_label(f'{zaxis_label} ({zaxis_units})')\n return ax\n\n def __call__(self, *args, **kwargs):\n pass\n","repo_name":"SciQLop/speasy","sub_path":"speasy/plotting/mpl_backend/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"71"} +{"seq_id":"10319221193","text":"import matplotlib.pyplot as plt\nimport numpy as np\n#Finding the left most point\ndef Left_index(points):\n\t\n\tminn = 0\n\tfor i in range(1,len(points)):\n\t\tif points[i][0] < points[minn][0]:\n\t\t\tminn = i\n\t\telif points[i][0] == points[minn][0]:\n\t\t\tif points[i][1] > points[minn][1]:\n\t\t\t\tminn = i\n\treturn minn\n\n'''\n\tTo find orientation of ordered triplet (p, q, r).\n\tThe function returns following values\n\t0 --> p, q and r are collinear\n\t1 --> Clockwise\n\t2 --> Counterclockwise\n'''\ndef orientation(p, q, r):\n\tval = (q[1] - p[1]) * (r[0] - q[0]) - \\\n\t\t(q[0] - p[0]) * (r[1] - q[1])\n\n\tif val == 0:\n\t\treturn 0\n\telif val > 0:\n\t\treturn 1\n\telse:\n\t\treturn 2\n\ndef convexHull(points, n):\n\t\n\t# There must be at least 3 
points\n\tif n < 3:\n\t\treturn\n\n\t# Find the leftmost point\n\tl = Left_index(points)\n\n\thull = []\n\t\n\t'''\n\tStart from leftmost point, keep moving counterclockwise\n\tuntil reach the start point again. This loop runs O(h)\n\ttimes where h is number of points in result or output.\n\t'''\n\tp = l\n\tq = 0\n\twhile(True):\n\t\t\n\t\t# Add current point to result\n\t\thull.append(p)\n\t\tq = (p + 1) % n\n\n\t\tfor i in range(n):\n\t\t\t\n\t\t\t# If i is more counterclockwise\n\t\t\t# than current q, then update q\n\t\t\tif(orientation(points[p],\n\t\t\t\t\t\tpoints[i], points[q]) == 2):\n\t\t\t\tq = i\n\t\tp = q\n\n\t\t# While we don't come to first point\n\t\tif(p == l):\n\t\t\tbreak\n\treturn hull\n\ndef main():\n\t# Driver Code\n\tp=[]\n\t#print(\"The points are:\")\n\twith open('points.txt',\"r\") as fp:\n\t for i in fp.readlines():\n\t t=eval(i.strip(\"\\n\"))\n\t #print(t)\n\t p.append(t)\n\tind=convexHull(p, len(p))\n\tHull=[p[i] for i in ind]\n\tHull.append(p[ind[0]])\n\tfor i in p:\n\t\tplt.plot(i[0],i[1],'o:r')\n\tfor i in Hull:\n\t plt.text(i[0],i[1]+1,'({},{})'.format(i[0],i[1]),size=12,color='black')\n\tplt.plot(np.array([i[0] for i in Hull]),np.array([i[1] for i in Hull]),'x:y')\n\tprint(Hull)\n\tplt.show()\n\nif __name__ == '__main__':\n\tmain()","repo_name":"programoworm/mtech_projects","sub_path":"cg/conhull/jarvis.py","file_name":"jarvis.py","file_ext":"py","file_size_in_byte":1876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"25514506548","text":"import json\nfrom pprint import pprint\nfrom time import ctime\nimport tldextract\nimport requests\nfrom pymongo import MongoClient\nimport hashlib\nfrom add_dict import AddDict\nclient = MongoClient()\ndb = client['newscraper']\n\n\ndef get_TLD(url):\n return ''.join([char for char in '.'.join(tldextract.extract(url)[-2:]) if char.isalnum()])\n\n\ndef insert(url: str, entries: list):\n TLD = get_TLD(url)\n prev_urls = db['queries'].find().distinct('url')\n print(prev_urls)\n for entry in entries:\n if entry['url'] not in prev_urls:\n new = {'articles': entry}\n db['queries'].update_one({'TLD': TLD}, {'$push': new}, upsert=True)\n\n db['queries'].update_one({'url': entry['url']}, {'$set': {'url': entry['url']}}, upsert=True)\n\n\ndef get_TLD_entries(url):\n return db['queries'].find({'TLD': get_TLD(url)})\n\n\nif __name__ == '__main__':\n # db['queries'].drop()\n\n entries = [{\n 'score': {\n 'hate': 0.000117,\n 'low': 0.036948,\n 'mixed': 0.004485,\n 'satire': 0.000486,\n 'center': 0.000184,\n 'pro-science': 0.000139,\n 'extreme right': 0.000158,\n 'conspiracy': 0.349955,\n 'fake news': 0.000376,\n 'right': 9.7e-05,\n 'extreme left': 0.0,\n 'right-center': 0.004446,\n 'left': 0.005477,\n 'very high': 1.4e-05,\n 'high': 0.018363,\n 'left-center': 0.00029,\n 'propaganda': 0.000116\n },\n 'url': '1e571fd2a6b730ed87aa2f8fe7c00b27',\n 'timestamp': '02/12/18'\n }, {\n 'score': {\n 'hate': 0.010018,\n 'low': 0.74259,\n 'mixed': 0.002012,\n 'satire': 0.0,\n 'center': 0.003828,\n 'pro-science': 0.001176,\n 'extreme right': 0.159544,\n 'conspiracy': 0.993555,\n 'fake news': 0.05063,\n 'right': 0.001056,\n 'extreme left': 2e-06,\n 'right-center': 0.001567,\n 'left': 1.1e-05,\n 'very high': 0.001493,\n 'high': 0.010507,\n 'left-center': 0.01214,\n 'propaganda': 0.031203\n },\n 'url': '593560e0bbcaef78d1f241a923f27147',\n 'timestamp': '02/12/18'\n }, {\n 'score': {\n 'hate': 0.0,\n 'low': 1.0,\n 'mixed': 0.000108,\n 'satire': 0.0,\n 'center': 0.0,\n 'pro-science': 0.0,\n 
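# Quick check of convexHull() above on a 5-point set: the interior point
# (1, 1) must not appear among the returned hull indices.
pts = [(0, 0), (4, 0), (4, 4), (0, 4), (1, 1)]
hull_idx = convexHull(pts, len(pts))
print([pts[i] for i in hull_idx])  # the four corners, starting from the leftmost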
'extreme right': 3e-06,\n 'conspiracy': 1.0,\n 'fake news': 1e-06,\n 'right': 0.0,\n 'extreme left': 0.0,\n 'right-center': 1e-06,\n 'left': 0.0,\n 'very high': 0.0,\n 'high': 5e-06,\n 'left-center': 0.001661,\n 'propaganda': 4e-06\n },\n 'url': 'a24239402274bf268c1c3e68ae6b6afd',\n 'timestamp': '02/12/18'\n }]\n\n url = 'http://www.naturalnews.com/something'\n insert(url, entries)\n\n from pprint import pprint\n # pprint(list(db['queries'].find()))\n\n # pprint(list(get_TLD_entries(url)))\n print(len(list(get_TLD_entries(url))[0]['articles']))\n","repo_name":"N2ITN/are-you-fake-news","sub_path":"docker/train/test_mongo_query_results.py","file_name":"test_mongo_query_results.py","file_ext":"py","file_size_in_byte":3171,"program_lang":"python","lang":"en","doc_type":"code","stars":111,"dataset":"github-code","pt":"71"} +{"seq_id":"73248583910","text":"\"\"\"Client to access external API\"\"\"\nimport json\nimport requests\nfrom fastapi.exceptions import HTTPException\nfrom src.config import INVERTEXT_HOLIDAY_URL, INVERTEXT_KEY, INVERTEXT_VALIDATION_URL\n\n\nclass ClientInvertexto:\n \"\"\"Client for external API\"\"\"\n\n def get_holidays(self, year: int, state: str = None) -> json:\n \"\"\"Get holidays from Invertexto\"\"\"\n params = {\n \"token\": INVERTEXT_KEY,\n }\n\n if state:\n params.update({\"state\": state})\n\n reponse = requests.get(\n url=f\"{INVERTEXT_HOLIDAY_URL}{year}/\", params=params, timeout=500\n )\n if reponse.status_code != 200:\n raise HTTPException(status_code=reponse.status_code, detail=reponse.json())\n\n return reponse.json()\n\n def get_cpf_validator(self, value: str) -> json:\n \"\"\"Get ip loca from Invertexto\"\"\"\n params = {\"token\": INVERTEXT_KEY, \"type\": \"cpf\", \"value\": value}\n\n reponse = requests.get(\n url=f\"{INVERTEXT_VALIDATION_URL}\", params=params, timeout=500\n )\n if reponse.status_code != 200:\n raise HTTPException(status_code=reponse.status_code, detail=reponse.json())\n\n return reponse.json()\n","repo_name":"pedrogs97/holidays","sub_path":"src/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"11974426613","text":"from collections import defaultdict as dd\n\ndef jakie_litery (s):\n litery = dd(int)\n for e in s:\n litery[e] += 1\n return litery\n\ndef czy_ukladalne (dluzsze, krotsze):\n litery_d = jakie_litery(dluzsze)\n litery_k = jakie_litery(krotsze)\n #if litery_k in litery_d: return 'tak'\n #else: return 'nie'\n if all(litery_k[e] <= litery_d[e] for e in litery_k): return 1\n else: return 0\n\n# print(jakie_litery('slowo'))\n#print (czy_ukladalne('lokomotywa', 'motyl'), czy_ukladalne('lokomotywa', 'kotka'))\n","repo_name":"mikikora/University","sub_path":"python/python_lista8/prog2.py","file_name":"prog2.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"35320939203","text":"import json\nimport file_functions as ff\n\nclass User:\n def get_all_users(user_file):\n if ff.is_file_empty(user_file):\n return []\n else:\n file = None\n try:\n file = open(user_file, \"r\")\n \n # convert from json obj to dictionary\n return json.load(file)\n except Exception as e:\n print(\"Exception: \", e)\n finally:\n if file is not None:\n file.close()\n #---------------------------------------------------------#\n\n @classmethod\n def create_new_user(self, user_file, user_data):\n all_users = 
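# Minimal sketch of the upsert-and-push pattern used by insert() above:
# $push appends to the document's 'articles' array, and upsert=True creates
# the per-TLD document the first time that domain is seen. The entry values
# are illustrative; 'naturalnewscom' is what get_TLD() yields for
# naturalnews.com.
from pymongo import MongoClient

queries = MongoClient()['newscraper']['queries']
queries.update_one(
    {'TLD': 'naturalnewscom'},
    {'$push': {'articles': {'url': 'abc123', 'timestamp': '02/12/18'}}},
    upsert=True,
)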
self.get_all_users(user_file)\n\n file = None\n try:\n file = open(user_file, \"w\")\n \n all_users.append(user_data)\n \n # dumps --> convert from dictionary to json obj\n file.write(json.dumps(all_users))\n \n print('\\n-------------- new user added successfully ----------------\\n')\n \n except Exception as e:\n print(\"Exception: \", e)\n finally:\n \n if file is not None:\n file.close()\n #---------------------------------------------------------#\n\n","repo_name":"mohamed-alwakiel/simple-demo-using-python","sub_path":"user_functions.py","file_name":"user_functions.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"37944716877","text":"\nimport datetime\nimport sys\nimport os\nimport ftplib\nimport configparser\nimport argparse\nimport logging\n\n\nimport CBlib.Log as Log\n\n\ndef FtpNewmedias(ftp_limit = 10, Cam = 1, Media = 'video'):\n \"\"\"\n This function takes the images stored in the base storage directory (as\n defined in the configuration file), and then tries to ftp them to\n an offsite location. If the transfer is successfull, the original\n file also gets moved to a longterm storage directory for redundancy\n \n User can define the maximum number of images they want transferred,\n the default is 10\n \"\"\"\n #****************************************************************************\n #get the option arguments from commandline\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--camera\", help=\"integer to decide what camera to use (1-4)\", type = int)\n parser.add_argument(\"--media\", help=\"video or image, default is video\", type = str)\n parser.add_argument(\"--trans_limit\", help=\"transfer limit, integer\", type = int)\n args = parser.parse_args()\n \n #If a commandline argument is given, use it to overwrite the default camera\n if args.camera:\n Cam = args.camera\n \n #If a commandline argument is given, use it to overwrite the default media type\n if args.media:\n Media = args.media\n \n #If a commandline argument is given, use it to overwrite the default transfer limit\n if args.trans_limit:\n ftp_limit = args.trans_limit\n \n\n\n\n #Read the configuration file\n config = configparser.ConfigParser()\n config.read('StationConfig.ini') #assumed to be in the same folder as script\n \n #Parse the relevant configuration details\n StationNumber = config['DEFAULT']['Station_Number']\n CamNum = 'Camera' + str(Cam)\n \n \n #set video path defaults if that is what is selected\n if Media == 'video':\n #First try the camera specific path\n if config[CamNum]['stor_path_video']:\n storage_path = config[CamNum]['stor_path_video']\n else:\n storage_path = config['DefaultStorage']['stor_path_video']\n \n \n #define folder where files go after they are ftp'd\n longterm_stor_path = config['DefaultStorage']['longterm_stor_path_video']\n ftp_directory = config['FTP']['Directory_Video']\n \n\n \n \n #set image path defaults if that is what is selected\n if Media == 'image':\n #First try the camera specific path\n if config[CamNum]['stor_path_image']:\n storage_path = config[CamNum]['stor_path_image']\n else:\n storage_path = config['DefaultStorage']['stor_path_image']\n \n #define folder where files go after they are ftp'd\n longterm_stor_path = config['DefaultStorage']['longterm_stor_path_image']\n ftp_directory = config['FTP']['Directory_Image']\n \n \n \n if not os.path.exists(longterm_stor_path):\n os.makedirs(longterm_stor_path)\n \n \n \n \n #set ftp defaults\n 
ftp_address = config['FTP']['Address']\n ftp_user = config['FTP']['User']\n ftp_pwd = config['FTP']['Pwd']\n ftp_maxtime = int(config['FTP']['Timeout_Video'])\n\n\n\n #search the directory and get a list of just the videos\n list_of_files = os.listdir(storage_path)\n list_of_media = [os.path.join(storage_path,i) for i in list_of_files if (i.endswith(('.mp4', '.mkv', 'jpg', 'jpeg')))]\n list_of_media = sorted(list_of_media)[::-1] #reorder so the newest video is first\n\n\n\n\n ftp_limit = min(ftp_limit,len(list_of_media)) #ensure the user defined number isn't bigger than the number of video files\n\n #print message if no videos will be uploaded\n if ftp_limit == 0:\n Log.printandlog(\"No media to upload\")\n \n #subset to a maximum of the limit set by the user at the beginning of the script \n ftp_list = list_of_media[0:ftp_limit]\n\n #loop through the videos\n for media in ftp_list:\n media_name = os.path.basename(media)\n \n #first check if file exists in the ftp'd folder, if it does then skip the transfer and delete the orginal\n if os.path.exists(os.path.join(longterm_stor_path, media_name)):\n os.remove(media)\n Log.printandlog(\"File {0} has already been ftpd\".format(media))\n \n else:\n try:\n Log.printandlog(\"Opening ftp session\\n\")\n session = ftplib.FTP(ftp_address, ftp_user, ftp_pwd, timeout = ftp_maxtime) \n session.cwd(ftp_directory)\n file = open(media, 'rb')\n Log.printandlog( \"Sending File: \" + media_name)\n session.storbinary('STOR ' + media_name, file)\n file.close()\n Log.printandlog(\"file sent to ftp\")\n \n #move the file\n os.rename(media,os.path.join(longterm_stor_path, media_name))\n \n Log.printandlog(\"Successful: File sent, local file has been moved to folder 'ftpd_vidoes'\")\n session.quit()\n except Exception as e:\n Log.printandlog(str(e))\n Log.printandlog(\"an error occured for file: \" + os.path.basename(media))\n \n \n \n \n \n#execute the function if the script is being explicitly called (and not imported as a module into other code)\nif __name__ == \"__main__\":\n\n #****************************************************************************\n #set logging file \n Log.initiatelog(\"FTPMedialog\")\n \n FtpNewmedias()\n","repo_name":"jimmybom/imgvel","sub_path":"scripts/FtpNewMedia.py","file_name":"FtpNewMedia.py","file_ext":"py","file_size_in_byte":5611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"23953811586","text":"from dotenv import load_dotenv, find_dotenv\nimport os\nimport pandas as pd\nimport requests\nfrom collections import defaultdict\n\n\ndef find_user_token():\n load_dotenv(find_dotenv())\n\n user_token = os.getenv(\"user_token\")\n return user_token\n\n\ndef find_user_id():\n load_dotenv(find_dotenv())\n\n user_id = os.getenv(\"user_id\")\n return user_id\n\n\ndef find_page_id():\n load_dotenv(find_dotenv())\n\n page_id = os.getenv(\"page_id\")\n return page_id\n\n\ndef find_page_token(user_id=None, user_token=None):\n if user_id is None:\n user_id = find_user_id()\n if user_token is None:\n user_token = find_user_token()\n\n page_token_url = (\n f\"https://graph.facebook.com/{user_id}/accounts?access_token={user_token}\"\n )\n\n res = requests.get(page_token_url)\n res_json = res.json()\n\n try:\n page_token = res_json[\"data\"][0][\"access_token\"]\n except KeyError:\n print(\"Error loading using user token - using saved page token\")\n page_token = os.getenv(\"page_token\")\n return page_token\n\n\ndef request_metrics(list_of_metrics, page_token=None):\n \"\"\"\n 
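# Hypothetical StationConfig.ini generator, inferred from the keys that the
# FtpNewMedia script above reads; every value shown here is a placeholder.
import configparser

cfg = configparser.ConfigParser()
cfg['DEFAULT'] = {'Station_Number': '1'}
cfg['Camera1'] = {'stor_path_video': '/data/cam1/video',
                  'stor_path_image': '/data/cam1/image'}
cfg['DefaultStorage'] = {'stor_path_video': '/data/video',
                         'stor_path_image': '/data/image',
                         'longterm_stor_path_video': '/data/ftpd_videos',
                         'longterm_stor_path_image': '/data/ftpd_images'}
cfg['FTP'] = {'Address': 'ftp.example.com', 'User': 'station', 'Pwd': 'secret',
              'Timeout_Video': '300', 'Directory_Video': '/videos',
              'Directory_Image': '/images'}
with open('StationConfig.ini', 'w') as handle:
    cfg.write(handle)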
Give me a list of metrics and a page token and I'll return the value (dated at the most recent time)\n Output can be given straight to pandas to pd.Dataframe.from_dict --> df.to_excel\n \"\"\"\n\n if page_token is None:\n page_token = find_page_token()\n\n base_url = \"https://graph.facebook.com\"\n\n object_id = find_page_id()\n metric_string = \",\".join(list_of_metrics)\n add_on_url = f\"/v10.0/{object_id}/insights/{metric_string}\"\n\n url = base_url + add_on_url\n\n params = {\n \"access_token\": page_token,\n \"show_description_from_api_doc\": \"true\",\n \"period\": \"days_28\",\n }\n\n res = requests.get(url, params=params)\n\n d = dict(res.json())\n\n return d\n\n\ndef data_to_dict(d):\n \"\"\"This should take in the DATA from a request (this can be done with res.json()['data'])\n And it will output a defaultdict of defaultdicts with the structure\n date --> metric --> value\n \"\"\"\n output_d = defaultdict(defaultdict)\n\n # This is how the output_d will be structured when it's returned\n # output_d['date']['metric'] = value\n\n # Iterating through each metric that we're given\n for ele in d:\n variable_name = ele[\"name\"]\n\n # Iterating through each value for a specific metric\n for values in ele[\"values\"]:\n # There should be a list of values, each for a different date\n # Structure of this is a list of dicts\n # We should turn metric -> date -> value into date -> metric -> value\n\n # We wrap this in a try because some metrics don't come as integers (they could be a list or a dict)\n try:\n if isinstance(values[\"value\"], int) == True:\n date = values[\"end_time\"]\n value = values[\"value\"]\n metric_name = variable_name\n\n output_d[date][metric_name] = value\n except Exception:\n continue\n\n return output_d\n\n\n# Parameter for index should be removed in df.to_excel\ndef dict_to_spreadsheet(d, filename=\"output.xlsx\"):\n \"\"\"This takes in a dict and outputs the spreadsheet to the given filename\"\"\"\n df = pd.DataFrame.from_dict(d)\n df.to_excel(filename)\n\n\ndef debug_token(token):\n\n payload = {\"input_token\": token}\n\n base_url = \"https://graph.facebook.com\"\n extension = f\"/v10.0/debug_token?input_token={token}\"\n\n url = base_url + extension\n\n res = requests.get(url, data=payload).json()\n\n return res\n\n\ndef find_published_posts(page_token=None, page_id=None):\n if page_token is None:\n page_token = find_page_token()\n\n if page_id is None:\n page_id = find_page_id()\n\n base_url = \"https://graph.facebook.com\"\n add_on_url = f\"/v10.0/{page_id}/published_posts\"\n url = base_url + add_on_url\n\n params = {\"access_token\": page_token}\n\n res = requests.get(url, params=params)\n\n d = res.json()\n\n post_ids = [ele[\"id\"] for ele in d[\"data\"]]\n\n return post_ids\n\n\nif __name__ == \"__main__\":\n print(\"Hello World!\")\n","repo_name":"pritanb/facebook_api","sub_path":"facebook_helpers.py","file_name":"facebook_helpers.py","file_ext":"py","file_size_in_byte":4132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"1135881207","text":"from scipy.optimize import root_scalar\r\nfrom scipy.integrate import quad\r\nimport numpy as np\r\nimport time\r\nimport matplotlib.pyplot as plt\r\nimport math\r\n\r\nimport arms6 as arms6\r\nimport Functions2 as func2\r\n\r\n################################\r\n# arms8\r\n################################\r\n# Fitting a robotic arm along a curve.\r\n# Improvement w.r.t. 
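# Self-contained sketch of the metric->date->value to date->metric->value
# inversion that data_to_dict() above performs, run on made-up API data.
from collections import defaultdict

data = [
    {'name': 'page_views', 'values': [{'end_time': '2021-04-01', 'value': 10}]},
    {'name': 'page_likes', 'values': [{'end_time': '2021-04-01', 'value': 3}]},
]
by_date = defaultdict(dict)
for metric in data:
    for entry in metric['values']:
        if isinstance(entry['value'], int):
            by_date[entry['end_time']][metric['name']] = entry['value']
print(dict(by_date))  # {'2021-04-01': {'page_views': 10, 'page_likes': 3}}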
arms6 (which had no arc length initialization)\r\n# In arms8 we initialize the t parameter of the gamma curve so that the arc length of\r\n# the curve between two consecutive t values is equal to the arm length.\r\n# The code below is compatible with any gamma curve. Therefore we need to solve \r\n# the arc_length problem. This is done using the scipy package.\r\n###############################################################################\r\n# To obtain the same results as in the thesis, please uncomment \r\n# the eight (2d) curve and/or the coil (3D) curve in arms6\r\n###############################################################################\r\n\r\ndef arc_length(t):\r\n '''Returns the arc length of d_gamma'''\r\n arc = np.linalg.norm(arms6.d_gamma(t))\r\n #print('arc length is: ', arc)\r\n return arc\r\n\r\n\r\ndef calc_arc_length(t_0,t_1):\r\n '''Calculates the arc length from t_0 to t_1'''\r\n result, error = quad(arc_length, t_0, t_1)\r\n # print('result is: ', result)\r\n return result\r\n\r\ndef initialize_t(arm_length, n_arms, info=False):\r\n '''Calculates the initial positions of t, using the arc length.\r\n Requires: arm_length, the length of one arm piece as float;\r\n n_arms, number of arms as integer;\r\n Optional: info, boolean; if True, prints info about the convergence of the root finding alg.'''\r\n t_0 = np.zeros(n_arms+1)\r\n \r\n for arm_it in range(1, n_arms+1):\r\n \r\n f = lambda t: calc_arc_length(t_0[arm_it-1], t) - arm_length\r\n \r\n zero_info = root_scalar(f, method='bisect', bracket=[t_0[arm_it-1], t_0[arm_it-1] + 2*arm_length ])\r\n \r\n if info:\r\n print('Finding root for arm_it ', arm_it,':')\r\n print(zero_info)\r\n \r\n t_0[arm_it] = zero_info.root\r\n \r\n return t_0[1:]\r\n\r\ndef Riem_grad_descent_with_arc_length_init(R_0, x_0, eta, eta_t, max_init_it, max_it, info=False, n_inter = 0):\r\n '''Same as Riemannian_grad_descent_multi_arms in arms6, but now with arc_length initialization.'''\r\n arms_length = np.linalg.norm(x_0)\r\n n_arms = np.shape(R_0)[0] #arc_length initialization of the t_0 variables\r\n \r\n R_it = np.zeros((max_init_it + max_it + 1, n_arms, n, n))\r\n y_it=np.zeros((max_init_it + max_it + 1, n_arms + 1, n, 1))\r\n t_it = np.zeros((max_init_it + max_it + 1, n_arms))\r\n \r\n print('t is initialized according to arc_length')\r\n t_0=initialize_t(arms_length, n_arms)\r\n print('the initial t are: ', t_0)\r\n \r\n R_it[0:max_init_it+1], y_it[0:max_init_it+1], t_it[0:max_init_it+1] = arms6.Riemannian_grad_descent_multi_arms(R_0, x_0, t_0, eta, eta_t, max_init_it, info = info, n_inter = n_inter, updating_t=False)\r\n \r\n print(max_init_it, ' initial iterations are performed.')\r\n print('R_0 fitted on fixed gamma curve positions. 
Now, we unfix t.')\r\n R_0 = R_it[max_init_it]\r\n t_0 = t_it[max_init_it]\r\n \r\n R_it[max_init_it:], y_it[max_init_it:], t_it[max_init_it:] = arms6.Riemannian_grad_descent_multi_arms(R_0, x_0, t_0, eta, eta_t, max_it, info =info, n_inter=n_inter)\r\n \r\n t_diff = t_it[max_init_it+max_it] - t_it[max_init_it]\r\n if np.any(t_diff<0):\r\n print('Error warning: arc length is shorter than the length of the arm')\r\n print(t_diff)\r\n \r\n print(max_it, ' iterations are performed with t unfixed.')\r\n print('In total we have done ', max_init_it+max_it, 'iterations.')\r\n print('Algorithm finished, returning R_it, y_it, t_it')\r\n \r\n return R_it, y_it, t_it\r\n\r\n\r\n##################################################################\r\n# main code below\r\n##################################################################\r\n \r\n\r\n# Setting simulation variables\r\nn=2 #dimension, please adjust the functions gamma and d_gamma accordingly\r\nn_arms = 18\r\nR_0 = arms6.identity_Rs(n, n_arms)\r\n#theta = 1/4 * np.pi\r\n#R_0[0] = np.array([[np.cos(theta), -np.sin(theta)],[np.sin(theta), np.cos(theta)]]) \r\nx_0=np.zeros((n,1))\r\nx_0[0,0]=0.5\r\narms_length = np.linalg.norm(x_0)\r\n\r\n# t_0=initialize_t(arms_length,n_arms) \r\n# print('print t_0')\r\n\r\n#eta=np.array([0.5, 0.5, 0.5, 0.5, 0.5])\r\neta=2\r\neta_t=0.1\r\nmax_init_it = 50\r\nmax_it = 150\r\nn_inter = 9\r\nn_to_plot=4\r\n\r\n# # printing some information\r\n# y=arms6.retrieve_axis_positions(R_0, x_0)\r\n# print('n_arms is: ', n_arms)\r\n# print('initial coordinate are ', y)\r\n# print('initial t is: ', t_0)\r\n\r\n\r\n#performing the algorithm\r\nstart_time = time.time()\r\nR_it, y_it, t_it = Riem_grad_descent_with_arc_length_init(R_0, x_0, eta, eta_t, max_init_it, max_it, n_inter = n_inter)\r\nexecution_time = time.time()-start_time\r\nprint('The algorithm ran for: ', execution_time, ' seconds')\r\nprint('So ', execution_time/(max_init_it+max_it), ' seconds for each iteration')\r\n\r\n#analyses of the results\r\n#calculating losses\r\nloss = arms6.calc_losses(y_it, t_it)\r\n\r\n#plotting results \r\n#calculating Riem_grad_norms\r\n\r\n# plotting arm\r\nplt.close('all')\r\nfig = plt.figure()\r\nif n==2:\r\n ax = fig.add_subplot(111)\r\nelif n==3:\r\n ax = fig.add_subplot(111, projection='3d')\r\ngamma_curve = arms6.gamma(np.linspace(0,1.5,10000))\r\nfunc2.plot_figure(y_it, gamma_curve, step=math.floor((max_init_it + max_it)/n_to_plot))\r\n#plotting fitting points on curve in the same figure\r\nif n==2:\r\n x_scatter = arms6.gamma(t_it[-1])[:, 0]\r\n y_scatter = arms6.gamma(t_it[-1])[:, 1]\r\n plt.scatter(x_scatter, y_scatter)\r\n plt.xlim([-1.2,1.2])\r\n plt.ylim([-1.2, 1.2])\r\n for arm_it in range(n_arms):\r\n ax.text(x_scatter[arm_it], y_scatter[arm_it], str(arm_it+1), ha='center', va='bottom')\r\nelif n==3:\r\n x_scatter = arms6.gamma(t_it[-1])[:, 0]\r\n y_scatter = arms6.gamma(t_it[-1])[:, 1] \r\n z_scatter = arms6.gamma(t_it[-1])[:, 2]\r\n ax.scatter(x_scatter,y_scatter,z_scatter)\r\n ax.set_xlim([-0.2,2.2])\r\n ax.set_ylim([-1.2,1.2])\r\n ax.set_zlim([0,1.5])\r\n for arm_it in range(n_arms):\r\n ax.text(x_scatter[arm_it][0], y_scatter[arm_it][0], z_scatter[arm_it][0], str(arm_it+1), ha='center', va='bottom')\r\n\r\n\r\n\r\n\r\nplt.title('Robotic arm with arc length initialization')\r\n\r\n\r\n#plotting loss functions\r\nplt.figure()\r\narms6.plot_loss(loss,plot_all_losses=False) #set plot_all_losses to true to plot losses for every arm piece\r\nplt.axvline(x=max_init_it, color='red', 
linestyle='dotted')\r\nplt.title('Loss with arc length initialization')\r\n\r\n\r\n#plotting max_dist\r\n# plt.figure()\r\n# arms6.plot_max_dist(y_it, t_it)\r\n\r\n#plotting Riem grad w.r.t. to R\r\n# plt.figure()\r\n# all_Riem_grad_R_norms, all_dt_norms = arms6.plot_Riem_grad_R_norms(y_it, t_it, R_it, x_0, n_inter=n_inter)\r\n\r\n#change dimensions to work with 3\r\n\r\n \r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"DeMilder/Thesis-Final-Code","sub_path":"End Code/arms8.py","file_name":"arms8.py","file_ext":"py","file_size_in_byte":7033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"33350592308","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Authors: Y. Jia \n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nsettings = dict()\n\n\ndef readme():\n with open('README.md') as f:\n return f.read()\n\n\ndef changelog():\n with open('CHANGELOG.md') as f:\n return f.read()\n\n\ndef requirements():\n with open('requirements.txt') as f:\n return f.read().splitlines()\n\n\nsettings.update(\n name='utils_py',\n version='0.3.0',\n description='Utility package for python',\n long_description=readme(),\n classifiers=[\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Utilities',\n ],\n keywords='python utility',\n url='https://github.com/ytjia/utils-py',\n author='Y. Jia',\n author_email='ytjia.zju@gmail.com',\n license='Apache 2.0',\n packages=['utils_py'],\n install_requires=requirements(),\n test_suite='nose.collector',\n tests_require=['nose'],\n include_package_data=True,\n zip_safe=False,\n)\n\nsetup(**settings)\n","repo_name":"ytjia/utils-py","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"36275674002","text":"import typing\nfrom collections import deque, defaultdict\nfrom collections.abc import Set\nfrom inspect import isclass\n\nfrom typish.functions._is_from_typing import is_from_typing\n\n\ndef get_origin(t: type) -> type:\n \"\"\"\n Return the origin of the given (generic) type. 
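# Standalone check of the arc-length initialization above on a unit circle,
# where the answer is known analytically: advancing the parameter by t sweeps
# arc length t, so the root for an arm of length 0.5 is exactly 0.5.
import numpy as np
from scipy.integrate import quad
from scipy.optimize import root_scalar

speed = lambda t: np.linalg.norm([-np.sin(t), np.cos(t)])  # |gamma'(t)| == 1
arc = lambda t0, t1: quad(speed, t0, t1)[0]
arm = 0.5
sol = root_scalar(lambda t: arc(0.0, t) - arm, method='bisect', bracket=[0.0, 2 * arm])
print(sol.root)  # ~0.5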
For example, for\n ``t=List[str]``, the result would be ``list``.\n :param t: the type of which the origin is to be found.\n :return: the origin of ``t`` or ``t`` if it is not generic.\n \"\"\"\n from typish.functions._get_simple_name import get_simple_name\n\n simple_name = get_simple_name(t)\n result = _type_per_alias.get(simple_name, None)\n if isclass(t) and not is_from_typing(t):\n # Get the origin in case of a parameterized generic.\n result = getattr(t, '__origin__', t)\n elif not result:\n result = getattr(typing, simple_name, t)\n return result\n\n\n_type_per_alias = {\n 'List': list,\n 'Tuple': tuple,\n 'Dict': dict,\n 'Set': set,\n 'FrozenSet': frozenset,\n 'Deque': deque,\n 'DefaultDict': defaultdict,\n 'Type': type,\n 'AbstractSet': Set,\n 'Optional': typing.Union,\n}\n","repo_name":"ramonhagenaars/typish","sub_path":"typish/functions/_get_origin.py","file_name":"_get_origin.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"71"} +{"seq_id":"13392688849","text":"def calculate(p, q):\n \n mod = 1000000007\n expo = 0\n expo = mod - 2\n \n while (expo):\n if (expo & 1):\n p = (p * q) % mod\n q = (q * q) % mod\n expo >>= 1\n \n return p\n\nfor p in range(1,100):\n for q in range(1,30):\n if calculate(p,q)==416666673:\n print(p,q)","repo_name":"abhik2003/my_cp","sub_path":"Break_This_Array.py","file_name":"Break_This_Array.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"18967915601","text":"import argparse\nimport logging\nimport sys\n\nimport yaml\n\nfrom ludwig.contrib import contrib_command, contrib_import\nfrom ludwig.globals import LUDWIG_VERSION\nfrom ludwig.hyperopt.run import hyperopt\nfrom ludwig.utils.defaults import default_random_seed\nfrom ludwig.utils.horovod_utils import set_on_master, is_on_master\nfrom ludwig.utils.misc_utils import check_which_model_definition\nfrom ludwig.utils.print_utils import logging_level_registry, print_ludwig\n\nlogger = logging.getLogger(__name__)\n\n\ndef hyperopt_cli(\n model_definition=None,\n model_definition_file=None,\n dataset=None,\n training_set=None,\n validation_set=None,\n test_set=None,\n training_set_metadata=None,\n data_format=None,\n experiment_name=\"hyperopt\",\n model_name=\"run\",\n # model_load_path=None,\n # model_resume_path=None,\n skip_save_training_description=True,\n skip_save_training_statistics=True,\n skip_save_model=True,\n skip_save_progress=True,\n skip_save_log=True,\n skip_save_processed_input=True,\n skip_save_unprocessed_output=True,\n skip_save_predictions=True,\n skip_save_eval_stats=True,\n skip_save_hyperopt_statistics=False,\n output_directory=\"results\",\n gpus=None,\n gpu_memory_limit=None,\n allow_parallel_threads=True,\n use_horovod=None,\n random_seed=default_random_seed,\n debug=False,\n **kwargs,\n):\n model_definition = check_which_model_definition(model_definition,\n model_definition_file)\n\n return hyperopt(\n model_definition=model_definition,\n dataset=dataset,\n training_set=training_set,\n validation_set=validation_set,\n test_set=test_set,\n training_set_metadata=training_set_metadata,\n data_format=data_format,\n experiment_name=experiment_name,\n model_name=model_name,\n # model_load_path=model_load_path,\n # model_resume_path=model_resume_path,\n skip_save_training_description=skip_save_training_description,\n skip_save_training_statistics=skip_save_training_statistics,\n 
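# Expected behaviour of get_origin() above: parameterized typing aliases map
# back to their concrete runtime origins via the alias table.
from typing import Dict, List

assert get_origin(List[str]) is list
assert get_origin(Dict[str, int]) is dict
assert get_origin(int) is int  # non-generic types pass through unchanged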
skip_save_model=skip_save_model,\n skip_save_progress=skip_save_progress,\n skip_save_log=skip_save_log,\n skip_save_processed_input=skip_save_processed_input,\n skip_save_unprocessed_output=skip_save_unprocessed_output,\n skip_save_predictions=skip_save_predictions,\n skip_save_eval_stats=skip_save_eval_stats,\n skip_save_hyperopt_statistics=skip_save_hyperopt_statistics,\n output_directory=output_directory,\n gpus=gpus,\n gpu_memory_limit=gpu_memory_limit,\n allow_parallel_threads=allow_parallel_threads,\n use_horovod=use_horovod,\n random_seed=random_seed,\n debug=debug,\n **kwargs,\n )\n\n\ndef cli(sys_argv):\n parser = argparse.ArgumentParser(\n description=\"This script searches for optimal Hyperparameters\",\n prog=\"ludwig hyperopt\",\n usage=\"%(prog)s [options]\",\n )\n\n # -------------------\n # Hyperopt parameters\n # -------------------\n parser.add_argument(\n \"-sshs\",\n \"--skip_save_hyperopt_statistics\",\n help=\"skips saving hyperopt statistics file\",\n action=\"store_true\",\n default=False,\n )\n\n # ----------------------------\n # Experiment naming parameters\n # ----------------------------\n parser.add_argument(\n \"--output_directory\",\n type=str,\n default=\"results\",\n help=\"directory that contains the results\",\n )\n parser.add_argument(\n \"--experiment_name\", type=str, default=\"hyperopt\",\n help=\"experiment name\"\n )\n parser.add_argument(\n \"--model_name\", type=str, default=\"run\", help=\"name for the model\"\n )\n\n # ---------------\n # Data parameters\n # ---------------\n parser.add_argument(\n '--dataset',\n help='input data file path. '\n 'If it has a split column, it will be used for splitting '\n '(0: train, 1: validation, 2: test), '\n 'otherwise the dataset will be randomly split'\n )\n parser.add_argument('--training_set', help='input train data file path')\n parser.add_argument(\n '--validation_set', help='input validation data file path'\n )\n parser.add_argument('--test_set', help='input test data file path')\n\n parser.add_argument(\n '--training_set_metadata',\n help='input metadata JSON file path. An intermediate preprocess file '\n 'containing the mappings of the input file created '\n 'the first time a file is used, in the same directory '\n 'with the same name and a .json extension'\n )\n\n parser.add_argument(\n '--data_format',\n help='format of the input data',\n default='auto',\n choices=['auto', 'csv', 'hdf5']\n )\n\n parser.add_argument(\n \"-sspi\",\n \"--skip_save_processed_input\",\n help=\"skips saving intermediate HDF5 and JSON files\",\n action=\"store_true\",\n default=False,\n )\n\n # ----------------\n # Model parameters\n # ----------------\n model_definition = parser.add_mutually_exclusive_group(required=True)\n model_definition.add_argument(\n \"-md\", \"--model_definition\", type=yaml.safe_load,\n help=\"model definition\"\n )\n model_definition.add_argument(\n \"-mdf\",\n \"--model_definition_file\",\n help=\"YAML file describing the model. 
Ignores --model_hyperparameters\",\n )\n\n parser.add_argument(\n \"-mlp\",\n \"--model_load_path\",\n help=\"path of a pretrained model to load as initialization\",\n )\n parser.add_argument(\n \"-mrp\",\n \"--model_resume_path\",\n help=\"path of the model directory to resume training of\",\n )\n parser.add_argument(\n \"-sstd\",\n \"--skip_save_training_description\",\n action=\"store_true\",\n default=False,\n help=\"disables saving the description JSON file\",\n )\n parser.add_argument(\n \"-ssts\",\n \"--skip_save_training_statistics\",\n action=\"store_true\",\n default=False,\n help=\"disables saving training statistics JSON file\",\n )\n parser.add_argument(\n \"-ssm\",\n \"--skip_save_model\",\n action=\"store_true\",\n default=False,\n help=\"disables saving weights each time the model improves. \"\n \"By default Ludwig saves weights after each epoch \"\n \"the validation metric improves, but if the model is really big \"\n \"that can be time consuming. If you do not want to keep \"\n \"the weights and just want to find out what performance a model \"\n \"can get with a set of hyperparameters, use this parameter to skip it\",\n )\n parser.add_argument(\n \"-ssp\",\n \"--skip_save_progress\",\n action=\"store_true\",\n default=False,\n help=\"disables saving weights after each epoch. By default Ludwig saves \"\n \"weights after each epoch for enabling resuming of training, but \"\n \"if the model is really big that can be time consuming and will \"\n \"save twice as much space, use this parameter to skip it\",\n )\n parser.add_argument(\n \"-ssl\",\n \"--skip_save_log\",\n action=\"store_true\",\n default=False,\n help=\"disables saving TensorBoard logs. By default Ludwig saves \"\n \"logs for TensorBoard, but if it is not needed, turning it off \"\n \"can slightly increase the overall speed\",\n )\n\n # ------------------\n # Runtime parameters\n # ------------------\n parser.add_argument(\n \"-rs\",\n \"--random_seed\",\n type=int,\n default=42,\n help=\"a random seed that is going to be used anywhere there is a call \"\n \"to a random number generator: data splitting, parameter \"\n \"initialization and training set shuffling\",\n )\n parser.add_argument(\n \"-g\", \"--gpus\", nargs=\"+\", type=int, default=None,\n help=\"list of gpus to use\"\n )\n parser.add_argument(\n '-gml',\n '--gpu_memory_limit',\n type=int,\n default=None,\n help='maximum memory in MB to allocate per GPU device'\n )\n parser.add_argument(\n \"-uh\",\n \"--use_horovod\",\n action=\"store_true\",\n default=False,\n help=\"uses horovod for distributed training\",\n )\n parser.add_argument(\n \"-dbg\",\n \"--debug\",\n action=\"store_true\",\n default=False,\n help=\"enables debugging mode\",\n )\n parser.add_argument(\n \"-l\",\n \"--logging_level\",\n default=\"info\",\n help=\"the level of logging to use\",\n choices=[\"critical\", \"error\", \"warning\", \"info\", \"debug\", \"notset\"],\n )\n\n args = parser.parse_args(sys_argv)\n\n args.logging_level = logging_level_registry[args.logging_level]\n logging.getLogger('ludwig').setLevel(\n args.logging_level\n )\n global logger\n logger = logging.getLogger('ludwig.hyperopt')\n\n set_on_master(args.use_horovod)\n\n if is_on_master():\n print_ludwig(\"Hyperopt\", LUDWIG_VERSION)\n\n hyperopt_cli(**vars(args))\n\n\nif __name__ == \"__main__\":\n contrib_import()\n contrib_command(\"hyperopt\", *sys.argv)\n 
cli(sys.argv[1:])\n","repo_name":"anshula2/ludwig","sub_path":"ludwig/hyperopt_cli.py","file_name":"hyperopt_cli.py","file_ext":"py","file_size_in_byte":9424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"71"}
+{"seq_id":"30246988757","text":"from dataclasses import dataclass\nfrom typing import List\n\n\n@dataclass\nclass DockerComposeProject:\n key: str\n project_name: str\n config_files: List[str]\n working_dir: str\n running_services: List[str]\n declared_services: List[str]\n\n\n# pylint: disable=too-many-instance-attributes\n@dataclass\nclass DockerContainerMetric:\n container_id: str\n container_name: str\n container_ports: list\n container_labels: list\n status: str = ''\n time_read: int = 0\n cpu_percentage: float = None\n cpu_percentage_previous: float = None\n memory_usage: int = None\n memory_usage_previous: int = None\n\n def get_column(self, column_name):\n if column_name == 'container_name':\n return self.container_name\n if column_name == 'cpu_percentage':\n return self.cpu_percentage if self.cpu_percentage is not None else 0\n if column_name == 'memory_usage':\n return self.memory_usage if self.memory_usage is not None else 0\n return None\n","repo_name":"micoli/gocker","sub_path":"gocker/gui/services/docker_container/dataclass.py","file_name":"dataclass.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"71"}
+{"seq_id":"27745780937","text":"#!/usr/bin/env python3\n\nimport re\n\n# each octet: 0-255, optionally with a leading 0 or 1\nregex_ipv4 = re.compile(r'^((25[0-5]|2[0-4]\\d|[01]?\\d{1,2})\\.){3}(25[0-5]|2[0-4]\\d|[01]?\\d{1,2})$')\nregex_ipv6 = re.compile(r'^([\\da-f]{1,4}:){7}[\\da-f]{1,4}$')\n\nn = int(input())\nfor i in range(n):\n string = input()\n if regex_ipv4.match(string):\n print('IPv4')\n elif regex_ipv6.match(string):\n print('IPv6')\n else:\n print('Neither')","repo_name":"prantostic/HackerRank","sub_path":"Regex/Applications/IP Address Validation/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"4453923668","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport collections\nimport torch\nimport torch.nn as nn\n\nimport skimage\nimport skimage.io\nfrom scipy.misc import imresize\nimport skimage.transform\n\nimport torchvision\n\nimport numpy as np\nimport json\nfrom PIL import Image, ImageFont, ImageDraw\n\ndef if_use_att(opt):\n # Decide whether to load the attention features according to the caption model\n if opt.caption_model in ['show_tell', 'all_img', 'fc'] and opt.vse_model in ['fc', 'fc2']:\n return False\n return True\n\n# Input: seq, N*D numpy array, with element 0 .. vocab_size. 
0 is END token.\ndef decode_sequence(ix_to_word, seq):\n N, D = seq.size()\n out = []\n for i in range(N):\n txt = ''\n for j in range(D):\n ix = seq[i,j]\n if ix > 0 :\n if j >= 1:\n txt = txt + ' '\n txt = txt + ix_to_word[str(ix.item())]\n else:\n break\n out.append(txt)\n return out\n\ndef to_contiguous(tensor):\n if tensor.is_contiguous():\n return tensor\n else:\n return tensor.contiguous()\n\nclass LanguageModelCriterion(nn.Module):\n def __init__(self):\n super(LanguageModelCriterion, self).__init__()\n\n def forward(self, input, target, mask):\n # truncate to the same size\n target = target[:, :input.size(1)]\n mask = mask[:, :input.size(1)]\n input = to_contiguous(input).view(-1, input.size(2))\n target = to_contiguous(target).view(-1, 1)\n mask = to_contiguous(mask).view(-1, 1)\n output = - input.gather(1, target) * mask\n output = torch.sum(output) / torch.sum(mask)\n\n return output\n\ndef set_lr(optimizer, lr):\n for group in optimizer.param_groups:\n group['lr'] = lr\n\ndef clip_gradient(optimizer, grad_clip):\n for group in optimizer.param_groups:\n for param in group['params']:\n if param.grad is not None:\n param.grad.data.clamp_(-grad_clip, grad_clip)\n\ndef load_image(file_name, size = None):\n img = skimage.io.imread('/mnt/ilcompf8d0/user/rluo/datasets/coco/' + file_name)\n\n if len(img.shape) == 2:\n img = img[:,:,np.newaxis]\n img = np.concatenate((img, img, img), axis=2)\n\n if size:\n img = imresize(img, size)\n\n img = img.astype('float32') / 255.0\n img = torch.from_numpy(img.transpose([2,0,1]))\n\n return img\n\ndef make_html(id, iteration):\n output = {}\n output['main_id'] = logger.to_htmls.keys()\n \n output['img_urls'] = []\n output['img_urls2'] = []\n output['captions'] = []\n for i in output['main_id']:\n output['img_urls'].append(['../../datasets/coco/'+_[0] for _ in logger.to_htmls[i]])\n output['img_urls2'].append(['../../datasets/coco/'+_[1] for _ in logger.to_htmls[i]])\n output['captions'].append([_[2] + '\\n' + _[3] for _ in logger.to_htmls[i]])\n\n if not os.path.isdir('htmls_'+id):\n os.mkdir('htmls_'+id)\n os.system('cp htmls/index.html htmls_'+id+'/')\n json.dump(output, open('htmls_'+id+'/result'+str(iteration)+'.json', 'w'))\n\ndef var_wrapper(x, cuda=True):\n if type(x) is dict:\n return {k: var_wrapper(v, cuda) for k,v in x.items()}\n if type(x) is list or type(x) is tuple:\n return [var_wrapper(v, cuda) for v in x]\n if isinstance(x, np.ndarray):\n x = torch.from_numpy(x)\n if cuda:\n x = x.cuda()\n else:\n x = x.cpu()\n return x\n\ndef load_state_dict(model, state_dict):\n model_state_dict = model.state_dict()\n keys = set(model_state_dict.keys() + state_dict.keys())\n for k in keys:\n if k not in state_dict:\n print('key %s in model.state_dict() not in loaded state_dict' %(k))\n elif k not in model_state_dict:\n print('key %s in loaded state_dict not in model.state_dict()' %(k))\n else:\n if state_dict[k].size() != model_state_dict[k].size():\n print('key %s size not match in model.state_dict() and loaded state_dict. 
Try to flatten and copy the values in common parts' %(k))\n model_state_dict[k].view(-1)[:min(model_state_dict[k].numel(), state_dict[k].numel())]\\\n .copy_(state_dict[k].view(-1)[:min(model_state_dict[k].numel(), state_dict[k].numel())])\n\n model.load_state_dict(model_state_dict)","repo_name":"ruotianluo/DiscCaptioning","sub_path":"misc/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4374,"program_lang":"python","lang":"en","doc_type":"code","stars":110,"dataset":"github-code","pt":"71"} +{"seq_id":"14227845082","text":"import numpy as np\nimport copy as c\nimport random as r\nimport math\n\nclass NeuralNetwork ():\n \n def __init__(self, n_hidden = []):\n self.n_input = None # It will be defined at first training\n self.n_hidden = n_hidden # Number of neurons per hidden layer\n self.n_output = None # It will be defined at first training\n self.weight = list() # It will be defined at first training\n self.structure = None # The number of neurons per layer\n \n def stats(self):\n \"\"\"It prints some infos about the Neural Network.\"\"\"\n \n print(\"\\nNumber of inputs: \"+str(self.n_input)+\" (bias included)\")\n print(\"Number of hidden neurons: \"+str(self.n_hidden))\n print(\"Number of outputs: \"+str(self.n_output))\n if len(self.weight) == 0:\n print(\"The current weights are: None\")\n else:\n print(\"The current weights are: \\n\")\n for w in self.weight:\n print(w,\"\\n\")\n print()\n \n def train(self, X, y, init_w = \"rand\", bias = True):\n \"\"\"\n It trains the Neural Network using X as input data and y as\n target data. It doesn't return anything.\n \n Args:\n X (numpy.ndarray): an array containing the input data.\n y (numpy.ndarray): an array containing the target data.\n init_w (Optional[double]): the initial value of the weight; if\n nothing is provided, then they are generated randomly between\n -1 and 1.\n bias(Optional[bool]): True is a bias neuron (fixed 1) has to be \n added; False otherwise. True by default. \n \n Returns:\n None\n \n \"\"\"\n \n # Extract infos from X and y\n n_size, n_feature = X.shape\n n_target = y.shape[1] # The number of outputs\n \n # Define n_input and n_output\n self.n_input = n_feature\n self.n_output = n_target # There are as many outputs as targets\n \n # Now we can use this info to define the NN structure\n self.structure = [self.n_input] + self.n_hidden + [self.n_output]\n \n # Generate the starting weights\n if init_w == \"rand\":\n for i in range(len(self.structure)-1):\n self.weight.append(np.random.rand(self.structure[i]+1 \n if bias \n else self.structure[i],\n self.structure[i+1])) \n else:\n init_w = float(init_w)\n for i in range(len(self.structure)-1):\n self.weight.append(np.full((self.structure[i]+1\n if bias\n else self.structure[i],\n self.structure[i+1]), \n init_w))\n \n \n # Fit the data and check the error\n result = self.fit(X)\n error = get_error(result, y)\n\n alpha = 0.8\n \n for _ in range(1000):\n \n old_weight = c.deepcopy(self.weight)\n \n for i_weight in range(len(self.structure)-1):\n for j in range(self.weight[i_weight].shape[0]):\n for k in range(self.weight[i_weight].shape[1]):\n if r.random() <= alpha:\n self.weight[i_weight][j][k] += (r.random()*2-1)/10\n \n new_result = self.fit(X)\n new_error = get_error(new_result,y)\n \n if new_error <= error:\n result = new_result\n error = new_error\n else:\n self.weight = c.deepcopy(old_weight)\n \n def fit(self, X, bias = True):\n \"\"\"\n It fits the dataset X on the trained Neural Network. 
\n \n Args:\n X (numpy.ndarray): array containing the input data.\n bias(Optional[bool]): True if a bias neuron (fixed to 1) has to be \n added; False otherwise. True by default.\n \n Returns:\n Z (numpy.ndarray): the value of the output layer\n \n \"\"\"\n # Temp\n Z = X\n \n # Matrix multiplication\n for layer in range(len(self.structure)-1):\n # Add bias neuron\n if bias:\n bias_neuron = np.array([[1] for _ in range(Z.shape[0])])\n Z = np.hstack((Z,bias_neuron)) \n \n S = Z.dot(np.array(self.weight[layer]))\n \n #if layer != len(self.structure)-2: # If not the last layer\n \n Z = np.vectorize(sigmoid)(S) # Hidden neurons use sigmoid func\n #else:\n # Z = np.vectorize(step)(S) # Output neurons use step func\n \n if len(Z.shape) < 2: # If the result is a 1d array (it should be)\n Z = np.reshape(Z,[Z.shape[0],1])\n return(Z)\n \n\ndef get_error(output, target):\n \"\"\"\n Given two numpy arrays, it calculates the sum of the squared\n differences between corresponding cells and returns it.\n \n Args:\n output (numpy.ndarray): an array containing the values of \n the output neurons of the neural network.\n target (numpy.ndarray): an array containing the target values\n for a given dataset.\n \n Returns:\n error (double): the sum of the squared differences of the two arrays\n \n \"\"\"\n error = 0.0\n \n if output.shape != target.shape:\n print(\"Arrays' shapes do not match!\") # Should throw an error instead\n return None\n \n if (output == target).all():\n return error\n \n nrow, ncol = output.shape\n for i in range(ncol):\n for j in range(nrow):\n error += (output[j,i] - target[j,i])**2\n return error\n\ndef sigmoid(x):\n \"\"\"Simple logistic sigmoid function.\"\"\"\n return 1 / (1 + math.exp(-x))\n\ndef step(x):\n \"\"\"Simple step function. Note: it returns an int, not a bool.\"\"\"\n if x >= 0:\n return 1\n else:\n return 0","repo_name":"alessandrosp/simple-nn","sub_path":"simplenn/neural_network.py","file_name":"neural_network.py","file_ext":"py","file_size_in_byte":6249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"17579980511","text":"import datetime, json\nfrom bxml.builder import Builder\nfrom bl.dict import Dict\nfrom bl.string import String\nfrom bsql.json_encoder import JSONEncoder\n\n\nclass Record(Dict):\n \"\"\"A record from the database.\"\"\"\n\n def __init__(self, db, **args):\n self.__dict__['db'] = db\n Dict.__init__(self, **args)\n\n def __repr__(self):\n \"\"\"represent the object in a form that would enable it to be recreated\"\"\"\n sep = ', '\n return \"%s(db, %s)\" % (\n self.__class__.__name__,\n sep.join([\"%s=%r\" % (k, self.get(k)) for k in self.keys() if self.get(k) is not None]),\n )\n\n def __str__(self):\n sep = '\\n\\t'\n return \"%s:%s%s\" % (\n self.__class__.__name__,\n sep,\n sep.join([\"%s: %s\" % (k, self.get(k)) for k in self.keys() if self.get(k) is not None]),\n )\n\n def key_tuple(self, key=[]):\n \"\"\"can be used as a dict key.\"\"\"\n if key == []:\n key = self.keys()\n return tuple([self[k] for k in key])\n\n def dict(self, keys=None):\n return dict(**{k: self[k] for k in keys or self.keys()})\n\n def json(self, keys=None, indent=2, cls=None):\n return json.dumps(self.dict(keys=keys), indent=indent, cls=cls or JSONEncoder)\n\n def element(self, keys=None, namespace=None, minimize=True):\n B = Builder.single(namespace)\n return B(\n String(self.__class__.__name__).camelsplit().hyphenify().lower(),\n **{k: str(self.get(k)) for k in keys or self.keys() if self.get(k) or not minimize}\n )\n\n def using_keys(self, 
keys=[]):\n \"\"\"return a copy of this record that only has the given keys. \n Useful for generating a keyword-argument dictionary for select()\n \"\"\"\n d = self.__class__(self.db)\n for k in keys:\n d[k] = self[k]\n return d\n\n def using_prefix(self, prefix, **args):\n \"\"\"return an instance containing the args that have the given prefix, with prefix removed\"\"\"\n r = self.__class__(self.db)\n for k in [k for k in args.keys() if k[: len(prefix)] == prefix]:\n r[k[len(prefix) :]] = args[k]\n return r\n","repo_name":"BlackEarth/bsql","sub_path":"bsql/record.py","file_name":"record.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"37703467012","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import stats\nimport matplotlib.patches as mpatches\n\n#Open each .txt file\nfN = open('si_nd1.txt', 'r')\nfS = open('si_sm1.txt', 'r')\nfL = open('si_ls1.txt', 'r')\n\n#put the data into an array\nndvi_data = np.loadtxt(fN)\nsm_data = np.loadtxt(fS)\nlst_data = np.loadtxt(fL)\n\n#normalise the data using z scores\nz_ndvi = stats.zscore(ndvi_data, ddof=1)\nz_sm = stats.zscore(sm_data, ddof=1)\nz_lst = stats.zscore(lst_data, ddof=1)\nprint(z_ndvi)\nprint('')\nprint(z_sm)\nprint('')\nprint(z_lst)\nprint('')\n\nfig, ax = plt.subplots()\n\ndates = []\nfor i in range(10,21):\n for f in range(1,13):\n #print(len(f, 2000i, sep = '.')\n month = str(f)\n year = str(i)\n date = month+\".\"+year\n dates.append(date)\n \n \n#aesthetics\nplt.style.use(\"seaborn-dark\")\nfor param in ['figure.facecolor', 'axes.facecolor', 'savefig.facecolor']:\n plt.rcParams[param] = '#212946' # bluish dark grey\nfor param in ['text.color', 'axes.labelcolor', 'xtick.color', 'ytick.color']:\n plt.rcParams[param] = '0.9' # very light grey\nax.grid(color='#2A3459') # bluish dark grey, but slightly lighter than background \n\nred_patch = mpatches.Patch(color=('xkcd:red'), label='LST')\ngreen_patch = mpatches.Patch(color=('xkcd:neon green'), label='NDVI')\ncyan_patch = mpatches.Patch(color=('xkcd:cyan'), label='Soil Moisture')\nplt.legend(loc = 2)\nplt.legend(handles=([red_patch,green_patch,cyan_patch]), loc = 'upper left')\n\n\n#plot the normalised data (colours match the legend patches above)\nax.plot(dates, z_ndvi, color = ('xkcd:neon green'))\nax.plot(dates, z_sm, color = ('xkcd:cyan'))\nax.plot(dates, z_lst, color = ('xkcd:red'))\n\nax.xaxis.set_major_locator(plt.MaxNLocator(12))\n\nax.set_ylim(-2.2,3.2)\nplt.xlabel(\"Time (M/YY)\")\nplt.ylabel(\"Variables\")\nax.set_title('Variable Comparison for the Sichuan region')\n\n\n\n#save the figure\nplt.tight_layout()\nplt.savefig(\"Sichuan_comp\", dpi = 400)","repo_name":"NicJMurray/NDVIChina","sub_path":"combined.py","file_name":"combined.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"86284802388","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nfrom sofia_redux.scan.custom.sofia.info.telescope import SofiaTelescopeInfo\n\nfrom astropy import units, log\nimport numpy as np\n\n__all__ = ['HawcPlusTelescopeInfo']\n\n\nclass HawcPlusTelescopeInfo(SofiaTelescopeInfo):\n\n def __init__(self):\n \"\"\"\n Initialize the HAWC+ telescope information.\n\n Contains information on the SOFIA specific telescope parameters such as\n zenith angle, boresight coordinates, and tracking status, as well as\n the focus offset for HAWC+.\n \"\"\"\n super().__init__()\n 
self.focus_t_offset = np.nan * units.Unit('um')\n\n def apply_configuration(self):\n \"\"\"\n Update telescope information with FITS header information.\n\n Updates the information by taking the following keywords from the\n FITS header::\n\n FCSTOFF - The total focus offset (um)\n\n Returns\n -------\n None\n \"\"\"\n options = self.options\n if options is None:\n return\n\n self.focus_t_offset = options.get_float(\"FCSTOFF\") * units.Unit('um')\n if not np.isnan(self.focus_t_offset):\n log.debug(f\"Focus T Offset: {self.focus_t_offset}\")\n super().apply_configuration()\n","repo_name":"SOFIA-USRA/sofia_redux","sub_path":"sofia_redux/scan/custom/hawc_plus/info/telescope.py","file_name":"telescope.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"71"} +{"seq_id":"20175795453","text":"import requests\r\nimport json\r\nimport pandas as pd\r\nfrom datetime import datetime as dt\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n\r\n\r\n##User Entry Conditions\r\nticker = input(\"Enter ticker of a company in the S&P500 you'd like to review: \") #Ticker\r\nticker = ticker.upper()\r\ndays = input('Enter Lookback period in days: ')\r\n\r\n\r\n##Sentiment Request\r\n# def sentiment():\r\n# try:\r\n# URL = f'http://thebaite.com:81/api/v1/stocks/metric?m=sentiment&d={days}&s={ticker}&token=' ##Modify for your specific API Token\r\n# r = requests.get(URL)\r\n# rJson = r.json()\r\n# except Exception as e:\r\n# print(e)\r\n# r = json.dumps(rJson)\r\n# s = json.loads(r)\r\n# df_sentiment = pd.DataFrame(s)\r\n# df_sentiment['t'] = pd.to_datetime(df_sentiment['t'],unit='s')\r\n# df_sentiment.rename(columns={'d': 'Sentiment Score', 't': 'Date'}, inplace=True)\r\n# df_sentiment.plot(y = 'Sentiment Score', x= 'Date', kind = 'line')\r\n# plt.title('Sentiment Index Chart')\r\n# plt.show()\r\n \r\n\r\n# sentiment()\r\n\r\n# ##Attention Request:\r\ndef attention():\r\n try:\r\n URL = f'http://thebaite.com:81/api/v1/stocks/metric?m=attention&d={days}&s={ticker}&token=' ##Modify for your specific API Token\r\n r = requests.get(URL)\r\n rJson = r.json()\r\n except Exception as e:\r\n print(e)\r\n r = json.dumps(rJson)\r\n s = json.loads(r)\r\n df_sentiment = pd.DataFrame(s)\r\n df_sentiment['t'] = pd.to_datetime(df_sentiment['t'],unit='s')\r\n df_sentiment.rename(columns={'d': 'Attention Score', 't': 'Date'}, inplace=True)\r\n df_sentiment.plot(y = 'Attention Score', x = 'Date', kind = 'line')\r\n plt.title('Attention Index Chart')\r\n plt.show()\r\nattention()\r\n\r\n\r\n# ##Index Sentiment Request","repo_name":"Mike-Gui/Stock-Event-Inferencer","sub_path":"Secondary/bAIte_demo1.py","file_name":"bAIte_demo1.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"17989121441","text":"import gym\nimport config\n\nclass Environment:\n def __init__(self):\n self.env = gym.make(config.env_name)\n self.game_name = config.env_name\n self.dims = (config.screen_width, config.screen_height)\n self.screen_height = config.screen_height\n self.screen_width = config.screen_width\n self.observation = None\n self.screen = None\n self.reward = 0\n self.terminal = True\n self.display = config.display\n self.info = None\n\n def step(self, action):\n cumulated_reward = 0\n start_lives = self.lives\n for _ in range(config.action_repeat):\n self.observation, self.reward, self.terminal, self.info = self.env.step(action)\n\n self.reward = max(-1, 
min(self.reward, 1))\n cumulated_reward += self.reward\n\n if config.proceed_train and start_lives > self.lives:\n cumulated_reward -= 1\n self.terminal = True\n break\n\n self.reward = cumulated_reward\n return self.observation, self.reward, self.terminal, self.info\n\n def render(self):\n if(self.display):\n self.env.render()\n\n def reset(self):\n self.observation = self.env.reset()\n return self.observation\n\n @property\n def action_size(self):\n return self.env.action_space.n\n @property\n def state(self):\n return self.observation, self.reward, self.terminal\n\n @property\n def lives(self):\n if(self.info is None):\n return 0\n else:\n return self.info['ale.lives']","repo_name":"Joe251/DQN","sub_path":"dqn/environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"37675313466","text":"# Letter Phone\n# Asked in: \n# Facebook\n# Epic systems\n# Given a digit string, return all possible letter combinations that the number could represent.\n\n# A mapping of digit to letters (just like on the telephone buttons) is given below.\n\n\n\n# The digit 0 maps to 0 itself.\n# The digit 1 maps to 1 itself.\n\n# Input: Digit string \"23\"\n# Output: [\"ad\", \"ae\", \"af\", \"bd\", \"be\", \"bf\", \"cd\", \"ce\", \"cf\"].\n# Make sure the returned strings are lexicographically sorted.\n\n\nclass Solution:\n MAP = {\n '0': '0',\n '1': '1',\n '2': 'abc',\n '3': 'def',\n '4': 'ghi',\n '5': 'jkl',\n '6': 'mno',\n '7': 'pqrs',\n '8': 'tuv',\n '9': 'wxyz',\n }\n\n # @param A : string\n # @return a list of strings\n def letterCombinations(self, digits):\n mapping = {'2': 'abc', '3': 'def', '4': 'ghi', '5': 'jkl', \n '6': 'mno', '7': 'pqrs', '8': 'tuv', '9': 'wxyz', '0': '0', '1': '1'}\n if len(digits) == 0:\n return []\n if len(digits) == 1:\n return list(mapping[digits[0]])\n prev = self.letterCombinations(digits[:-1])\n additional = mapping[digits[-1]]\n return [s + c for s in prev for c in additional]\n","repo_name":"mutalip/interviewbit-coding-ninja","sub_path":"05Backtracking/LetterPhone.py","file_name":"LetterPhone.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"74401198630","text":"from .cli import Cli\nfrom .pip_handler import PipHandler\nfrom .file_handler import FileHandler\nfrom .lambda_handler import LambdaHandler\n\n\ndef handle():\n cli = Cli()\n\n pip_handler = PipHandler(cli=cli)\n\n pip_handler.install_dependencies()\n\n file_handler = FileHandler(file=\"python.zip\", cli=cli)\n\n file_handler.zip_folders()\n\n binary_zip = file_handler.zip_to_binary()\n\n lambda_handler = LambdaHandler(zipped_file=binary_zip, cli=cli)\n\n lambda_handler.publish_layer()\n\n\nif __name__ == \"__main__\":\n handle()\n","repo_name":"brianamaral/aws_layer_publisher","sub_path":"layer_builder/handle.py","file_name":"handle.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"10283309690","text":"# -*- coding: utf-8 -*-\n\"\"\"\nZenodo\n======\n\nDefines the objects implementing support for a *Zenodo* community and its\nrecords:\n\n- :class:`colour_datasets.Record`\n- :class:`colour_datasets.Community`\n\"\"\"\n\nfrom __future__ import division, unicode_literals\nimport json\nimport os\nimport six\nimport shutil\nimport setuptools.archive_util\nimport stat\nimport 
tempfile\nimport textwrap\nfrom collections import Mapping\nfrom six.moves import html_parser\nfrom six.moves import urllib\nfrom pprint import pformat\n\nfrom colour.utilities import warning\n\nfrom colour_datasets.utilities import url_download, json_open\nfrom colour_datasets.records import Configuration\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2019 - Colour Developers'\n__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = 'colour-science@googlegroups.com'\n__status__ = 'Production'\n\n__all__ = ['Record', 'Community']\n\n\nclass Record(object):\n \"\"\"\n Defines an object storing a *Zenodo* record data and providing methods to\n sync it in a local repository.\n\n Attributes\n ----------\n data\n configuration\n repository\n id\n title\n\n Methods\n -------\n __init__\n __str__\n __repr__\n from_id\n synced\n pull\n remove\n\n Parameters\n ----------\n data : unicode\n *Zenodo* record data.\n configuration : Configuration\n *Colour - Datasets* configuration.\n\n Examples\n --------\n >>> record = Record(json_open('https://zenodo.org/api/records/3245883'))\n\n # Doctests skip for Python 2.x compatibility.\n >>> record.id # doctest: +SKIP\n '3245883'\n >>> record.title # doctest: +SKIP\n 'Camera Spectral Sensitivity Database'\n \"\"\"\n\n def __init__(self, data, configuration=None):\n\n self._data = data\n self._configuration = (Configuration()\n if configuration is None else configuration)\n\n @property\n def data(self):\n \"\"\"\n Getter and setter property for the *Zenodo* record data.\n\n Parameters\n ----------\n value : dict\n Value to set the *Zenodo* record data with.\n\n Returns\n -------\n dict\n *Zenodo* record data.\n \"\"\"\n\n return self._data\n\n @property\n def configuration(self):\n \"\"\"\n Getter and setter property for the *Colour - Datasets* configuration.\n\n Parameters\n ----------\n value : Configuration\n Value to set the *Colour - Datasets* configuration with.\n\n Returns\n -------\n unicode\n *Colour - Datasets* configuration.\n \"\"\"\n\n return self._configuration\n\n @property\n def repository(self):\n \"\"\"\n Getter and setter property for the *Zenodo* record local repository.\n\n Parameters\n ----------\n value : unicode\n Value to set the the *Zenodo* record local repository with.\n\n Returns\n -------\n unicode\n *Zenodo* record local repository.\n \"\"\"\n\n return os.path.join(self._configuration.repository, self.id)\n\n @property\n def id(self):\n \"\"\"\n Getter and setter property for the *Zenodo* record id.\n\n Parameters\n ----------\n value : unicode\n Value to set the *Zenodo* record id with.\n\n Returns\n -------\n unicode\n *Zenodo* record id.\n \"\"\"\n\n return six.text_type(self._data['id'])\n\n @property\n def title(self):\n \"\"\"\n Getter and setter property for the *Zenodo* record title.\n\n Parameters\n ----------\n value : unicode\n Value to set the *Zenodo* record title with.\n\n Returns\n -------\n unicode\n *Zenodo* record title.\n \"\"\"\n\n return self._data['metadata']['title']\n\n def __str__(self):\n \"\"\"\n Returns a formatted string representation of the *Zenodo* record.\n\n Returns\n -------\n unicode\n Formatted string representation.\n\n Examples\n --------\n >>> data = json_open('https://zenodo.org/api/records/3245883')\n >>> print('\\\\n'.join(str(Record(data)).splitlines()[:4]))\n Camera Spectral Sensitivity Database - 1.0.0\n ============================================\n \n Record ID : 3245883\n \"\"\"\n\n def 
strip_html(text):\n \"\"\"\n Strips *HTML* tags from given text.\n\n Parameters\n ----------\n text : unicode\n Text to strip the *HTML* tags from.\n\n Returns\n -------\n unicode\n Text with *HTML* tags stripped off.\n \"\"\"\n\n text = text.replace('&nbsp;', ' ').replace('\\n\\n', ' ')\n\n parts = []\n parser = html_parser.HTMLParser()\n parser.handle_data = parts.append\n parser.feed(text)\n\n return ''.join(parts)\n\n metadata = self._data['metadata']\n authors = '; '.join(\n [creator['name'] for creator in metadata['creators']])\n files = self._data['files']\n\n representation = (\n '{0} - {1}\\n'\n '{2}\\n\\n'\n 'Record ID : {3}\\n'\n 'Authors : {4}\\n'\n 'License : {5}\\n'\n 'DOI : {6}\\n'\n 'Publication Date : {7}\\n'\n 'URL : {8}\\n\\n'\n 'Description\\n-----------\\n\\n{9}\\n\\n'\n 'Files\\n-----\\n\\n{10}'.format(\n metadata['title'],\n metadata['version'],\n '=' * (len(self.title) + 3 + len(metadata['version'])),\n self.id,\n authors,\n metadata['license']['id'],\n metadata['doi'],\n metadata['publication_date'],\n self._data['links']['html'],\n '\\n'.join(\n textwrap.wrap(strip_html(metadata['description']), 79)),\n '\\n'.join([\n '- {0} : {1}'.format(file_data['key'],\n file_data['links']['self'])\n for file_data in sorted(files, key=lambda x: x['key'])\n ]),\n ))\n\n if six.PY2:\n representation = representation.encode('utf-8')\n\n return representation\n\n def __repr__(self):\n \"\"\"\n Returns an evaluable string representation of the *Zenodo* record.\n\n Returns\n -------\n unicode\n Evaluable string representation.\n\n Examples\n --------\n >>> data = json_open('https://zenodo.org/api/records/3245883')\n\n # Doctests skip for Python 2.x compatibility.\n >>> print('\\\\n'.join(repr(Record(data)).splitlines()[:4]))\n ... # doctest: +SKIP\n Record(\n {'conceptdoi': '10.5281/zenodo.3245882',\n 'conceptrecid': '3245882',\n 'created': '2019-06-14T09:34:15.765924+00:00',\n \"\"\"\n\n return '{0}(\\n{1},\\n{2}\\n)'.format(\n self.__class__.__name__, '\\n'.join([\n ' {0}'.format(line)\n for line in pformat(self._data).splitlines()\n ]), ' Configuration(\\n{0}\\n )'.format('\\n'.join([\n ' {0}'.format(line)\n for line in pformat(self._configuration).splitlines()\n ])))\n\n @staticmethod\n def from_id(id_, configuration=None, retries=3):\n \"\"\"\n :class:`colour_datasets.Record` class factory that builds an instance\n using given *Zenodo* record id.\n\n Parameters\n ----------\n id_ : unicode\n *Zenodo* record id.\n configuration : Configuration, optional\n *Colour - Datasets* configuration.\n retries : int, optional\n Number of retries in case where a networking error occurs.\n\n Returns\n -------\n Record\n *Zenodo* record data.\n\n Examples\n --------\n # Doctests skip for Python 2.x compatibility.\n >>> Record.from_id('3245883').title\n ... 
# doctest: +SKIP\n 'Camera Spectral Sensitivity Database'\n \"\"\"\n\n configuration = (Configuration()\n if configuration is None else configuration)\n\n if not os.path.exists(configuration.repository):\n os.makedirs(configuration.repository)\n\n record_url = '{0}/records/{1}'.format(configuration.api_url, id_)\n\n return Record(json_open(record_url, retries), configuration)\n\n def synced(self):\n \"\"\"\n Returns whether the *Zenodo* record data is synced to the local\n repository.\n\n Returns\n -------\n bool\n Whether the *Zenodo* record data is synced to the local repository.\n\n Examples\n --------\n >>> from colour_datasets.utilities import suppress_stdout\n >>> record = Record.from_id('3245883')\n >>> with suppress_stdout():\n ... record.pull()\n >>> record.synced()\n True\n >>> record.remove()\n >>> record.synced()\n False\n \"\"\"\n\n downloads_directory = os.path.join(\n self.repository, self._configuration.downloads_directory)\n deflate_directory = os.path.join(self.repository,\n self._configuration.deflate_directory)\n return all([\n os.path.exists(downloads_directory),\n os.path.exists(deflate_directory),\n ])\n\n def pull(self, use_urls_txt_file=True, retries=3):\n \"\"\"\n Pulls the *Zenodo* record data to the local repository.\n\n Parameters\n ----------\n use_urls_txt_file : bool, optional\n Whether to use the *urls.txt* file: if such a file is present in\n the *Zenodo* record data, the urls it defines take precedence over\n the record data files. The latter will be used in the eventuality\n where the urls are not available.\n retries : int, optional\n Number of retries in case where a networking error occurs or the\n *MD5* hash is not matching.\n\n Examples\n --------\n >>> from colour_datasets.utilities import suppress_stdout\n >>> record = Record.from_id('3245883')\n >>> record.remove()\n >>> with suppress_stdout():\n ... 
record.pull()\n >>> record.synced()\n True\n \"\"\"\n\n print('Pulling \"{0}\" record content...'.format(self.title))\n\n if not os.path.exists(self._configuration.repository):\n os.makedirs(self._configuration.repository)\n\n downloads_directory = os.path.join(\n self.repository, self._configuration.downloads_directory)\n if not os.path.exists(downloads_directory):\n os.makedirs(downloads_directory)\n\n # As much as possible, the original file urls are used, those are\n # given by the content of :attr:`URLS_TXT_FILE` attribute file.\n urls_txt = None\n for file_data in self.data['files']:\n if file_data['key'] == self._configuration.urls_txt_file:\n urls_txt = file_data\n break\n\n def _urls_download(urls):\n \"\"\"\n Downloads given urls.\n \"\"\"\n\n for url, md5 in urls.items():\n filename = os.path.join(\n downloads_directory,\n urllib.parse.unquote(url.split('/')[-1]))\n url_download(url, filename, md5.split(':')[-1], retries)\n\n try:\n if use_urls_txt_file and urls_txt:\n urls = {}\n urls_txt_file = tempfile.mktemp()\n url_download(urls_txt['links']['self'], urls_txt_file,\n urls_txt['checksum'].split(':')[-1], retries)\n\n with open(urls_txt_file, 'r') as json_file:\n urls_txt_json = json.load(json_file)\n for url, md5 in urls_txt_json['urls'].items():\n urls[url] = md5.split(':')[-1]\n\n shutil.copyfile(\n urls_txt_file,\n os.path.join(downloads_directory,\n self._configuration.urls_txt_file))\n\n _urls_download(urls)\n else:\n raise ValueError(\n '\"{0}\" file was not found in record data!'.format(\n self._configuration.urls_txt_file))\n except (urllib.error.URLError, ValueError) as error:\n warning('An error occurred using urls from \"{0}\" file: {1}\\n'\n 'Switching to record urls...'.format(\n self._configuration.urls_txt_file, error))\n\n urls = {}\n for file_data in self.data['files']:\n if file_data['key'] == self._configuration.urls_txt_file:\n continue\n\n urls[file_data['links']['self']] = (\n file_data['checksum'].split(':')[-1])\n\n _urls_download(urls)\n\n deflate_directory = os.path.join(self.repository,\n self._configuration.deflate_directory)\n if os.path.exists(deflate_directory):\n shutil.rmtree(deflate_directory, onerror=_remove_readonly)\n\n shutil.copytree(downloads_directory, deflate_directory)\n\n for filename in os.listdir(deflate_directory):\n filename = os.path.join(deflate_directory, filename)\n if not os.path.isfile(filename):\n continue\n\n basename, extension = os.path.splitext(filename)\n basename = os.path.basename(basename)\n if extension.lower() in ('.zip', '.tar', '.gz', '.bz2'):\n if basename.lower().endswith('.tar'):\n basename = basename.rsplit('.', 1)[0]\n\n basename = basename.replace('.', '_')\n unpacking_directory = os.path.join(deflate_directory, basename)\n\n print('Unpacking \"{0}\" archive...'.format(filename))\n setuptools.archive_util.unpack_archive(filename,\n unpacking_directory)\n os.remove(filename)\n\n with open(os.path.join(self.repository, 'record.json'),\n 'w') as record_json:\n json.dump(self.data, record_json, indent=4, sort_keys=True)\n\n def remove(self):\n \"\"\"\n Removes the *Zenodo* record data local repository.\n\n Examples\n --------\n >>> from colour_datasets.utilities import suppress_stdout\n >>> record = Record.from_id('3245883')\n >>> with suppress_stdout():\n ... 
record.pull()\n >>> record.remove()\n >>> record.synced()\n False\n \"\"\"\n\n if os.path.exists(self.repository):\n shutil.rmtree(self.repository, onerror=_remove_readonly)\n\n\nclass Community(Mapping):\n \"\"\"\n Defines an object storing a *Zenodo* community data.\n\n Attributes\n ----------\n data\n configuration\n repository\n records\n\n Methods\n -------\n __init__\n __str__\n __repr__\n __getitem__\n __iter__\n __len__\n from_id\n synced\n pull\n remove\n\n Parameters\n ----------\n data : unicode\n *Zenodo* community data.\n configuration : Configuration\n *Colour - Datasets* configuration.\n\n Examples\n --------\n >>> community_data = json_open(\n ... 'https://zenodo.org/api/communities/colour-science-datasets')\n >>> records_data = json_open(\n ... 'https://zenodo.org/api/records/?q=communities:'\n ... 'colour-science-datasets')\n >>> community = Community({\n ... 'community': community_data,\n ... 'records': records_data,\n ... })\n\n # Doctests skip for Python 2.x compatibility.\n >>> community['3245883'].title # doctest: +SKIP\n 'Camera Spectral Sensitivity Database'\n \"\"\"\n\n def __init__(self, data, configuration=None):\n self._data = data\n self._configuration = (Configuration()\n if configuration is None else configuration)\n\n hits = self._data['records']['hits']['hits']\n self._records = {\n six.text_type(hit['id']): Record(hit, self._configuration)\n for hit in hits\n }\n\n @property\n def data(self):\n \"\"\"\n Getter and setter property for the *Zenodo* community data.\n\n Parameters\n ----------\n value : dict\n Value to set the *Zenodo* community data.with.\n\n Returns\n -------\n dict\n *Zenodo* community data.\n \"\"\"\n\n return self._data\n\n @property\n def configuration(self):\n \"\"\"\n Getter and setter property for the *Colour - Datasets* configuration.\n\n Parameters\n ----------\n value : Configuration\n Value to set the *Colour - Datasets* configuration with.\n\n Returns\n -------\n unicode\n *Colour - Datasets* configuration.\n \"\"\"\n\n return self._configuration\n\n @property\n def repository(self):\n \"\"\"\n Getter and setter property for the *Zenodo* community local repository.\n\n Parameters\n ----------\n value : unicode\n Value to set the the *Zenodo* community local repository with.\n\n Returns\n -------\n unicode\n *Zenodo* community local repository.\n \"\"\"\n\n return self._configuration.repository\n\n @property\n def records(self):\n \"\"\"\n Getter and setter property for the *Zenodo* community records.\n\n Parameters\n ----------\n value : dict\n Value to set the *Zenodo* community records with.\n\n Returns\n -------\n dict\n *Zenodo* community records.\n \"\"\"\n\n return self._records\n\n def __str__(self):\n \"\"\"\n Returns a formatted string representation of the *Zenodo* community.\n\n Returns\n -------\n unicode\n Formatted string representation.\n\n Examples\n --------\n >>> community = Community.from_id('colour-science-datasets-tests')\n >>> print('\\\\n'.join(str(community).splitlines()[:6]))\n ... 
# doctest: +ELLIPSIS\n colour-science-datasets-tests\n =============================\n \n Datasets : ...\n Synced : ...\n URL : https://zenodo.org/communities/\\\ncolour-science-datasets-tests/\n \"\"\"\n\n datasets = '\\n'.join([\n '[{0}] {1} : {2}'.format('x' if dataset.synced() else ' ',\n dataset.id, dataset.title)\n for dataset in sorted(self.values(), key=lambda x: x.title)\n ])\n representation = ('{0}\\n'\n '{1}\\n\\n'\n 'Datasets : {2}\\n'\n 'Synced : {3}\\n'\n 'URL : {4}\\n\\n'\n 'Datasets\\n--------\\n\\n'\n '{5}'.format(\n self._configuration.community,\n '=' * len(self._configuration.community),\n len(self),\n len([\n dataset for dataset in self.values()\n if dataset.synced()\n ]),\n self._data['community']['links']['html'],\n datasets,\n ))\n\n if six.PY2:\n representation = representation.encode('utf-8')\n\n return representation\n\n def __repr__(self):\n \"\"\"\n Returns an evaluable string representation of the *Zenodo* community.\n\n Returns\n -------\n unicode\n Evaluable string representation.\n\n Examples\n --------\n >>> community = Community.from_id('colour-science-datasets-tests')\n\n # Doctests skip for Python 2.x compatibility.\n >>> print('\\\\n'.join(repr(community).splitlines()[:4]))\n ... # doctest: +SKIP\n Community(\n {'community': {'created': '2019-06-09T10:45:47.999975+00:00',\n 'curation_policy': '',\n 'description': '',\n \"\"\"\n\n return '{0}(\\n{1},\\n{2}\\n)'.format(\n self.__class__.__name__, '\\n'.join([\n ' {0}'.format(line)\n for line in pformat(self._data).splitlines()\n ]), ' Configuration(\\n{0}\\n )'.format('\\n'.join([\n ' {0}'.format(line)\n for line in pformat(self._configuration).splitlines()\n ])))\n\n def __getitem__(self, id_):\n \"\"\"\n Returns the *Zenodo* record at given id.\n\n Parameters\n ----------\n id_ : unicode\n *Zenodo* recordid.\n\n Returns\n -------\n Record\n *Zenodo* record at given id.\n\n Examples\n --------\n >>> community = Community.from_id('colour-science-datasets-tests')\n\n # Doctests skip for Python 2.x compatibility.\n >>> community['3245883'].title # doctest: +SKIP\n 'Camera Spectral Sensitivity Database'\n \"\"\"\n\n return self._records[id_]\n\n def __iter__(self):\n \"\"\"\n Iterates through the *Zenodo* community records.\n\n Returns\n -------\n iterator\n *Zenodo* community records iterator.\n\n Examples\n --------\n # Doctests skip for Python 2.x compatibility.\n >>> for record in Community.from_id('colour-science-datasets-tests'):\n ... print(record) # doctest: +SKIP\n \"\"\"\n\n return iter(self._records)\n\n def __len__(self):\n \"\"\"\n Returns *Zenodo* community records count.\n\n Returns\n -------\n int\n *Zenodo* community records count.\n\n Examples\n --------\n # Doctests skip for Python 2.x compatibility.\n >>> len(Community.from_id('colour-science-datasets-tests'))\n ... 
# doctest: +SKIP\n 3\n \"\"\"\n\n return len(self._records)\n\n @staticmethod\n def from_id(id_, configuration=None, retries=3):\n \"\"\"\n :class:`colour_datasets.Community` class factory that builds an\n instance using given *Zenodo* community id.\n\n Parameters\n ----------\n id_ : unicode\n *Zenodo* community id.\n configuration : Configuration, optional\n *Colour - Datasets* configuration.\n retries : int, optional\n Number of retries in case where a networking error occurs.\n\n Returns\n -------\n Community\n *Zenodo* community data.\n\n Examples\n --------\n >>> community = Community.from_id('colour-science-datasets-tests')\n\n # Doctests skip for Python 2.x compatibility.\n >>> community['3245883'].title # doctest: +SKIP\n 'Camera Spectral Sensitivity Database'\n \"\"\"\n\n configuration = (Configuration()\n if configuration is None else configuration)\n configuration.community = id_\n\n if not os.path.exists(configuration.repository):\n os.makedirs(configuration.repository)\n\n community_url = '{0}/communities/{1}'.format(configuration.api_url,\n configuration.community)\n # NOTE: Retrieving 512 datasets at most. This should cover needs for\n # the foreseeable future. There is likely an undocumented hard limit on\n # \"Zenodo\" server side.\n records_url = '{0}/records/?q=communities:{1}&size=512'.format(\n configuration.api_url, configuration.community)\n\n community_json_filename = os.path.join(\n configuration.repository,\n '{0}-community.json'.format(configuration.community))\n records_json_filename = os.path.join(\n configuration.repository,\n '{0}-records.json'.format(configuration.community))\n\n try:\n community_data = json_open(community_url, retries)\n records_data = json_open(records_url, retries)\n\n for key, value in {\n community_json_filename: community_data,\n records_json_filename: records_data,\n }.items():\n with open(key, 'w') as json_file:\n json.dump(value, json_file, indent=4, sort_keys=True)\n except (urllib.error.URLError, ValueError):\n warning('Retrieving the \"{0}\" community data failed '\n 'after {1} attempts, '\n 'attempting to use cached local data!'.format(\n configuration.community, retries))\n if not all([\n os.path.exists(community_json_filename),\n os.path.exists(records_json_filename),\n ]):\n raise RuntimeError('Local files were not found, aborting!')\n\n with open(community_json_filename) as json_file:\n community_data = json.loads(json_file.read())\n\n with open(records_json_filename) as json_file:\n records_data = json.loads(json_file.read())\n\n data = {\n 'community': community_data,\n 'records': records_data,\n }\n\n return Community(data, configuration)\n\n def synced(self):\n \"\"\"\n Returns whether the *Zenodo* community data is synced to the local\n repository.\n\n Returns\n -------\n bool\n Whether the *Zenodo* community data is synced to the local\n repository.\n\n Examples\n --------\n >>> from colour_datasets.utilities import suppress_stdout\n >>> community = Community.from_id('colour-science-datasets-tests')\n >>> with suppress_stdout():\n ... 
community.pull() # doctest: +SKIP\n >>> community.synced() # doctest: +SKIP\n True\n >>> community.remove()\n >>> community.synced()\n False\n \"\"\"\n\n return all([record.synced() for record in self._records.values()])\n\n def pull(self, use_urls_txt_file=True, retries=3):\n \"\"\"\n Pulls the *Zenodo* community data to the local repository.\n\n Parameters\n ----------\n use_urls_txt_file : bool, optional\n Whether to use the *urls.txt* file: if such a file is present in\n a *Zenodo* record data, the urls it defines take precedence over\n the record data files. The latter will be used in the eventuality\n where the urls are not available.\n retries : int, optional\n Number of retries in case where a networking error occurs or the\n *MD5* hash is not matching.\n\n Examples\n --------\n >>> from colour_datasets.utilities import suppress_stdout\n >>> community = Community.from_id('colour-science-datasets-tests')\n >>> community.remove()\n >>> with suppress_stdout():\n ... community.pull() # doctest: +SKIP\n >>> community.synced() # doctest: +SKIP\n True\n \"\"\"\n\n if not os.path.exists(self._configuration.repository):\n os.makedirs(self._configuration.repository)\n\n for record in self._records.values():\n record.pull(use_urls_txt_file, retries)\n\n def remove(self):\n \"\"\"\n Removes the *Zenodo* community data local repository.\n\n Examples\n --------\n >>> from colour_datasets.utilities import suppress_stdout\n >>> community = Community.from_id('colour-science-datasets-tests')\n >>> with suppress_stdout():\n ... 
and validate number\n sms_number = post.get('sms_number', '').strip(' ')\n sanitized = phone_validation.phone_format(\n sms_number,\n request.geoip.country_code,\n force_format='E164',\n raise_exception=False,\n )\n tocheck_number = sanitized or sms_number\n\n trace = check_res['trace'].filtered(lambda r: r.sms_number == tocheck_number)[:1] if tocheck_number else False\n # compute opt-out / blacklist information\n lists_optout = request.env['mailing.list'].sudo()\n lists_optin = request.env['mailing.list'].sudo()\n unsubscribe_error = False\n if tocheck_number and trace:\n mailing_list_ids = trace.mass_mailing_id.contact_list_ids\n if mailing_list_ids:\n subscriptions = request.env['mailing.subscription'].sudo().search([\n ('list_id', 'in', mailing_list_ids.ids),\n ('contact_id.phone_sanitized', '=', tocheck_number),\n ])\n subscriptions.write({'opt_out': True})\n lists_optout = subscriptions.mapped('list_id')\n else:\n blacklist_rec = request.env['phone.blacklist'].sudo().add(tocheck_number)\n blacklist_rec._message_log(\n body=_('Blacklist through SMS Marketing unsubscribe (mailing ID: %s - model: %s)',\n trace.mass_mailing_id.id, trace.mass_mailing_id.mailing_model_id.display_name))\n lists_optin = request.env['mailing.subscription'].sudo().search([\n ('contact_id.phone_sanitized', '=', tocheck_number),\n ('list_id', 'not in', mailing_list_ids.ids),\n ('opt_out', '=', False),\n ]).mapped('list_id')\n elif tocheck_number:\n unsubscribe_error = _('Number %s not found', tocheck_number)\n else:\n unsubscribe_error = _('Invalid number %s', post.get('sms_number', ''))\n\n return request.render('mass_mailing_sms.blacklist_number', {\n 'mailing_id': mailing_id,\n 'trace_code': trace_code,\n 'sms_number': sms_number,\n 'lists_optin': lists_optin,\n 'lists_optout': lists_optout,\n 'unsubscribe_error': unsubscribe_error,\n })\n\n @http.route('/r//s/', type='http', auth=\"public\")\n def sms_short_link_redirect(self, code, sms_id_int, **post):\n if sms_id_int:\n trace_id = request.env['mailing.trace'].sudo().search([('sms_id_int', '=', int(sms_id_int))]).id\n else:\n trace_id = False\n\n request.env['link.tracker.click'].sudo().add_click(\n code,\n ip=request.httprequest.remote_addr,\n country_code=request.geoip.country_code,\n mailing_trace_id=trace_id\n )\n redirect_url = request.env['link.tracker'].get_url_from_code(code)\n if not redirect_url:\n raise NotFound()\n return request.redirect(redirect_url, code=301, local=False)\n","repo_name":"odoo/odoo","sub_path":"addons/mass_mailing_sms/controllers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4655,"program_lang":"python","lang":"en","doc_type":"code","stars":31745,"dataset":"github-code","pt":"71"} +{"seq_id":"41699669138","text":"#!/usr/bin/env python3\n\n\"\"\"\nfile\n\"\"\"\n\nimport csv\nimport math\nfrom typing import List, Dict, Union\n\n\nclass Server:\n \"\"\"Server class to paginate a database of popular baby names.\n \"\"\"\n DATA_FILE = \"Popular_Baby_Names.csv\"\n\n def __init__(self):\n self.__dataset = None\n\n def dataset(self) -> List[List]:\n \"\"\"Cached dataset\n \"\"\"\n if self.__dataset is None:\n with open(self.DATA_FILE) as f:\n reader = csv.reader(f)\n dataset = [row for row in reader]\n self.__dataset = dataset[1:]\n\n return self.__dataset\n\n def get_page(self, page: int = 1, page_size: int = 10) -> List[List]:\n \"\"\"Get a page of the dataset\n \"\"\"\n assert isinstance(page, int) and page > 0, \"page should be int > 0\"\n assert isinstance(page_size, int) and page_size > 0, 
\"page_size,int>0\"\n\n start_idx, end_idx = index_range(page, page_size)\n dataset = self.dataset()\n return dataset[start_idx:end_idx]\n\n def get_hyper(self, page: int = 1, page_size: int = 10) -> Dict[\n str, Union[int, List[List], None]]:\n \"\"\"Get a page of the dataset with hypermedia metadata\n \"\"\"\n data_page = self.get_page(page, page_size)\n total_pages = int(math.ceil(len(self.dataset()) / page_size))\n next_page = page + 1 if page < total_pages else None\n prev_page = page - 1 if page > 1 else None\n\n return {\n \"page_size\": len(data_page),\n \"page\": page,\n \"data\": data_page,\n \"next_page\": next_page,\n \"prev_page\": prev_page,\n \"total_pages\": total_pages\n }\n\n\ndef index_range(page: int, page_size: int) -> tuple:\n \"\"\"Return a tuple of start and end indexes based on page and page_size\n \"\"\"\n start_idx = (page - 1) * page_size\n end_idx = page * page_size\n return start_idx, end_idx\n","repo_name":"lloydkioko/alx-backend","sub_path":"0x00-pagination/2-hypermedia_pagination.py","file_name":"2-hypermedia_pagination.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"4296628698","text":"# SW Expert Academy 0222_Tree - 1231 \n\nimport sys\nsys.stdin = open('input.txt', 'r')\n\nT = int(input())\nfor t in range(1, T + 1):\n N = int(input())\n lst = [i for i in range(0, N+1)]\n result = [0] * (N+1)\n last = 1\n def inorder(v):\n global last\n if v > N:\n return\n inorder(v*2)\n result[v] = last\n last += 1\n inorder(v*2+1)\n inorder(1)\n print(f'#{t}', result[1], result[N//2])","repo_name":"ksykma/ssafy_algorithm_study","sub_path":"2023.02.23/20230223-2.py","file_name":"20230223-2.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"16634803188","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import Slider\n\n\nfig, ax = plt.subplots()\nplt.subplots_adjust(bottom=0.35)\n\nx = np.arange(0.0, 1.0, 0.001)\ny = 5 * np.cos(6 * np.pi * x)\nl, = plt.plot(x, y)\n\nax.set_xlabel('X')\nax.set_ylabel('Y')\n\nax_freq = plt.axes([0.25, 0.15, 0.65, 0.03])\nax_amplitude = plt.axes([0.25, 0.1, 0.65, 0.03])\n\n\nfreq = Slider(ax_freq, 'Frequency', 0.0, 20.0, 3)\namplitude = Slider(ax_amplitude, 'Amplitude', 0.0, 10.0, 5, valstep=1.0)\n\n\ndef update(val):\n\tf = freq.val\n\ta = amplitude.val\n\tl.set_ydata(a*np.sin(2*np.pi*f*x))\n\n\nfreq.on_changed(update)\namplitude.on_changed(update)\n\nplt.show()\n","repo_name":"lszwed5/Systems-Engineering","sub_path":"IV/PSD/Lista00/plotter_slider.py","file_name":"plotter_slider.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"7775255818","text":"\r\nprint(\"Enter the number of processes: \")\r\nn=int(input())\r\nburstTime = []\r\nwaitTime=[]\r\nturnAT=[]\r\navgWT=0\r\navgTAT=0\r\nprocesses=[]\r\nfor i in range(0,n):\r\n\tprocesses.insert(i,i+1)\r\nprint(\"Enter the burst time of processes: \\n\")\r\nburstTime=list(map(int, raw_input().split()))\r\nfor i in range(0,len(burstTime)-1):\r\n\tfor j in 
range(0,len(burstTime)-i-1):\r\n\t\tif(burstTime[j]>burstTime[j+1]):\r\n\t\t\ttemp=burstTime[j]\r\n\t\t\tburstTime[j]=burstTime[j+1]\r\n\t\t\tburstTime[j+1]=temp\r\n\t\t\ttemp=processes[j]\r\n\t\t\tprocesses[j]=processes[j+1]\r\n\t\t\tprocesses[j+1]=temp\r\nwaitTime.insert(0,0)\r\nturnAT.insert(0,burstTime[0])\r\nfor i in range(1,len(burstTime)):\r\n\twaitTime.insert(i,waitTime[i-1]+burstTime[i-1])\r\n\tturnAT.insert(i,waitTime[i]+burstTime[i])\r\n\tavgWT+=waitTime[i]\r\n\tavgTAT+=turnAT[i]\r\navgWT=float(avgWT)/n\r\n# include the first process's turnaround time, which the loop above skips\r\navgTAT=float(avgTAT+turnAT[0])/n\r\nprint(\"\\n\")\r\nprint(\"Process\\t\\tBurst Time\\t\\tWaiting Time\\t\\t\\tTurnAround Time\\t\")\r\nfor i in range(0,n):\r\n\tprint(str(processes[i])+\"\\t\\t\"+str(burstTime[i])+\"\\t\\t\"+str(waitTime[i])+\"\\t\\t\"+str(turnAT[i]))\r\n\tprint(\"\\n\")\r\nprint(\"Average waiting time: \"+ str(avgWT))\r\nprint(\"Average TurnAround time: \"+ str(avgTAT))","repo_name":"aimen003/Assignment5","sub_path":"sjf.py","file_name":"sjf.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
import ScanBunch\n\n\ndef _null_scan_validator(scan):\n return True\n\n\ndef _null_scan_cacher(scan):\n return None\n\n\nITERATION_MODE_GROUPED = \"grouped\"\nITERATION_MODE_SINGLE = \"single\"\n\n\nScanGroupType = TypeVar(\"ScanGroupType\")\nScanType = TypeVar(\"ScanType\")\n\n\nclass _ScanIteratorImplBase(Generic[ScanType, ScanGroupType]):\n \"\"\"\n Internal base class for scan iteration strategies\n\n Attributes\n ----------\n iterator : :class:`Iterator`\n An iterator that produces a raw scan which will be packaged by :attr:`scan_packer`\n scan_cacher : :class:`Callable`\n A callable that will cache the scan, or a no-op\n scan_packer : :class:`Callable`\n A callable that will package a raw scan object into a :class:`Scan` object\n scan_validator : :class:`Callable`\n A callable that will check if a raw scan object is valid or not for filtering\n out unwanted entries, or a no-op\n \"\"\"\n\n iterator: Iterator[Any]\n _producer: Union[Iterator[ScanType], Iterator[ScanGroupType]]\n\n def __init__(self, iterator, scan_packer, scan_validator=None, scan_cacher=None, **kwargs):\n if scan_validator is None:\n scan_validator = _null_scan_validator\n if scan_cacher is None:\n scan_cacher = _null_scan_cacher\n self.iterator = iterator\n self.scan_packer = scan_packer\n self.scan_validator = scan_validator\n self.scan_cacher = scan_cacher\n self._producer = None\n\n def __iter__(self):\n return self\n\n def next(self):\n \"\"\"Py2 compatible iterator\"\"\"\n if self._producer is None:\n self._producer = self._make_producer()\n return next(self._producer)\n\n def __next__(self):\n return self.next() # pylint: disable=not-callable\n\n def _make_producer(self) -> Union[Iterator[ScanType], Iterator[ScanGroupType]]:\n raise NotImplementedError()\n\n @property\n def iteration_mode(self):\n \"\"\"The iteration mode of the strategy, \"grouped\" or \"single\".\"\"\"\n raise NotImplementedError()\n\n @classmethod\n def from_scan_source(cls, iterator, scan_source, **kwargs):\n \"\"\"\n Create an iterator strategy for `iterator` from `scan_source`\n\n Parameters\n ----------\n iterator : :class:`Iterable`\n The iterator over raw scan data to consume\n scan_source : :class:`~.ScanIterator`\n The data extraction wrapper to provide with an iteration strategy\n **kwargs\n Forwarded to :meth:`__init__`\n \"\"\"\n return cls(iterator, scan_source._make_scan, scan_source._validate, scan_source._cache_scan, **kwargs)\n\n\nclass _SingleScanIteratorImpl(_ScanIteratorImplBase[ScanType, ScanGroupType], Iterator[ScanType]):\n \"\"\"\n Iterate over individual scans.\n\n The default strategy when MS1 scans are missing.\n \"\"\"\n\n _producer: Iterator[ScanType]\n\n @property\n def iteration_mode(self):\n return ITERATION_MODE_SINGLE\n\n def _make_producer(self) -> Iterator[ScanType]:\n _make_scan = self.scan_packer\n _validate = self.scan_validator\n _cache_scan = self.scan_cacher\n for scan in self.iterator:\n packed = _make_scan(scan)\n if not _validate(packed):\n continue\n _cache_scan(packed)\n yield packed\n\n\nclass _FakeGroupedScanIteratorImpl(_SingleScanIteratorImpl[ScanType, ScanGroupType], Iterator[ScanGroupType]):\n \"\"\"\n Mimics the interface of :class:`_GroupedScanIteratorImpl` for\n scan sequences which only support single scans, or which do not\n guarantee sequential access to precursor/product collections.\n \"\"\"\n\n _producer: Iterator[ScanGroupType]\n\n @property\n def iteration_mode(self):\n return ITERATION_MODE_GROUPED\n\n def _make_producer(self) -> Iterator[ScanGroupType]:\n generator = 
super(_FakeGroupedScanIteratorImpl, self)._make_producer()\n for scan in generator:\n if scan.ms_level == 1:\n yield ScanBunch(scan, [])\n else:\n yield ScanBunch(None, [scan])\n\n\nclass _GroupedScanIteratorImpl(_ScanIteratorImplBase[ScanType, ScanGroupType], Iterator[ScanGroupType]):\n \"\"\"\n Iterate over related scan bunches.\n\n The default strategy when MS1 scans are known to be\n present, even if MSn scans are not.\n \"\"\"\n\n _producer: Iterator[ScanGroupType]\n\n @property\n def iteration_mode(self):\n return ITERATION_MODE_GROUPED\n\n def _make_producer(self) -> Iterator[ScanGroupType]:\n _make_scan = self.scan_packer\n _validate = self.scan_validator\n _cache_scan = self.scan_cacher\n\n precursor_scan = None\n product_scans = []\n\n current_level = 1\n\n for scan in self.iterator:\n packed = _make_scan(scan)\n if not _validate(packed):\n continue\n _cache_scan(packed)\n if packed.ms_level > 1:\n # increasing ms level\n if current_level < packed.ms_level:\n current_level = packed.ms_level\n # decreasing ms level\n elif current_level > packed.ms_level:\n current_level = packed.ms_level\n product_scans.append(packed)\n elif packed.ms_level == 1:\n if current_level > 1:\n if precursor_scan is not None:\n precursor_scan.product_scans = list(product_scans)\n yield ScanBunch(precursor_scan, product_scans)\n else:\n if precursor_scan is not None:\n precursor_scan.product_scans = list(product_scans)\n yield ScanBunch(precursor_scan, product_scans)\n precursor_scan = packed\n product_scans = []\n else:\n raise ValueError(\"Could not interpret MS Level %r\" %\n (packed.ms_level,))\n if precursor_scan is not None:\n yield ScanBunch(precursor_scan, product_scans)\n\n\nclass _GenerationTracker(object):\n _generations_type = list if PY2 else deque\n\n def __init__(self):\n self.generation_to_id = defaultdict(set)\n self.id_to_generation = dict()\n self.generations = self._generations_type()\n\n def _add_generation(self, generation):\n bisect.insort_left(self.generations, generation)\n\n def clear(self):\n self.generation_to_id.clear()\n self.id_to_generation.clear()\n self.generations = self._generations_type()\n\n def add(self, identifier, generation):\n if generation not in self.generation_to_id:\n self._add_generation(generation)\n self.generation_to_id[generation].add(identifier)\n self.id_to_generation[identifier] = generation\n\n def remove(self, identifier):\n if identifier not in self.id_to_generation:\n return False\n generation = self.id_to_generation[identifier]\n self.generation_to_id[generation].remove(identifier)\n del self.id_to_generation[identifier]\n if len(self.generation_to_id[generation]) == 0:\n self.generations.remove(generation)\n return True\n\n def older_than(self, generation):\n result = []\n purged = []\n for gen in self.generations:\n if gen < generation:\n members = self.generation_to_id[gen]\n result.extend(members)\n purged.append(gen)\n else:\n break\n for r in result:\n del self.id_to_generation[r]\n for gen in purged:\n del self.generation_to_id[gen]\n self.generations.remove(gen)\n return result\n\n\nclass _InterleavedGroupedScanIteratorImpl(_GroupedScanIteratorImpl[ScanType, ScanGroupType]):\n \"\"\"Iterate over related scan bunches.\n\n The default strategy when MS1 scans are known to be\n present, even if MSn scans are not.\n \"\"\"\n\n buffering: int\n ms1_buffer: Deque[ScanType]\n product_mapping: DefaultDict[str, List[ScanType]]\n\n generation_tracker: _GenerationTracker\n orphans: List[ScanType]\n\n passed_first_ms1: bool\n highest_ms_level: int\n 
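# Count of groups emitted so far; deque_group() compares each tracked precursor's generation against (generation - buffering) to flush products whose MS1 scan never matched.\n 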
generation: int\n\n def __init__(self, iterator, scan_packer, scan_validator=None, scan_cacher=None, buffering=5):\n super(_InterleavedGroupedScanIteratorImpl, self).__init__(\n iterator, scan_packer, scan_validator, scan_cacher)\n if buffering < 2:\n raise ValueError(\"Interleaved buffering must be greater than 1\")\n self.buffering = buffering\n self.ms1_buffer = deque()\n self.product_mapping = defaultdict(list)\n self.generation_tracker = _GenerationTracker()\n self.orphans = []\n self.passed_first_ms1 = False\n self.highest_ms_level = 0\n self.generation = 0\n\n def pop_precursor(self, precursor_id: str) -> List[ScanType]:\n self.generation_tracker.remove(precursor_id)\n return self.product_mapping.pop(precursor_id, [])\n\n def deque_group(self, flush_products=False) -> ScanGroupType:\n \"\"\"Remove the next scan from the MS1 queue, grouped with\n any associated MSn scans.\n\n Parameters\n ----------\n flush_products : bool\n Whether to flush all the remaining product scans with this\n group.\n\n Returns\n -------\n ScanBunch\n \"\"\"\n precursor = self.ms1_buffer.popleft()\n _empty = []\n products = self.pop_precursor(precursor.id)\n if None in self.product_mapping:\n products += self.product_mapping.pop(None, _empty)\n\n # Flush out older precursors' products that haven't turned up yet, they\n # probably aren't coming soon.\n for prec_id in self.generation_tracker.older_than(self.generation - self.buffering):\n products.extend(self.product_mapping.pop(prec_id, _empty))\n\n # Look for MSn for n > 2\n if self.highest_ms_level > 2:\n extra_blocks = [products]\n for _ in range(self.highest_ms_level - 2):\n new_block = []\n for prod in extra_blocks[-1]:\n new_block.extend(self.pop_precursor(prod.id))\n if not new_block:\n break\n extra_blocks.append(new_block)\n if len(extra_blocks) > 1:\n products = extra_blocks[0]\n for block in extra_blocks[1:]:\n products.extend(block)\n\n if flush_products:\n if self.product_mapping:\n lingering = {s.id for ss in self.product_mapping.values() for s in ss}\n missing = set(self.product_mapping)\n unclaimed_precursors = missing - lingering\n warnings.warn(\"Lingering Product Sets For %r!\" %\n (sorted(unclaimed_precursors), ))\n for _, value in self.product_mapping.items():\n products += value\n self.product_mapping.clear()\n self.generation_tracker.clear()\n precursor.product_scans = products\n\n # Collect any MSn spectra which pre-date the first precursor if they are encountered before\n # the first precursor is found.\n if not self.passed_first_ms1 and self.ms1_buffer:\n current_ms1_time = precursor.scan_time\n for prec_id, prods in list(self.product_mapping.items()):\n masked = set()\n for i, prod in enumerate(prods):\n if prod.scan_time <= current_ms1_time:\n products.append(prod)\n masked.add(i)\n # We've only removed some of the products under this precursor, so just\n # remove those products from mapping.\n if len(masked) < len(prods):\n prods = [v for i, v in enumerate(prods) if i not in masked]\n self.product_mapping[prec_id] = prods\n else:\n # Otherwise we must have completely consumed the products of this\n # precursor, so we need to remove it from the tracking.\n self.pop_precursor(prec_id)\n self.passed_first_ms1 = True\n\n\n self.generation += 1\n return ScanBunch(precursor, products)\n\n def add_product(self, scan: ScanType):\n \"\"\"Add MSn scan to :attr:`product_mapping` for the associated\n precursor scan ID.\n\n Parameters\n ----------\n scan : :class:`~.ScanBase`\n The scan to track.\n \"\"\"\n pinfo = scan.precursor_information\n 
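# Resolve which precursor owns this product scan: prefer the scan's own precursor link, otherwise fall back to the most recently buffered MS1 scan (or None when no MS1 has been seen yet).\n 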
if pinfo is None:\n precursor_id = None\n else:\n precursor_id = pinfo.precursor_scan_id\n if precursor_id is None:\n try:\n precursor_id = self.ms1_buffer[-1].id\n except IndexError:\n precursor_id = None\n if precursor_id not in self.product_mapping:\n self.generation_tracker.add(precursor_id, self.generation)\n self.product_mapping[precursor_id].append(scan)\n\n def add_precursor(self, scan: ScanType) -> bool:\n \"\"\"Add MS1 scan to :attr:`ms1_buffer`\n\n Parameters\n ----------\n scan : :class:`~.ScanBase`\n The scan to track.\n\n Returns\n -------\n buffer_full : bool\n Whether or not :attr:`ms1_buffer` is full.\n \"\"\"\n self.ms1_buffer.append(scan)\n return len(self.ms1_buffer) >= self.buffering\n\n def _make_producer(self) -> Iterator[ScanGroupType]:\n _make_scan = self.scan_packer\n _validate = self.scan_validator\n _cache_scan = self.scan_cacher\n\n current_level = 1\n\n for scan in self.iterator:\n packed = _make_scan(scan)\n if not _validate(packed):\n continue\n _cache_scan(packed)\n if packed.ms_level > 1:\n # increasing ms level\n if current_level < packed.ms_level:\n current_level = packed.ms_level\n if current_level > self.highest_ms_level:\n self.highest_ms_level = current_level\n\n # decreasing ms level\n elif current_level > packed.ms_level:\n current_level = packed.ms_level\n self.add_product(packed)\n elif packed.ms_level == 1:\n do_emit = self.add_precursor(packed)\n if do_emit:\n yield self.deque_group()\n else:\n raise ValueError(\"Could not interpret MS Level %r\" %\n (packed.ms_level,))\n\n while len(self.ms1_buffer) > 1:\n yield self.deque_group()\n\n if self.ms1_buffer:\n yield self.deque_group(flush_products=True)\n\n\nclass MSEIterator(_GroupedScanIteratorImpl[ScanType, ScanGroupType]):\n \"\"\"\n A scan iterator implementation for grouping MS^E spectra according\n to the specified functions.\n\n Attributes\n ----------\n low_energy_config : int\n The function corresponding to lower energy. These correspond\n to the MS1 equivalent.\n lock_mass_config : int\n The function corresponding to the lockmass. 
Lockmass scans\n will be skipped.\n \"\"\"\n\n low_energy_config: int = 1\n lock_mass_config: int = 3\n on_lock_mass_scan: Optional[Callable]\n\n def __init__(self, iterator, scan_packer, scan_validator=None,\n scan_cacher=None, low_energy_config=1, lock_mass_config=3,\n on_lock_mass_scan: Optional[Callable] = None, **kwargs):\n super(MSEIterator, self).__init__(\n iterator, scan_packer, scan_validator, scan_cacher)\n self.low_energy_config = low_energy_config\n self.lock_mass_config = lock_mass_config\n self.on_lock_mass_scan = on_lock_mass_scan\n\n def _make_producer(self) -> Iterator[ScanGroupType]:\n _make_scan = self.scan_packer\n _validate = self.scan_validator\n _cache_scan = self.scan_cacher\n\n precursor_scan = None\n product_scans = []\n\n current_level = 'low'\n\n for scan in self.iterator:\n packed = _make_scan(scan)\n if not _validate(packed):\n continue\n _cache_scan(packed)\n config = packed.acquisition_information[0].scan_configuration\n if config == self.lock_mass_config:\n if self.on_lock_mass_scan is not None:\n self.on_lock_mass_scan(scan)\n continue\n\n if config != self.low_energy_config:\n if current_level == 'low':\n current_level = 'high'\n # accumulate the high-energy scan as a product\n product_scans.append(packed)\n elif config == self.low_energy_config:\n if current_level != 'low':\n current_level = 'low'\n yield ScanBunch(precursor_scan, product_scans)\n else:\n if precursor_scan is not None:\n yield ScanBunch(precursor_scan, product_scans)\n precursor_scan = packed\n product_scans = []\n else:\n raise ValueError(\"Could not interpret MS Level %r\" %\n (packed.ms_level,))\n if precursor_scan is not None:\n yield ScanBunch(precursor_scan, product_scans)\n","repo_name":"mobiusklein/ms_deisotope","sub_path":"src/ms_deisotope/data_source/scan/scan_iterator.py","file_name":"scan_iterator.py","file_ext":"py","file_size_in_byte":17762,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"71"} +{"seq_id":"7486901355","text":"from collections import namedtuple\nfrom enum import Enum\nimport random\nimport sys\n\n# all costs in k zeny\n\nclass Outcome(Enum):\n SUCCESS = 1\n FAILURE = 2\n BROKEN = 3\n\nclass Simulator:\n Result = namedtuple('Result', 'fees oridecons copies cost')\n\n def __init__(self, start_refine, target_refine, item_cost, copy_cost):\n self.start_refine = start_refine\n self.target_refine = target_refine\n self.item_cost = item_cost\n self.copy_cost = copy_cost\n self.oridecon_cost = 25\n self.outcomes = [Outcome.SUCCESS, Outcome.FAILURE, Outcome.BROKEN]\n self.outcome_weights = {\n 0: [1, 0, 0],\n 1: [1, 0, 0],\n 2: [1, 0, 0],\n 3: [1, 0, 0],\n 4: [0.5, 0.25, 0.25],\n 5: [0.5, 0.25, 0.25],\n 6: [0.4, 0.3, 0.3],\n 7: [0.4, 0.3, 0.3],\n 8: [0.4, 0.3, 0.3],\n 9: [0.4, 0.3, 0.3]\n }\n self.safe_refine_copies = { 0: 0, 1: 0, 2: 0, 3: 0, 4: 1, 5: 2, 6: 3, 7: 4, 8: 6, 9: 10 }\n self.safe_refine_oridecons = { 0: 1, 1: 1, 2: 1, 3: 1, 4: 5, 5: 10, 6: 15, 7: 25, 8: 50, 9: 85 }\n self.safe_refine_fees = { 0: 10, 1: 20, 2: 30, 3: 40, 4: 100, 5: 220, 6: 470, 7: 910, 8: 1630, 9: 2740 }\n self.fees = []\n self.oridecons = []\n self.copies = []\n \n def _calc_fee(self, refine_level):\n return min((refine_level + 1), 10) * 10\n \n def safe_refine_results(self):\n fees = 0\n oridecons = 0\n copies = 0\n \n for refine_level in range(self.start_refine, self.target_refine):\n fees += self.safe_refine_fees[refine_level]\n oridecons += self.safe_refine_oridecons[refine_level] \n copies += self.safe_refine_copies[refine_level]\n\n cost = fees + oridecons * self.oridecon_cost + 
copies * self.copy_cost\n return Simulator.Result(fees, oridecons, copies, cost)\n \n def results(self):\n tries = len(self.fees)\n avg_fees = round(sum(self.fees)/tries)\n avg_oridecons = sum(self.oridecons)/tries\n avg_oridecon_cost = round(avg_oridecons * self.oridecon_cost)\n avg_copies = sum(self.copies)/tries\n avg_copies_cost = round(avg_copies * self.copy_cost) \n avg_cost = avg_fees + avg_oridecon_cost + avg_copies_cost\n\n return Simulator.Result(avg_fees, avg_oridecons, avg_copies, avg_cost)\n \n def step(self):\n fees = 0\n oridecons = 0\n copies = 0\n refine = self.start_refine\n \n while refine < self.target_refine:\n fees += self._calc_fee(refine)\n oridecons += 1 if refine < 10 else 5\n outcome = random.choices(population=self.outcomes, weights=self.outcome_weights[refine], k=1)[0]\n #print(outcome.name)\n if outcome == Outcome.SUCCESS:\n refine += 1\n elif outcome == Outcome.FAILURE:\n refine -= 1\n elif outcome == Outcome.BROKEN:\n refine -= 1\n copies += 1\n fees += 5\n #print('+{}'.format(refine))\n\n self.fees.append(fees)\n self.oridecons.append(oridecons)\n self.copies.append(copies)","repo_name":"TimLeysen/RomRefineSimulator","sub_path":"simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":3256,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"34197963740","text":"import click\nimport logging\nimport shutil\nimport os\nimport lmctl.cli.lifecycle as lifecycle_cli\nimport lmctl.project.package.core as pkgs\nfrom lmctl.cli.controller import get_global_controller\nfrom lmctl.cli.format import determine_format_class\nfrom .utils.object_groups import object_group_options\n\nlogger = logging.getLogger(__name__)\n\n\n@click.group(short_help='Onboard a package built from a Project', help='Onboard a package previously built from a Project, distributed as a \".tgz\" or \".csar\" file')\ndef pkg():\n logger.debug('Package Management')\n\n\nPUSH_HEADER = 'Push'\n\n\n@pkg.command(help='Push a previously built package to a CP4NA orchestration environment')\n@click.argument('package')\n@click.argument('environment', required=False, default=None)\n@click.option('--config', default=None, help='configuration file')\n@click.option('--armname', default='defaultrm', help='if using ansible-rm packaging the name of ARM to upload Resources to must be provided')\n@click.option('--pwd', '--api-key', default=None, help='password/api_key used for authenticating with CP4NA orchestration. 
Only required if the environment is secure and a username has been included in your configuration file with no password (api_key when using auth_mode=zen)')\n@click.option('--autocorrect', default=False, is_flag=True, help='allow validation warnings and errors to be autocorrected if supported')\n@object_group_options()\ndef push(package, environment, config, armname, pwd, autocorrect, object_group_name = None, object_group_id = None):\n \"\"\"Pushes an existing Assembly/Resource package to a target CP4NA orchestration (and ARM) environment\"\"\"\n logger.debug('Pushing package at: {0}'.format(package))\n pkg, pkg_content = lifecycle_cli.get_pkg_and_open(package)\n # Create the controller before entering the try block so the finally clause can always finalise it\n controller = lifecycle_cli.ExecutionController(PUSH_HEADER)\n try:\n env_sessions = lifecycle_cli.build_sessions_for_pkg(pkg_content.meta, environment, pwd, armname, config)\n ctl = get_global_controller(override_config_path=config)\n tnco_client = ctl.get_tnco_client(environment_group_name=environment, input_pwd=pwd)\n object_group_id = lifecycle_cli.resolve_object_group(tnco_client, object_group_id, object_group_name)\n controller.start(package)\n exec_push(controller, pkg, env_sessions, allow_autocorrect=autocorrect, object_group_id=object_group_id)\n finally:\n cleanup_pkg(pkg_content)\n controller.finalise()\n\n\n@pkg.command(help='Inspect a package')\n@click.argument('package')\n@click.option('--config', default=None, help='configuration file')\n@click.option('-f', '--format', 'output_format', default='yaml', help='format of output [yaml, json]')\ndef inspect(package, config, output_format):\n logger.debug('Inspecting package at: {0}'.format(package))\n pkg_content = lifecycle_cli.open_pkg(package)\n try:\n inspection_report = pkg_content.inspect()\n result = format_inspection_report(output_format, inspection_report)\n click.echo(result)\n finally:\n cleanup_pkg(pkg_content)\n \ndef cleanup_pkg(pkg):\n if os.path.exists(pkg.tree.root_path):\n shutil.rmtree(pkg.tree.root_path)\n\ndef format_inspection_report(output_format, inspection_report):\n inspection_report_tpl = inspection_report.to_dict()\n formatter_class = determine_format_class(output_format)\n formatter = formatter_class()\n result = formatter.convert_element(inspection_report_tpl)\n return result\n\ndef exec_push(controller, pkg, env_sessions, allow_autocorrect=False, object_group_id=None):\n push_options = pkgs.PushOptions()\n push_options.object_group_id = object_group_id\n push_options.allow_autocorrect = allow_autocorrect\n push_options.journal_consumer = controller.consumer\n return controller.execute(pkg.push, env_sessions, push_options)\n","repo_name":"IBM/lmctl","sub_path":"lmctl/cli/commands/pkg.py","file_name":"pkg.py","file_ext":"py","file_size_in_byte":3807,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"71"} +{"seq_id":"30792690047","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.test import TestCase\nfrom django.test.client import Client\nfrom django.core.urlresolvers import resolve\nfrom django.shortcuts import render_to_response\nfrom .views import all_products, product_view, custom_request\nfrom .models import Product\nfrom .forms import CustomProductForm\nimport re\n\n\n# Test all products page\nclass AllProductsPageTest(TestCase):\n def test_all_products_page(self):\n # Make sure page resolves\n allproducts_page = resolve('/products/')\n self.assertEqual(allproducts_page.func, all_products)\n\n # Make sure correct template used\n allproductsc = 
self.client.get('/products/')\n self.assertTemplateUsed(allproductsc, \"products.html\")\n\n # Make sure html is correct\n allproducts_output = render_to_response(\"products.html\").content\n self.assertEqual(allproductsc.content, allproducts_output)\n\n\n# Test product page\nclass ProductPageTest(TestCase):\n def test_product_page(self):\n # Create test product entry\n self.client = Client()\n self.viewed_product = Product.objects.create(id='1', name='test', description='test', price='9.99', image='static/product_images/test.jpg')\n\n # Make sure page resolves\n product_page = resolve('/products/1/')\n self.assertEqual(product_page.func, product_view)\n\n # Make sure correct template used\n productc = self.client.get('/products/1/')\n self.assertTemplateUsed(productc, \"productdetails.html\")\n\n # Make sure html is correct\n product_output = render_to_response(\"productdetails.html\", {'viewed_product': self.viewed_product}).content\n self.assertEqual(productc.content, product_output)\n\n\n# Test custom request page\nclass CustomRequestPageTest(TestCase):\n def test_custom_request_page(self):\n # Make sure page resolves\n customrequest_page = resolve('/customorder/')\n self.assertEqual(customrequest_page.func, custom_request)\n\n # Make sure correct template used\n customrequestc = self.client.get('/customorder/')\n self.assertTemplateUsed(customrequestc, \"customorder.html\")\n\n # Make sure html is correct\n customrequest_output = render_to_response(\"customorder.html\", {'form': CustomProductForm()}).content\n # Remove csrf token\n csrf = r']+csrfmiddlewaretoken[^>]+>'\n new_html = re.sub(csrf, '', customrequestc.content)\n self.assertEqual(new_html, customrequest_output)\n\n\n# Test custom product request form\nclass CustomProductFormTests(TestCase):\n def test_custom_product_form(self):\n form = CustomProductForm({\n 'email': 'test@test.com',\n 'custom_description': 'This is a test description',\n })\n self.assertTrue(form.is_valid())\n\n def test_custom_product_form_fails_with_missing_email(self):\n form = CustomProductForm({\n 'custom_description': 'This is a test description',\n })\n self.assertFalse(form.is_valid())\n\n def test_custom_product_form_fails_with_missing_description(self):\n form = CustomProductForm({\n 'email': 'test@test.com',\n })\n self.assertFalse(form.is_valid())\n\n def test_custom_product_form_fails_with_invalid_email(self):\n form = CustomProductForm({\n 'email': 'test.com',\n 'custom_description': 'This is a test description',\n })\n self.assertFalse(form.is_valid())\n\n def test_custom_product_form_fails_with_invalid_email_characters(self):\n form = CustomProductForm({\n 'email': 'test@tes!t.com',\n 'custom_description': 'This is a test description',\n })\n self.assertFalse(form.is_valid())\n","repo_name":"FutoiSaru/rainbow_felt","sub_path":"products/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"1468034523","text":"import pickle\nimport os\n\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.keras.optimizers import Adam\n\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nfrom nltk.tag import pos_tag\nfrom nltk.corpus import wordnet as wn\n\n\n# LOAD MODEL\nMODEL_VERSION = 'lstm_model_rus.h5' # modèle\nMODEL_PATH = 
os.path.join(os.getcwd(), 'models',\n MODEL_VERSION) # path vers le modèle\n# model = load_model(MODEL_PATH, custom_objects={'MyOptimizer': Adam})\nmodel = load_model(MODEL_PATH) # chargement du modèle\n\n# LOAD TOKENIZER\nTOKENIZER_VERSION = 'tokenizer_rus.pickle'\nTOKENIZER_PATH = os.path.join(os.getcwd(), 'models',\n TOKENIZER_VERSION) # path vers le tokenizer\nwith open(TOKENIZER_PATH, 'rb') as handle:\n tokenizer = pickle.load(handle)\n\n# PREPROCESS TEXT\n\nstop_words = stopwords.words('english')\n\n\ndef get_wordnet_pos(tag):\n if tag.startswith('J'):\n return wn.ADJ\n elif tag.startswith('V'):\n return wn.VERB\n elif tag.startswith('N'):\n return wn.NOUN\n elif tag.startswith('R'):\n return wn.ADV\n else:\n return wn.NOUN\n\n\ndef cleaning(data):\n # 1. Tokenize\n text_tokens = word_tokenize(data.replace(\"'\", \"\").lower())\n # 2. Remove Puncs\n tokens_without_punc = [w for w in text_tokens if w.isalpha()]\n # 3. Removing Stopwords\n tokens_without_sw = [t for t in tokens_without_punc if t not in stop_words]\n # 4. Lemmatize\n POS_tagging = pos_tag(tokens_without_sw)\n wordnet_pos_tag = []\n wordnet_pos_tag = [(word, get_wordnet_pos(pos_tag))\n for (word, pos_tag) in POS_tagging]\n wnl = WordNetLemmatizer()\n lemma = [wnl.lemmatize(word, tag) for word, tag in wordnet_pos_tag]\n return \" \".join(lemma)\n\n# PREDICT\n\n\ndef my_predict(text):\n # Tokenize text\n text_pad_sequences = pad_sequences(tokenizer.texts_to_sequences(\n [text]), maxlen=300)\n # Predict\n predict_val = float(model.predict([text_pad_sequences]))\n recommandation = \"Recommandé\" if predict_val > 0.5 else \"Non Recommandé\"\n score = int(predict_val*100)\n return score, recommandation\n\n# route recommandation par GET et POST\n\n\ndef predict():\n customer_feedback = \"Mon texte de test\"\n clean_comment = cleaning(customer_feedback)\n\n score, recommandation = my_predict(clean_comment)\n score = f\"Note estimée : {score}/100\"\n\n return customer_feedback, recommandation, score\n","repo_name":"marianneSimplon/sentiment_analysis_Flask","sub_path":"for_test.py","file_name":"for_test.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"11944624192","text":"import django_filters\n\nfrom collections import OrderedDict\n\nfrom django.shortcuts import get_object_or_404\nfrom django.db.models import (\n Count, Case, When, BooleanField, FloatField, Avg, F\n)\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import (\n ViewSet,\n ReadOnlyModelViewSet,\n)\nfrom rest_framework.generics import ListAPIView, RetrieveAPIView\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.decorators import action\n\nfrom rest_framework import filters\n\nfrom .models import (\n Category,\n Item,\n Feedback,\n Like,\n Rating,\n)\n\nfrom .serializers import (\n CategoriesJsonSerializer,\n ItemSerializer,\n ItemDetailSerializer,\n FeedbackSerializer,\n CheckableSerializer,\n)\n\nfrom .paginations import (\n ItemsSetPagination,\n)\n\nfrom .filters import (\n ItemFilter,\n)\n\n# Create your views here.\n\n\nclass CategoriesViewSet(ViewSet):\n\n queryset = Category.objects.all()\n serializer_class = CategoriesJsonSerializer\n\n def list(self, request):\n childs = set()\n categories_dict = OrderedDict()\n for category in self.queryset.select_related('of'):\n if category.id not in categories_dict:\n categories_dict[category.id] = {'subcategories': []}\n 
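# Fill in this node's own fields; a parent node is created lazily on first reference and the child is attached to the parent's 'subcategories' list, so only root categories remain after pruning.\n 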
categories_dict[category.id]['id'] = category.id\n categories_dict[category.id]['name'] = category.name\n categories_dict[category.id]['description'] = category.description\n if category.of is not None:\n childs.add(category.id)\n if category.of.id not in categories_dict:\n categories_dict[category.of.id] = {'subcategories': []}\n categories_dict[category.of.id]['subcategories'].append(\n categories_dict[category.id]\n )\n for child in childs:\n del categories_dict[child]\n serializer = self.serializer_class(data={\n 'categories': categories_dict.values()\n })\n serializer.is_valid(raise_exception=True)\n return Response(serializer.data)\n\n\n\nclass ItemsViewSet(ReadOnlyModelViewSet):\n\n queryset = Item.objects.all()\n serializer_class = ItemSerializer\n pagination_class = ItemsSetPagination\n filterset_class = ItemFilter\n filter_backends = (\n filters.OrderingFilter,\n django_filters.rest_framework.DjangoFilterBackend,\n )\n ordering_fields = ('created', 'updated',)\n\n def get_queryset(self):\n likes = ratings = []\n if not self.request.user.is_anonymous:\n likes = self.request.user.likes.values_list('item')\n ratings = self.request.user.ratings.values_list('item')\n queryset = self.queryset.annotate(\n rating=Avg('ratings__rate')\n ).annotate(\n rating=Case(\n When(rating=None, then=0),\n default=F('rating'),\n output_field=FloatField()\n ),\n is_liked=Case(\n When(id__in=likes, then=True),\n default=False,\n output_field=FloatField()\n ),\n is_rated=Case(\n When(id__in=ratings, then=True),\n default=False,\n output_field=FloatField()\n )\n )\n return queryset\n\n def get_serializer_class(self):\n if self.action == 'retrieve':\n return ItemDetailSerializer\n return self.serializer_class\n\n @action(detail=True, methods=['POST'],\n permission_classes=[IsAuthenticated],\n serializer_class=CheckableSerializer)\n def like(self, request, pk):\n serializer = self.serializer_class(data=request.data)\n serializer.is_valid(raise_exception=True)\n item = get_object_or_404(self.get_queryset(), pk=pk)\n if serializer.validated_data['is_set']:\n item.likes.get_or_create(item=pk, user=request.user)\n else:\n item.likes.filter(item=pk, user=request.user).delete()\n return Response(serializer.data)\n\n @action(detail=True, methods=['POST'],\n permission_classes=[IsAuthenticated],\n serializer_class=CheckableSerializer)\n def rate(self, request, pk):\n serializer = self.serializer_class(data=request.data)\n serializer.is_valid(raise_exception=True)\n item = get_object_or_404(self.get_queryset(), pk=pk)\n if serializer.validated_data['is_set']:\n item.ratings.get_or_create(item=pk, user=request.user)\n else:\n item.ratings.filter(item=pk, user=request.user).delete()\n return Response(serializer.data)\n\n @action(detail=True, methods=['GET', 'POST'],\n permission_classes=[IsAuthenticated],\n serializer_class=FeedbackSerializer)\n def feedbacks(self, request, pk):\n if request.method == 'GET':\n queryset = Feedback.objects.filter(item=pk)\n serializer = self.serializer_class(queryset, many=True)\n return Response(serializer.data)\n elif request.method == 'POST':\n serializer = self.serializer_class(data=request.data)\n serializer.is_valid(raise_exception=True)\n item = get_object_or_404(self.get_queryset(), pk=pk)\n feedback = item.feedbacks.create(user=request.user,\n **serializer.validated_data)\n return Response(self.serializer_class(feedback).data)\n else:\n return Response(status.HTTP_400_BAD_REQUEST)\n\n\nclass CategorizedItemsViewSet(ListAPIView):\n\n queryset = Item.objects.all()\n 
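# Filtering/ordering backends stay disabled on this view (note the commented-out attributes below); list() applies ItemFilter manually once the category segment has been resolved.\n 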
serializer_class = ItemSerializer\n pagination_class = ItemsSetPagination\n # filterset_class = ItemFilter\n # filter_backends = (\n # filters.OrderingFilter,\n # django_filters.rest_framework.DjangoFilterBackend,\n # )\n ordering_fields = ('created', 'updated',)\n\n def get_queryset(self):\n likes = ratings = []\n if not self.request.user.is_anonymous:\n likes = self.request.user.likes.values_list('item')\n ratings = self.request.user.ratings.values_list('item')\n queryset = self.queryset.annotate(\n rating=Avg('ratings__rate')\n ).annotate(\n rating=Case(\n When(rating=None, then=0),\n default=F('rating'),\n output_field=FloatField()\n ),\n is_liked=Case(\n When(id__in=likes, then=True),\n default=False,\n output_field=FloatField()\n ),\n is_rated=Case(\n When(id__in=ratings, then=True),\n default=False,\n output_field=FloatField()\n )\n )\n return queryset\n\n @property\n def paginator(self):\n if not hasattr(self, '_paginator'):\n if self.pagination_class is None:\n self._paginator = None\n else:\n self._paginator = self.pagination_class()\n return self._paginator\n\n def paginate_queryset(self, queryset):\n if self.paginator is None:\n return None\n return self.paginator.paginate_queryset(queryset,\n self.request, view=self)\n\n def get_paginated_response(self, data):\n assert self.paginator is not None\n return self.paginator.get_paginated_response(data)\n\n def list(self, request, category):\n try:\n if category == 'all':\n queryset = self.get_queryset()\n else:\n queryset = self.get_queryset() \\\n .filter(category=int(category))\n filtered_data = ItemFilter(request.GET, queryset=queryset)\n queryset = filtered_data.qs\n page = self.paginate_queryset(queryset)\n if page is not None:\n serializer = self.serializer_class(page, many=True)\n return self.get_paginated_response(serializer.data)\n serializer = self.serializer_class(queryset, many=True)\n return Response(serializer.data)\n except ValueError:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n\nclass HitItemsViewSet(CategorizedItemsViewSet):\n\n queryset = Item.objects.filter(is_hit=True)\n\n\nclass NewItemsViewSet(CategorizedItemsViewSet):\n\n queryset = Item.objects.filter(is_new=True)\n\n\nclass PopularItemsViewSet(CategorizedItemsViewSet):\n\n queryset = Item.objects.filter(is_popular=True)\n","repo_name":"takhirmunarbekov/MultiBrand","sub_path":"goods/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"44239284507","text":"# -*- coding:utf-8 -*-\n# @Time : 2018/6/8 22:27\n# @Author : yuanjing liu\n# @Email : lauyuanjing@163.com\n# @File : model.py\n# @Software: PyCharm\n\nfrom scipy.stats import beta\nfrom scipy.special import gamma as gammaf\nimport statsmodels.api as sm\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef MaxMinNormalization(x):\n Min, Max = min(x), max(x)\n x = (x - Min) / (Max - Min)\n return x\n\n\ndef betaNLL(param, *args):\n\n a, b = param\n data = args[0]\n pdf = beta.pdf(data, a, b, loc=0, scale=1)\n lg = np.log(pdf)\n # -----Replace -inf with 0s------\n lg = np.where(lg == -np.inf, 0, lg)\n nll = -1*np.sum(lg)\n return nll\n\n\n# 滤波函数\ndef HpFilter(dta):\n cycle, trend = sm.tsa.filters.hpfilter(dta, 15000)\n return cycle, trend\n\n\n# model运行主函数\ndef BetaModel(data):\n\n def fitted(x, a, b):\n fx = gammaf(a+b)/gammaf(a)/gammaf(b)*x**(a-1)*(1-x)**(b-1) # pdf of beta\n return fx\n\n data1 = MaxMinNormalization(data)\n\n a, b, xx, yy = 
beta.fit(data1)\n\n plt.hist(data1, bins=30, normed=True)\n xx = np.linspace(0, max(data1), len(data1))\n plt.plot(xx, fitted(xx, a, b), 'g')\n plt.show()\n\n alpha = 0.95\n q1, q2 = beta.interval(alpha, a, b, loc=0, scale=1)\n\n d1 = q1*(max(data)-min(data))+min(data)\n d2 = q2*(max(data)-min(data))+min(data)\n return a, b, d1, d2\n\n\n\ndef abnormal(shuju, qujian1, qujian2):\n zc, yc = [], []\n for i in range(len(shuju)):\n if (shuju.values[i] > qujian1 and shuju.values[i] < qujian2):\n zc.append(shuju.values[i])\n else:\n yc.append(shuju.values[i])\n return zc, yc\n\n","repo_name":"gavin-kang/AiPlatform","sub_path":"BetaFit/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"6092701172","text":"\nfrom collections import namedtuple\n\n__all__ = [\n 'Pos',\n 'Token',\n 'SExp',\n 'AtomicExp',\n 'VarExp',\n 'NumExp',\n 'BoolExp',\n 'true',\n 'false',\n 'VoidExp',\n 'void',\n 'StrExp',\n 'LamExp',\n 'AppExp',\n 'IfExp',\n 'LetRecExp',\n 'BeginExp',\n 'SetExp',\n 'SetThenExp',\n 'gensym',\n 'unkpos'\n ]\n\nclass GenSym:\n n = 1\n @classmethod\n def __call__(cls, sym=''):\n sym += str(cls.n)\n cls.n += 1\n return VarExp(sym)\ngensym = GenSym()\n\n################################################################################\n## Parser types\n################################################################################\n\n# A position object for tracking location in the source file\nPos = namedtuple('Pos', ['line', 'col'])\n# A token object for holding literals and their position in the source file\nToken = namedtuple('Token', ['pos', 'val'])\n\nclass SExp(list):\n \"\"\"A S-expression.\n\n @type pos: Pos\n @param pos: position of the S-expression in the source file\n @type: A list of SExps and/or Tokens\n @param args: SExps or Tokens contained within this S-expression\n \"\"\"\n def __init__(self, pos, *args):\n self.pos = pos\n super(SExp, self).__init__(args)\n def __getitem__(self, key):\n if isinstance(key, slice):\n return SExp(self.pos, *super(SExp, self).__getitem__(key))\n else:\n return super(SExp, self).__getitem__(key)\n def __repr__(self):\n return 'SExp(' + ', '.join(repr(e) for e in self) + ')'\n\n# import here to avoid circular import dependency\nfrom schemec.sexp import pretty\nunkpos = Pos(-1, -1)\n\n################################################################################\n## Scheme Expressions\n################################################################################\n\n## Atomic Expressions\nclass AtomicExp:\n def map(self, f, skip=True):\n return f(self)\n def toSExp(self):\n tok = Token(unkpos, repr(self))\n return tok\n\nclass VarExp(AtomicExp):\n \"\"\"A variable.\n\n @type name: String\n @param name: The name of the variable\n \"\"\"\n def __init__(self, name):\n self.name = name\n\n def __repr__(self):\n return str(self.name)\n\n def __hash__(self):\n return hash(repr(self))\n\n def __eq__(self, other):\n return hash(self) == hash(other)\n\nclass NumExp(AtomicExp):\n \"\"\"A number.\n\n @type val: Number\n @param val: The value\n \"\"\"\n def __init__(self, val):\n self.val = val\n\n def __repr__(self):\n return str(self.val)\n\nclass BoolExp(AtomicExp):\n \"\"\"A boolean.\n\n @type val: Bool\n @param val: The value\n \"\"\"\n def __init__(self, val):\n self.val = val\n\n def __repr__(self):\n return \"#t\" if self.val else \"#f\"\n\n# these may be useful synonyms\ntrue = BoolExp(True)\nfalse = BoolExp(False)\n\nclass 
VoidExp(AtomicExp):\n \"\"\"void/nil/etc...\"\"\"\n def __repr__(self):\n return pretty(self.toSExp())\n def toSExp(self):\n sexp = SExp(unkpos, Token(unkpos, 'void'))\n return sexp\n\nvoid = VoidExp()\n\nclass StrExp(AtomicExp):\n \"\"\"A string.\n\n @type val: String\n @param val: The value\n \"\"\"\n def __init__(self, val):\n self.val = val\n\n def __repr__(self):\n return '\"{0}\"'.format(self.val)\n\nclass LamExp(AtomicExp):\n n = 1\n \"\"\"A lambda expression.\n\n @type argExps: A List of VarExps\n @param argExps: The formal parameters of the lambda\n @type bodyExp: Any Scheme expression\n @param bodyExp: The body of the lambda\n \"\"\"\n def __init__(self, argExps, bodyExp):\n if isinstance(argExps, AppExp):\n argExps = argExps.tolist()\n self.argExps = argExps\n self.bodyExp = bodyExp\n self.name = 'lambda_%d' % LamExp.n\n LamExp.n += 1\n\n def map(self, f, skip=True):\n if not skip:\n f(self)\n lam = LamExp([v.map(f, skip) for v in self.argExps], self.bodyExp.map(f, skip))\n lam.name = self.name\n return f(lam)\n\n def __repr__(self):\n return pretty(self.toSExp())\n\n def __hash__(self):\n return hash(self.name)\n\n def __eq__(self, other):\n return hash(self) == hash(other)\n\n def toSExp(self):\n sexp = SExp(unkpos,\n Token(unkpos, 'lambda'),\n SExp(unkpos, *[e.toSExp() for e in self.argExps]),\n self.bodyExp.toSExp()\n )\n return sexp\n\n## More complex expressions\nclass AppExp:\n \"\"\"A lambda application.\n\n @type funcExp: Any Scheme expression\n @param funcExp: The function being applied\n @type argExps: A List of Scheme Expressions (not passed as a list though!)\n @param argExps: The arguments to the function\n \"\"\"\n def __init__(self, funcExp, *argExps):\n self.funcExp = funcExp\n self.argExps = argExps\n\n def map(self, f, skip=True):\n if not skip:\n f(self)\n return f(\n AppExp(\n self.funcExp.map(f, skip),\n *[exp.map(f, skip) for exp in self.argExps]\n )\n )\n\n def __repr__(self):\n return pretty(self.toSExp())\n\n def tolist(self):\n lst = [self.funcExp]\n lst.extend(self.argExps)\n return lst\n\n def toSExp(self):\n sexp = SExp(unkpos,\n self.funcExp.toSExp(),\n *[e.toSExp() for e in self.argExps]\n )\n return sexp\n\nclass IfExp:\n \"\"\"An if expression.\n\n All three parameters can be any Scheme expression.\n \"\"\"\n def __init__(self, condExp, thenExp, elseExp):\n self.condExp = condExp\n self.thenExp = thenExp\n self.elseExp = elseExp\n\n def map(self, f, skip=True):\n if not skip:\n f(self)\n return f(\n IfExp(\n self.condExp.map(f, skip),\n self.thenExp.map(f, skip),\n self.elseExp.map(f, skip)\n )\n )\n\n def __repr__(self):\n return pretty(self.toSExp())\n\n def toSExp(self):\n sexp = SExp(unkpos,\n Token(unkpos, 'if'),\n self.condExp.toSExp(),\n self.thenExp.toSExp(),\n self.elseExp.toSExp()\n )\n return sexp\n\nclass LetRecExp:\n \"\"\"A letrec expression.\n\n @type bindings: A list of [VarExp, LamExp] bindings\n @param bindings: The bindings to add\n @type bodyExp: Any Scheme expression\n @param bodyExp: The body of the LetRec expression\n \"\"\"\n def __init__(self, bindings, bodyExp):\n if isinstance(bindings, AppExp):\n bindings = bindings.tolist()\n for i, expr in enumerate(bindings):\n if isinstance(expr, AppExp):\n bindings[i] = expr.tolist()\n self.bindings = bindings\n self.bodyExp = bodyExp\n\n def map(self, f, skip=True):\n if not skip:\n f(self)\n return f(\n LetRecExp(\n [(v.map(f, skip), l.map(f, skip)) for v, l in self.bindings],\n self.bodyExp.map(f, skip)\n )\n )\n\n def __repr__(self):\n return pretty(self.toSExp())\n\n def 
toSExp(self):\n sexp = SExp(unkpos,\n Token(unkpos, 'letrec'),\n SExp(unkpos, *[\n SExp(unkpos,\n v.toSExp(),\n f.toSExp()\n ) for v, f in self.bindings\n ]),\n self.bodyExp.toSExp()\n )\n return sexp\n\nclass BeginExp:\n \"\"\"A begin expression.\n\n @type exps: A list of Scheme expressions\n @param exps: The expressions contained within the `begin`\n \"\"\"\n def __init__(self, *exps):\n self.exps = exps\n\n def map(self, f, skip=True):\n if not skip:\n f(self)\n return f(\n BeginExp(e.map(f, skip) for e in self.exps)\n )\n\n def __repr__(self):\n return pretty(self.toSExp())\n\n def toSExp(self):\n sexp = SExp(unkpos,\n Token(unkpos, 'begin'),\n SExp(unkpos, *[e.toSExp() for e in self.exps])\n )\n return sexp\n\nclass SetExp:\n \"\"\"A set! expression.\n\n @type varExp: A VarExp\n @param varExp: The symbol to be rebound\n @type exp: Any Scheme expression\n @param exp: The new value to be bound to varExp\n \"\"\"\n def __init__(self, varExp, exp):\n self.varExp = varExp\n self.exp = exp\n\n def map(self, f, skip=True):\n if not skip:\n f(self)\n return f(\n SetExp(\n self.varExp.map(f, skip),\n self.exp.map(f, skip)\n )\n )\n\n def __repr__(self):\n return pretty(self.toSExp())\n\n def toSExp(self):\n sexp = SExp(unkpos,\n Token(unkpos, 'set!'),\n self.varExp.toSExp(),\n self.exp.toSExp()\n )\n return sexp\n\nclass SetThenExp:\n \"\"\"A set-then! expression.\n\n @type varExp: A VarExp\n @param varExp: The symbol to be rebound\n @type exp: Any Scheme expression\n @param exp: The new value to be bound to varExp\n @type thenExp: Any Scheme expression\n @param thenExp: The continuation to apply\n \"\"\"\n def __init__(self, varExp, exp, thenExp):\n self.varExp = varExp\n self.exp = exp\n self.thenExp = thenExp\n\n def map(self, f, skip=True):\n if not skip:\n f(self)\n return f(\n SetThenExp(\n self.varExp.map(f, skip),\n self.exp.map(f, skip),\n self.thenExp.map(f, skip)\n )\n )\n\n def __repr__(self):\n pretty(self.toSExp())\n\n def toSExp(self):\n sexp = SExp(unkpos,\n Token(unkpos, 'set-then!'),\n self.varExp.toSExp(),\n self.exp.toSExp(),\n self.thenExp.toSExp()\n )\n return sexp\n","repo_name":"edu-ucsd-cse-231/fa12-schemec","sub_path":"schemec/typs.py","file_name":"typs.py","file_ext":"py","file_size_in_byte":9842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"3944257443","text":"import os\nimport time\n\nfrom .serialize import serialize, deserialize\n\n\nclass Transport:\n @staticmethod\n def send(data, sock):\n data = serialize(**data)\n sent = 0\n while sent < len(data):\n curr = sock.send(data[sent:])\n if not curr:\n raise Exception('CLIENT COULD NOT SEND DATA')\n sent += curr\n\n @staticmethod\n def read(fd):\n data = b''\n read = os.read(fd, 1)\n while len(read):\n data += read\n if b'\\n' in read:\n break\n read = os.read(fd, 1)\n return deserialize(data)\n","repo_name":"shedx/lamport_mutex","sub_path":"rpc/transport.py","file_name":"transport.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"74463307750","text":"#!/usr/bin/env python\n# encoding=utf8\n# Time : 2022/11/23 2:21 下午\n# Author : xing tian wei\n# File : app_2.py\n\n\n# python库\nfrom flask import Flask, request, send_from_directory, send_file, jsonify, Response\nimport json # python自带,无需安装\nfrom collections import OrderedDict # python自带,无需安装\nimport os\nimport logging\nimport numpy as np\nimport pandas as pd\n\n# 自定义函数\nfrom run_word_filter import 
DFAFilter\nfrom data_processing import keywords_bag_init_judge\nfrom data_processing import add_keywords_refresh\nfrom data_processing import remove_keywords_refresh\nfrom config import turn_supplement\n\n# 初始化\napp = Flask(__name__)\napp.config['JSON_SORT_KEYS'] = False\napp.config['JSON_AS_ASCII'] = False\n\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nlogger = logging.getLogger(__name__)\n# time.sleep(3)\n\n# 初始化 flag 和 supplement 和 敏感词库\nflag = 0\nflag_1 = 0\nflag_2 = 0\nuse_path_dir_or_mysql = 'path_dir' # 'path_dir'表示从path_dir加载词库,'mysql'表示从mysql数据库表加载词库\npath_dir = [\"m_无标签敏感词库\", \"m_无标签敏感词库2\", \"m_无标签敏感词库3\"]\nsave_file = 'data.txt'\n\n# 初始化模型\ngfw = DFAFilter()\n\n\n@app.route('/community/2', methods=['GET','POST'])\ndef first_post_2():\n global flag_1\n global flag_2\n\n my_init_bag_flag = request.form.get('init_bag_flag').strip() # '初始化敏感词袋(选填,填\"init\"表示走初始化)'\n my_sensitive_word_add = request.form.get('sensitive_word_add').strip() # '自定义添加敏感词(选填,添加多个敏感词时请以中文逗号分隔)'\n my_sensitive_word_remove = request.form.get('sensitive_word_remove').strip() # '自定义移除敏感词(选填,移除多个敏感词时请以中文逗号分隔)'\n my_text = request.form.get('text').strip() # text\n\n init_bag_flag = my_init_bag_flag\n sensitive_word_add = my_sensitive_word_add\n sensitive_word_remove = my_sensitive_word_remove\n text = my_text\n try:\n result = {}\n # 加载词库\n flag = keywords_bag_init_judge(use_path_dir_or_mysql, path_dir, save_file, init_bag_flag) # 调用\n gfw.parse(save_file) # 词库 调用\n\n if sensitive_word_add != \"\": # 如果非空,则走 逗号分割 + 批量添加敏感词\n sensitive_word_ls = sensitive_word_add.split(',') # 注:是中文逗号\n gfw.add_batch(sensitive_word_ls) # 调用\n flag_1 = 2\n add_keywords_refresh(save_file, sensitive_word_ls) # 调用\n\n if sensitive_word_remove != \"\":\n sensitive_word_ls = sensitive_word_remove.split(',')\n gfw.remove_refresh_batch(sensitive_word_ls) # 调用\n flag_2 = 3\n remove_keywords_refresh(save_file, sensitive_word_ls) # 调用\n\n assert flag == 0 or flag == 1, \"flag值返回错误\"\n supplement = turn_supplement(flag, flag_1, flag_2) # 调用\n\n if text == \"\":\n code = 0\n message = '输入为空'\n else:\n code = 200\n message = '成功'\n\n # data = gfw.word_replace(text)\n # result = {\"code\": code, \"message\": message, \"supplement\":supplement, \"result\": data}\n data = gfw.word_replace_renew(text)\n\n result = {\"message\": message, \"supplement\": supplement, \"result\": data[0], \"idx\": data[1]}\n result[\"code\"] = code\n\n except Exception as e:\n result = {}\n logger.error(e)\n code = -1\n message = 'error'\n supplement = 'error'\n data = 'error'\n\n result = {\"message\": message, \"supplement\": supplement, \"result\": data}\n result[\"code\"] = code\n\n return jsonify(result)\n\n\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=2002, debug=False, threaded=False)","repo_name":"xtv417810/MinGanCi_public","sub_path":"app2.py","file_name":"app2.py","file_ext":"py","file_size_in_byte":3859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"14210230960","text":"from copy import copy\nfrom pathlib import Path\nfrom typing import List, Tuple\n\n\ndef _calculate_score(card: List[List[str]], number: str) -> None:\n unmarked_num_val = 0\n for row in card:\n unmarked_num_val += sum([int(x) for x in row if not x.endswith('*')])\n\n print( unmarked_num_val * int(number))\n\n\ndef _score_card(card: List[List[str]]) -> Tuple[bool, List[List[str]]]:\n row_complete = False\n column_complete = False\n \n # Row\n for row 
in card:\n row_complete = all([x.endswith('*') for x in row])\n if row_complete:\n break\n \n if not row_complete:\n for i in range(len(card[0])):\n column = [r[i] for r in card]\n column_complete = all([x.endswith('*') for x in column])\n if column_complete:\n break\n\n return any([row_complete, column_complete]), card\n\n\ndef _stamp_card( number: str, card: List[List[str]]) -> Tuple[bool, List[List[str]]]:\n success = False\n for i, row in enumerate(card):\n if number in row:\n idx = row.index(number)\n if not row[idx].endswith('*'):\n row[idx] = row[idx] + '*'\n card[i] = row\n \n success, card = _score_card(card)\n if success:\n break\n\n return success, card\n\n\ndef part_1( bingo_numbers: List[str], bingo_cards: List[List[List[str]]]) -> None:\n for number in bingo_numbers:\n for i, bc in enumerate(bingo_cards):\n success, bc = _stamp_card(number, bc)\n bingo_cards[i] = bc\n if success:\n _calculate_score( bc, number)\n return\n\n\ndef part_2( bingo_numbers: List[str], bingo_cards: List[List[List[str]]]) -> None:\n cards_in_play = list(range(len(bingo_cards)))\n last_winning_combo = None\n for number in bingo_numbers:\n for i, bc, in enumerate(bingo_cards):\n if i in cards_in_play:\n success, bc = _stamp_card(number, bc)\n bingo_cards[i] = bc\n if success:\n last_winning_combo = (bc, number)\n cards_in_play.remove(i) \n\n if last_winning_combo:\n _calculate_score(*last_winning_combo)\n\n\nif __name__ == \"__main__\":\n filepath = Path(__file__).parent / \"day_04_input.txt\"\n data = filepath.read_text().splitlines()\n bingo_numbers = [x for x in data.pop(0).split(',')]\n data.pop(0)\n \n bingo_cards = []\n bingo_card = []\n for x in data:\n if not x:\n bingo_cards.append(copy(bingo_card))\n bingo_card.clear()\n continue\n\n row = [n for n in x.split( ) if n]\n bingo_card.append(row)\n \n # To cover for there not being a blank line at the end of the data\n if bingo_card:\n bingo_cards.append(copy(bingo_card))\n\n part_1(bingo_numbers, bingo_cards)\n part_2(bingo_numbers, bingo_cards)\n\n","repo_name":"techartorg/Advent_of_code_2021","sub_path":"jeff_hanna/day_04.py","file_name":"day_04.py","file_ext":"py","file_size_in_byte":2920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"21151175899","text":"#!/usr/bin/env python\nimport unittest\nfrom typing import List\nimport random\n\n\n\"\"\"\nGiven an integer array nums and an integer k, return the k-th largest element in the array.\n\nNote that it is the k-th largest element in sorted order, not the k-th distinct element.\n\nExample 1:\n\nInput: [3,2,1,5,6,4] and k = 2\nOutput: 5\nExample 2:\n\nInput: [3,2,3,1,2,4,5,5,6] and k = 4\nOutput: 4\n\"\"\"\n\n\nclass Solution:\n def findKthLargest(self, nums: List[int], k: int) -> int:\n pivot = random.sample(nums, 1)[0]\n larger = [n for n in nums if n > pivot]\n smaller = [n for n in nums if n < pivot]\n equal = [n for n in nums if n == pivot]\n\n if len(larger) >= k:\n return self.findKthLargest(larger, k)\n elif len(larger) + len(equal) >= k:\n return pivot\n else:\n return self.findKthLargest(smaller + equal, k - len(larger))\n\n\ncases = [\n {\n \"input\": [[3, 2, 1, 5, 6, 4], 2],\n \"output\": 5,\n },\n {\n \"input\": [[3, 2, 3, 1, 2, 4, 5, 5, 6], 4],\n \"output\": 4,\n },\n]\n\n\nclass SolutionTestCase(unittest.TestCase):\n def test(self):\n for t in cases:\n print(f\"input: {t['input']}\\noutput: {t['output']}\\n\")\n ret = Solution().findKthLargest(*t[\"input\"])\n self.assertEqual(ret, t[\"output\"])\n\n\nif __name__ == \"__main__\":\n 
unittest.main()\n","repo_name":"drinks5/algorithm","sub_path":"kth-largest-element-in-an-array/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"} +{"seq_id":"5273902972","text":"from django.contrib.auth import get_user_model\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom rest_framework import serializers\n\nfrom .models import Faculty, Staff, Student\n\nUser = get_user_model()\n\n\nclass StudentSerializer(serializers.ModelSerializer):\n class Meta:\n model = Student\n fields = ['roll_no', 'batch', 'department', 'hostel_address', 'bio']\n\n\nclass FacultySerializer(serializers.ModelSerializer):\n class Meta:\n model = Faculty\n fields = ['title', 'department', 'designation']\n\n\nclass StaffSerializer(serializers.ModelSerializer):\n class Meta:\n model = Staff\n fields = ['department', 'designation']\n\n\nclass UserSerializer(serializers.ModelSerializer):\n student = StudentSerializer(many=False, allow_null=True, required=False, read_only=False)\n faculty = FacultySerializer(many=False, allow_null=True, required=False, read_only=False)\n staff = StaffSerializer(many=False, allow_null=True, required=False, read_only=False)\n\n class Meta:\n model = User\n fields = ['email', 'first_name', 'last_name', 'gender',\n 'contact_no', 'user_type', 'picture_url', 'student', 'faculty', 'staff']\n extra_kwargs = {'email': {'required': False}}\n\n def create(self, validated_data):\n student_data = validated_data.pop('student', None)\n faculty_data = validated_data.pop('faculty', None)\n staff_data = validated_data.pop('staff', None)\n\n # check for email\n if not validated_data.get('email'):\n raise serializers.ValidationError('Email field is required.')\n\n user = User.objects.create(**validated_data)\n user_type = user.user_type\n\n # Match data provided to user_type\n if user_type == 'Student' and student_data:\n Student.objects.create(user=user, **student_data)\n elif user_type == 'Faculty' and faculty_data:\n Faculty.objects.create(user=user, **faculty_data)\n elif user_type == 'Staff' and staff_data:\n Staff.objects.create(user=user, **staff_data)\n return user\n\n def update(self, instance, validated_data):\n # get new user_type data or set them to None\n student_data = validated_data.pop('student', None)\n faculty_data = validated_data.pop('faculty', None)\n staff_data = validated_data.pop('staff', None)\n\n self._update_base_user(instance, validated_data)\n\n # try to get user_type instances or set them to None\n try:\n student = instance.student\n except ObjectDoesNotExist:\n student = None\n\n try:\n faculty = instance.faculty\n except ObjectDoesNotExist:\n faculty = None\n\n try:\n staff = instance.staff\n except ObjectDoesNotExist:\n staff = None\n\n user_type = instance.user_type\n\n # create or update user type only if user_type and data match else ignore it\n if user_type == 'Student':\n if student and student_data:\n self._update_student(student, student_data)\n elif student_data:\n Student.objects.create(user=instance, **student_data)\n elif user_type == 'Faculty':\n if faculty and faculty_data:\n self._update_faculty(faculty, faculty_data)\n elif faculty_data:\n Faculty.objects.create(user=instance, **faculty_data)\n elif user_type == 'Staff':\n if staff and staff_data:\n self._update_staff(staff, staff_data)\n elif staff_data:\n Staff.objects.create(user=instance, **staff_data)\n\n return instance\n\n def _update_base_user(self, instance, 
validated_data):\n updatable_fields = ['first_name', 'last_name',\n 'gender', 'contact_no', 'picture_url']\n\n admin_only_editable_fields = ['email', 'user_type']\n\n request_user = self.context['request'].user\n\n if request_user.is_staff:\n updatable_fields += admin_only_editable_fields\n\n self._update_instance(instance, validated_data, updatable_fields)\n\n def _update_student(self, instance, validated_data):\n updatable_fields = ['hostel_address', 'bio']\n admin_only_editable_fields = ['roll_no', 'batch', 'department']\n\n request_user = self.context['request'].user\n\n if request_user.is_staff:\n updatable_fields += admin_only_editable_fields\n\n self._update_instance(instance, validated_data, updatable_fields)\n\n def _update_faculty(self, instance, validated_data):\n updatable_fields = ['title', 'department', 'designation']\n\n self._update_instance(instance, validated_data, updatable_fields)\n\n def _update_staff(self, instance, validated_data):\n updatable_fields = ['department', 'designation']\n\n self._update_instance(instance, validated_data, updatable_fields)\n\n def _update_instance(self, instance, validated_data, updatable_fields):\n\n for field in updatable_fields:\n field_stored_value = getattr(instance, field)\n field_new_value = validated_data.get(field, field_stored_value)\n setattr(instance, field, field_new_value)\n\n instance.save()\n","repo_name":"BitByte-TPC/gymkhana","sub_path":"api/api/accounts/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":5268,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"70"} +{"seq_id":"28978804290","text":"# -*- coding: UTF-8 -*-\nimport numpy as np\nimport time\nimport random\nimport operator\nfrom builtins import input\nfrom test.test_lzma import INPUT\nfrom pip._vendor.html5lib._ihatexml import digit\nimport sys\nimport csv\n\nclass Clustering:\n def __init__(self):\n self.motsAEviter = [\"le\",\"la\",\"lui\",\"elle\",\"il\",\"on\",\"tu\",\"je\",\"vous\",\"nous\",\"ils\",\"car\",\"parce\",\"que\",\"cet\",\"cette\",\"toujours\",\"pendant\",\"par\"\n ,\"dans\",\"ce\",\"ca\",\"mon\",\"ma\",\"mais\",\"les\",\"l\",\"de\",\"du\",\"des\",\"ton\",\"ta\",\"son\",\"sa\",\"mes\",\"ses\",\"ces\",\"tes\",\"notre\"\n ,\"votre\",\"leur\",\"leurs\",\"nos\",\"vos\",\"elles\",\"et\",\"où\",\"ou\",\"donc\",\"alors\",\"quoi\",\"quand\",\"avec\",\"sans\",\"toi\",\"moi\",\"eux\",\"sur\"\n ,\"à\",\"d\",\"par\",\"pas\",\"qu\",\"m\",\"ce\",\"plus\",\"ces\",\"se\",\"ne\",\"tout\",\"toutes\",\"tous\",\"un\",\"pour\",\"une\",\"peu\",\"cette\",\"cettes\",\n \"cet\",\"comme\",\"s\",\"en\",\"n\",\"c\",\"--\",\"a\",\"est\",\"était\",\"avait\",\"j\",\"au\",\"qui\",\"y\",\"me\",\"artagnan\",\"athos\",\"si\",\"dit\",\"même\"\n ,\"dont\",\"sous\",\"air\",\"aux\",\"jusqu\",\"la\",\"là\",\"maintenant\",\"moins\",\"mot\",\"ni\",\"nommés\",\"peut\",\"plupart\",\"pourquoi\",\"quel\",\"quelle\",\n \"quelles\",\"quels\",\"seulement\",\"sien\",\"sont\",\"soyez\",\"tandis\",\"tellement\",\"tels\",\"trop\",\"très\",\"voient\",\"vont\",\"vu\",\"ça\",\"étaient\",\n \"état\",\"étions\",\"été\",\"être\",\"ai\",\"a\",\"ah\",\"eh\",\"est-ce\",\"un\"]\n \n \"\"\"\n param 1:liste de mot chosi au depart, 2:liste de mot les plus proche, 3: dictionaire, 4:listeLS, 5:nbCluster, 6 vecteurs\n \"\"\"\n def barycentreCalc(self,listeMotPlusProche,nbCentroids,vecteur):\n newCenters = []\n emplacementCentroide = []\n for c in range(nbCentroids): #pour le nb de centroids\n emplacementMot = []\n for i in 
range(len(listeMotPlusProche)): #fait le tour de tout les mots qui on ete donné à leur centroide\n if listeMotPlusProche[i] == c: #si le mot appartien on vecteur c, rentre et append son emplacement dans la liste\n emplacementMot.append(vecteur[i])\n emplacementCentroide.append(emplacementMot) #mettre tout les emplacements des mots dans une liste pourcalculer le point milieu du prochain centroide\n\n for baryCenter in range(nbCentroids) :\n newCenters.append(np.mean(emplacementCentroide[baryCenter], axis = 0))#calculer la position des centroides\n \n return newCenters\n \"\"\"\n pour obtenir distance\n \"\"\"\n def obtenirDistEuclidean(self,row1,row2):\n return np.sum((row1 - row2)**2)\n \"\"\"\n PARAM: 1:CONNEXION, 2:TAILLE DE FENETRE, 3:LISTE DE LIGNES DE VECTEURS DES NOUVEAU CENTRES, 4:DICTIONNAIRE\n \"\"\"\n def TrouverPointCluster(self,nouveauCentres,dico,matrice,iter,rand,lsLesPlusProche):\n tailleDico = len(dico)\n listeLS = []\n vecteurCluster = []\n mesClusterEnScore = []\n listeMotPlusProche=[]\n\n # pour chaque point \n for motChoisi in range(len(nouveauCentres)):\n resultat = {}\n #vecteur de mots horizontal du vecteur reconstruit\n\n vecteurMot = nouveauCentres[motChoisi]\n\n # calcul du score least square\n for i in dico.keys():\n vecteurComparer = matrice[dico.get(i)-1]\n produit = self.obtenirDistEuclidean(vecteurMot,vecteurComparer)\n resultat[i] = produit\n # append into list of score \n listeLS.append(resultat) #liste des least square scores\n \n for i in range(len(nouveauCentres)):\n vecteurCluster.append(listeLS[i]) #passe les dictionaires de scores\n \n \n \n inversedDic = {v: k for k, v in dico.items()}\n if iter == 0 and rand == True: # si on veux trouver les clusters de facon random on return la liste maintenant\n return lsLesPlusProche, listeLS\n else:\n for i in range(tailleDico): #tour de tout les mots \n toFilter = []\n \"\"\"\n on fait le tour des cluster -> append son score dans la liste toFilter -> \n on append à listeMotPlusProche l'index (représente à quel centroid il appartient)du score le plus bas de toFilter \n \"\"\"\n for j in range(len(nouveauCentres)):\n toFilter.append(vecteurCluster[j][inversedDic[i+1]])\n listeMotPlusProche.append(toFilter.index(min(toFilter))) #,key = DicoToFilter.get\n return listeMotPlusProche , listeLS\n \n \"\"\"\n reconstruction de la matrice de coocurences\n PARAM: 1:LISTE DE VALEURS VENANT DE LA BASE DE DONNÉE, 2:TAILLE DE DICTIONAIRE, 3:TAILLE DE FENETRE\n \"\"\"\n def ReconstrucTabCooc(self, valuesFromDB,tailleDico,taille):\n vecteur = np.zeros((tailleDico,tailleDico))\n for mesValeurs in valuesFromDB:\n if int(mesValeurs[2]) == int(taille):\n if mesValeurs[0] != mesValeurs[1]:\n vecteur[mesValeurs[0]-1][mesValeurs[1]-1] = mesValeurs[3]\n return vecteur \n \"\"\"\n assignation de chaque mots a un centroid random\n \"\"\"\n def definitionDePointsRandom(self,tailleDico,nbCluster,vecteur):\n nouveauxCentres = []\n emplacementCentroides =[]\n lsVec = []\n listeRandomDesMots=[]\n \n #Faire des équipes de cluster random\n for mot in range(tailleDico):\n clusterRandom = random.randint(0, nbCluster-1)\n listeRandomDesMots.append(clusterRandom)\n #liste de la position \n lsVec.append(vecteur[mot])\n for i in range(nbCluster):\n n=0\n lsLignes = []\n for j in range(len(listeRandomDesMots)):\n if listeRandomDesMots[j] == i:\n lsLignes.append(lsVec[j])\n n+=1\n emplacementCentroides.append(lsLignes)\n print(\"nombre de mots dans centroid \",i+1,\" : \",n)\n 
print(\"----------------------------------------------------------------\")\n for baryCenter in range(nbCluster) :\n nouveauxCentres.append(np.mean(emplacementCentroides[baryCenter], axis = 0)) \n \n return listeRandomDesMots,lsVec,emplacementCentroides,nouveauxCentres\n \n \"\"\"\n boucle de chaque iteration (appelle des fonctions) et verifie si le clustering est fini (plsu aucun changement)\n \"\"\"\n def boucleCentroid(self,nouveauxCentres,dico,vecteur,nbrIteration,randPoints,listeRandomDesMots,nbCluster,tempListe):\n clustDone = False \n debut = time.time()\n ancienneListe, listeLS= self.TrouverPointCluster(nouveauxCentres,dico,vecteur,nbrIteration,randPoints,listeRandomDesMots)\n #param 1:liste de mot, 2:liste de mot les plus proche, 3: dictionaire, 4:listeLS, 5:nbcluster, 6 vecteurs\n nouveauxCentres = self.barycentreCalc(ancienneListe,nbCluster,vecteur)\n \n ancienneListe = np.array(ancienneListe)\n tempListe = np.array(tempListe)\n \n #changements = len(ancienneListe[ancienneListe != tempListe])\n #nbParCluster = [len(ancienneListe[ancienneListe == i]) for i in range(nbCLusters)]\n \n verifListe = np.equal(ancienneListe,tempListe)\n if verifListe.all():\n clustDone = True\n print(\"clustering finished\")\n changementC = \"aucune autre changement \"\n else:\n changementC = 0\n for i in range(len(tempListe)):\n if tempListe[i] != ancienneListe[i]:\n changementC+=1 \n print(time.time()-debut,\"secondes pour cette iteration\")\n print(\"--------------------------------------------------------\")\n tempListe = ancienneListe\n nbrIteration+=1\n print(\"iteration #\",nbrIteration)\n print(changementC, \"dans cette iteration\")\n for i in range(nbCluster):\n n=0\n for j in range(len(ancienneListe)):\n if ancienneListe[j] == i:\n n+=1 \n\n print(\"nombres de points dans centroid \",i+1,\": \",n)\n return listeLS,nouveauxCentres,nbrIteration,clustDone,tempListe,ancienneListe\n \"\"\"\n dernier sort des valeurs et des scores pour afficher les resultats\n \"\"\"\n def resultSorting(self,nbCluster,ancienneListe,inversedDic,listeLS):\n listeResultatDesResultat = []\n for j in range(nbCluster):\n dicoDesResultats = {}\n for i in range(len(ancienneListe)):\n if ancienneListe[i] == j:\n if inversedDic[i+1] not in self.motsAEviter:\n dicoDesResultats[inversedDic[i+1]] = listeLS[j][inversedDic[i+1]]\n\n listeResultatDesResultat.append(sorted(dicoDesResultats.items(),key=lambda t: t[1])) #sort la liste pour avoir les scores les plus petits\n return listeResultatDesResultat\n \"\"\"\n pour imprimer les resultats pour chaque centroids avec son score ls\n \"\"\"\n def printResults(self,nbCluster,nbMots,listeResultatDesResultat,vecteur,dico,TSVdic):\n for i in range(nbCluster):\n print(\"-----------------------------------------------------------\")\n print(\"resultats du \",i+1,\"ème centroide\")\n for j in range(int(nbMots)):\n if j+1 < len(listeResultatDesResultat[i]): \n print(listeResultatDesResultat[i][j],\" est un \",self.kNearN(vecteur,dico,TSVdic,listeResultatDesResultat[i][j][0]))\n \"\"\"\n fonction principale de clustering\n \"\"\" \n def toCluster(self,dico,listeToCluster,taille,connexion,nbMots):\n listeLS = []\n \"\"\"\n ramasse les coocurences dans la bd\n \"\"\"\n enonce_R_Matrice = \"SELECT * FROM MATRICE WHERE TAILLE = ? 
\"\n tailleDico = len(dico)\n curseur = connexion.cursor()\n curseur.execute(enonce_R_Matrice,taille)\n retourDesValeurs = curseur.fetchall()\n nouveauxCentres = []\n tempListe = []\n listeRandomDesMots=[]\n nbrIteration = 0\n newCenters=[]\n randPoints = False\n clustDone = False\n inversedDic = {v: k for k, v in dico.items()}\n \"\"\"\n apport du lexique de langue francaise\n \"\"\"\n lexiqueTSV_path = \"Lexique382.tsv\"\n sep = \"\\t\"\n etiq = self.lireLines(lexiqueTSV_path)\n etiq_delimited = self.tsv_To_ls(etiq)\n TSVdic = self.creationDeRefDbTsv(etiq_delimited)\n #sys.stdout = open(\"Resultats.txt\", \"w\")\n \"\"\"\n reconstruction du vecteur de coocurence\n \"\"\"\n vecteur = self.ReconstrucTabCooc(retourDesValeurs,tailleDico,taille)\n \"\"\"\n etablit les nouveaux centres selon si l'utilisateur veut utiliser la fonction alleatoire ou choisir ses mots\n \"\"\"\n if listeToCluster[0].isdigit(): # verifie si s'est un nombre au lieu de mots dans le nombre de centroid (on determine si on veut random ou pas)\n randPoints = True\n nbCluster = int(listeToCluster)\n lsVec = []\n emplacementCentroides = []\n listeRandomDesMots,lsVec,emplacementCentroides,nouveauxCentres = self.definitionDePointsRandom(tailleDico,nbCluster,vecteur)\n else:\n nbCluster = len(listeToCluster)\n for motChoisi in listeToCluster:\n nouveauxCentres.append(vecteur[dico.get(motChoisi)-1]) # ligne au complet\n \n tempListe = np.zeros(tailleDico) \n while(clustDone == False):\n listeLS,nouveauxCentres,nbrIteration,clustDone,tempListe,ancienneListe = self.boucleCentroid(nouveauxCentres, dico, vecteur, nbrIteration, randPoints, listeRandomDesMots, nbCluster,tempListe)\n \n # listeLS key = word value = score\n listeResultatDesResultat = []\n listeResultatDesResultat = self.resultSorting(nbCluster,ancienneListe,inversedDic,listeLS)\n self.printResults(nbCluster,nbMots,listeResultatDesResultat,vecteur,dico,TSVdic) \n \n \n def kNearN(self,vecteur,dico,TSVdic,mot):\n #valeur de K (nombres de voisin)\n kValue = 15\n return self.traiter_donnees(vecteur, dico,TSVdic,mot,kValue)\n \n def creationDeRefDbTsv(self,etiq_delimited):\n vecteur = {}\n for data in etiq_delimited:\n vecteur[data[0]] = data[3]\n return vecteur\n \n def lireLines(self,ch):\n with open(ch,\"r\",encoding = \"utf8\") as f:\n return f.read().splitlines() \n \n def tsv_To_ls(self,etiq):\n return csv.reader(etiq, delimiter='\\t')\n \n def traiter_donnees(self,vecteur,dico,TSVdic,mot,kValue):\n ligneMatriceDuMot = vecteur[dico[mot]]\n dicCategory = {\"NOM\":0,\"VER\":0,\"ADJ\":0,\"AUX\":0,\"PRE\":0,\"ADV\":0,\"ONO\":0,\"CON\":0,\"ADJ:num\":0,\"PRO:ind\":0,\"ADJ:pos\":0,\"ADJ:int\":0,\"ADJ:ind\":0,\"ADJ:dem\":0,\"ART:def\":0,\"ART:ind\":0,\"PRE\":0,\"PRO:dem\":0,\"PRO:int\":0,\"PRO:per\":0,\n \"PRO:rel\":0,\"PRO:pos\":0}\n lsWordValue = {}\n for i in dico.keys():\n vecteurComparer = vecteur[dico.get(i)-1]\n produit = self.obtenirDistEuclidean(ligneMatriceDuMot,vecteurComparer)\n lsWordValue[i] = produit\n sortedLs = sorted(lsWordValue.items(),key=lambda t: t[1])\n k = 0\n #mesVoisins = []\n i=0\n for key,value in sortedLs:\n if key in TSVdic:\n #mesVoisins.append(TSVdic[key])\n dicCategory[TSVdic[key]]+= 1/((sortedLs[i][1])+1)\n k+=1\n if k>=kValue:\n break\n i=+1\n stringDeVote=\"\"\n for key in dicCategory:\n if dicCategory[key] !=0:\n stringDeVote += key +\" : \"+str(dicCategory[key])+\" \"\n print(\"\\n votes: \",stringDeVote,\" En pouvoir de votes (total de \",kValue,\" votes)\")\n return max(dicCategory, key=dicCategory.get)\n \n \n \n \n 
","repo_name":"Kason-13/SynonymsML","sub_path":"synonymsML/cluster.py","file_name":"cluster.py","file_ext":"py","file_size_in_byte":14029,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"22271558294","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# __author__ = \"Uncle Xiang\"\n# Email: tuxgis@126.com\n# Time: 2020/3/4 19:30\n# version: python \n\nfrom urllib.request import urlopen\nimport json\n\n\ndef wgs84tobaidu(x, y):\n data = str(x) + ',' + str(y)\n output = 'json'\n url = 'http://api.map.baidu.com/geoconv/v1/?coords=' + data + '&from=1&to=5&output=' + output + '&ak=ruXEFRLtTaNikRDAprT5hNGdTYjU3cwz'\n req = urlopen(url)\n res = req.read().decode()\n temp = json.loads(res)\n baidu_x = 0\n baidu_y = 0\n if temp['status'] == 0:\n baidu_x = temp['result'][0]['x']\n baidu_y = temp['result'][0]['y']\n\n return baidu_x, baidu_y\n\nbaidu_x, baidu_y = wgs84tobaidu(111.0160537, 24.2794623)\nprint(baidu_x, baidu_y)","repo_name":"tuxiang-hub/GDAL_demo","sub_path":"WGS84转百度坐标.py","file_name":"WGS84转百度坐标.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"} +{"seq_id":"32527659496","text":"from faker import Faker\n\n\nclass World:\n\n def __init__(self):\n self.faker = Faker()\n\n def hello(self, greeting=None):\n the_greeting = \"Hello world \" + self.faker.name() or greeting\n return the_greeting\n\n\nif __name__ == '__main__':\n world = World()\n print(world.hello())\n","repo_name":"redbass/test_python_in_docker","sub_path":"hello_world.py","file_name":"hello_world.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"2305364164","text":"from pylab import plt\nfrom os import path, mkdir\nimport itertools as it\nfrom datetime import datetime\n\n\ndef get_fig_name(root_folder=path.expanduser(\"~/Desktop/MoneyBootstrapping\"), root_name=\"MB\"):\n\n if not path.exists(root_folder):\n mkdir(root_folder)\n\n fig_name = \"{}/{}_{}.pdf\".format(root_folder, root_name, datetime.now().strftime(\"%Y_%m_%d_%H_%M_%S_%f\"))\n\n return fig_name\n\n\ndef plot(results, parameters, fig_name):\n\n # What is common to all subplots\n fig = plt.figure(figsize=(25, 12))\n\n fig.patch.set_facecolor('white')\n\n line_width = 2\n\n n_lines = 3\n n_columns = 3\n\n counter = it.count(1)\n\n # ----- FOR EACH GENERATION ------ #\n\n # FITNESS\n\n x = range(len(results[\"fitness\"]))\n y = results[\"fitness\"]\n\n ax = plt.subplot(n_lines, n_columns, next(counter))\n ax.set_title(\"Fitness average\\naccording to number of generations\\n\")\n ax.plot(x, y, linewidth=line_width)\n\n # PROPORTION OF EACH TYPE OF EXCHANGE\n\n x_max = len(results[\"exchanges\"])\n x = range(x_max)\n\n ax = plt.subplot(n_lines, n_columns, next(counter))\n ax.set_title(\"Proportion of each type of exchange\\naccording to number of generations\\n\")\n\n type_of_exchanges = sorted([i for i in results[\"exchanges\"][0].keys()])\n y = []\n for i in range(len(type_of_exchanges)):\n y.append([])\n\n for i in range(x_max):\n\n for exchange_idx in range(len(type_of_exchanges)):\n\n y[exchange_idx].append(results[\"exchanges\"][i][type_of_exchanges[exchange_idx]])\n\n ax.set_ylim([-0.02, 1.02])\n\n for exchange_idx in range(len(type_of_exchanges)):\n\n ax.plot(x, y[exchange_idx], label=\"Exchange {}\".format(type_of_exchanges[exchange_idx]), linewidth=line_width)\n\n ax.legend(fontsize=8)\n\n # 
NUMBER OF EXCHANGES GENERATION\n\n x = range(len(results[\"n_exchanges\"]))\n y = results[\"n_exchanges\"]\n\n ax = plt.subplot(n_lines, n_columns, next(counter))\n ax.set_title(\"Total number of exchanges\\naccording to number of generations\\n\")\n ax.plot(x, y, linewidth=line_width)\n\n # NUMBER OF INTERVENTION OF EACH GOOD\n\n x_max = len(results[\"n_exchanges\"])\n x = range(x_max)\n y = []\n for i in range(len(results[\"n_goods_intervention\"][0].keys())):\n y.append([])\n\n for i in range(x_max):\n\n for key in results[\"n_goods_intervention\"][0].keys():\n y[key].append(results[\"n_goods_intervention\"][i][key])\n\n ax = plt.subplot(n_lines, n_columns, next(counter))\n ax.set_title(\"Number of interventions of each good\\naccording to number of generations\\n\")\n\n for key in results[\"n_goods_intervention\"][0].keys():\n\n ax.plot(x, y[key], label=\"Good {}\".format(key), linewidth=line_width)\n\n ax.legend(fontsize=8)\n\n # DIVERSITY OF PRODUCTION\n\n x = range(len(results[\"production_diversity\"]))\n y = results[\"production_diversity\"]\n\n ax = plt.subplot(n_lines, n_columns, next(counter))\n ax.set_title(\"Production diversity\\naccording to number of generations\\n\")\n ax.plot(x, y, linewidth=line_width)\n\n # N PRODUCERS\n\n n_goods = len(results[\"n_producers\"][0])\n\n ax = plt.subplot(n_lines, n_columns, next(counter))\n ax.set_title(\"Number of producers for each good \\n\")\n\n for i in range(n_goods):\n y = [j[i] for j in results[\"n_producers\"]]\n x = range(len(y))\n ax.plot(x, y, linewidth=line_width, label=\"Good {}\".format(i))\n\n ax.legend(fontsize=8)\n\n # GLOBAL PRODUCTION\n\n n_goods = len(results[\"production\"][0])\n\n ax = plt.subplot(n_lines, n_columns, next(counter))\n ax.set_title(\"Global production for each good \\n\")\n\n for i in range(n_goods):\n y = [j[i] for j in results[\"production\"]]\n x = range(len(y))\n ax.plot(x, y, linewidth=line_width, label=\"Good {}\".format(i))\n\n ax.legend(fontsize=8)\n\n # ------ PARAMETERS ----- #\n\n # 5th subplot: PARAMETERS\n ax = plt.subplot(n_lines, n_columns, next(counter))\n ax.set_title(\"Parameters\")\n ax.axis('off')\n\n msg = \"\"\n for key in sorted(parameters.keys()):\n msg += \"{}: {}; \\n\".format(key, parameters[key])\n\n ax.text(0.5, 0.5, msg, ha='center', va='center', size=12)\n\n plt.tight_layout()\n\n plt.savefig(fig_name)\n\n plt.close()\n\n\ndef graph(results, parameters, root_folder, root_name):\n\n fig_name = get_fig_name(root_folder=root_folder, root_name=root_name)\n plot(results, parameters, fig_name)\n","repo_name":"AurelienNioche/BootstrappingMoney","sub_path":"graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":4386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"35482777669","text":"\"\"\"Flask decorators and helpers to set up Microkubes security for Flask apps.\n\"\"\"\nfrom json import dumps\nfrom functools import wraps\n\nfrom flask import request, make_response, g, session, redirect\n\nfrom microkubes.security.jwt import JWTProvider\nfrom microkubes.security.oauth2 import OAuth2Provider\nfrom microkubes.security.saml import SAMLServiceProvider\nfrom microkubes.security.acl import ACLProvider\nfrom microkubes.security.auth import SecurityContext\nfrom microkubes.security.chain import (\n Request,\n Response,\n SecurityException,\n SecurityChain,\n is_authenticated_provider,\n public_routes_provider,\n)\nfrom microkubes.security.keys import KeyStore\n\n\nclass FlaskSecurityError(Exception):\n 
\"\"\"Error during security setup for Flask apps.\n \"\"\"\n\n pass\n\n\nclass FlaskSecurityContext(SecurityContext):\n \"\"\"SecurityContext that wraps Flask's ``g`` object. Request Scoped.\n \"\"\"\n\n def __init__(self):\n super(FlaskSecurityContext, self).__init__(g)\n\n\nclass Security:\n \"\"\"Main security object.\n\n Contains the configured :class:`microkubes.security.chain.SecurityChain` and\n executes it for every secured action.\n\n A secured action is an action decorated with :func:`Security.secured`.\n\n Given that we have configured a SecurityChain, we can protect/secure Flask actions like this:\n\n .. code-block:: python\n\n sec = Security(security_chain=chain, context=FlaskSecurityContext())\n\n @app.route('/')\n @sec.secured\n def my_action():\n return 'My secured action.'\n\n \"\"\"\n\n def __init__(self, security_chain, context, json_response=True):\n self.security_chain = security_chain\n self.context = context\n self.json_response = json_response\n\n def check(self):\n \"\"\"Perform the check for the given Request in the current SecurityContext.\n\n :returns: ``tuple``: ``(allowed, flask_response, redirect_url)``, where:\n * ``allowed`` is ``bool`` indicating whether the request is allowed to proceed.\n * ``flask_response`` - :class:`Response` the Flask response. May be ``None``.\n \"\"\"\n req = Request(request)\n resp = Response()\n try:\n self.security_chain.execute(req, resp)\n except SecurityException as security_error:\n if resp.redirect_url is not None:\n return (False, None, resp.redirect_url)\n if self.json_response:\n flask_response = make_response(\n dumps(\n {\n 'code': security_error.status_code,\n 'message': str(security_error),\n }\n ),\n security_error.status_code,\n )\n else:\n flask_response = make_response(\n str(security_error), security_error.status_code\n )\n if security_error.headers:\n flask_response.headers.update(security_error.headers)\n return (False, flask_response, None)\n\n if resp.modified or resp.ended:\n flask_response = make_response(resp.get_response_body(), resp.status_code)\n flask_response.status = resp.status\n if resp.headers:\n flask_response.headers.update(resp.headers)\n return (True, flask_response, None)\n return (True, None, None)\n\n def secured(self, decorated):\n \"\"\"Decorator for security check.\n\n When decorated with ``@security.secured``, the decorated method will not be called\n unless the security check passes.\n\n :param decorated: ``function``, the decorated method.\n\n :returns: ``function``, the security decorator for the given method or function..\n \"\"\"\n\n @wraps(decorated)\n def _secured_method(*args, **kwargs):\n allowed, flask_resp, redirect_url = self.check()\n if redirect_url is not None:\n return redirect(redirect_url)\n if not allowed:\n if flask_resp:\n return flask_resp\n return make_response('Request denied', 403)\n if flask_resp:\n return flask_resp\n return decorated(*args, **kwargs)\n\n return _secured_method\n\n\n_SECURITY_CONTEXT = FlaskSecurityContext() # Default g-based security context.\n\n\nclass FlaskSecurity:\n \"\"\"Flask security builder.\n\n Builds new Microkubes enabled security for Flask applications.\n In the background it generates a ``SecurityChain`` and :class:`Security` that can be\n used to secure the Flask's endpoints.\n\n Example setup:\n\n .. code-block:: python\n\n from flask import Flask\n from microkubes.security import FlaskSecurity\n\n app = Flask(__name__)\n\n sec = (FlaskSecurity(). # new security with default secuity context\n keys_dir('./keys'). 
# the RSA keys are in this directory\n static_files(r'.*\\\\.js', r'.*\\\\.css', r'.*\\\\.png', r'.*\\\\.jpg', r'.*\\\\.jpeg'). # ignore these\n public_route('/public/.*'). # ignore this too\n jwt(). # Support JWT\n oauth2(). # Support OAuth2\n build()) # Finally, build the security\n\n @app.route(\"/\")\n @sec.secured\n def hello_world():\n return 'hello world'\n\n :param context: :class:`microkubes.security.auth.SecurityContext` - the security context to be used.\n By default :class:`FlaskSecurityContext` is used.\n :param key_store: :class:`microkubes.security.keys.KeyStore`, ``KeyStore`` instance.\n\n \"\"\"\n\n def __init__(self, context=None, key_store=None):\n self.key_store = key_store\n context = context or _SECURITY_CONTEXT\n self._context = context\n self._chain = SecurityChain(security_context=context)\n self._public_routes = []\n self._jwt_provider = None\n self._oauth_provider = None\n self._saml_sp = None\n self._acl_provider = None\n self._other_providers = []\n self._prefer_json_respose = True\n\n def keys_dir(self, path):\n \"\"\"Load the keys for the ``KeyStore`` from this directory.\n\n :param path: ``str``, keys directory path.\n\n :returns: :class:`FlaskSecurity`.\n \"\"\"\n if self.key_store:\n raise FlaskSecurityError('KeyStore is already defined.')\n self.key_store = KeyStore(dir_path=path)\n return self\n\n def use_key(self, key_name, key_file):\n \"\"\"Add mapped key to the ``KeyStore``.\n\n :param key_name: ``str``, the name (id) of the key.\n :param key_file: ``str``, path to the key file.\n\n :returns: :class:`FlaskSecurity`.\n \"\"\"\n if not self.key_store:\n self.key_store = KeyStore()\n self.key_store.add_key(key_name, key_file)\n return self\n\n def jwt(self, header='Authorization', schema='Bearer', algs=None):\n \"\"\"Setup JWT security provider.\n\n This provider tries to decode and create auth from a JWT in the HTTP request.\n\n :param header: ``str``, the name of the HTTP auth header. Default is ``Authorization``.\n :param schema: ``str``, the auth schema used for the auth HTTP header. By default this is ``Bearer`` token.\n :param algs: ``list``, list of accepted signing algorithms. If not specified assumes ``HS256`` and ``RS256``.\n\n :returns: :class:`FlaskSecurity`.\n \"\"\"\n if not self.key_store:\n raise FlaskSecurityError(\n 'KeyStore must be defined before setting up the JWT provider.'\n )\n self._jwt_provider = JWTProvider(\n self.key_store, header=header, auth_schema=schema, algs=algs\n )\n return self\n\n def oauth2(self, algs=None):\n \"\"\"Setup OAuth2 security provider.\n\n This provider will try to decode and validate an OAuth2 token. All tokens with Microkubes are\n self-contained and are JWTs.\n\n :param algs: ``list``, list of accepted signing algorithms. 
If not specified assumes ``HS256`` and ``RS256``.\n\n :returns: :class:`FlaskSecurity`.\n \"\"\"\n if not self.key_store:\n raise FlaskSecurityError(\n 'KeyStore must be defined before setting up the OAuth2 provider.'\n )\n self._oauth_provider = OAuth2Provider(key_store=self.key_store, algs=algs)\n return self\n\n def saml(self, config=None):\n \"\"\"Setup SAML SP\n\n :param config: ``dict``, the SAML SP config\n\n :returns: :class:`FlaskSecurity`.\n \"\"\"\n if not self.key_store:\n raise FlaskSecurityError(\n 'KeyStore must be defined before setting up the SAML service provider.'\n )\n if not config:\n raise FlaskSecurityError('SAML config not provided')\n\n self._saml_sp = SAMLServiceProvider(\n self.key_store, config, saml_session=session\n )\n\n return self\n\n def acl(self, config=None):\n \"\"\"Setup ACL provider\n\n :param app: :class:`flask.Flask`, current ``Flask`` instance.\n :param config: ``dict``, the ACL provider config\n\n :returns: :class:`FlaskSecurity`.\n \"\"\"\n if not config:\n raise FlaskSecurityError('ACL config not provided')\n\n self._acl_provider = ACLProvider(config)\n\n return self\n\n def public_route(self, *args):\n \"\"\"Add public routes that will be ignored and not checked by the security.\n\n A public route is a HTTP request path that is publicly accessible and does not need authorization\n for access.\n\n :param args: variadic args, ``list`` of exact routes or regexp patterns to match against. All routes\n are treated as regular expression pattern, so:\n\n * ``/public/resource`` matches exactly this pattern\n * ``/public/.*`` matches both ``/public/a`` and ``/public/file.js`` but not ``/protected/file``\n * ``.*\\\\.js`` matches all js files - both ``/public/file.js`` and ``/protected/file.js``\n\n :returns: :class:`FlaskSecurity`.\n \"\"\"\n self._public_routes.append(public_routes_provider(*args))\n return self\n\n def static_files(self, *args):\n \"\"\"Alias for ``public_routes`` method.\n\n The only difference is a semantic one, to allow more readablity in the code.\n \"\"\"\n return self.public_route(*args)\n\n def add_provider(self, provider, position='last'):\n \"\"\"Add custom security provider to the security chain.\n\n The chain executes multiple providers, in order, when processing a request.\n This method adds new provider to the security chain's providers list.\n\n Because there are some defult providers that are set up in a certain order, you can provide\n a ``position`` and place the custom provider anuwhere in the chain.\n If not given, the provider is appended near the end of the list (the provider that checks if\n the request is authenticated is always last).\n\n In general, these are the available positions, and corresponding placement for the providers:\n\n +-----+--------------------------+------------------------------+\n | seq | POSITION | PROVIDERS |\n +-----+--------------------------+------------------------------+\n | 1 | first | custom providers |\n +-----+--------------------------+------------------------------+\n | 2 | N/A | public_routes |\n +-----+--------------------------+------------------------------+\n | 3 | before_jwt, after_public | custom providers |\n +-----+--------------------------+------------------------------+\n | 4 | N/A | jwt_provider |\n +-----+--------------------------+------------------------------+\n | 5 | after_jwt, before_oauth2 | custom providers |\n +-----+--------------------------+------------------------------+\n | 6 | N/A | oauth2_provider |\n 
+-----+--------------------------+------------------------------+\n | 7 | last | custom providers |\n +-----+--------------------------+------------------------------+\n | 8 | N/A | is_authenticated_provider |\n +-----+--------------------------+------------------------------+\n | 9 | final | custom providers |\n +-----+--------------------------+------------------------------+\n\n :param provider: ``function``, the security provider to add\n :param position: ``str``, at which position in the chain to add the provider. One of ``first``,\n ``before_jwt``, ``after_public``, ``after_jwt``, ``before_oauth2``, ``last`` and ``final`` is allowed.\n Default is ``last``.\n\n :returns: :class:`FlaskSecurity`.\n \"\"\"\n position = position or 'last'\n self._other_providers.append((provider, position))\n return self\n\n def _merge_providers(self):\n providers = []\n\n for provider, position in self._other_providers:\n if position == 'first':\n providers.append(provider)\n\n for provider in self._public_routes:\n providers.append(provider)\n\n for provider, position in self._other_providers:\n if position in ['before_jwt', 'after_public']:\n providers.append(provider)\n\n if self._jwt_provider:\n providers.append(self._jwt_provider)\n\n for provider, position in self._other_providers:\n if position in ['after_jwt', 'before_oauth', 'before_oauth2']:\n providers.append(provider)\n\n if self._oauth_provider:\n providers.append(self._oauth_provider)\n\n if self._saml_sp:\n providers.append(self._saml_sp)\n\n if self._acl_provider:\n providers.append(self._acl_provider)\n\n for provider, position in self._other_providers:\n if position in ['after_oauth', 'after_oauth2', 'last']:\n providers.append(provider)\n\n providers.append(is_authenticated_provider)\n\n for provider, position in self._other_providers:\n if position == 'final':\n providers.append(provider)\n\n return providers\n\n def build_chain(self):\n \"\"\"Build a :class:`microkubes.security.chain.SecurityChain` from the configured values.\n\n :returns: the :class:`microkubes.security.chain.SecurityChain`\n \"\"\"\n if not self.key_store:\n raise FlaskSecurityError('Please define a KeyStore.')\n\n for provider in self._merge_providers():\n self._chain.provider(provider)\n\n return self._chain\n\n def build(self):\n \"\"\"Build a ``Security`` from the configured values.\n\n It builds a security chain and configures a new security to be used in flask apps.\n\n :returns: :class:`Security`.\n \"\"\"\n chain = self.build_chain()\n security = Security(\n security_chain=chain,\n context=self._context,\n json_response=self._prefer_json_respose,\n )\n return security\n","repo_name":"Microkubes/microkubes-python","sub_path":"microkubes/security/flask.py","file_name":"flask.py","file_ext":"py","file_size_in_byte":15524,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"} +{"seq_id":"34910014876","text":"import urllib.request, urllib.parse, urllib.error\nfrom bs4 import BeautifulSoup\nimport ssl\n\n# Sample links:\n# http://py4e-data.dr-chuck.net/comments_42.html (o/p: Count=50, Sum=2553)\n# http://py4e-data.dr-chuck.net/comments_975457.html (o/p: Count=50, Sum=2726)\n\nctx = ssl.create_default_context()\nctx.check_hostname = False\nctx.verify_mode = ssl.CERT_NONE\n\nurl=input('Enter - ')\nif len(url)<1: url='http://py4e-data.dr-chuck.net/comments_42.html' #Default URL\nhtml = urllib.request.urlopen(url, context=ctx).read()\nsoup = BeautifulSoup(html, 'html.parser')\ntags = soup('span')\nsum=0\ncount=0\nfor tag in 
tags :\n\tcount = count + 1\n\tsum = sum + int(tag.contents[0])\nprint('Count',count)\nprint('Sum',sum)","repo_name":"SpooderManEXE/Hacktoberfest2020-Expert","sub_path":"Python Programs/Access Data/scrapping_data.py","file_name":"scrapping_data.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"70"} +{"seq_id":"35133045386","text":"from django.db.models.signals import post_save, pre_delete, m2m_changed\nfrom django.dispatch import receiver\nfrom .models import CustomerContract, Customer\nfrom django.contrib.auth.models import Group\n#==================================\n#====== Customer Contract =========\n#==================================\n@receiver(m2m_changed, sender=CustomerContract.option.through)\ndef m2m_changed_customer_contract(sender, instance, action, **kwargs):\n for user in instance.customer.user.all():\n for pk in kwargs['pk_set']:\n if action == 'post_add':\n user.groups.add(Group.objects.get(pk=pk))\n elif action == 'post_remove':\n user.groups.remove(Group.objects.get(pk=pk))\n\n@receiver(post_save, sender=CustomerContract)\ndef post_save_create_or_update_contract(sender, instance, **kwargs):\n if instance.is_active:\n #1)Set tất cả các hợp đồng khác về inactive\n # CustomerContract.objects.filter(customer=instance.customer).exclude(pk=instance.pk).update(is_active = 0)\n\n #2)Phân quyền\n for user in instance.customer.user.all():\n user.groups.clear()\n user.groups.add(instance.plan)\n for option in instance.option.all():\n user.groups.add(option)\n\n@receiver(pre_delete, sender=CustomerContract)\ndef pre_delete_customer_contract(sender, instance, **kwargs):\n if instance.is_active:\n for user in instance.customer.user.all():\n user.groups.clear()","repo_name":"trungphan9559/ACCU","sub_path":"accu_project/SourceCode/f_1_1_customers/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"24285975622","text":"#!/usr/bin/python\n# -*- coding: sjis -*-\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport numpy as np\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n# Data setting\n# en-id読み込み\nid, eid2w, ew2id = 1, {}, {}\nwith open('train.en.vocab.4k','r',encoding='utf-8') as f:\n for w in f:\n w = w.strip()\n eid2w[id] = w\n ew2id[w] = id\n id += 1\nev = id\n\n# en-testデータの読み込み\nedata = []\nwith open('train.en','r',encoding='utf-8') as f:\n for sen in f:\n wl = [ew2id['']]\n for w in sen.strip().split():\n if w in ew2id:\n wl.append(ew2id[w])\n else:\n wl.append(ew2id[''])\n wl.append(ew2id[''])\n edata.append(wl)\n\n# ja-id読み込み\nid, jid2w, jw2id = 1, {}, {}\nwith open('train.ja.vocab.4k','r',encoding='utf-8') as f:\n id = 1\n for w in f:\n w = w.strip()\n jid2w[id] = w\n jw2id[w] = id\n id += 1\njv = id\n\n# ja-testデータの読み込み\njdata = []\nwith open('train.ja','r',encoding='utf-8') as f:\n for sen in f:\n wl = [jw2id['']]\n for w in sen.strip().split():\n if w in jw2id:\n wl.append(jw2id[w])\n else:\n wl.append(jw2id[''])\n wl.append(jw2id[''])\n jdata.append(wl)\n\n# Define model\n\nclass MyAttNMT(nn.Module):\n def __init__(self, jv, ev, k):\n super(MyAttNMT, self).__init__()\n # 単語埋め込み\n self.jemb = nn.Embedding(jv, k)\n self.eemb = nn.Embedding(ev, k)\n # lstm2層\n self.lstm1 = nn.LSTM(k, k, num_layers=2,\n batch_first=True)\n self.lstm2 = nn.LSTM(k, k, num_layers=2,\n 
batch_first=True)\n self.Wc = nn.Linear(2*k, k)\n self.W = nn.Linear(k, ev)\n def forward(self, jline, eline):\n x = self.jemb(jline)\n # 入力 : (seq_len, batch, input_size)\n # 出力:LSTMと同様に出力はoutとhcと2つ(hcのほうは通常のLSTMと同様にhc=(h,c)とタプル形式)\n # outの各要素の次元がLSTMの隠れ層の次元のサイズではなく、その倍の値になっている\n # hcの各要素hやcが2つ返ってきている\n # outの最後の要素の前半分はhc=(h,c)としたときのh[0]と一致\n # outの最初の要素の後ろ半分はhc=(h,c)としたときのh[1]と一致\n ox, (hnx, cnx) = self.lstm1(x)\n y = self.eemb(eline)\n oy, (hny, cny) = self.lstm2(y,(hnx, cnx))\n # permute:次元の位置を入れ替える\n ox1 = ox.permute(0,2,1)\n # bmm:batchごとに内積を計算する\n sim = torch.bmm(oy,ox1)\n\n bs, yws, xws = sim.shape\n sim2 = sim.reshape(bs*yws,xws)\n alpha = F.softmax(sim2,dim=1).reshape(bs, yws, xws)\n ct = torch.bmm(alpha,ox)\n oy1 = torch.cat([ct,oy],dim=2)\n oy2 = self.Wc(oy1)\n return torch.tanh(self.W(oy2))\n\n# model generate, optimizer and criterion setting\n\ndemb = 200\nnet = MyAttNMT(jv, ev, demb).to(device)\noptimizer = optim.SGD(net.parameters(),lr=0.01)\ncriterion = nn.CrossEntropyLoss()\n\n# Learn\n\nfor epoch in range(20):\n loss1K = 0.0\n for i in range(len(jdata)):\n jinput = torch.LongTensor([jdata[i][1:]]).to(device)\n einput = torch.LongTensor([edata[i][:-1]]).to(device)\n out = net(jinput, einput)\n gans = torch.LongTensor([edata[i][1:]]).to(device)\n loss = criterion(out[0],gans[0])\n loss1K += loss.item()\n if (i % 100 == 0):\n print(epoch, i, loss1K)\n loss1K = 0.0\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n outfile = \"outputs/attnmt-\" + str(epoch) + \".model\"\n torch.save(net.state_dict(),outfile)\n","repo_name":"Momo227/Pytorch_practice","sub_path":"lesson4/att-nmt.py","file_name":"att-nmt.py","file_ext":"py","file_size_in_byte":3711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"72800390307","text":"#----Import self made modules\r\nimport txtopp as to # find txtopp module code as a github repository on my account\r\nimport list_module as lm\r\n\r\n\r\n#main convert function\r\ndef convert(value, inunit, outunit, quantity):\r\n #define 2 directories as variables\r\n udir = f'database/{quantity}/{quantity}_units.txt'\r\n cdir = f'database/{quantity}/{quantity}_convo.txt'\r\n\r\n\r\n # use txtopp module to convert the data in the files into a list\r\n units = to.read_list(file=udir, separator='\\n')\r\n convo = to.read_list(file=cdir, separator='\\n')\r\n\r\n\r\n #main dictionary\r\n unitsdict = {}\r\n\r\n\r\n #funtion to insert data into dictionary\r\n def dict():\r\n unitsdict.clear()\r\n for i in range(len(units)):\r\n unitsdict[units[i]] = float(convo[i])\r\n dict()\r\n\r\n\r\n #modifing the data to match output unit\r\n for item in convo:\r\n y = float(item)/unitsdict[outunit]\r\n lm.replace(x=item, y=y, list=convo)\r\n dict()\r\n \r\n\r\n #calculating the answer and returning it\r\n ans = str(float(value)*float(unitsdict[inunit])) + ' '+outunit\r\n return ans","repo_name":"armaanPYTHON/python-unit-converter","sub_path":"ucon.py","file_name":"ucon.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"} +{"seq_id":"8014883852","text":"'''\nhttps://programmers.co.kr/learn/courses/30/lessons/42883?language=python3\n\nnumber\tk\treturn\n\"1924\"\t2\t\"94\"\n\"1231234\"\t3\t\"3234\"\n\"4177252841\"\t4\t\"775841\"\n'''\n\ndef solution2(number, k):\n answer = ''\n while k > 0:\n new_numbers = []\n for i in range(len(number)):\n new_numbers.append(int(number[:i] + number[i+1:]))\n number = 
str(max(new_numbers))\n k -= 1\n answer = number\n return answer\n\ndef solution(number, k):\n answer = ''\n remain = len(number) - k\n max_idx = 0\n while remain > 0:\n for i in range(max_idx, len(number) - (remain - 1)):\n if int(number[max_idx]) < int(number[i]):\n max_idx = i\n answer += number[max_idx]\n max_idx += 1\n remain -= 1\n\n return answer\n\nif __name__ == \"__main__\":\n number = \"4177252841\"\n k = 2\n print(solution(number, k))","repo_name":"hong-sh/coding_test_practice","sub_path":"programmers_high_score_kit/greedy/greedy03.py","file_name":"greedy03.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"23690823880","text":"\"\"\"\nExample script for running an experiment to maximize activity of a unit\n- images are written to disk\n- the program waits for a .mat file that contains responses to the given images;\n in the meantime, some separate program(s) should present the images to a subject,\n record responses from a unit online, and write responses to a .mat file\n- the responses are loaded and used to optimize the images\n- the process is repeated iteratively until manually stopped (by a keyboard interrupt)\n\"\"\"\n\nfrom Experiments import EphysExperiment\n\n\n# set file directories for I/O\ninitcode_dir = None\nnatural_stimuli_dir = None # to be changed; dir containing control images to be shown along with generated ims\nproject_dir = 'demo' # to be changed; dir for writing experiment data & logs\n\n\n# set parameters\nnthreads = 2\nnchannels_per_thread = 1 # unused if nthreads == 1\nichannels = None # 0-based index or list of indices for channels used in score; None means average all\n\nn_natural_stimuli = 20 # None means default to however many is in natstimdir\ncycle_natural_stimuli = True # True: cycle through images in natstimdir\n\nimage_size = 83 # size (height/width) to which to resize synthesized & natural images\nrandom_seed = 0 # seed for all random generators used in the experiment (to ensure reproducibility)\n\noptimizer_name = 'genetic'\noptimizer_parameters =\\\n {'generator_name': 'deepsim-fc6', # see net_catalogue for available options\n # 'generator_parameters': {'engine': 'pytorch'}, # see Generators.py for available options\n 'initial_codes_dir': initcode_dir,\n 'population_size': 20, # size of population each generation\n 'mutation_rate': 0.5, # fraction of code elements to mutate(on average); range 0 - 1\n 'mutation_size': 0.5, # magnitude of mutation (on average); meaningful range 0 - ~1.5\n 'selectivity': 2, # selective pressure, with higher being more selective; range 0 - inf\n 'heritability': 0.5, # how much one parent (of 2) contributes to each progeny; meaningful range 0.5 - 1\n 'n_conserve': 1} # number of best images to keep untouched per generation; range 0 - populationsize\n\n\nif __name__ == '__main__':\n experiment = EphysExperiment(\n project_dir, optimizer_name, optimizer_parameters,\n ichannels, nthreads=nthreads, nchannels_per_thread=nchannels_per_thread, image_size=image_size,\n natural_stimuli_dir=natural_stimuli_dir, n_natural_stimuli=n_natural_stimuli,\n cycle_natural_stimuli=cycle_natural_stimuli, random_seed=random_seed, config_file_path=__file__)\n experiment.run()\n","repo_name":"willwx/XDream","sub_path":"examples/experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":2733,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"70"} 
+{"seq_id":"26243865068","text":"from django.contrib import admin\nfrom django.urls import path, include\nfrom . import views\n\napp_name = 'main-site'\n\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', views.homepage, name=\"homepage\"),\n path('about/', views.about_page, name=\"about\"),\n path('tattooblog/', include(\"tattooblog.urls\")),\n path('accounts/', include(\"accounts.urls\")),\n]\n","repo_name":"Olivertrain1221/body_beauty","sub_path":"p4beautybody/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"} +{"seq_id":"74743155107","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtWidgets import QMessageBox\n\n# DETECT PLATFORM\nperm = 0\n\nfrom sys import platform\n\nif platform == \"linux\" or platform == \"linux2\":\n perm = 1\n\n\n\n#IMPORTER LES PACKAGES\n\ntry:\n import serial\n import serial.tools.list_ports\nexcept:\n print(\"Erreur durant le chargement du package 'Serial'\\nLancement du téléchargement\")\n install(\"pyserial\")\n\n\ntry:\n import subprocess\nexcept:\n print(\"Erreur durant le chargement du package 'subprocess'\\nLancement du téléchargement\")\n install(\"subprocess\")\n\ntry:\n import pyfirmata\nexcept:\n print(\"Erreur durant le chargement du package 'Pyfirmata'\\nLancement du téléchargement\")\n install(\"pyfirmata\")\n\n\n#INSTALLER LES PACKAGES INTROUVABLES\ndef install(package):\n system.os(\"python3 -m pip install {}\".format(package))\n \n# PLUG ARDUINO\n\nplugpage = False\nplugstart = 0\nportlist = serial.tools.list_ports.comports()\nerreurport = \"\"\n\nclass Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(800, 600)\n MainWindow.setMinimumSize(QtCore.QSize(800, 600))\n MainWindow.setMaximumSize(QtCore.QSize(800, 600))\n MainWindow.setStyleSheet(\"background: #EAF2EF;\")\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.title = QtWidgets.QLabel(self.centralwidget)\n self.title.setGeometry(QtCore.QRect(40, 10, 111, 41))\n self.title.setStyleSheet(\"background: transparent;\\n\"\n\"font: 81 22pt \\\"Cantarell\\\";\\n\"\n\"color: black;\")\n self.title.setObjectName(\"title\")\n self.line1 = QtWidgets.QFrame(self.centralwidget)\n self.line1.setGeometry(QtCore.QRect(60, 50, 44, 3))\n self.line1.setMinimumSize(QtCore.QSize(0, 0))\n self.line1.setMaximumSize(QtCore.QSize(16777211, 16777215))\n self.line1.setStyleSheet(\"background-color: white;\\n\"\n\"color: black;\")\n self.line1.setFrameShape(QtWidgets.QFrame.HLine)\n self.line1.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line1.setObjectName(\"line1\")\n self.notification = QtWidgets.QPushButton(self.centralwidget)\n self.notification.setGeometry(QtCore.QRect(750, 10, 31, 31))\n self.notification.setAutoFillBackground(False)\n self.notification.setStyleSheet(\"\")\n self.notification.setText(\"\")\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(\"images/notification.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.notification.setIcon(icon)\n self.notification.setIconSize(QtCore.QSize(30, 30))\n self.notification.setFlat(True)\n self.notification.setObjectName(\"notification\")\n self.information = QtWidgets.QLabel(self.centralwidget)\n self.information.setGeometry(QtCore.QRect(30, 140, 111, 181))\n self.information.setStyleSheet(\"background-color: 
#fcf1eb;\\n\"\n\"background: qlineargradient(x1:0, y1:0, x10:1, y2:2, stop: 0 rgb(234, 242, 239), stop:0.7 rgba(151, 197, 230));\\n\"\n\"border-radius: 15px;\")\n self.information.setText(\"\")\n self.information.setObjectName(\"information\")\n self.arduino = QtWidgets.QLabel(self.centralwidget)\n self.arduino.setGeometry(QtCore.QRect(170, 140, 111, 181))\n self.arduino.setStyleSheet(\"background-color: #fcf1eb;\\n\"\n\"background: qlineargradient(x1:0, y1:0, x10:1, y2:2, stop: 0 rgb(234, 242, 239), stop:0.7 rgb(37, 150, 190));\\n\"\n\"border-radius: 15px;\")\n self.arduino.setText(\"\")\n self.arduino.setObjectName(\"arduino\")\n self.liaison = QtWidgets.QLabel(self.centralwidget)\n self.liaison.setGeometry(QtCore.QRect(310, 140, 111, 181))\n self.liaison.setStyleSheet(\"background-color: #fcf1eb;\\n\"\n\"background: qlineargradient(x1:0, y1:0, x10:1, y2:2, stop: 0 rgb(234, 242, 239), stop:0.7 rgb(18, 78, 120));\\n\"\n\"border-radius: 15px;\")\n self.liaison.setText(\"\")\n self.liaison.setObjectName(\"liaison\")\n self.label = QtWidgets.QLabel(self.centralwidget)\n self.label.setGeometry(QtCore.QRect(190, 160, 71, 71))\n self.label.setStyleSheet(\"background: transparent;\")\n self.label.setText(\"\")\n self.label.setPixmap(QtGui.QPixmap(\"images/arduino.png\"))\n self.label.setScaledContents(True)\n self.label.setObjectName(\"label\")\n self.bluetoothbutton = QtWidgets.QPushButton(self.centralwidget)\n self.bluetoothbutton.setGeometry(QtCore.QRect(180, 270, 91, 31))\n self.bluetoothbutton.setStyleSheet(\"QPushButton#bluetoothbutton {\\n\"\n\"color: black;\\n\"\n\"font: 75 13pt \\\"Bitstream Vera Sans\\\";\\n\"\n\"background-color: white;\\n\"\n\"border-radius: 10px;\\n\"\n\"}\\n\"\n\"QPushButton#bluetoothbutton:hover {\\n\"\n\"color: black;\\n\"\n\"font: 75 13pt \\\"Bitstream Vera Sans\\\";\\n\"\n\"background-color: #F0F0C9;\\n\"\n\"border-radius: 10px;\\n\"\n\"}\\n\"\n\"\\n\"\n\"QPushButton#bluetoothbutton:pressed{\\n\"\n\"color: black;\\n\"\n\"font: 75 13pt \\\"Bitstream Vera Sans\\\";\\n\"\n\"background-color: #F0F0C9;\\n\"\n\"border-radius: 10px;\\n\"\n\"}\\n\"\n\"\\n\"\n\"\\n\"\n\"\")\n self.bluetoothbutton.setText(\"\")\n icon1 = QtGui.QIcon()\n icon1.addPixmap(QtGui.QPixmap(\"images/bluetooth.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.bluetoothbutton.setIcon(icon1)\n self.bluetoothbutton.setIconSize(QtCore.QSize(25, 22))\n self.bluetoothbutton.setFlat(True)\n self.bluetoothbutton.setObjectName(\"bluetoothbutton\")\n self.arduinopair = QtWidgets.QWidget(self.centralwidget)\n self.arduinopair.setGeometry(QtCore.QRect(0, 0, 801, 601))\n self.arduinopair.setObjectName(\"arduinopair\")\n self.title_2 = QtWidgets.QLabel(self.arduinopair)\n self.title_2.setGeometry(QtCore.QRect(40, 10, 91, 41))\n self.title_2.setStyleSheet(\"background: transparent;\\n\"\n\"font: 81 22pt \\\"Cantarell\\\";\\n\"\n\"color: black;\")\n self.title_2.setObjectName(\"title_2\")\n self.line1_2 = QtWidgets.QFrame(self.arduinopair)\n self.line1_2.setGeometry(QtCore.QRect(60, 50, 49, 3))\n self.line1_2.setMinimumSize(QtCore.QSize(0, 0))\n self.line1_2.setMaximumSize(QtCore.QSize(16777211, 16777215))\n self.line1_2.setStyleSheet(\"background-color: white;\\n\"\n\"color: black;\")\n self.line1_2.setFrameShape(QtWidgets.QFrame.HLine)\n self.line1_2.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line1_2.setObjectName(\"line1_2\")\n self.plugbutton = QtWidgets.QPushButton(self.arduinopair)\n self.plugbutton.setGeometry(QtCore.QRect(730, 530, 51, 51))\n self.plugbutton.setStyleSheet(\"QPushButton#plugbutton 
{\\n\"\n\"color: black;\\n\"\n\"font: 75 13pt \\\"Bitstream Vera Sans\\\";\\n\"\n\"background-color: white;\\n\"\n\"border-radius: 10px;\\n\"\n\"border: 2px solid black;\\n\"\n\"}\\n\"\n\"QPushButton#plugbutton:hover {\\n\"\n\"color: black;\\n\"\n\"font: 75 13pt \\\"Bitstream Vera Sans\\\";\\n\"\n\"background-color: #F0F0C9;\\n\"\n\"border-radius: 10px;\\n\"\n\"border: 2px solid black;\\n\"\n\"}\\n\"\n\"\\n\"\n\"QPushButton#plugbutton:pressed{\\n\"\n\"color: black;\\n\"\n\"font: 75 13pt \\\"Bitstream Vera Sans\\\";\\n\"\n\"background-color: #F0F0C9;\\n\"\n\"border: 2px solid black;\\n\"\n\"border-radius: 10px;\\n\"\n\"\\n\"\n\"}\\n\"\n\"\\n\"\n\"\\n\"\n\"\")\n self.plugbutton.setText(\"\")\n icon2 = QtGui.QIcon()\n icon2.addPixmap(QtGui.QPixmap(\"images/power.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.plugbutton.setIcon(icon2)\n self.plugbutton.setIconSize(QtCore.QSize(40, 40))\n self.plugbutton.setFlat(True)\n self.plugbutton.setObjectName(\"plugbutton\")\n self.arduinowidget = QtWidgets.QWidget(self.arduinopair)\n self.arduinowidget.setGeometry(QtCore.QRect(0, 60, 801, 461))\n self.arduinowidget.setObjectName(\"arduinowidget\")\n self.listWidget = QtWidgets.QListWidget(self.arduinowidget)\n self.listWidget.setGeometry(QtCore.QRect(20, 60, 141, 171))\n self.listWidget.setStyleSheet(\"border: 2px solid black;\\n\"\n\"border-radius: 15px;\\n\"\n\"color: #191919;\")\n self.listWidget.setObjectName(\"listWidget\")\n item = QtWidgets.QListWidgetItem()\n self.listWidget.addItem(item)\n self.arduinoportlineedit = QtWidgets.QLineEdit(self.arduinowidget)\n self.arduinoportlineedit.setGeometry(QtCore.QRect(31, 240, 113, 33))\n self.arduinoportlineedit.setStyleSheet(\"border: 3px solid black,;\\n\"\n\"border-radius: 10px;\\n\"\n\"color: #4287f5;\\n\"\n\"font: 11pt \\\"MathJax_Size4\\\";\")\n self.arduinoportlineedit.setEchoMode(QtWidgets.QLineEdit.Normal)\n self.arduinoportlineedit.setDragEnabled(False)\n self.arduinoportlineedit.setClearButtonEnabled(False)\n self.arduinoportlineedit.setObjectName(\"arduinoportlineedit\")\n self.textearduinoblebrowser = QtWidgets.QTextBrowser(self.arduinowidget)\n self.textearduinoblebrowser.setGeometry(QtCore.QRect(250, 60, 301, 111))\n self.textearduinoblebrowser.setStyleSheet(\"border: 2px solid black;\\n\"\n\"border-radius: 10px;\\n\"\n\"color: black;\")\n self.textearduinoblebrowser.setObjectName(\"textearduinoblebrowser\")\n self.connectionbutton = QtWidgets.QPushButton(self.arduinowidget)\n self.connectionbutton.setGeometry(QtCore.QRect(30, 280, 113, 31))\n self.connectionbutton.setStyleSheet(\"border-radius: 10px;\\n\"\n\"color: #4287f5;\\n\"\n\"border: 3px solid black;\")\n self.connectionbutton.setObjectName(\"connectionbutton\")\n MainWindow.setCentralWidget(self.centralwidget)\n\n self.retranslateUi(MainWindow)\n \n # DEBUT\n\n self.arduinopair.hide()\n self.arduinowidget.hide()\n\n # BOUTONS\n\n\n self.bluetoothbutton.clicked.connect(self.arduinopair.show)\n self.plugbutton.clicked.connect(self.plugarduino)\n self.connectionbutton.clicked.connect(self.connectarduino)\n\n\n\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MalSpace\"))\n self.title.setText(_translate(\"MainWindow\", \"Accueil\"))\n self.title_2.setText(_translate(\"MainWindow\", \"Clé BLE\"))\n __sortingEnabled = self.listWidget.isSortingEnabled()\n self.listWidget.setSortingEnabled(False)\n item = self.listWidget.item(0)\n 
item.setText(_translate(\"MainWindow\", \" Listes des ports\"))\n self.listWidget.setSortingEnabled(__sortingEnabled)\n self.arduinoportlineedit.setPlaceholderText(_translate(\"MainWindow\", \" Nom du port\"))\n self.textearduinoblebrowser.setHtml(_translate(\"MainWindow\", \"\\n\"\n\"\\n\"\n\"

Recopiez le nom exact du port auquel

\\n\"\n\"

votre clé BLE est connectée puis

\\n\"\n\"

connectez-vous.

\"))\n self.connectionbutton.setText(_translate(\"MainWindow\", \"CONNEXION\"))\n\n\n\n def plugarduino(self):\n global plugstart\n global plugpage\n global portlist\n if plugstart == 0:\n plugstart = 1\n for port in portlist:\n self.listWidget.addItem(str(port))\n if plugpage == False:\n self.arduinowidget.show()\n plugpage = True\n elif plugpage == True:\n self.arduinowidget.hide()\n plugpage = False\n\n def connectarduino(self):\n global erreurport\n avr_port = self.arduinoportlineedit.text()\n if avr_port == \"ERREUR1\" or avr_port == \"erreur1\":\n if perm == 1:\n print(\"Système détécté 'LINUX'\")\n os.system(\"notify-send 'Permission' 'Vous recevrez une demande d authentification afin de régler le problème de permission.'\")\n subprocess.call('sudo chmod 777 {}'.format(erreurport), shell = True)\n else:\n print(\"Système windows\")\n else:\n avr_port_split = avr_port.split(\" \")\n avr_port = avr_port_split[0]\n try:\n board = pyfirmata.Arduino(avr_port)\n print(\"Connexion clé BLE réussite\")\n except:\n print(\"Erreur de connexion avec la clé BLE\\n Erreur possibles:\")\n self.textearduinoblebrowser.append(\"
Il y'a eu une erreur de connexion avec la clé\\n\")\n self.textearduinoblebrowser.append(\"Les causes problables sont:\\n\")\n self.textearduinoblebrowser.append(\"Mauvais port, essayez un autre.\\n\")\n self.textearduinoblebrowser.append(\"Écrivez ERREUR1 pour essayer de\\n\")\n self.textearduinoblebrowser.append(\"régler la permission de connexion au port.
\")\n erreurport = avr_port\n\n\nif __name__ == \"__main__\":\n app = QtWidgets.QApplication(sys.argv)\n w = QtWidgets.QMainWindow()\n ui = Ui_MainWindow()\n ui.setupUi(w)\n w.show()\n sys.exit(app.exec_())\n","repo_name":"Loubaris/Arduino-PyQT5","sub_path":"interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":14552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"17692936188","text":"from _shutil import *\nfrom _script import *\nfrom _editor import open_in_vscode\n\nmkdir(\"~/Projects\")\nchdir(\"~/Projects\")\n\nfolder = os.path.basename(\"{{GIT_URL}}\")\nfolder = re.sub(\".git$\", \"\", folder)\n\nif not exists(folder):\n call(\"git clone %s --depth=1\" % \"{{GIT_URL}}\")\n\nset_variable(\"GIT_REPO\", os.path.realpath(folder))\n\nopen_in_vscode(folder)\n","repo_name":"MOA007/MyScripts","sub_path":"scripts/r/git/clone.py","file_name":"clone.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"2037703785","text":"#!/usr/bin/env python3\nfrom pwn import *\nio = remote('p1.tjctf.org', 8003)\nx = io.recvline()\na, b = re.search(br'(\\d+) \\+ (\\d+)', x).groups()\nio.sendline(str(int(a) + int(b)))\nio.interactive()\n# [DEBUG] Received 0x3c bytes:\n# b'tjctf{TH3_1llum1n4ti_I5_R3aL}\\r'\n# b'tjctf{[CENt sSORED]} \\n'\n","repo_name":"mephi42/ctf","sub_path":"2020.05.23-TJCTF_2020/Censorship/pwnit.py","file_name":"pwnit.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"70"} +{"seq_id":"16284925704","text":"import numpy as np\n\ndef entropy(X):\n total = 0\n for x in X:\n if x != 0:\n total += (-1 * x * np.log2(x))\n return total\n\ndef infoGain(before, after):\n e_b = entropy(before) # entropy before \n print(\"Before: \", e_b)\n e_a = 0 # entropy after\n for x in after:\n print(\"term: \", entropy(x))\n e_a += (0.5) * entropy(x)\n print(\"After: \", e_a)\n return e_b - e_a\n\ndef main():\n before = [5/8, 3/8]\n after = [[1/2, 1/2], [3/4, 1/4]] # A: [[1, 0], [1/4, 3/4]]\n gain = infoGain(before, after)\n print(gain)\n return\n\nif __name__ == \"__main__\":\n main()","repo_name":"ACK101101/Theory---Algs-of-ML","sub_path":"hw 1/p4.py","file_name":"p4.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"33381769391","text":"\"\"\"\nThis script analyzes an election vote count data set and returns the following:\n\n1) The total number of votes cast\n2) A complete list of candidates who received votes\n3) The percentage of votes each candidate won\n4) The total number of votes each candidate won\n5) The winner of the election based on popular vote.\n\nAll statistics are calculated concurrent with a single pass through the data.\nSince the variables are not known in advance, i.e., the names of the candidates\nwho are up for election, mutable data types (lists and dictionaries) are used.\n\"\"\"\n\n# Import csv and os libraries to read/write\nimport csv\nimport os\n\n# Assemble the input csv file path, starting from the cwd\ninput_path = os.path.join(os.path.dirname(__file__), \"Resources\", \"election_data.csv\")\n\n# Open the election_data csv file for reading\nwith open(input_path, encoding=\"utf-8\", newline=\"\") as csvfile:\n csvread = csv.reader(csvfile, delimiter=\",\")\n\n # Skip header row since it is not valid data\n 
next(csvread)\n\n # Initialize the vote count dictionary\n votes = {}\n\n # Loop through the remaining records\n for record in csvread:\n \n candidate = str(record[2])\n votes[candidate] = votes.get(candidate, 0) + 1\n\n# Find the total number of votes\ntotal = sum(votes.values())\n\n# Find the name key of the maximum value in the dictionary\nwinner = max(votes, key=votes.get)\n\n# Format the votes dictionary entries for the report\nlines = [\n f\"{k + ':':24}\" f\"{votes[k]:10,}\" f\"{votes[k]/total * 100:11,.2f}%\\n\"\n for k in sorted(votes)\n]\n\n# Generate the report string text\nreport = (\n f\"{' Election Results ':^46}\\n\"\n f\"{'--':-^46}\\n\"\n f\"{'Total Votes:':24}{total:10,}{100:11,.2f}%\\n\"\n f\"{'--':-^46}\\n\"\n f\"{''.join(lines)}\"\n f\"{'--':-^46}\\n\"\n f\"{'Winner:':10}{winner:>24}{votes[winner]/total * 100:11,.2f}%\\n\"\n f\"{'--':-^46}\\n\"\n)\n\n# Assemble the output text file path, starting from the cwd\noutput_path = os.path.join(os.path.dirname(__file__), \"Analysis\", \"poll_analysis.csv\")\nprint(f\"\\n\\n {output_path} \\n\\n\")\n\n# Open the poll_analysis text file and write the report to it\nwith open(output_path, \"w\", encoding=\"utf-8\") as textfile:\n textfile.write(report)\n\n# Check to see if the report exists and is properly formatted:\ninput_path = output_path\n\n# Open the report text file for reading and print it to the terminal\nwith open(input_path, \"r\", encoding=\"utf-8\") as textfile:\n report = textfile.read()\n\n# And print it out to see if the information, formatting, etc. is correct\nprint(f\"\\n\\n{report}\\n\\n\")\n","repo_name":"SnowCode930/python-challenge","sub_path":"python-challenge/PyPoll/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"29389524248","text":"import sys\nimport os\nimport subprocess\nimport time\nfrom filecmp import cmp\n\ngreen = \"\\u001b[32m\"\nred = \"\\u001b[31m\"\nblue = \"\\u001b[35m\"\nreset = \"\\u001b[0m\"\n\ndef compileCode(solver):\n extension = solver.split('.')\n extension = extension[len(extension) - 1]\n if (extension == \"cpp\"):\n os.system(\"g++ \\\"%s\\\" -o test -std=c++17 -static -O2 -lm\" % solver)\n elif (extension == \"c\"):\n os.system(\"gcc \\\"%s\\\" -o test -static -O2 -lm\" % solver)\n\nsolver, testCasesPath, timeLimit = sys.argv[1:]\ntimeLimit = float(timeLimit)\ncompileCode(solver)\nprint(\"Compiled Code\", solver)\n\ninputPath, outputPath = testCasesPath + \"/input/\", testCasesPath + \"/output/\"\ntestCases = os.listdir(inputPath)\naccepted = True\nfor test in sorted(testCases, key=lambda x: int(x.split('_')[1])):\n startTime = time.time()\n os.system(\"./test < \\\"%s\\\" > auxOut\" % (inputPath + test))\n # subprocess.run(\"./test < \\\"%s\\\" > auxOut\" % (inputPath + test), shell=True, timeout=timeLimit)\n executionTime = time.time() - startTime\n\n verdict = \"Accepted\" if cmp(\"auxOut\", outputPath + test) and executionTime <= timeLimit else \"Wrong Answer\" if executionTime <= timeLimit else \"Time Limit Exceeded\"\n color = green if verdict == \"Accepted\" else red if verdict == \"Wrong Answer\" else blue\n if verdict != \"Accepted\":\n accepted = False\n # os.system(\"cat %s\" % (outputPath + test))\n\n print(\"%3d\" % int(test.split(\"_\")[1]), \" - Verdict: \", color, verdict, reset, \" | time: \", round(executionTime, 4), sep=\"\")\n\nif accepted:\n print(green, \"ACCEPTED!!!!!\", sep=\"\")\nelse:\n print(red, \"SOMETHING WENT 
WRONG!!!!!\", sep=\"\")\n","repo_name":"NelsonGomesNeto/Competitive-Programming","sub_path":"Competitions/ICPC/2020/Sub-Regionals/judge.py","file_name":"judge.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"70"} +{"seq_id":"34234467023","text":"\"\"\"Functions for organizing and calculating student exam scores.\"\"\"\n\ndef round_scores(student_scores):\n \"\"\"Round all provided student scores.\"\"\"\n rounded_scores = []\n\n for score in student_scores:\n rounded_scores.append(round(score))\n\n return rounded_scores\n\n\ndef count_failed_students(student_scores):\n \"\"\"Count the number of failing students out of the group provided.\"\"\"\n failed_count = 0\n\n for score in student_scores:\n if score <= 40:\n failed_count += 1\n\n return failed_count\n\n\ndef above_threshold(student_scores, threshold):\n \"\"\"Determine how many of the provided student scores were 'the best' based on the provided threshold.\"\"\"\n best_scores = []\n\n for score in student_scores:\n if score >= threshold:\n best_scores.append(score)\n\n return best_scores\n\n\ndef letter_grades(highest):\n \"\"\"Create a list of grade thresholds based on the provided highest grade.\"\"\"\n lower_threshold_scores = [41]\n letter_grade_range = round((highest-40) / 4)\n\n for index in range(3):\n lower_threshold = lower_threshold_scores[index] + letter_grade_range\n lower_threshold_scores.append(lower_threshold)\n\n return lower_threshold_scores\n\n\ndef student_ranking(student_scores, student_names):\n \"\"\"Organize the student's rank, name, and grade information in ascending order.\"\"\"\n rankings = []\n\n for index, score in enumerate(student_scores):\n rank = index + 1\n name = student_names[index]\n rankings.append(f'{rank}. 
{name}: {score}')\n\n return rankings\n\n\ndef perfect_score(student_info):\n \"\"\"Create a list that contains the name and grade of the first student to make a perfect score on the exam.\"\"\"\n for info in student_info:\n name = info[0]\n score = info[1]\n\n if score == 100:\n return [name, score]\n\n return []\n","repo_name":"nmsalvatore/exercism","sub_path":"python/making-the-grade/loops.py","file_name":"loops.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"7219028526","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nfrom align_class import *\n\nbam_file = sys.argv[1]\naln = open(bam_file)\n\n# Extract supplementary alignments\nos.system('samtools view -f 2048 ' + bam_file + ' > supp.sam')\n\n\ndef read_bam():\n seenset = set()\n pri_sup_pairs = {}\n\n alns = open('supp.sam')\n for aln in alns:\n QNAME = aln.split()[0] # sequence name\n if QNAME not in seenset:\n seenset.add(QNAME)\n pri_sup_pairs[QNAME] = []\n sup1 = alignment(QNAME, aln.split()[2], aln.split()[3], aln.split()[15][5:], aln.split()[5], aln.split()[4],\n aln.split()[11][5:])\n pri_sup_pairs[QNAME].append(sup1)\n SAtag = aln.split()[21].split(\";\") # SA tag are seprated by \";\"\n for i in range(len(SAtag) - 1):\n sups = alignment(QNAME,\n SAtag[i].split(',')[0][5:],\n SAtag[i].split(',')[1],\n SAtag[i].split(',')[2],\n SAtag[i].split(',')[3],\n SAtag[i].split(',')[4],\n SAtag[i].split(',')[5])\n pri_sup_pairs[QNAME].append(sups)\n return pri_sup_pairs\n","repo_name":"wangyiqing50/Fulloy","sub_path":"bam_reader.py","file_name":"bam_reader.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"26248615764","text":"import sys\nsys.setrecursionlimit(int(1e6))\ninput = sys.stdin.readline\nfrom collections import deque\nMAX = 50 + 10\n\nn, k = map(int, input().split())\n\ndef bfs(a,b):\n q = []\n visited = [False] * 200001\n \n q.append(a)\n visited[a] = 1\n \n while q:\n x = q.popleft()\n if x == b:\n return visited[b]-1\n for i in (x-1, x+1, x*2):\n if 0 <= i <= 200000 and visited[i] == 0:\n q.append(i)\n visited[i] = visited[x] + 1\n \n return -1\nprint(bfs(n,k))\n","repo_name":"SESAC2023/jinwoo_seo","sub_path":"BOJ/st_31_graph_traversal/1697.py","file_name":"1697.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"27242533034","text":"import tensorflow as tf\nimport tensorflow.contrib.slim as slim\n\n\ndef spatial_soft_argmax(x):\n ''' Implementation of the spatial softmax layer\n in Deep Spatial Autoencoders for Visuomotor Learning.\n See paper at https://arxiv.org/pdf/1509.06113.pdf.\n Code inspired by https://github.com/tensorflow/tensorflow/issues/6271.\n\n Args:\n x: input of shape [N, H, W, C]\n '''\n N, C, H, W = x.get_shape().as_list()\n\n # convert softmax to shape [N, H, W, C, 1]\n features = tf.reshape(tf.transpose(x, [0, 3, 1, 2]), [N * C, H * W])\n softmax = tf.nn.softmax(features)\n softmax = tf.transpose(tf.reshape(softmax, [N, C, H, W]), [0, 2, 3, 1])\n softmax = tf.expand_dims(softmax, -1)\n\n # get image coordinates in shape [H, W, 1, 2]\n posx, posy = tf.meshgrid(tf.lin_space(-1., 1., num=H),\n tf.lin_space(-1., 1., num=W),\n indexing='ij')\n image_coords = tf.stack([posx, posy], -1)\n image_coords = tf.expand_dims(image_coords, 2)\n\n # return argmax coordinates of [N, C * 2]\n res = 
tf.reduce_sum(softmax * image_coords, reduction_indices=[1, 2])\n res = tf.reshape(res, [N, C * 2])\n return res\n\n\nclass SpatialSoftmaxCNN(object):\n def __init__(self, model_config):\n self.batch_size = model_config.batch_size\n self.image_size = model_config.image_size\n self.obs_dim = model_config.obs_dim\n self.rgb_layers = model_config.rgb_layers\n self.depth_layers = model_config.depth_layers\n self.cnn_layers = model_config.cnn_layers\n self.aux_task = model_config.aux_task\n self.fc_layers = model_config.fc_layers\n self.output_min = model_config.output_min\n self.output_max = model_config.output_max\n self.output_dim = model_config.output_dim\n self.l1_loss_weight = model_config.l1_loss_weight\n self.l2_loss_weight = model_config.l2_loss_weight\n self.aux_loss_weight = model_config.aux_loss_weight\n\n self.output_translate = (self.output_max + self.output_min) / 2\n self.output_scale = (self.output_max - self.output_min) / 2\n\n if self.aux_task == 'obj_pos':\n self.aux_task_dim = 3\n else:\n raise ValueError(f'Undefined aux task {self.aux_task}.')\n\n # create placeholders\n self.input_rgb = tf.placeholder(name='input_rgb', dtype=tf.float32,\n shape=[self.batch_size,\n self.image_size,\n self.image_size,\n 3])\n self.input_d = tf.placeholder(name='input_d', dtype=tf.float32,\n shape=[self.batch_size, self.image_size,\n self.image_size, 1])\n self.input_obs = tf.placeholder(name='input_obs', dtype=tf.float32,\n shape=[self.batch_size, self.obs_dim])\n self.gt_aux = tf.placeholder(name='gt_aux', dtype=tf.float32,\n shape=[self.batch_size, self.aux_task_dim])\n self.gt_output = tf.placeholder(name='gt_output', dtype=tf.float32,\n shape=[self.batch_size, self.output_dim])\n\n def get_feed_dict(self, batch):\n return {\n self.input_rgb: batch['input_rgb'],\n self.input_d: batch['input_d'],\n self.input_obs: batch['input_obs'],\n self.gt_aux: batch['gt_aux'],\n self.gt_output: batch['gt_output']\n }\n\n def get_fetch_dict(self, is_train=True):\n return {\n 'loss_l2': self.l2_loss,\n 'loss_l1': self.l1_loss,\n 'loss_aux': self.aux_loss\n }\n\n def build(self):\n rgb_embed = slim.stack(self.input_rgb, slim.conv2d,\n self.rgb_layers, scope='rgb_embed')\n d_embed = slim.stack(self.input_d, slim.conv2d,\n self.depth_layers, scope='d_embed')\n _ = tf.concat([rgb_embed, d_embed], axis=-1, name='concat_rgbd')\n _ = slim.stack(_, slim.conv2d, self.cnn_layers, scope='cnn')\n\n _ = spatial_soft_argmax(_)\n\n self.aux_output = slim.fully_connected(_, self.aux_task_dim, scope='aux')\n\n _ = tf.concat([_, self.aux_output, self.input_obs], -1)\n _ = slim.stack(_, slim.fully_connected, self.fc_layers, scope='fc')\n _ = slim.fully_connected(_, self.output_dim, activation_fn=tf.tanh,\n scope='fc_out')\n _ = (_ + self.output_translate) * self.output_scale\n self.output = _\n \n self.l2_loss = tf.reduce_mean((self.output - self.gt_output) ** 2)\n self.l1_loss = tf.reduce_mean(tf.abs(self.output - self.gt_output))\n self.aux_loss = tf.reduce_mean((self.aux_output - self.gt_aux) ** 2)\n self.loss = self.l2_loss * self.l2_loss_weight + \\\n self.l1_loss * self.l1_loss_weight + \\\n self.aux_loss * self.aux_loss_weight\n","repo_name":"YunchuZhang/Visually-Grounded-Library-of-Behaviors","sub_path":"bc/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5089,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"70"} +{"seq_id":"39170848447","text":"# config.py\n\n\nclass Config(object):\n DEBUG = False\n TESTING = False\n SECRET_KEY = 'developement 
key'\n #DATABASE_URI = 'postgresql://vagrant@localhost/issuetracker'\n DATABASE_URI = 'postgresql:///issuetracker'\n USE_TOKEN_AUTH = True\n\n\nclass ProductionConfig(Config):\n DEBUG = False\n\n\nclass DevelopmentConfig(Config):\n DEBUG = True\n\n\nclass TestingConfig(Config):\n TESTING = True\n","repo_name":"postsneakernet/issue-tracker","sub_path":"issuetracker/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"} +{"seq_id":"5581243543","text":"import random\n\n# задание 1\n\nnum = int(input(\"Введите целое число: \"))\n\nif num % 2 == 0:\n print(f\"Число {num} - четное!\")\nelse:\n print(f\"Число {num} - нечетное!\")\n\n\n# задание 2\n\nfinger = int(input(\"Введите порядковый номер пальца: \"))\n\nif finger == 1:\n print(f\"Палец под номером {finger} - большой!\")\nelif finger == 2:\n print(f\"Палец под номером {finger} - указательный!\")\nelif finger == 3:\n print(f\"Палец под номером {finger} - средний!\")\nelif finger == 4:\n print(f\"Палец под номером {finger} - безымянный!\")\nelif finger == 5:\n print(f\"Палец под номером {finger} - мизинец!\")\nelse:\n print(\"Если вы не из Чернобыля, то пальцев должно быть не больше пяти =)\")\n\n\n# задание 3\n\nmonth = int(input(\"Введите номер месяца: \"))\nif 1 <= month <= 2 or month == 12:\n print(\"Зима\")\nelif 3 <= month <= 5:\n print(\"Весна\")\nelif 6 <= month <= 8:\n print(\"Лето\")\nelif 9 <= month <= 11:\n print(\"Осень\")\nelse:\n print(\"Загляни в календарь!\")\n\n\n# задание 4\n\none = int(input())\ntwo = int(input())\nthree = int(input())\n\nif one > two and one > three:\n print(f\"{one} is biggest\")\nelif two > one and two > three:\n print(f\"{two} is biggest\")\nelif three > one and three > two:\n print(f\"{three} is biggest\")\n\n\n# задание 5\n\nfirst_player = random.randint(1, 3)\nsecond_player = random.randint(1, 3)\n\nif first_player == 1:\n if second_player == 2:\n print(\"Камень побеждает Ножницы\")\n elif second_player == 3:\n print(\"Бумага побеждает Камень\")\n elif second_player == 1:\n print(\"Ничья\")\nelif first_player == 2:\n if second_player == 2:\n print(\"Ничья\")\n elif second_player == 3:\n print(\"Ножницы побеждают Бумагу\")\n elif second_player == 1:\n print(\"Камень побеждает Ножницы\")\nelif first_player == 3:\n if second_player == 2:\n print(\"Ножницы побеждают Бумагу\")\n elif second_player == 3:\n print(\"Ничья\")\n elif second_player == 1:\n print(\"Бумага побеждает Камень\")\n\n\n# задание 6\n\nnum1 = int(input())\nnum2 = int(input())\nflag = num1 > num2\nif flag:\n print(\"YES\")\nelse:\n print(\"NO\")\n\n\n# задание 7\n\nnum1 = int(input())\nnum2 = int(input())\nnum3 = int(input())\nis_triangle = num1 > num2 + num3 and num2 > num1 + num3 and num3 > num1 + num2\n\nif is_triangle:\n print(\"Такой треугольник существует\")\nelse:\n print(\"Такой треугольник не существует\")\n\n\n# задание 8\n\nnum1 = float(input(\"Введите число: \"))\nsign = input(\"Введите знак операции (+, -, /, *): \")\nnum2 = float(input(\"Введите число: \"))\n\nif sign == \"+\":\n print(num1 + num2)\nelif sign == \"-\":\n print(num1 - num2)\nelif sign == \"*\":\n print(num1 * num2)\nelif sign == \"/\":\n print(num1 / num2)\n\n\n# задание 9\n\nst = input(\"Введите слово: \")\nis_mister = st == \"Mister\"\n\nif is_mister:\n print(\"Введено слово Mister\")\nelse:\n print(\"Введено слово отличное от слова 
Mister\")","repo_name":"Aleksei911/Python","sub_path":"Lessons/lesson4.py","file_name":"lesson4.py","file_ext":"py","file_size_in_byte":3476,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"6572938791","text":"class Solution:\n def earliestFullBloom(self, plantTime: List[int], growTime: List[int]) -> int:\n plantGrowMap = []\n for i in range(len(plantTime)):\n plantGrowMap.append((growTime[i], plantTime[i]))\n\n plantGrowMap.sort(reverse = True)\n\n currentTime = 0\n ans = 0\n\n for i in range(len(plantGrowMap)):\n currentTime += plantGrowMap[i][1]\n ans = max(ans, currentTime + plantGrowMap[i][0])\n return ans","repo_name":"TigistShiferaw/competitive-programming","sub_path":"squid game/week1/2136. Earliest Possible Day of Full Bloom.py","file_name":"2136. Earliest Possible Day of Full Bloom.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"15615285021","text":"import os\nimport tempfile\nfrom pathlib import Path\n\nimport pandas as pd\nimport pytest\n\nfrom cport.modules.predict import (\n format_predictions,\n mean_calculator,\n read_pred,\n scriber_ispred4_scannet_sppider,\n)\n\nDATA_DIR = Path(__file__).parents[1] / \"tests/test_data\"\n\n\n@pytest.fixture\ndef prediction_dic():\n return {\n \"predictor\": [\"predictor\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\"],\n \"ispred4\": [\"ispred4\", \"P\", \"P\", \"AP\", \"0\", \"-\", \"1.7\"],\n }\n\n\n@pytest.fixture\ndef formatted_prediction():\n return {\n \"predictor\": [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\"],\n \"ispred4\": [0.0, 0.0, 0.5, 0.0, 0.0, 1.7],\n }\n\n\n@pytest.fixture\ndef prediction_csv():\n return Path(DATA_DIR, \"predictors_1PPE.csv\")\n\n\n@pytest.fixture\ndef temp_csv():\n input_csv = tempfile.NamedTemporaryFile(suffix=\".csv\", delete=False)\n yield input_csv\n os.unlink(input_csv.name)\n\n\ndef test_read_pred(temp_csv, prediction_dic):\n contents = \"predictor,1,2,3,4,5,6\\nispred4,P,P,AP,0,-,1.7\\n\"\n temp_csv.write(contents.encode())\n temp_csv.close()\n\n observed = read_pred(temp_csv.name)\n expected = prediction_dic\n\n assert observed == expected\n\n\ndef test_format_predictions(prediction_dic, formatted_prediction):\n observed = format_predictions(prediction_dic)\n expected = formatted_prediction\n\n assert observed == expected\n\n\n@pytest.mark.skip(reason=\"Not implemented yet.\")\ndef test_scriber_ispred4_sppider_csm_potential_scannet():\n pass\n\n\ndef test_scriber_ispred4_scannet_sppider(prediction_csv):\n scriber_ispred4_scannet_sppider(prediction_csv)\n expected_file = Path(\"output/cport_ML_scriber_ispred4_scannet_sppider.csv\")\n assert expected_file.exists()\n\n\ndef test_mean_calculator():\n df = pd.DataFrame.from_dict(\n {\"col_1\": [1.0, 2.0], \"col_2\": [2.0, 3.0], \"col_3\": [3.0, 4.0]}\n )\n\n observed = mean_calculator(df, [\"col_1\", \"col_2\", \"col_3\"])\n expected = [2.0, 3.0]\n\n assert observed == expected\n","repo_name":"haddocking/cport","sub_path":"tests/test_predict.py","file_name":"test_predict.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"} +{"seq_id":"4678611356","text":"\"\"\"\n Last Modified: August 9, 2017\n\n------------------------------------------------------------------------------\n Description\n------------------------------------------------------------------------------\n\n This python script is used to 
submit a single request to a web service\n or collection of web services. This script will capture and parse the\n web service response (and header) and write an entry to a text file.\n Intended to be included in a scheduled job with a regular interval to\n monitor a web service.\n\n The script will log the date, time, executionTime, latency\n and response code for the BC Physical Address Geocoder and the\n BC Route Planner.\n\n Learn more about these APIs using API consoles linked from the BC\n Data Catalogue:\n\n BC Physical Address Geocoder:\n https://catalogue.data.gov.bc.ca/dataset/8f4a016f-14db-4def-8ef9-7c797de1cdd9\n\n BC Route Planner:\n https://catalogue.data.gov.bc.ca/dataset/3dad0c30-ef32-4f4c-82fa-33787d5f85f8\n\n Two of these APIs require the use of API keys. To acquire an API\n key for your application please contact DataBC.\n\n------------------------------------------------------------------------------\n Usage\n------------------------------------------------------------------------------\n python heartbeat.py -url \"\" -o \n\n------------------------------------------------------------------------------\n Usage Examples\n------------------------------------------------------------------------------\n python heartbeat.py -url \"https://geocoder.api.gov.bc.ca/addresses.json?addressString=%20525%20Superior%20Street%2C%20Victoria%2C%20BC&locationDescriptor=any&maxResults=1&interpolation=adaptive&echo=true&setBack=0&outputSRS=4326&minScore=1&provinceCode=BC&apikey=\" -o ../data/geocoder-secure-heartbeat.txt\n\n python heartbeat.py -url \"https://apps.gov.bc.ca/pub/geocoder/addresses.json?addressString=%20525%20Superior%20Street%2C%20Victoria%2C%20BC&locationDescriptor=any&maxResults=1&interpolation=adaptive&echo=true&setBack=0&outputSRS=4326&minScore=1&provinceCode=BC&apikey=nokeyprovided\" -o ../data/geocoder-public-heartbeat.txt\n\n python heartbeat.py -url \"https://router.api.gov.bc.ca/route.json?routeDescription=spansProvinceFastest&points=-126.844567%2C49.978503%2C-122.799997%2C58.925305&outputSRS=4326&criteria=fastest&distanceUnit=km&apikey=\" -o ../data/router-heartbeat.txt\n\n python heartbeat.py -url \"http://apps.gov.bc.ca/pub/bcgnws/names/search?name=victoria&outputFormat=json\" -o ../data/bcgnws-heartbeat.txt\n\n python heartbeat.py -url \"https://apps.gov.bc.ca/pub/geomark/geomarks/gm-7A4A2A93A090493186442C1A48B179C4/point.json?srid=4326\" -o ../data/geomark-heartbeat.txt\n\n python heartbeat.py -url \"https://catalogue.data.gov.bc.ca/api/3/action/package_search?fq=license_id:22\" -o ../data/bcdc-heartbeat.txt\n\n------------------------------------------------------------------------------\n Contact Us\n------------------------------------------------------------------------------\n https://forms.gov.bc.ca/databc-contact-us/\n\"\"\"\n\nimport argparse\nimport collections\nimport csv\nimport datetime\nimport json\nimport os\nimport sys\nimport time\n\nif sys.version_info > (3,):\n from urllib.error import HTTPError, URLError\n from urllib.request import urlopen\nelse:\n from urllib2 import HTTPError, URLError, urlopen\n\nHEADER_FIELDNAMES = (\n 'chart', # TODO: should be 'date'?\n 'date', # TODO: should be 'time'?\n 'executionTime',\n 'upstreamLatency',\n 'proxyLatency',\n 'responseCode',\n 'responseTime'\n)\n# Only retain 7 days of data (10 minute intervals)\n# 6 (10 minute chunks per hour) * 24 (hours per day) * 7 (days)\nMAX_ROWS = 6 * 24 * 7 # 1008\n\n\ndef _get_current_date_and_time():\n d = datetime.datetime.now()\n return {\n 'chart': 
d.strftime('%Y/%m/%d'), # TODO: should be 'date'?\n 'date': d.strftime('%H:%M:%S') # TODO: should be 'time'?\n }\n\n\ndef _get_url_response_info(url):\n try:\n # Run an HTTP GET on the url\n start_time = time.time()\n response = urlopen(url)\n except HTTPError as e:\n return {'responseCode': e.code}\n except (URLError, ValueError):\n return {}\n else:\n end_time = time.time()\n\n try:\n data = json.loads(response.read())\n except ValueError:\n # If the data being deserialized is not a valid JSON document,\n # a ValueError will be raised (Python 2.7 -> 3.4)\n exec_time = ''\n else:\n exec_time = data.get('executionTime', '')\n\n return {\n 'executionTime': exec_time,\n 'proxyLatency': response.info().getheader(\n 'X-Kong-Proxy-Latency',\n default=''\n ),\n 'responseCode': response.getcode(),\n 'responseTime': (end_time - start_time) * 1000,\n 'upstreamLatency': response.info().getheader(\n 'X-Kong-Upstream-Latency',\n default=''\n )\n }\n\n\ndef _parse_cmd_line_args():\n parser = argparse.ArgumentParser(\n description='Checks the heartbeat of a given URL'\n )\n parser.add_argument(\n '-url',\n dest='url',\n action='store',\n required=True,\n help='The URL to check the heartbeat of'\n )\n parser.add_argument(\n '-o',\n dest='output_filename',\n action='store',\n required=True,\n help='The file to append heartbeat results to'\n )\n return parser.parse_args()\n\n\ndef _read_csv(filename):\n result = collections.deque(maxlen=MAX_ROWS)\n if os.path.exists(filename):\n with open(filename, 'r') as csvfile:\n reader = csv.DictReader(csvfile, delimiter='|')\n result.extend(reader)\n return result\n\n\ndef _write_csv(filename, rows):\n with open(filename, 'w') as csvfile:\n writer = csv.DictWriter(\n csvfile,\n delimiter='|',\n fieldnames=HEADER_FIELDNAMES\n )\n writer.writeheader()\n writer.writerows(rows)\n\n\ndef main():\n # Parse command-line arguments from sys.argv\n args = _parse_cmd_line_args()\n\n # Initialize dictionary to hold new URL response information\n url_response_info = {key: '' for key in HEADER_FIELDNAMES}\n\n # Update the date and time key/value pairs\n url_response_info.update(_get_current_date_and_time())\n\n # Update the URL response information key/value pairs\n url_response_info.update(_get_url_response_info(args.url))\n\n # Read rows into a deque (if file exists, else empty deque)\n current_rows = _read_csv(args.output_filename)\n\n # Append new URL response information to the deque\n current_rows.append(url_response_info)\n\n # Write all rows to csv output file\n _write_csv(args.output_filename, current_rows)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"bcgov/dbcrss","sub_path":"heartbeat/heartbeat.py","file_name":"heartbeat.py","file_ext":"py","file_size_in_byte":6728,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"70"} +{"seq_id":"27181332910","text":"#######################################################\r\n# Program: Obtaining month day and year in numerical #\r\n# Description: Obtain m, d, y in numerical and #\r\n# asking user if they want to continue the program#\r\n# Author: Rebecca Hope Simmons #\r\n# Date: Wednesday, October 1, 2014 #\r\n#######################################################\r\n\r\n# while loop \r\n\r\ndate = \"yes\"\r\nwhile date == \"yes\": \r\n\r\n # Evaluate the\tmonth\t, day\t, and\tyear\r\n month, day,\tyear = eval(input(\"Enter month day year with commas: \"))\r\n \r\n # Determine\tthe month\r\n if\tmonth\t==\t1:\t\r\n \tmonthName = \"January\"\r\n elif month == 2:\r\n \tmonthName = 
\"February\"\r\n elif month == 3:\r\n \tmonthName = \"March\"\r\n elif month == 4:\r\n \tmonthName = \"April\"\r\n elif month == 5: \r\n \tmonthName = \"May\"\r\n elif month == 6:\r\n \tmonthName = \"June\"\r\n elif month == 7:\r\n \tmonthName = \"July\"\r\n elif month == 8:\r\n \tmonthName = \"August\"\r\n elif month == 9:\r\n \tmonthName = \"September\"\r\n elif month == 10:\r\n \tmonthName = \"October\"\r\n elif month == 11:\r\n \tmonthName = \"November\"\r\n elif month == 12:\r\n \tmonthName = \"December\"\r\n else:\r\n \tprint(\"Invalid Month\")\r\n \r\n # if month and\tday IS valid \r\n if\t(month == 1\tor\tmonth\t==\t3 or month == 5 or month == 7\tor\tmonth\t==\t8 or month == 10 or month == 12):\r\n if day <= 31 and day\t>=\t1:\r\n print(monthName, day, year)# print date, valid or invalid\r\n else:\r\n print(\"Invalid Day\")\r\n elif (month\t==\t4 or month == 6 or month == 9\tor\tmonth\t==\t11):\r\n \tif\tday <= 30 and day\t>=\t1:\r\n print(monthName, day, year)# print date, valid or invalid\r\n \telse:\r\n print(\"Invalid Day\")\r\n elif (month\t==\t2):\r\n if\tday <= 28 and day\t>=\t1:\r\n \t print(monthName, day, year)# print date, valid or invalid\r\n else:\r\n print(\"Invalid Day\") \r\n else:\r\n print(\"Invalid Day\")\r\n\r\n date = input(\"Do you want to continue?\") \r\n \r\nprint(\"Goodbye!\")\r\n\r\n \r\n \r\n\r\n \r\n","repo_name":"rebeccasimmons/Python","sub_path":"RebeccaSimmonsLab12.py","file_name":"RebeccaSimmonsLab12.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"29755229671","text":"class Lines:\r\n\r\n def __init__(self, start_x, start_y, end_x, end_y, color):\r\n\r\n self.start_x = start_x\r\n self.start_y = start_y\r\n self.end_x = end_x\r\n self.end_y = end_y\r\n self.color = color\r\n\r\n def __eq__(self, other):\r\n\r\n if self.start_x == other.start_x and self.start_y == other.start_y and self.end_x == other.end_x and self.end_y == other.end_y:\r\n return True\r\n\r\n\r\n","repo_name":"sn0wman12/DijsktraVisualisation","sub_path":"domain/Lines.py","file_name":"Lines.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"5850878224","text":"import os\nimport time\nimport logging\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium import webdriver\n\ndef is_in_page(selector):\n try:\n if \"@\" in selector:\n return driver.find_element_by_xpath(selector)\n return driver.find_element_by_css_selector(selector)\n except Exception:\n print(selector + ' Not found')\n return False\n\n\ndef click_if_exist(path_locator):\n element = is_in_page(path_locator)\n\n if element:\n element.click()\n return element\n return False\n\n\ndef type_if_exist(path_locator, keys_to_send):\n element = click_if_exist(path_locator)\n if element:\n element.send_keys(keys_to_send)\n\n\ndef is_correct_status(status_option):\n is_correct = False\n status_mapping = {\n 'Start': True,\n 'Stop': True,\n 'Pause': True,\n 'Resume': True\n }\n is_correct = status_mapping.get(status_option)\n return is_correct\n\n\ndef actual_status():\n global actualStatus\n startActive = '.sl-item:nth-child(1) i.fa.fa-play.fa-2x.text-success'\n stopActive = '.sl-item:nth-child(1) .fa.fa-stop.fa-2x.text-danger'\n pauseActive = '.sl-item:nth-child(1) i.fa.fa-pause.fa-2x.text-warning'\n resumeActive = '.sl-item:nth-child(1) i.fa.fa-refresh.fa-2x.text-success'\n actualStatus = 'no'\n if 
is_in_page(startActive):\n actualStatus = 'Start'\n print('status load ' + actualStatus)\n else:\n if is_in_page(stopActive):\n actualStatus = 'Stop'\n print('status load ' + actualStatus)\n else:\n if is_in_page(pauseActive):\n actualStatus = 'Pause'\n print('status load ' + actualStatus)\n else:\n if is_in_page(resumeActive):\n actualStatus = 'Resume'\n print('status load ' + actualStatus)\n else:\n print('tenemos cambios en web')\n\n\ndef chrome_webdriver():\n global driver\n try:\n driver = webdriver.Chrome(executable_path=r\"/usr/lib/chromium-browser/chromedriver\")\n except Exception as e:\n try:\n driver = webdriver.Chrome(executable_path=r\"./chromedriver76\")\n except Exception as e2:\n try:\n driver = webdriver.Chrome(executable_path=r\"./chromedriver77\")\n except Exception as e3:\n try:\n driver = webdriver.Chrome(executable_path=r\"./chromedriver83\")\n except Exception as e4:\n print('version chromedriver fails')\n return driver\n\n\nos.environ['DISPLAY'] = os.getenv('INS_DISPLAY', ':0.0')\nlogging.basicConfig(level=logging.INFO)\n# driver = webdriver.Firefox(executable_path=r\"./geckodriver\")\ndriver = chrome_webdriver()\n\ndriver.get(\"https://auth2.bixpe.com/Account/Login\")\n\ndriver.maximize_window()\n\ntype_if_exist('input#Username', os.getenv('BIXPE_USER', 'miuser'))\ntype_if_exist('input#Password', os.getenv('BIXPE_PASS', 'mipass'))\ntype_if_exist('input#Password', Keys.ENTER)\n\n\nexpectedStatus = os.getenv('BIXPE_STATUS', 'start').capitalize()\n\nactual_status()\n\nif actualStatus != expectedStatus:\n if is_correct_status(expectedStatus):\n time.sleep(8)\n driver.get(\"https://worktime.bixpe.com/WorkDay/\" + expectedStatus)\n time.sleep(8)\n driver.get(\"https://worktime.bixpe.com/\")\n time.sleep(8)\n else:\n print('status is not valid')\n\nactual_status()\n\nif actualStatus == expectedStatus:\n print(actualStatus + ' it\\'s actual status')\n\n\ntime.sleep(2)\n\ndriver.close()\n","repo_name":"alvarezbruned/bixpe","sub_path":"bixpe.py","file_name":"bixpe.py","file_ext":"py","file_size_in_byte":3555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"16786217892","text":"import matplotlib.pyplot as plt\nimport os\nimport torch\n\nfrom rouge import Rouge\nfrom typing import List\nfrom typing import Optional\n\n\ndef calculate_rouge(hypothesis: str, reference: str) -> Optional[List[dict]]:\n \"\"\"\n Calculates Rouge scores which is a set of metrics for evaluation of machine translation or text summarization tasks.\n Rouge stands for Recall-Oriented Understudy for Gisting Evaluation and compares model output with target text.\n For text summarization task we consider two base accuracy measures - recall and precision.\n * Recall - number of overlapping words, divided by number of words in reference summary\n * Precision - number of overlapping words, divided by number of words in model generated summary\n\n Additionally we consider F1-score of both recall and precision.\n\n For the best overview of a model performance, we should measure recall, precision and F-score values.\n\n There are few type of metrices:\n * ROUGE-1\n Measures overlapping unigrams\n * ROUGE-2\n Measures overlapping bigrams\n * ROUGE-L\n Measures longest common subsequence (LCS), takes into account in-sequence matches on sentence level word order.\n\n :param hypothesis: Model generated text sequence\n :type hypothesis: str\n :param reference: Reference text sequence\n :type reference: str\n :return: List of precision, recall and F1-score 
for Rouge-1, Rouge-2 and Rouge-L metrics\n :rtype: list\n \"\"\"\n rouge = Rouge()\n hypothesis = hypothesis.split('')[1].split('')[0].strip()\n reference = reference.split('')[1].split('')[0].strip()\n try:\n scores = rouge.get_scores(hypothesis, reference)\n return scores\n except Exception:\n return None\n\n\ndef draw_attention_matrix(\n attention: torch.Tensor,\n original: str,\n summary: str,\n config=None,\n epoch=None,\n batch_id=None,\n) -> None:\n \"\"\"\n Draws plot with heatmap of attention using matplotlib for particular training step.\n If config specified, saves plot in specified location\n\n :param attention: Matrix with attention values\n :type attention: torch.Tensor\n :param original: Original text to summarize\n :type original: str\n :param summary: Model generated summary text\n :type summary: str\n :param config: Config, if passed then the plot is saved in config-defined location\n :type config: dict, optional\n :param epoch: Current training epoch for naming purpose in plot saving operation\n :type epoch: int, optional\n :param batch_id: Current batch number for naming purpose in plot saving operation\n :type batch_id: int, optional\n \"\"\"\n labels_original = original.split('')[1].split('')[0].strip().split()\n labels_summary = summary.split('')[1].split('')[0].strip().split()\n plt.figure(figsize=(20, 10))\n plt.imshow(attention.numpy()[:len(labels_summary), 1:len(labels_original)])\n plt.xticks([i for i in range(len(labels_original)-1)], labels_original, rotation=75)\n plt.yticks([i for i in range(len(labels_summary))], labels_summary)\n plt.draw()\n if config:\n save_path = os.path.join(\n config['model_output_path'], config['model_name'] + f'_attention_matrix_epoch_{epoch}_batch_{batch_id}.png')\n plt.savefig(save_path)\n","repo_name":"oziomek1/NLP_summarizer","sub_path":"NLPer/nlper/utils/train_utils.py","file_name":"train_utils.py","file_ext":"py","file_size_in_byte":3339,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"} +{"seq_id":"23742283584","text":"'''\n使用代码读取csv,清洗数据后写入另一个csv\n'''\nimport pandas as pd\nfrom ezetl.utils import get_reader\nfrom ezetl.utils.common_utils import md5\n\n\ndef trans_volume(volume):\n '''\n 转换交易量\n :param volume:\n :return:\n '''\n if str(volume).endswith('K'):\n volume = float(volume[:-1]) * 1000\n if str(volume).endswith('M'):\n volume = float(volume[:-1]) * 1000*1000\n if str(volume).endswith('B'):\n volume = float(str(volume)[:-1].replace(',', '')) * 1000 * 1000 * 1000\n if volume == '-':\n volume = 0\n return round(float(volume), 1)\n\n\ndef trans_date(date):\n '''\n 转换日期格式\n :return:\n '''\n year = date.split('年')[0]\n month = date.split('年')[1].split('月')[0]\n day = date.split('年')[1].split('月')[1].split('日')[0]\n return \"%d-%02d-%02d 00:00:00\" % (int(year), int(month), int(day))\n\n\ndef transform(source):\n dic = {\n 'time': trans_date(source['日期']),\n 'symbol': f'BTC/USD',\n 'timeframe': '1d',\n 'low': float(str(source['低']).replace(',', '')),\n 'high': float(str(source['高']).replace(',', '')),\n 'open': float(str(source['开盘']).replace(',', '')),\n 'close': float(str(source['收盘']).replace(',', '')),\n 'volume': trans_volume(source['交易量'])\n }\n dic['id'] = md5(f\"{dic.get('timeframe')}{dic.get('symbol')}{dic.get('time')}\")\n return dic\n\n\nreader_info = {\n 'source': {\n \"name\": \"test\",\n \"type\": \"file\",\n \"conn_conf\": {\n \"path\": \"../data/btc_history.csv\",\n },\n \"ext_params\": {}\n },\n 'model': {\n \"name\": \"test\",\n \"type\": \"file_table\",\n 
\"model_conf\": {},\n \"ext_params\": {},\n \"fields\": []\n },\n 'extract_info': {\n 'batch_size': 100,\n 'extract_rules': []\n }\n}\nflag, reader = get_reader(reader_info)\nprint(flag, reader)\nflag, res = reader.connect()\nprint(flag, res)\nprint(reader.get_res_fields())\ndata_li = []\nfor flag, read_data in reader.read_batch():\n print(read_data)\n if read_data['code'] == 200:\n write_data = read_data['data']['records']\n for record in write_data:\n data_li.append(transform(record))\ndf = pd.DataFrame(data_li)\ndf.to_csv('btc_history_target.csv', index=False)\n","repo_name":"xuwei95/ez-etl","sub_path":"ezetl/examples/etl_demos/csv2csv.py","file_name":"csv2csv.py","file_ext":"py","file_size_in_byte":2321,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"70"} +{"seq_id":"73214040547","text":"import re\nimport sys\nfile = sys.argv[1]\nf = open('%s' % file, 'r')\nw = open('%s.2' % file, 'w')\nfor line in f.readlines():\n if 'to_date(' in line:\n line = line.split(',')\n for i in range(len(line)):\n line[i] = line[i].replace('dd-mm-yyyy', '%Y-%m-%d')\n line[i] = line[i].replace('hh24:mi:ss', '%H:%m:%s')\n if 'to_date(' in line[i]:\n time = line[i].split(\"to_date('\")[1].split(\"'\")[0].split(' ')[0]\n time = time.split('-')[2] + '-' + time.split('-')[1] + '-' + time.split('-')[0]\n line[i] = line[i].replace('to_date(', 'date_format(')\n pattern_string = '[0-9]{2}-[0-9]{2}-[0-9]{4}'\n pattern = re.compile(pattern_string)\n match = pattern.search(line[i])\n line[i] = line[i].replace(match.group(), time)\n line = ','.join(line)\n w.write(line)\n","repo_name":"liuliainio/liuli","sub_path":"tykj-operation/tykj-operation/MarketSearchCrawler/scripts/file_transformat.py","file_name":"file_transformat.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"} +{"seq_id":"72861159585","text":"import timeit\r\nfrom time import strptime, strftime\r\nfrom collections import namedtuple\r\ntry:\r\n import xml.etree.cElementTree as xml\r\nexcept:\r\n import xml.etree.cElementTree as xml\r\nimport matplotlib.pyplot as plt\r\n\r\nns = {'role': 'http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2'}\r\n\r\nNamespaces = {\r\n None: \"http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2\",\r\n \"ns2\": \"http://www.garmin.com/xmlschemas/UserProfile/v2\",\r\n \"tpx\": \"http://www.garmin.com/xmlschemas/ActivityExtension/v2\",\r\n \"ns4\": \"http://www.garmin.com/xmlschemas/ProfileExtension/v1\",\r\n \"ns5\": \"http://www.garmin.com/xmlschemas/ActivityGoals/v1\",\r\n \"xsi\": \"http://www.w3.org/2001/XMLSchema-instance\"\r\n }\r\n\r\n\r\nTrackPoint = namedtuple('TrackPoint', 'Time HeartRateBpm AltitudeMeters DistanceMeters Cadence')\r\n\r\n\r\nclass ParseTcx:\r\n def __init__(self, file):\r\n self.file = file\r\n self.root = None\r\n self.Sport = None\r\n self.StartTime = None\r\n self.TotalTimeSeconds = None\r\n self.DistanceMeters = None\r\n self.Calories = None\r\n self.AverageHeartRate = None\r\n self.MaximumHeartRate = None\r\n self.Intensity = None\r\n self.Cadence = None\r\n self.TriggerMethod = None\r\n self.TrackPoints = dict()\r\n\r\n def parse(self):\r\n xml.register_namespace('', 'http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2')\r\n self.tree = xml.parse(self.file)\r\n self.root = self.tree.getroot()\r\n self.parse_root(self.root)\r\n\r\n def get_info(self):\r\n print('Sport: ', self.Sport)\r\n print('StartTime: ', self.StartTime)\r\n print('Duration: 
', float(self.TotalTimeSeconds)/60, ' [minutes]')\r\n print('Distance: ', self.DistanceMeters, ' [meters]')\r\n print('Average HeartRate: ', self.AverageHeartRate, ' [BPM]')\r\n\r\n def get_text(self, item, name, default=None):\r\n try:\r\n return item.find(self.get_role(name)).text\r\n except:\r\n return default\r\n\r\n def get_role(self, role):\r\n return '{' + ns['role'] + '}' + role\r\n\r\n def parse_root(self, root):\r\n #print(root.tag, root.attrib)\r\n for child in root:\r\n if child.tag == self.get_role('Activities'):\r\n self.parse_activities(child)\r\n\r\n def parse_activities(self, actitivities):\r\n #print(actitivities.tag, actitivities.attrib)\r\n for child in actitivities:\r\n if child.tag == self.get_role('Activity'):\r\n self.parse_activity(child)\r\n\r\n def parse_activity(self, actitivity):\r\n #print(actitivity.tag, actitivity.attrib)\r\n self.Sport = actitivity.attrib['Sport']\r\n for child in actitivity:\r\n if child.tag == self.get_role('Lap'):\r\n self.parse_lap(child)\r\n\r\n def parse_lap(self, lap):\r\n #print(lap.tag, lap.attrib)\r\n self.get_lap_info(lap)\r\n for child in lap:\r\n if child.tag == self.get_role('Track'):\r\n self.dom_trackpoints = child.findall('role:Trackpoint', ns)\r\n for point in child.findall('role:Trackpoint', ns):\r\n Time = self.get_text(point, 'Time')\r\n hr = point.find('role:HeartRateBpm', ns)\r\n HeartRateBpm = self.get_text(hr, 'Value')\r\n AltitudeMeters = self.get_text(point, 'AltitudeMeters')\r\n DistanceMeters = self.get_text(point, 'DistanceMeters')\r\n Cadence = self.get_text(point, 'Cadence')\r\n trackpoint = TrackPoint(Time, HeartRateBpm, AltitudeMeters, DistanceMeters, Cadence)\r\n #self.TrackPoints.append(trackpoint)\r\n timeKey = Time[0:19]\r\n self.TrackPoints[timeKey] = trackpoint\r\n\r\n def get_lap_info(self, lap):\r\n self.StartTime = lap.attrib['StartTime']\r\n self.TotalTimeSeconds = self.get_text(lap, 'TotalTimeSeconds')\r\n self.DistanceMeters = self.get_text(lap, 'DistanceMeters')\r\n self.Calories = self.get_text(lap, 'Calories')\r\n self.AverageHeartRate = self.get_text(lap, 'AverageHeartRate')\r\n self.MaximumHeartRate = self.get_text(lap, 'MaximumHeartRate')\r\n self.Intensity = self.get_text(lap, 'Intensity')\r\n self.Cadence = self.get_text(lap, 'Cadence')\r\n self.TriggerMethod = self.get_text(lap, 'TriggerMethod')\r\n\r\n def plot_signal(self, signal):\r\n signal_values = []\r\n time_values = []\r\n if signal == 'HeartRateBpm':\r\n for tp_key in sorted(list(self.TrackPoints.keys())):\r\n signal_values.append(self.TrackPoints[tp_key].HeartRateBpm)\r\n #time_values.append(self.TrackPoints[tp_key].Time)\r\n plt.plot(signal_values)\r\n plt.ylabel('Heartbeat')\r\n plt.show()\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n file = 'Data/file_with_heartrate_data.tcx'\r\n tcx = ParseTcx(file)\r\n tcx.parse()\r\n tcx.get_info()\r\n","repo_name":"trieb/merge_tcx","sub_path":"ParseTcx.py","file_name":"ParseTcx.py","file_ext":"py","file_size_in_byte":5124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"42151868710","text":"#!/usr/bin/env python3\n# hjson2csv.py\n#\n# A simple HJSON to CSV converter.\n\nimport sys\nimport hjson\nimport itertools\nimport pandas as pd\n\ndef read_hjson(path):\n with open(path) as file:\n data = hjson.load(file)\n\n return data\n\nif __name__ == '__main__':\n try:\n _, in_path, out_path = sys.argv\n except ValueError:\n print(\"Usage:\")\n print(\" hjson2csv.py source dest\")\n sys.exit(1)\n\n data = 
read_hjson(in_path)\n\n # Chain the lists together.\n data = itertools.chain.from_iterable(data)\n\n data = pd.DataFrame.from_records(data)\n\n data.to_csv(out_path, index=False)\n","repo_name":"nick-ulle/2015-ecs230","sub_path":"hw2/scripts/hjson2csv.py","file_name":"hjson2csv.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"} +{"seq_id":"3911770211","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimg = cv2.imread(\"../data/check_board.PNG\")\ngray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n# 가장 최적화된 X개의 코더 반환 -> 5개 반환\ncorner = cv2.goodFeaturesToTrack(gray_img, 20, 0.01, 10)\n\n\ncorner = np.int0(corner)\nprint (corner)\n\nfor i in corner:\n x, y = i.ravel()\n cv2.circle(img, (x,y), 3, (255, 0, 0 ), -1)\n\nplt.imshow(img, cmap = \"gray\")\nplt.show()","repo_name":"aksrb1030/ImageProcessing","sub_path":"usingOpencv/shi_corner.py","file_name":"shi_corner.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"39277283756","text":"from setuptools import setup, find_packages\nfrom os import path\n\nfrom owo.utils import version\n\nhere = path.abspath(path.dirname(__file__))\n\nwith open('README.md') as f:\n long_description = f.read()\n\nsetup(\n name='owo',\n\n version=version,\n\n description='Python API wrapper for api.awau.moe',\n long_description=long_description,\n\n url='https://github.com/whats-this/owo.py',\n\n author='martmists',\n author_email='martmists@gmail.com',\n\n license='MIT',\n\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'Operating System :: OS Independent',\n 'Topic :: Communications :: File Sharing',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n ],\n\n install_requires=[\n \"requests\"\n ],\n\n python_requires='>=2.0,>=3.0',\n\n packages=find_packages(),\n\n entry_points={\n 'console_scripts': ['owo=owo.cli:main', 'owo-bg=owo.bg:main',\n 'owo-fix=owo.fix_termux:main'],\n },\n keywords='api wrapper owo',\n)\n","repo_name":"whats-this/owo.py","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"70"} +{"seq_id":"7873088573","text":"from flask import Blueprint, request, jsonify\nfrom models.models import Platform, StreamingAccount, Screen, ProductsByRequest, CompleteAccountRequest, db\nfrom flask_jwt_extended import jwt_required, current_user\nimport os\nfrom main import ErrorResponse, SuccessResponse\n\n# BLUEPRINTS\nproducts_bp = Blueprint('products_bp', __name__)\n\n@products_bp.before_request\ndef users_before_request():\n pass\n\ndef indexJSON(product, price=None, in_buy=0):\n return {\n \"id\":product.id,\n \"title\": product.name,\n \"img_path\": product.img_path(),\n \"file_name\": product.file_name,\n \"price\": f\"{price if price else product.final_price()} Bs\",\n \"in_buy\":in_buy\n }\n\ndef requestJSON(product, price=None, in_buy=0):\n ret = indexJSON(product, price, in_buy)\n ret[\"slug\"] = product.title_slug\n return ret\n\n@products_bp.route('/')\ndef index():\n all = {\"products\":\"\", \"platforms\":\"\"}\n all[\"products\"] = [requestJSON(products) for products in ProductsByRequest.query.filter(ProductsByRequest.public==1).all()]\n 
all[\"platforms\"] = [indexJSON(platforms, price=account.final_price(), in_buy=screens) for platforms, account, screens in Platform.all_with_price()]\n return all\n\n@products_bp.route(\"/platform//\")\n@jwt_required(optional=True)\ndef platform(id):\n def account_json(account, user=None):\n return {\n \"id\":account.id,\n \"days_left\":account.days_left(),\n \"start_date\":account.start_date.strftime(\"%d-%m-%Y\"),\n \"end_date\":account.end_date.strftime(\"%d-%m-%Y\"),\n \"price\":account.final_price(user=user),\n \"reference_reward\":account.final_reward\n }\n ret = dict()\n platform = Platform.query.filter(Platform.id == id).first()\n streaming_accounts = platform.streaming_accounts_dif_day(reverse=False)\n platformJSON = indexJSON(platform)\n platformJSON[\"url\"] = platform.url\n\n return jsonify({\n \"platform\":platformJSON,\n \"streaming_accounts\":[account_json(account, user=current_user) for account in streaming_accounts ]\n })\n\n \n@products_bp.route(\"/request//\")\n@jwt_required(optional=True)\ndef request_(slug):\n def product_json(product, user=None):\n return {\n \"id\":product.id,\n \"title\":product.title,\n \"img_path\":product.img_path(),\n \"description\":product.description\n }\n def config_json(product, user=None):\n return {\n **product.config,\n \"price_is_list\":product.price_is_list(),\n \"is_time\":product.is_time(),\n \"price\":product.final_price_list(user=user)\n }\n # ret = dict()\n productModel = ProductsByRequest.query.filter(ProductsByRequest.title_slug == slug).first()\n product = product_json(productModel)\n config = config_json(productModel)\n \n\n return jsonify({\n \"product\":product,\n \"config\":config\n # \"streaming_accounts\":[account_json(account, user=current_user) for account in streaming_accounts ]\n })\n\n\n\n@products_bp.route(\"/buy/