diff --git "a/3153.jsonl" "b/3153.jsonl"
new file mode 100644
--- /dev/null
+++ "b/3153.jsonl"
@@ -0,0 +1,716 @@
+{"seq_id":"28657648019","text":"\n# Define two numbers. Write a program that finds the LCM (least common multiple) of these two numbers.\n\n\n# Set a string of a set of numbers. Write a program that shows the higher and the lower numbers. Use a space as the separator character.\n\n# string = \"11 2 3 4 5 6\"\n\n# our_list = string.split()\n# number_list = list(map(int, our_list))\n# print(number_list)\n# print(max(number_list))\n# print(min(number_list))\n\nimport numpy as np\nfrom math import sqrt\n\n\ndef find_high_low(number_string):\n    number_list = number_string.split(' ')\n    number_list = list(map(int, number_list))\n    highest = max(number_list)\n    lowest = min(number_list)\n    return f'highest is {highest} and lowest is {lowest}'\n\n\nnumber_string = input(\"Enter a string of numbers separated by spaces: \")\nprint(find_high_low(number_string))\n\n\n# 2. Find the roots of the quadratic equation Ax² + Bx + C = 0 in two ways:\n# 1) Using mathematical formulas to find the roots of a quadratic equation\n# 2) with the help of additional Python libraries\n\n\ndef quadratic_equation(a, b, c):\n    discriminant = (b**2) - (4 * a * c)\n    if discriminant < 0:\n        return 'there are no roots, D < 0'\n    elif discriminant == 0:\n        x = -b / (2 * a)\n        return f'D = 0, the only root is {x}'\n    else:\n        x_1 = (-b + sqrt(discriminant)) / (2 * a)\n        x_2 = (-b - sqrt(discriminant)) / (2 * a)\n        return f'x_1 is {x_1}, x_2 is {x_2}'\n\n\na = int(input('enter a: '))\nb = int(input('enter b: '))\nc = int(input('enter c: '))\nprint(quadratic_equation(a, b, c))\n# -4x^2 + 28x - 49 = 0\n# 3x^2 - 4x + 94 = 0\n# x^2 - 10 = 39\n\n\n# input coefficients\nA = float(input(\"Enter the coefficient of x^2: \"))\nB = float(input(\"Enter the coefficient of x: \"))\nC = float(input(\"Enter the constant term: \"))\n\n# create an array of coefficients\ncoefficients = np.array([A, B, C])\n\n# calculate the roots using numpy.roots function\nroots = np.roots(coefficients)\n\n# print the roots\nprint(\"The roots are\", roots[0], \"and\", roots[1])\n#\n#\n# Note: The numpy.roots function returns an array of complex roots. If the roots are real, the imaginary part of the complex roots will be 0.\n\n\n
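# For example (values illustrative): np.roots([1, -3, 2]) solves x^2 - 3x + 2 = 0\n# and returns array([2., 1.]); when complex output is unwanted, the real parts can\n# be kept where the imaginary parts are ~0:\n# real_roots = [r.real for r in roots if abs(r.imag) < 1e-9]\n\n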
# Define two numbers. Write a program that finds the least common multiple of these two numbers.\n# lcm(a, b) = (a * b) / gcd(a, b)\n\ndef find_lcm(a, b):\n    def gcd(a, b):\n        while b:\n            a, b = b, a % b\n        return a\n\n    lcm = (a * b) // gcd(a, b)\n    return f'least common multiple is {lcm}'\n\n\na = int(input('enter first number: '))\nb = int(input('enter second number: '))\n\nprint(find_lcm(a, b))\n","repo_name":"yarkinlx/Python_study","sub_path":"sem4.py","file_name":"sem4.py","file_ext":"py","file_size_in_byte":2642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"74657459122","text":"import uuid\nfrom typing import List\n\nfrom sqlalchemy import (\n    create_engine, Column, String, Float,\n    Integer, ForeignKey, UniqueConstraint,\n    Table, UUID, Text,\n)\nfrom sqlalchemy.orm import relationship, registry\n\nfrom assimilator.core.database import BaseModel\nfrom assimilator.mongo.database import MongoModel\nfrom assimilator.redis_.database import RedisModel\n\nengine = create_engine(url=\"sqlite:///crud_database.db\")\nmapper_registry = registry()\n\nusers = Table(\n    \"users\",\n    mapper_registry.metadata,\n    Column(\"id\", Text(), default=lambda: str(uuid.uuid4()), primary_key=True),\n    Column(\"username\", String()),\n    Column(\"email\", String()),\n)\n\n\nbalances = Table(\n    \"balances\",\n    mapper_registry.metadata,\n    Column(\"id\", Text(), default=lambda: str(uuid.uuid4()), primary_key=True),\n    Column('user_id', ForeignKey(\"users.id\", ondelete=\"CASCADE\")),\n    Column('balance', Float(), server_default='0'),\n    Column('currency_id', ForeignKey(\"currency.id\")),\n\n    UniqueConstraint(\"balance\", \"user_id\"),\n)\n\n\ncurrency = Table(\n    \"currency\",\n    mapper_registry.metadata,\n    Column(\"id\", Text(), default=lambda: str(uuid.uuid4()), primary_key=True),\n    Column('currency', String(length=20)),\n    Column('country', String(length=20)),\n)\n\n\nclass AlchemyUser:\n    pass\n\n\nclass AlchemyBalance:\n    pass\n\n\nclass AlchemyCurrency:\n    pass\n\n\nmapper_registry.map_imperatively(\n    AlchemyUser,\n    users,\n    properties={\n        \"balances\": relationship(AlchemyBalance, uselist=True, lazy='select'),\n    },\n)\n\nmapper_registry.map_imperatively(\n    AlchemyBalance,\n    balances,\n    properties={\n        \"currency\": relationship(AlchemyCurrency, uselist=False, lazy='select'),\n    },\n)\n\nmapper_registry.map_imperatively(AlchemyCurrency, currency)\nmapper_registry.metadata.create_all(bind=engine, tables=[users, balances, currency])\n
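\n# A minimal usage sketch (hypothetical, not part of the original example): the\n# imperatively mapped classes accept keyword arguments for their mapped columns,\n# so one row could be persisted like this:\n#\n# from sqlalchemy.orm import Session\n# with Session(engine) as session:\n#     session.add(AlchemyUser(username=\"bob\", email=\"bob@example.com\"))\n#     session.commit()\n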
str\n","repo_name":"knucklesuganda/py_assimilator","sub_path":"examples/fastapi_crud_example/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2810,"program_lang":"python","lang":"en","doc_type":"code","stars":166,"dataset":"github-code","pt":"75"} +{"seq_id":"30395503536","text":"import requests\nimport requests_cache\nimport sys\n\nfrom bs4 import BeautifulSoup\n# from sklearn.naive_bayes import MultinomialNB\nimport pandas as pd\n\nrequests_cache.install_cache('demo_cache')\n\n\ndef download(url, num_retries=2):\n page = None\n try:\n response = requests.get(url)\n page = response.text\n if response.status_code >= 400:\n print('Download error:', response.text)\n if num_retries and 500 <= response.status_code < 600:\n return download(url, num_retries - 1)\n except requests.exceptions.RequestException as e:\n print('Download error:', e.reason)\n return page\n\n\nurl = 'http://www.tce.pi.gov.br'\nhtml = download(url)\nsoup = BeautifulSoup(html, 'html.parser')\n\ntable = soup.find(attrs={'id': 'latestnews'})\n_links = table.select('a.latestnews')\n\nlinks = []\n# print(_links)\nfor link in _links:\n row = {\n 'html': download(link['href']),\n 'titulo': link.text\n }\n links.append(row)\n\n\nwhile True:\n index = 0\n for link in links:\n print(str(index) + ' - ' + link['titulo'] + '\\n')\n index += 1\n\n option = input('Escolha uma das noticias, ou 5 para sair \\n')\n\n # print(links[option]['html'])\n\n try:\n print(links[int(option)]['html'])\n x = input('Exibir o menu novamente? 1 para sim 5 para sair \\n')\n if int(x) == 5:\n break\n except Exception:\n if int(option) == 5:\n break\n else:\n print('Opção invalida')\n\n\n","repo_name":"raphaeltataia/provatesi1","sub_path":"avaliacao-01.py","file_name":"avaliacao-01.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"39854937031","text":"import pygame\nfrom pygame import Rect\npygame.init()\nscreen = pygame.display.set_mode((800, 600), 0, 32)\nclk = pygame.time.Clock()\nangulo = 0\nfont = pygame.font.SysFont(\"arial\", 48, False, False)\npontos = 9999\ntexto = \"Hi-Score:{}\".format(pontos)\nimg_texto = font.render(texto, True, (255, 255, 0))\nwhile True:\n screen.fill((0, 0, 0))\n img_rot = pygame.transform.rotate(img_texto, angulo)\n angulo += 1\n screen.blit(img_rot, (200, 100))\n pygame.display.update()\n\n for e in pygame.event.get():\n if e.type == pygame.QUIT:\n exit()\n","repo_name":"antoniorcn/fatec-2019-1s","sub_path":"djd-prog2/manha/aula12/teste_fontes.py","file_name":"teste_fontes.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"75"} +{"seq_id":"73519034801","text":"from steps.EtlStepBase import EtlStepBase\r\nfrom utils.RedshiftDbQueryExecutor import RedshiftDbQueryExecutor\r\nfrom infrastructure.InfrastructureSettings import InfrastructureSettings\r\n\r\nclass CreateUserDimensionsEtlStep(EtlStepBase):\r\n \"\"\" Load the user dimension table\r\n\r\n Parameters:\r\n services_config : config settings \r\n infra_settings : infratructure settings \r\n\r\n \"\"\"\r\n\r\n def __init__(self, services_config, infra_settings : InfrastructureSettings ):\r\n super().__init__(services_config, infra_settings) \r\n self.redshiftDbQueryExecutor = RedshiftDbQueryExecutor(infra_settings)\r\n \r\n def run(self):\r\n \"\"\" Run the step- execute query \r\n \r\n \"\"\"\r\n\r\n print('start CreateUserDimensionsEtlStep')\r\n\r\n 
dim_song_query = \"\"\"insert into dwh.dim_users(user_id,first_name, last_name, gender, \"level\" )\r\n select user_id,first_name, last_name, gender, \"level\" from (\r\n select userid as user_id, firstname as first_name, lastname as last_name, gender, level , \r\n ROW_NUMBER() over (partition by userid order by ts desc) as rownumber\r\n from staging.stg_events\r\n where user_id is not null and upper(page)= 'NEXTSONG')\r\n where rownumber = 1;\"\"\"\r\n\r\n self.redshiftDbQueryExecutor.executeQuery(dim_song_query)\r\n print('end CreateUserDimensionsEtlStep')\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"ameyakhedekar/udacity_data_warehouse_assignment","sub_path":"steps/CreateUserDimensionsEtlStep.py","file_name":"CreateUserDimensionsEtlStep.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"20962426817","text":"import flask\nfrom flask import Flask, request, jsonify, make_response, render_template\n# from flask_socketio import SocketIO, emit\nfrom flask_cors import CORS, cross_origin\n\nfrom cute_ids import generate_cute_id\nfrom models import Game\nimport utils\nimport conf\nimport atexit\n\nimport json\nfrom datetime import datetime\nimport os\n\napp = Flask(__name__, static_url_path='',\n static_folder='react_build',\n template_folder='react_build')\napp.config['SECRET_KEY'] = conf.secret_key\napp.config[\"DEBUG\"] = True\ncors = CORS(app)\n# origins=[\"http://127.0.0.1:3000\"], headers=['Content-Type'], expose_headers=['Access-Control-Allow-Origin'], supports_credentials=True\napp.config['CORS_HEADERS'] = 'Content-Type'\napp.config['CORS_SUPPORTS_CREDENTIALS'] = True\napp.config['CORS_SUPPORTS_CREDENTIALS'] = True\napp.config['CORS_ORIGINS'] = [\"http://127.0.0.1:3000\"]\napp.config['CORS_EXPOSE_HEADERS'] = ['Access-Control-Allow-Origin']\n\ngg = Game(\"yellow-ladybug\")\ngames = {'yellow-ladybug': gg}\ncounter = {'c': 0}\n\n## React Routes ##\n\n@atexit.register\ndef shutdown():\n # save the game data :)\n recs = []\n for _, g in games.items():\n recs.append(g.to_json())\n data = {'games': recs}\n fn = os.path.join(\"./game_data/export_{}.json\".format(datetime.now().strftime(\"%Y%m%d_%H%M%S\")))\n with open(fn, 'w') as f:\n json.dump(data, f)\n\nfrom datetime import datetime\n\n@app.route(\"/\")\ndef home():\n print(\"one call at {}\".format(datetime.now()))\n return render_template(\"index.html\")\n\n\n@app.route(\"/login/\")\ndef login(gid):\n return render_template(\"index.html\")\n\n\n@app.route(\"/board/\")\ndef board(gid):\n return render_template(\"index.html\")\n\n\n@app.route(\"/board//winners\")\ndef board_winners():\n return render_template(\"index.html\")\n\n## End of React Routes ##\n\n\n\n@app.route('/games', methods=['POST', 'GET'])\n@cross_origin()\ndef games_api():\n if request.method == 'POST':\n player_name = request.json['player']\n game_id = request.json[\"game\"]\n if game_id == \"new\":\n while True:\n uid = generate_cute_id()\n if uid not in games:\n break\n game = Game(uid)\n add_game(game)\n else:\n uid = game_id\n game = get_game_by_id(uid)\n\n try:\n game.join(player_name)\n except Exception as e:\n print(e)\n flask.abort(400, str(e))\n resp = make_response(jsonify({\"game\": game.id}))\n resp.set_cookie(\"player\", player_name, httponly=True, samesite='Strict')\n resp.set_cookie(\"gid\", game.id, httponly=True, samesite='Strict')\n resp.set_cookie(\"token\", utils.create_token(player_name, game.id), httponly=True, samesite='Strict')\n return resp\n\n else:\n 
\n\n@app.after_request\ndef creds(response):\n    response.headers['Access-Control-Allow-Credentials'] = 'true'\n    response.headers['Access-Control-Allow-Origin'] = \"http://127.0.0.1:3000\"\n    return response\n\n\ndef get_game_by_id(gid):\n    return games.get(gid)\n\n\ndef add_game(g):\n    games[g.id] = g\n\n\n@app.route('/games/<gid>', methods=['POST', 'GET'])\n@cross_origin()\n@utils.authenticate_with_cookie_token\ndef games_status_api(gid):\n    counter['c'] += 1\n    print(\"Called {} times\".format(counter['c']))\n    if request.method == \"GET\":\n        ### verify that game exists and current request is allowed to get its general state and their personal data ###\n        game, player = get_authenticated_game_and_player_or_error(gid, request)\n        ### end verify ###\n\n        game_data = game.serialize_for_status_view(player)\n        # get the public state\n        # TODO: and the users state, cards etc\n        return jsonify({\"game\": game_data})\n\n\ndef get_authenticated_game_and_player_or_error(gid, request):\n    intended_game = request.cookies.to_dict()['gid']\n    player = request.cookies.to_dict()['player']\n    if intended_game != gid:\n        # the game in the cookie is different from the one the request is trying to get info for\n        error = \"Trying to get data for {} when the game the player is in is {}\".format(gid, intended_game)\n        print(error)\n        flask.abort(403, error)\n    game = get_game_by_id(gid)\n    if not game:\n        flask.abort(404)\n    if not game.contains_player(player):\n        error = \"Player {} is not in game {}\".format(player, gid)\n        print(error)\n        flask.abort(403, error)\n    return game, player\n
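\n\n# Failure modes of the helper above, as sent to the client:\n# - the 'gid' cookie differs from the gid in the URL -> 403\n# - the gid does not name a known game -> 404\n# - the 'player' cookie names a player who is not in that game -> 403\n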
Deleting cookie.\".format(game.id)\n print(error)\n resp = make_response(error, 403)\n resp.set_cookie(\"player\", '', httponly=True, samesite='Strict', expires=0)\n resp.set_cookie(\"gid\", '', httponly=True, samesite='Strict', expires=0)\n resp.set_cookie(\"token\", '', httponly=True, samesite='Strict', expires=0)\n flask.abort(resp)\n if not game.contains_player(player):\n error = \"Player {} is not in game {}\".format(player, intended_game)\n print(error)\n flask.abort(403, error)\n return game, player\n\n\n@app.route('/games//start', methods=['PUT'])\n@cross_origin()\n@utils.authenticate_with_cookie_token\ndef games_start(gid):\n game, player = get_authenticated_game_and_player_or_error(gid, request)\n try:\n game.start()\n except Exception as e:\n print(e)\n flask.abort(400)\n game_data = game.serialize_for_status_view(player)\n return jsonify({\"game\": game_data})\n\n\n@app.route('/games//set', methods=['PUT'])\n@cross_origin()\n@utils.authenticate_with_cookie_token\ndef games_set_card(gid):\n game, player = get_authenticated_game_and_player_or_error(gid, request)\n try:\n card = request.json['card']\n phrase = request.json.get('phrase')\n if phrase:\n game.set_narrator_card(player, card, phrase)\n else:\n game.set_decoy_card(player, card)\n except Exception as e:\n print(e)\n flask.abort(400)\n game_data = game.serialize_for_status_view(player)\n return jsonify({\"game\": game_data})\n\n\n@app.route('/games//vote', methods=['PUT'])\n@cross_origin()\n@utils.authenticate_with_cookie_token\ndef games_vote_card(gid):\n game, player = get_authenticated_game_and_player_or_error(gid, request)\n try:\n card = request.json['vote'] # this is the 'string' of the card\n game.cast_vote(player, card)\n except Exception as e:\n print(e)\n flask.abort(400, str(e))\n game_data = game.serialize_for_status_view(player)\n return jsonify({\"game\": game_data})\n\n\n@app.route('/games//next', methods=['PUT'])\n@cross_origin()\n@utils.authenticate_with_cookie_token\ndef games_next_round(gid):\n game, player = get_authenticated_game_and_player_or_error(gid, request)\n try:\n game.start_next_round()\n except Exception as e:\n print(e)\n flask.abort(400, str(e))\n game_data = game.serialize_for_status_view(player)\n return jsonify({\"game\": game_data})\n\n\n@app.route('/games/resume', methods=['GET'])\n@cross_origin()\n@utils.authenticate_with_cookie_token\ndef games_resume_from_cookie():\n game, player = get_authenticated_game_and_player_or_error_for_resume(request)\n return jsonify({\"game\": game.id, 'player': player})\n\n\nif __name__ == '__main__':\n app.run(port=5000, threaded=False, debug=False, host=\"0.0.0.0\")\n # app.run(port=5000, threaded=False, debug=True) #local\n # pipenv run gunicorn server:app -w=1 -b 0.0.0.0:5000 --threads 4\n","repo_name":"carolinux/dixit-api","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":8005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"2861169413","text":"import random\n\nclass Book:\n\n def __init__(self, name: str, author: str, year: int) -> None:\n self.__name = name\n self.__author = author\n self.__year = year\n self.__id = None\n\n def set_id(self, id: int) -> None:\n self.__id = id\n\n def is_valid(self) -> bool:\n if not isinstance(self.__name, str):\n return False\n\n if not isinstance(self.__author, str):\n return False\n\n if not isinstance(self.__year, int):\n return False\n\n return True\n\n def _book_not_valid_message(self) -> None:\n print(\"Livro 
inválido! \")\n\n def get_info(self) -> None:\n print(\n f\"\"\"\n id: {self.__id}\n name: {self.__name}\n author: {self.__author}\n year: {self.__year}\n \"\"\"\n )","repo_name":"cerozi/POO","sub_path":"desafio4/toDo/model/book.py","file_name":"book.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"28971468649","text":"# LEETCODE@ 57. Insert Interval\n#\n# --END--\n\n\nclass Solution(object):\n def insert(self, intervals, newInterval):\n s, e = newInterval.start, newInterval.end\n l, r = [], []\n for i in intervals:\n if i.end < s:\n l.append(i)\n elif e < i.start:\n r.append(i)\n else:\n s = min(s, i.start)\n e = max(e, i.end)\n return l + [Interval(s, e)] + r\n","repo_name":"Lancher/coding-challenge","sub_path":"array/_insert_interval.py","file_name":"_insert_interval.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"7848800480","text":"import pymongo as pymongo\r\n\r\ndef connectdb():\r\n client = pymongo.MongoClient(\"mongodb+srv://natfal:HaA9HeB1nrFcRXTz@cluster0.xz5ni.mongodb.net/test?retryWrites=true&w=majority\")\r\n db = client.test\r\n countries = db.countries\r\n cont = db.continents\r\n\r\n def find_country():\r\n var = str(input(\"Please enter word or letter: \"))\r\n for country in countries.find({\"Name\":{'$regex':var,'$options':'i'}}):\r\n print(country['Name'])\r\n\r\n find_country()\r\n\r\nif __name__ == '__main__':\r\n connectdb()","repo_name":"natfal14/nosql-assignment","sub_path":"q1.py","file_name":"q1.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"8971633379","text":"import sys\nimport os\nfrom pathlib import Path\n\nmyDir = os.getcwd()\nsys.path.append(myDir)\npath = Path(myDir)\nabsolute_path = str(path.parent.absolute())\nsys.path.append(absolute_path)\n\nimport requests\nfrom flask import Flask, jsonify, request\nfrom flask_cors import CORS\nfrom src.blockchain.blockchain import Blockchain\nfrom src.blockchain.block import Block\nfrom src.constant import HOST, TEMP_STORAGE_PORT\n\n\napp = Flask(__name__)\nCORS(app)\nports = set()\nattack_ports = set()\nfork_chain = None\ndsa_success = False\nmain_attacker = set()\n\n\n@app.route('/ports/all')\ndef all_ports():\n response = {'ports': list(ports)}\n return jsonify(response)\n\n\n@app.route('/ports/all/other/')\ndef all_other_ports():\n current_port = request.args.get('current_port')\n other_ports = ports.difference({current_port})\n response = {'ports': list(other_ports)}\n return jsonify(response)\n\n\n@app.route('/ports/add/', methods=['POST'])\ndef add_port():\n port = request.args.get('port')\n try:\n register_port(port)\n except ValueError:\n pass\n\n response = {'port': port}\n\n return jsonify(response)\n\n\ndef register_port(port):\n # if is_port_in_use(port):\n # ports.add(port)\n # print(f'add port {port}')\n # else:\n # print(f'port {port} not in use')\n ports.add(port)\n\n\n@app.route('/chain/fork', methods=['POST'])\ndef fork_blockchain():\n global fork_chain\n\n response = {}\n if not fork_chain:\n fork_chain = Blockchain.from_json(request.get_json())\n\n response['or_chain'] = fork_chain.to_json()\n\n fork_chain.fork()\n response['forked'] = True\n else:\n response['forked'] = False\n\n response['fork_chain'] = fork_chain.to_json()\n\n return 
jsonify(response)\n\n\n@app.route('/chain/fork')\ndef get_fork_chain():\n if fork_chain:\n response = {\n 'fork_chain': fork_chain.to_json(),\n 'len': len(fork_chain.chain)\n }\n return jsonify(response)\n\n return jsonify({'fork_chain': None, 'len': 0})\n\n\n@app.route('/length')\ndef get_fork_chain_len():\n if fork_chain:\n response = {\n 'len': len(fork_chain.chain)\n }\n return jsonify(response)\n\n return jsonify({'len': 0})\n\n\n@app.route('/chain/fork/clear')\ndef clear_fork_chain():\n global fork_chain\n if fork_chain:\n fork_chain = None\n\n return jsonify({'fork_chain': None})\n\n\n@app.route('/chain/fork/add/block', methods=['POST'])\ndef fork_chain_add_block():\n response = {}\n if fork_chain:\n potential_block_json = request.get_json()\n\n if fork_chain.add_block(Block.from_json(potential_block_json)):\n response['added'] = True\n else:\n response['added'] = False\n response['len'] = len(fork_chain.chain)\n response['fork_chain'] = fork_chain.to_json()\n\n return jsonify(response)\n\n\n@app.route('/chain/fork/broadcast')\ndef broadcast_chain():\n bc_response = {\n 'success_ports': [],\n 'fail_ports': []\n }\n if fork_chain:\n for port in ports:\n url = f'http://{HOST}:{port}/chain/resolve'\n response = requests.post(url, json=fork_chain.to_json())\n if response.status_code == 200:\n if response.json()['success']:\n bc_response['success_ports'].append(port)\n else:\n bc_response['fail_ports'].append(port)\n\n return jsonify(bc_response)\n\n\n@app.route('/dsa/success')\ndef get_dsa_success():\n return jsonify({'dsa_success': dsa_success})\n\n\n@app.route('/dsa/success/set/true', methods=['POST'])\ndef set_dsa_success_true():\n global dsa_success\n dsa_success = True\n\n return jsonify({'dsa_success': dsa_success})\n\n\n@app.route('/dsa/success/set/false', methods=['POST'])\ndef set_dsa_success_false():\n global dsa_success\n dsa_success = False\n\n return jsonify({'dsa_success': dsa_success})\n\n\n@app.route('/main/attacker')\ndef get_main_attacker():\n return jsonify({'main_attacker': len(main_attacker) == 1})\n\n\n@app.route('/main/attacker/set/true', methods=['POST'])\ndef set_main_attacker_true():\n global main_attacker\n main_attacker.add('true')\n\n return jsonify({'main_attacker': len(main_attacker) == 1})\n\n\n@app.route('/main/attacker/set/false', methods=['POST'])\ndef set_main_attacker_false():\n global main_attacker\n main_attacker = set()\n\n return jsonify({'main_attacker': len(main_attacker) == 1})\n\n\nif __name__ == '__main__':\n app.run(host=HOST, port=int(TEMP_STORAGE_PORT), debug=False)\n","repo_name":"zwan0202/double-spend-attack","sub_path":"src/temp_storage_manager.py","file_name":"temp_storage_manager.py","file_ext":"py","file_size_in_byte":4523,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"23318180787","text":"from os import path, sep\nfrom qgis.core import ( QgsVectorLayer, QgsApplication, QgsWkbTypes)\n\n# configura caminho ao recursos QGIS\nQgsApplication.setPrefixPath('/home/kylefelipe/miniconda3/envs/qgis_34', True)\n\n# Cria referencia a aplicação QGIS\n# Segundo argumento desabilita Interface grafica.\nqgs = QgsApplication([], False)\n\n# Inicializando e carregando os recursos\nqgs.initQgis()\n\ndiretorio = \"/home/kylefelipe/geocast_repo/livepyqgis/\"\n\n# criando caminho ao geopackage\nfull_path = path.join(diretorio, 'dados/base_dados.gpkg')\n\n# Montando metadados do arquivo\nmetadado = f\"\"\"{'-'*15}METADADOS{'-'*15}\"\"\"\nmetadado += f\"\"\"\\n\\nArquivo: 
{full_path.split(sep)[-1]}\"\"\"\nmetadado += f\"\"\"\\n\\nConteúdo:\"\"\"\n\n# testando se ele existe\nif path.isfile(full_path):\n arquivo = QgsVectorLayer( full_path, \"test\", \"ogr\")\n # Conectando ao gpkg com dataProvider()\n subLayers = arquivo.dataProvider().subLayers()\n for subLayer in subLayers:\n # print( subLayer )\n nome = subLayer.split( \"!!::!!\" )[1]\n print( \"Nome da layer =\", nome )\n uri = \"%s|layername=%s\" % ( full_path , nome ) # Se o python for 3.6 + pode-se usar f\"{full_path}|layername={nome}\"\n # Cria Camada\n sub_vlayer = QgsVectorLayer( uri, nome, 'ogr' )\n # quantas feicoes\n print( \"Total de feições =\", sub_vlayer.featureCount())\n \n # Quantidade de Campos\n qtd_campos = len(sub_vlayer.fields())\n print(\"Quantidade de campos =\", qtd_campos)\n \n # qual geometria\n geometria = QgsWkbTypes.displayString(sub_vlayer.wkbType())\n print( \"Geometria =\", geometria)\n \n # como pegar geometria\n print( sub_vlayer.wkbType())\n # consulta crs\n print( \"SRC =\", sub_vlayer.crs().authid(), \"\\n\\n\")\n \n # Montando Metadado\n metadado += f\"\"\"\\n\\nCamada = {nome}\"\"\"\n metadado += f\"\"\"\\nTotal de feições = {sub_vlayer.featureCount()}\"\"\"\n metadado += f\"\"\"\\nQuantidade de campos = {qtd_campos}\"\"\"\n metadado += f\"\"\"\\nGeometria = {geometria}\"\"\"\n metadado += f\"\"\"\\nSRC = {sub_vlayer.crs().authid()}\"\"\"\nelse:\n print( \"nao deu\" )\n\nsPath = full_path.split(sep)\npath_Metadados = path.join(diretorio, 'dados', sPath[-1].split('.')[0]+\"_metadados.txt\")\n\nwith open(path_Metadados, 'w') as meta:\n meta.write(metadado)\n print(\"Metadado gerado com sucesso.\")\n\n# fecha recursos\nqgs.exitQgis()\n","repo_name":"KaduMelo/pyqgis","sub_path":"aula_2/script_4.py","file_name":"script_4.py","file_ext":"py","file_size_in_byte":2421,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"29065274326","text":"from django.core.management.base import BaseCommand\nfrom django.utils import timezone\nimport pandas as pd\nfrom main.models import *\n\nclass Command(BaseCommand):\n help = 'Loads Data for Nigerian states'\n\n def handle(self, *args, **kwargs):\n\n country_name = \"\"\n cases = 0\n deaths = 0\n recoveries = 0\n\n total_cases = 0\n total_deaths = 0\n total_recoveries = 0\n\n wiki_url = \"https://en.wikipedia.org/wiki/2020_coronavirus_pandemic_in_Nigeria\"\n\n dfs = pd.read_html(wiki_url,header=0)\n dataframe = dfs[2][:-2] # REMOVE THE LAST 2 UNWANTED ROWS IN THE DATA FRAME (TOTAL & DESCRIPTION)\n\n state_name = \"\"\n cases = 0\n deaths = 0\n recoveries = 0\n\n\n for i in range(len(dataframe)):\n data = (dataframe.iloc[i])\n\n if True :\n if not isinstance(data[0], float):\n\n state_name = data[\"State\"].split(\"[\")[0]\n cases = data[\"Cases\"]\n deaths = data[\"Deaths\"]\n recoveries = data[\"Recovered\"]\n \n country = Country.objects.get(name = \"Nigeria\")\n state = State.objects.get_or_create(state_name = state_name, country = country)[0]\n\n state.country = country\n state.cases = cases \n state.deaths = deaths \n state.recoveries = recoveries\n\n state.save()\n\n print((state_name), (cases), (deaths), (recoveries), (recoveries).isnumeric())\n # except TypeError:\n # pass\n time = timezone.now().strftime('%X')\n self.stdout.write(\"It's now %s\" % 
time)","repo_name":"dtekluva/covid19_web_app","sub_path":"main/management/commands/update_nigeria_data.py","file_name":"update_nigeria_data.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"43315885733","text":"from ..filtering import ReadsFanout\nimport json\nfrom tagtools import utils\n\ndef add_subcommand_select(subparsers):\n parser = subparsers.add_parser('select')\n parser.set_defaults(func=select)\n parser.add_argument('--version', action='version', version='1.0.0')\n parser.add_argument('gps', help='groups')\n parser.add_argument('pairs', help='pairs file (MUST be sorted by readID)')\n parser.add_argument('tagfile', help='yaml tagfile')\n parser.add_argument('outprfx', help='output pairs file prfx')\n parser.add_argument('--txdict','-t', help='txtogene conversion')\n parser.add_argument('--gpnames','-n', help='gp1,gp2,etc...')\n parser.add_argument('--genes_by_name', '-e', action='store_true', help='gene names in english, rather that ENSG') #english\n parser.add_argument('--bytx', '-g', action='store_true', help='switches to tx level mode') \n #switches to gen\n parser.add_argument('--noambig', '-s', action='store_true', help='switches to STRICT mode')\n # rna_bam_in=rna, dna_bam_in=dna, readid_out_prfx='k_', pairs_out_prfx='k_', rnadna_out_prfx='k_', to_stdout=\"rna\", nmax=0\n parser.add_argument('--rna','-R', help='rna bam in')\n parser.add_argument('--dna','-D', help='dna bam in')\n parser.add_argument('--indexbams', '-i', action='store_true', help='sort and indexes out bams')\n parser.add_argument('--stdout','-o', help='choose what goes to stdout', choices=['pairs','readids','rna','dna'])\n parser.add_argument('--minflight', help='min flight distance', type=int, default=0)\n parser.add_argument('--nmax', help='limits parsing to that many reads')\n \n # parser.set_defaults(func=tag)\n\ndef select(args):\n print('pairup', args)\n from signal import signal, SIGPIPE, SIG_DFL\n signal(SIGPIPE,SIG_DFL) #allows to do head operations without throwing errors\n\n print(\"loading tag file\")\n with open(args.tagfile, \"r\") as ambiv_file:\n ambiv_data = json.load(ambiv_file)\n if args.bytx:\n # ambiv_LUT=ambiv_data[1][0]\n ambiv_LUT=utils.make_ambiv_LUT(ambiv_data[0])[0]\n else:\n ambiv_LUT=utils.make_ambiv_LUT(ambiv_data[1])[0]\n # ambiv_LUT=ambiv_data[0][0]\n\n print(\"loading annotations file\")\n with open(args.txdict, \"r\") as read_file:\n annot_data=json.load(read_file)\n\n tx_dict=annot_data[0]\n name_to_ENSG=annot_data[1]\n bygene=not(args.bytx)\n\n tx_pre_list=args.gps.split(\",\")\n tx_list_names=[pre.split(\"+\") for pre in tx_pre_list]\n\n if args.genes_by_name:\n tx_list_list=[[name_to_ENSG.get(s,\"*\") for s in g] for g in tx_list_names]\n else:\n tx_list_list=tx_list_names\n\n if args.gpnames is None:\n gpnames=[str(i+1) for i in range(len(tx_list_list))]\n else:\n gpnames=args.gpnames.split(\",\")\n \n # if args.minflight is None:\n # myminflight=0\n # else:\n # myminflight=args.minflight\n\n # if (args.minflight>0) and (len(gpnames)<2*len(tx_list_list)):\n # gpnames2=gpnames+[x+\"_selflocus\" for x in gpnames]\n # gpnames=gpnames2 \n\n print(\"started!\")\n fanout=ReadsFanout(tx_list_list, ambiv_LUT, bygene=bygene, tx_dict=tx_dict, group_names=gpnames, noambig=args.noambig, min_flight=args.minflight)\n fanout.fanout(args.pairs, rna_bam_in=args.rna, dna_bam_in=args.dna, readid_out_prfx=args.outprfx, pairs_out_prfx=args.outprfx, rnadna_out_prfx=args.outprfx, 
to_stdout=args.stdout, indexbams=args.indexbams, nmax=args.nmax)\n \n","repo_name":"straightlab/chartools","sub_path":"chartools/cli/select.py","file_name":"select.py","file_ext":"py","file_size_in_byte":3440,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"24250018306","text":"#Programa 01\n# programa para ler 30 números\n# exibir a quantidade de positivos\n\ncont = 0\nqtdePositivos = 0\n\nwhile cont < 30:\n numero = int(input())\n if numero >= 0:\n qtdePositivos += 1\n cont += 1\nprint(qtdePositivos)","repo_name":"enosteteo/Introducao-a-Programacao-P1","sub_path":"4. Estrutura de Repeticao While/Lista 02/programa 01.b.py","file_name":"programa 01.b.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"41850217710","text":"# coding:latin-1\r\n\r\nfrom lib.pdm.modelo_pdm import ModeloPDM\r\n\r\n\r\nclass PDM:\r\n\r\n def __init__(self, gama, delta_max):\r\n self.__gama = gama # fator de desconto temporal\r\n self.__delta_max = delta_max # limiar de convergência\r\n\r\n def utilidade(self, modelo):\r\n \"\"\"\r\n\r\n Parameters\r\n ----------\r\n modelo : ModeloPDM\r\n\r\n Returns\r\n -------\r\n dict[(int,int), float]\r\n mapa de pares estado utilidade\r\n \"\"\"\r\n S, A = modelo.S, modelo.A # renomear funções\r\n U = {s: 0 for s in S()}\r\n while True:\r\n Uant = U.copy()\r\n delta = 0\r\n for s in S():\r\n U[s] = max(self.util_accao(s, a, Uant, modelo) for a in A(s))\r\n delta = max(delta, abs(U[s] - Uant[s]))\r\n if delta < self.__delta_max:\r\n break\r\n return U\r\n\r\n def util_accao(self, s, a, U, modelo):\r\n \"\"\"\r\n Obtém a utilidade da execução de uma ação num estado\r\n\r\n Parameters\r\n ----------\r\n s : (int,int)\r\n estado\r\n a : Operador\r\n ação\r\n U : dict[(int,int), float]\r\n utilidade\r\n modelo : ModeloPDM\r\n modelo\r\n\r\n Returns\r\n -------\r\n float\r\n a utilidade\r\n \"\"\"\r\n T, R, gama = modelo.T, modelo.R, self.__gama\r\n return sum(p * (R(s, a, sn) + gama * U[sn]) for (p, sn) in T(s, a))\r\n\r\n def politica(self, U, modelo):\r\n \"\"\"\r\n Calcula a politica ótima\r\n\r\n Parameters\r\n ----------\r\n U : dict[(int,int), float]\r\n utilidade\r\n modelo : ModeloPDM\r\n modelo\r\n\r\n Returns\r\n -------\r\n dict[(int,int), float]\r\n a política ótima\r\n\r\n \"\"\"\r\n pol = {}\r\n for s in modelo.S():\r\n pol[s] = max(modelo.A(s), key=lambda a: self.util_accao(s, a, U, modelo))\r\n return pol\r\n\r\n def resolver(self, modelo):\r\n utilidade = self.utilidade(modelo)\r\n return utilidade, self.politica(utilidade, modelo)\r\n","repo_name":"GiodoAldeima/iasa-cp3","sub_path":"lib/pdm/pdm.py","file_name":"pdm.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"73721598642","text":"import math\nfrom shapely import geometry\n\n_indent = 8.0\n\n\ndef getMovementDescription(ib_link, ob_link):\n ib_start, ib_end = ib_link.geometry_xy.coords[0], ib_link.geometry_xy.coords[-1]\n ob_end = ob_link.geometry_xy.coords[-1]\n\n angle_ib = math.atan2(ib_end[1] - ib_start[1], ib_end[0] - ib_start[0])\n if -0.75 * math.pi <= angle_ib < -0.25 * math.pi:\n direction = 'SB'\n elif -0.25 * math.pi <= angle_ib < 0.25 * math.pi:\n direction = 'EB'\n elif 0.25 * math.pi <= angle_ib < 0.75 * math.pi:\n direction = 'NB'\n else:\n direction = 'WB'\n\n angle_ob = math.atan2(ob_end[1] - ib_end[1], ob_end[0] - 
ib_end[0])\n angle = angle_ob - angle_ib\n if angle < -1 * math.pi:\n angle += 2 * math.pi\n if angle > math.pi:\n angle -= 2 * math.pi\n\n if -0.25 * math.pi <= angle <= 0.25 * math.pi:\n mvmt = 'T'\n mvmt_type = 'thru'\n elif angle < -0.25 * math.pi:\n mvmt = 'R'\n mvmt_type = 'right'\n elif angle <= 0.75 * math.pi:\n mvmt = 'L'\n mvmt_type = 'left'\n else:\n mvmt = 'U'\n mvmt_type = 'uturn'\n\n mvmt_txt_id = direction + mvmt\n return mvmt_txt_id, mvmt_type\n\n\ndef getMovementGeometry(ib_link, ob_link):\n ib_geometry_xy = ib_link.geometry_xy\n ib_indent = _indent if ib_geometry_xy.length > _indent else ib_geometry_xy.length / 2\n ib_point = ib_geometry_xy.interpolate(-1 * ib_indent)\n\n ob_geometry_xy = ob_link.geometry_xy\n ob_indent = _indent if ob_geometry_xy.length > _indent else ob_geometry_xy.length / 2\n ob_point = ob_geometry_xy.interpolate(ob_indent)\n\n geometry_xy = geometry.LineString([ib_point, ob_point])\n return geometry_xy\n\n\n","repo_name":"jiawlu/OSM2GMNS","sub_path":"osm2gmns/movement/util_mvmt.py","file_name":"util_mvmt.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"75"} +{"seq_id":"19227637821","text":"\"\"\"\nEvolution Strategy\n This module implements the Evolution Strategy Optimization algorithm.\n\nClasses:\n EvolutionStrategy: An implementation of the Evolution Strategy.\n\"\"\"\n\nimport random\nfrom typing import List, Tuple\n\nfrom natural_computing.objective_functions import BaseFunction\nfrom natural_computing.utils import argsort, bounded_random_vectors, sum_lists\n\nfrom .base_optimizer import PopulationBaseOptimizer\n\n\nclass EvolutionStrategy(PopulationBaseOptimizer):\n def __init__(\n self,\n mu: int,\n lambda_value: int,\n step_size: float,\n max_iterations: int,\n search_space: List[Tuple[float, float]],\n plus_version: bool = False,\n ) -> None:\n \"\"\"\n Initialize the Evolution Strategy optimizer.\n\n Args:\n mu (int): Number of selected parents.\n lambda_value (int): The size of the population.\n step_size (float): Constant that multiples Gaussian values\n max_iterations (int): The maximum number of optimization\n iterations.\n search_space (List[Tuple[float, float]]): The search space bounds\n for each dimension.\n plus_version (bool): Indicates whether to add selected parents to\n new generation.\n \"\"\"\n super().__init__(max_iterations, lambda_value, search_space)\n self._mu = mu\n self._step_size = step_size\n self._dimension = len(search_space)\n self._plus_version = plus_version\n self.population: List[List[float]] = []\n self.best_global_position: List[float] = [0.0 for _ in search_space]\n self.initialize_population()\n\n def initialize_population(self) -> None:\n \"\"\"\n Initialize individuals with random values.\n \"\"\"\n # clear current population\n self.population.clear()\n\n for _ in range(self.population_size):\n self.population.append(bounded_random_vectors(self.search_space))\n\n def _optimization_step(\n self, objective_function: BaseFunction\n ) -> List[float]:\n \"\"\"\n Perform a single optimization step using Evolution Strategy.\n\n Args:\n objective_function (BaseFunction): The objective function to be\n optimized.\n\n Returns:\n List[float]: List of fitness values for the population.\n \"\"\"\n new_population: List[List[float]] = []\n\n # selected the best individuals based on the rank\n ranks = argsort(\n [objective_function.evaluate(x) for x in self.population]\n )\n selected = [self.population[x] for x in ranks[: 
self._mu]]\n\n # generate children\n for parent in selected:\n # add parent\n if self._plus_version:\n new_population.append(parent)\n\n for _ in range(self.population_size // self._mu):\n # create child\n new_population.append(\n sum_lists(\n parent,\n [\n self._step_size * random.gauss(0, 1)\n for _ in range(self._dimension)\n ],\n )\n )\n\n # update the best value found\n self.population = new_population\n fits = [objective_function.evaluate(x) for x in self.population]\n\n for i in range(self.population_size):\n if fits[i] < self.best_global_value:\n self.best_global_value = fits[i]\n self.best_global_position = self.population[i]\n\n return fits\n","repo_name":"gsoaresbaptista/natural-computing","sub_path":"natural_computing/optimization/evolution_strategy.py","file_name":"evolution_strategy.py","file_ext":"py","file_size_in_byte":3622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"71761297842","text":"#-*- coding:utf-8 -*-\n\n\"\"\"\n10026 적록색약\n\nNxN 그리드에 RGB로 그린 그림\n몇 개의 구역은 같은 색으로 이루어짐\n상하좌우 인접한 색은 같은 구역\n일반인이 보는 구역의 개수와 적록색약이 보는 구역의 개수를 출력\n\n알고리즘: DFS\n\n1. 적록색약이 보는 그림을 얻기\n2. dfs로 그림을 RGB 하나씩 모두 살펴보는데 방문한 곳은 X로 치환\n3. 구역 수 얻기\n\"\"\"\n\n\ndef dfs(start, picture):\n stack = [start]\n # 단순히 True False가 아닌 RGB이기 때문에 처음 시작하는 color를 저장\n color = picture[start[0]][start[1]]\n\n while stack:\n node = stack.pop()\n\n # 만약 처음 color와 현재 node의 색이 같���면\n if picture[node[0]][node[1]] == color:\n # 방문했으니깐 X로 치환\n picture[node[0]][node[1]] = 'X'\n # check_next로 다음 가능한 상하좌우를 stack에 추가\n stack.extend(check_next(node[0], node[1], len(picture), len(picture[0])))\n\n # dfs가 끝난다는 것은 경로 하나가 나온 것이므로 1을 리턴\n return 1\n\n\ndef check_next(x, y, width, height):\n next_list = []\n if x-1 >= 0: # 좌\n next_list.append((x-1, y))\n if y-1 >= 0: # 하\n next_list.append((x, y-1))\n if x+1 <= width-1: # 우\n next_list.append((x+1, y))\n if y+1 <= height-1: # 상\n next_list.append((x, y+1))\n return next_list\n\n\nif __name__ == '__main__':\n n = int(input())\n pic = [list(input()) for _ in range(n)]\n\n # 적록색약이 보는 그림\n pic_weak = [[] for _ in range(n)]\n for i, row in enumerate(pic):\n for j, v in enumerate(list(row)):\n # 값이 G면\n if v == \"G\":\n # G가 아니라 R로 저장\n pic_weak[i].append(\"R\")\n else:\n # R, B는 그대로 저장\n pic_weak[i].append(v)\n\n # 구역 개수 초기화\n normal, weak = 0, 0\n for i, row in enumerate(pic):\n for j, v in enumerate(row):\n # 정상인의 그림의 값이 X가 아니면, 즉 방문하지 않았으면\n if v != \"X\":\n # 현 위치에서 dfs 시작 및 return된 1 추가\n normal += dfs((i, j), pic)\n\n # 적록색약의 그림의 값이 X가 아니면, 즉 방문하지 않았으면\n if pic_weak[i][j] != \"X\":\n # dfs 시작\n weak += dfs((i, j), pic_weak)\n\n print(\"{} {}\".format(normal, weak))","repo_name":"HanSeokhyeon/Baekjoon-Online-Judge","sub_path":"code/10026_color_weakness.py","file_name":"10026_color_weakness.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"36103715816","text":"#Getting İnput(for getting input you should put blank space at end of input)\r\nlicenses=[]\r\nflag=True\r\nwhile flag:\r\n\r\n company_dict={}\r\n company_prop = input().split()\r\n if not company_prop:\r\n flag=False\r\n break\r\n if(flag):\r\n company_dict[\"company\"] = int(company_prop[0])\r\n company_dict[\"price\"] = int(company_prop[1])\r\n licenses.append(company_dict)\r\n#-----------------------------------------------------------------\r\nhighest=0\r\norder=0\r\nansArr=[]\r\n#For this question,beacuse of increasing prices we tend to choose first 
the highest price among all of them, then do the same for the remaining licenses.\r\n# We use a greedy algorithm: pick licenses from the highest cost to the lowest cost.\r\nfor i in range(0, len(licenses)):\r\n    for j in range(0, len(licenses)):\r\n        if licenses[j][\"price\"] > highest:\r\n            highest = licenses[j][\"price\"]\r\n            order = j\r\n\r\n    ansArr.append(licenses[order][\"company\"])\r\n    licenses.pop(order)\r\n    highest = 0\r\n\r\nprint(*ansArr)\r\n\r\n# total cost: O(n*n)\r\n
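\r\n# Worked example (prices illustrative): with costs [40, 10, 20] the greedy order\r\n# is 40, 20, 10, paying 40*(2**0) + 20*(2**1) + 10*(2**2) = 120; buying the\r\n# cheapest first would pay 10*(2**0) + 20*(2**1) + 40*(2**2) = 210.\r\n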
\r\n# PROOF\r\n# Our goal is to pay the least money.\r\n#\r\n# The greedy algorithm for this question pays from the highest cost to the lowest cost,\r\n# because prices double with each later purchase and so grow rapidly.\r\n#\r\n# Sorted list, highest to lowest: A = {p1, p2, ..., pn}\r\n#\r\n# Since the list is sorted highest to lowest, the minimum total price = p1*(2**0) + p2*(2**1) + ... + pn*(2**(n-1)).\r\n# If we change the placement of elements, the total price gets higher according to the price\r\n# equation, so we have to buy from the highest-priced license down to the lowest-priced one.\r\n","repo_name":"KeremAliAkyuz/AlgorithmAssignments","sub_path":"hw6/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"4765872333","text":"#print(all([0,1,2,3]))\n#print(all([1,2,3]))\n#mylist = [\"hello\", \"there\", \"how\", \"are\", \"you\", \"\"]\n#print(all(mylist))\n\n\n#a_var = 'I am a global var'\n\n#def a_func():\n    # a_var = 'I am a local var'\n    # print(a_var, '[ a_var inside a_func() ]')\n\n#a_func()\n#print(a_var, '[ a_var outside a_func() ]')\n\na = 'global'\n\n\ndef outer():\n\n    def functionLen(in_var):\n        print('called my len() function: ', end=\"\")\n        l = 0\n        for i in in_var:\n            l += 1\n        return l\n\n    a = 'local'\n\n    def inner():\n        global functionLen\n        nonlocal a\n        a += ' variable'\n\n    inner()\n    print('a is', a)\n    print(functionLen(a))\n\nouter()\n\nprint(len(a))\nprint('a is', a)\n\n\n","repo_name":"piksa13/ChallengingTasks4beginners","sub_path":"ChallengingTask_level_3/GlobalVsLocal_var.py","file_name":"GlobalVsLocal_var.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"41850217710","text":"cont = 'yes'\r\nwhile cont.lower() != 'no':\r\n    print('Ravmobedreniy treygolnic!!!\\n')\r\n    height = int(input('Vvedite visoty treygolnika: '))\r\n    for i in range(0, height):\r\n        i += 1\r\n        for j in range(1, height - i + 1):  # draw the leading spaces\r\n            print(' ', end='')\r\n            j += 1\r\n        for j in range(height - (2 * i) + 1, height):  # draw the ^ characters\r\n            print('^', end='')\r\n            j += 1\r\n        print('')\r\n    cont = input('Hotite prodolzat? Type yes or no ')","repo_name":"AkOutlaw/MG_HomeWork","sub_path":"L4 dz.py","file_name":"L4 dz.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"71961794161","text":"import torch\nimport torch.nn as nn\nfrom ..models.vgg19_model import Vgg19\n\n\nclass VGGLoss(nn.Module):\n    def __init__(self):\n        super(VGGLoss, self).__init__()\n        self.vgg = Vgg19().cuda()\n        self.criterion = nn.L1Loss()\n        self.weights = [1.0 / 16, 1.0 / 8, 1.0 / 4, 1.0 / 2, 1.0]\n\n    def forward(self, x, y):\n        x_vgg, y_vgg = self.vgg(x), self.vgg(y)\n        loss = 0\n        for i in range(len(x_vgg)):\n            loss += self.weights[i] * self.criterion(x_vgg[i], y_vgg[i].detach())\n        return loss\n\n    def warp(self, x, y):\n        x_vgg, y_vgg = self.vgg(x), self.vgg(y)\n        loss = 0\n        loss += self.weights[4] * self.criterion(x_vgg[4], y_vgg[4].detach())\n        return loss","repo_name":"GeCao/DL-Driven-Fashion-Image-Generation","sub_path":"src/losses/vgg_loss.py","file_name":"vgg_loss.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"}
+{"seq_id":"74539066161","text":"import csv\r\n\r\n\r\ndef read_csv(file):\r\n    feature = []\r\n    with open(file, \"r\", encoding='UTF-8') as csvFile:\r\n        csv_reader = csv.reader(csvFile)\r\n        for i in csv_reader:\r\n            feature.append(i)\r\n    return feature\r\n\r\n\r\ndef write_csv(name, list_w):\r\n    with open(name, 'a', newline='') as csvFile:\r\n        writer = csv.writer(csvFile)\r\n        for i in list_w:\r\n            writer.writerow(i)\r\n\r\n\r\ndef cre_dict(f1, f2):\r\n    word = []\r\n    d = []\r\n    for i in f1:\r\n        for j in i:\r\n            if j not in word:\r\n                word.append(j)\r\n    for i in f2:\r\n        for j in word:\r\n            if j in i[5]:\r\n                d.append((j, [i[0], i[1], i[2], i[3], i[4]]))\r\n    return d\r\n\r\n\r\nif __name__ == '__main__':\r\n    file1 = \"feature/dbscan/FoodForHealth.csv\"\r\n    file2 = \"feature/food_1000_new.csv\"\r\n    file3 = \"feature/food_dict\"\r\n    f1 = read_csv(file1)\r\n    f2 = read_csv(file2)\r\n    food_dict = cre_dict(f1, f2)\r\n    write_csv(file3, food_dict)\r\n","repo_name":"Kazaf123/Cross-Domain-Requirement-Acquisition","sub_path":"代码/dbscan_dict.py","file_name":"dbscan_dict.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"6545796760","text":"import torch\nimport sys\nsys.path.append('../utils/')\nfrom utils import plotsAnalysis\nif __name__ == '__main__':\n    #pathnamelist = ['reg0.0001','reg0.0005','reg0.005','reg0.001','reg1e-5','reg5e-5']\n    pathnamelist = ['ballistics']\n    for pathname in pathnamelist:\n        plotsAnalysis.HeatMapBVL(plot_x_name='layer_num', plot_y_name='linear_unit', title='linear vs linear_unit Heat Map',\n                                 save_name=pathname + '_heatmap.png', HeatMap_dir='models/'+pathname,\n                                 feature_1_name='linear_d', feature_2_name='linear_unit')\n        #plotsAnalysis.HeatMapBVL(plot_x_name='linear_d_layer', plot_y_name='linear_unit', title='linear_d_layer vs linear_unit Heat Map',\n        #                         save_name=pathname + '_heatmap.png', HeatMap_dir='models/'+pathname,\n        #                         feature_1_name='linear_d', feature_2_name='linear_unit')\n","repo_name":"BensonRen/idlm_Pytorch","sub_path":"VAE/plotswipe.py","file_name":"plotswipe.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"}
+{"seq_id":"10347858979","text":"import os\nimport numpy as np\nfrom colorama import Back, Fore\nfrom config import 
cfg\nfrom dataset import detection_set\nfrom dataset.voc.pascal_voc import PascalVoc\nfrom dataset.coco.coco import COCO\n\n\ndef get_dataset(dataset_sequence, params, mode='train', only_classes=False):\n only_cls_str = 'classes for ' if only_classes else ''\n print(Back.WHITE + Fore.BLACK + 'Loading {}image dataset...'.format(only_cls_str))\n dataset_name = dataset_sequence.split('_')[0]\n if dataset_name == 'detect':\n dataset = detection_set.DetectionSet(params)\n short_name = 'det_set'\n print('Loaded Detection dataset.')\n elif dataset_name == 'voc':\n year = dataset_sequence.split('_')[1]\n image_set = dataset_sequence[(len(dataset_name) + len(year) + 2):]\n if 'devkit_path' in params:\n params['devkit_path'] = os.path.join(cfg.DATA_DIR, params['devkit_path'])\n else:\n print(Back.YELLOW + Fore.BLACK + 'WARNING! '\n + 'Cannot find \"devkit_path\" in additional parameters. '\n + 'Try to use default path (./data/VOCdevkit)...')\n params['devkit_path'] = os.path.join(cfg.DATA_DIR, 'VOCdevkit')\n dataset = PascalVoc(image_set, year, params, only_classes)\n short_name = dataset_name + '_' + year\n print('Loaded {}PascalVoc {} {} dataset.'.format(only_cls_str, year, image_set))\n elif dataset_name == 'coco':\n year = dataset_sequence.split('_')[1]\n image_set = dataset_sequence[(len(dataset_name) + len(year) + 2):]\n if 'data_path' in params:\n params['data_path'] = os.path.join(cfg.DATA_DIR, params['data_path'])\n else:\n print(Back.YELLOW + Fore.BLACK + 'WARNING! '\n + 'Cannot find \"data_path\" in additional parameters. '\n + 'Try to use default path (./data/COCO)...')\n params['data_path'] = os.path.join(cfg.DATA_DIR, 'COCO')\n dataset = COCO(image_set, year, params, only_classes)\n short_name = dataset_name + '_' + year\n print('Loaded {}COCO {} {} dataset.'.format(only_cls_str, year, image_set))\n else:\n raise NotImplementedError(Back.RED + 'Not implement for \"{}\" dataset!'.format(dataset_name))\n\n if not only_classes:\n if mode == 'train' and cfg.TRAIN.USE_FLIPPED:\n print(Back.WHITE + Fore.BLACK + 'Appending horizontally-flipped '\n + 'training examples...')\n dataset = _append_flipped_images(dataset)\n print('Done.')\n\n print(Back.WHITE + Fore.BLACK + 'Preparing image data...')\n dataset = _prepare_data(dataset)\n print('Done.')\n\n if mode == 'train':\n print(Back.WHITE + Fore.BLACK + 'Filtering image data '\n + '(remove images without boxes)...')\n dataset = _filter_data(dataset)\n print('Done.')\n\n return dataset, short_name\n\n\ndef _append_flipped_images(dataset):\n for i in range(len(dataset)):\n img = dataset.image_data[i].copy()\n img['index'] = len(dataset)\n img['id'] += '_f'\n img['flipped'] = True\n boxes = img['boxes'].copy()\n oldx1 = boxes[:, 0].copy()\n oldx2 = boxes[:, 2].copy()\n boxes[:, 0] = img['width'] - oldx2 - 1\n boxes[:, 2] = img['width'] - oldx1 - 1\n assert (boxes[:, 2] >= boxes[:, 0]).all()\n img['boxes'] = boxes\n dataset.image_data.append(img)\n dataset._image_index.append(img['id'])\n\n return dataset\n\n\ndef _prepare_data(dataset):\n for i in range(len(dataset)):\n # TODO: is this really need!?\n # max overlap with gt over classes (columns)\n max_overlaps = dataset.image_data[i]['gt_overlaps'].max(axis=1)\n # gt class that had the max overlap\n max_classes = dataset.image_data[i]['gt_overlaps'].argmax(axis=1)\n dataset.image_data[i]['max_classes'] = max_classes\n dataset.image_data[i]['max_overlaps'] = max_overlaps\n # sanity checks\n # max overlap of 0 => class should be zero (background)\n zero_inds = np.where(max_overlaps == 0)[0]\n 
assert all(max_classes[zero_inds] == 0)\n # max overlap > 0 => class should not be zero (must be a fg class)\n nonzero_inds = np.where(max_overlaps > 0)[0]\n assert all(max_classes[nonzero_inds] != 0)\n\n return dataset\n\n\ndef _filter_data(dataset):\n print('Before filtering, there are %d images...' % (len(dataset)))\n i = 0\n while i < len(dataset):\n if len(dataset.image_data[i]['boxes']) == 0:\n del dataset.image_data[i]\n i -= 1\n i += 1\n\n print('After filtering, there are %d images...' % (len(dataset)))\n return dataset\n\n","repo_name":"loolzaaa/faster-rcnn-pytorch","sub_path":"lib/dataset/dataset_factory.py","file_name":"dataset_factory.py","file_ext":"py","file_size_in_byte":4769,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"75"} +{"seq_id":"36446489067","text":"diagnostic_report = []\nwith open('day3_input.txt', 'r') as infile:\n for row in infile:\n\n row= (row.strip('\\n'))\n diagnostic_report.append(row)\n\nprint(diagnostic_report) # len = 1000\n\ngamma_rate = [0]*len(diagnostic_report[0])\nepsilon_rate = [0]*len(diagnostic_report[0])\nprint(gamma_rate)\nfor num in diagnostic_report:\n for i in range(len(num)):\n gamma_rate[i] += int(num[i])\ndiagnostic_report.sort()\n# print(diagnostic_report)\n\n# diagnostic_report = ['00100',\n# '11110',\n# '10110',\n# '10111',\n# '10101',\n# '01111',\n# '00111',\n# '11100',\n# '10000',\n# '11001',\n# '00010',\n# '01010']\n# diagnostic_report.sort()\n\n#Oxygen gen. rating, most common value\ndef oxy_gen(report, digit):\n if len(report) == 1:\n return report\n for i in range(len(report)):\n if int(report[i][digit]) == 1:\n if i <= len(report)/2:\n report = report[i::]\n return oxy_gen(report,digit+1)\n\n else:\n report = report[0:i]\n return oxy_gen(report, digit+1)\noxy_gen_binary = oxy_gen(diagnostic_report,0)\n\n#co2 scrub rating, least common value\ndef co2_scrub(report, digit):\n if len(report) == 1:\n return report\n for i in range(len(report)):\n if int(report[i][digit]) == 1:\n if i <= len(report)/2:\n report = report[0:i]\n return co2_scrub(report,digit+1)\n\n else:\n report = report[i::]\n return co2_scrub(report, digit+1)\nco2_scrub_binary = co2_scrub(diagnostic_report,0)\n\n\noxy_gen_converted = 0\nco2_scrub_converted = 0\ni = len(oxy_gen_binary[0])\nwhile i > 0:\n if int(oxy_gen_binary[0][len(oxy_gen_binary[0])-i])==1:\n oxy_gen_converted += 2**(i-1)\n if int(co2_scrub_binary[0][len(co2_scrub_binary[0])-i]) == 1:\n co2_scrub_converted += 2 **(i-1)\n i -= 1\n\nprint(oxy_gen_converted,co2_scrub_converted,oxy_gen_converted*co2_scrub_converted)\n\n","repo_name":"ngt1986/Projects","sub_path":"Advent of Code 2021/Day 3/Day3_2.py","file_name":"Day3_2.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"43509716219","text":"# -*- coding: UTF-8 -*-\nimport string\noStr = input(\"请输入一串字符:\")\n \nstr_num = 0\nspac_num = 0\nfigue_num = 0\nChinese_num = 0\nother_num = 0\n \nfor strs in oStr:\n if strs in string.ascii_letters:\n str_num +=1\n elif strs.isdigit():\n figue_num +=1\n elif strs.isspace():\n spac_num +=1\n elif strs.isalpha():\n Chinese_num += 1\n else:\n other_num += 1\nprint (\"英文字母有:%d\" %str_num)\nprint (\"数字字符有:%d\" %figue_num)\nprint (\"空格字符有:%d\" %spac_num)\nprint (\"中文字符有:%d\" %Chinese_num)\nprint (\"其他字符有:%d\" 
%other_num)\n","repo_name":"ProjectInAction/FirstProject","sub_path":"1520950wumingya/myapp/count.py","file_name":"count.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"11931308726","text":"import torch\n\nfrom ..utils import ext_loader\n\next_module = ext_loader.load_ext('_ext', ['box_iou_rotated'])\n\n\ndef box_iou_rotated(bboxes1: torch.Tensor,\n bboxes2: torch.Tensor,\n mode: str = 'iou',\n aligned: bool = False,\n clockwise: bool = True) -> torch.Tensor:\n \"\"\"Return intersection-over-union (Jaccard index) of boxes.\n\n Both sets of boxes are expected to be in\n (x_center, y_center, width, height, angle) format.\n\n If ``aligned`` is ``False``, then calculate the ious between each bbox\n of bboxes1 and bboxes2, otherwise the ious between each aligned pair of\n bboxes1 and bboxes2.\n\n .. note::\n The operator assumes:\n\n 1) The positive direction along x axis is left -> right.\n\n 2) The positive direction along y axis is top -> down.\n\n 3) The w border is in parallel with x axis when angle = 0.\n\n However, there are 2 opposite definitions of the positive angular\n direction, clockwise (CW) and counter-clockwise (CCW). MMCV supports\n both definitions and uses CW by default.\n\n Please set ``clockwise=False`` if you are using the CCW definition.\n\n The coordinate system when ``clockwise`` is ``True`` (default)\n\n .. code-block:: none\n\n 0-------------------> x (0 rad)\n | A-------------B\n | | |\n | | box h\n | | angle=0 |\n | D------w------C\n v\n y (pi/2 rad)\n\n In such coordination system the rotation matrix is\n\n .. math::\n \\\\begin{pmatrix}\n \\\\cos\\\\alpha & -\\\\sin\\\\alpha \\\\\\\\\n \\\\sin\\\\alpha & \\\\cos\\\\alpha\n \\\\end{pmatrix}\n\n The coordinates of the corner point A can be calculated as:\n\n .. math::\n P_A=\n \\\\begin{pmatrix} x_A \\\\\\\\ y_A\\\\end{pmatrix}\n =\n \\\\begin{pmatrix} x_{center} \\\\\\\\ y_{center}\\\\end{pmatrix} +\n \\\\begin{pmatrix}\\\\cos\\\\alpha & -\\\\sin\\\\alpha \\\\\\\\\n \\\\sin\\\\alpha & \\\\cos\\\\alpha\\\\end{pmatrix}\n \\\\begin{pmatrix} -0.5w \\\\\\\\ -0.5h\\\\end{pmatrix} \\\\\\\\\n =\n \\\\begin{pmatrix} x_{center}-0.5w\\\\cos\\\\alpha+0.5h\\\\sin\\\\alpha\n \\\\\\\\\n y_{center}-0.5w\\\\sin\\\\alpha-0.5h\\\\cos\\\\alpha\\\\end{pmatrix}\n\n\n The coordinate system when ``clockwise`` is ``False``\n\n .. code-block:: none\n\n 0-------------------> x (0 rad)\n | A-------------B\n | | |\n | | box h\n | | angle=0 |\n | D------w------C\n v\n y (-pi/2 rad)\n\n In such coordination system the rotation matrix is\n\n .. math::\n \\\\begin{pmatrix}\n \\\\cos\\\\alpha & \\\\sin\\\\alpha \\\\\\\\\n -\\\\sin\\\\alpha & \\\\cos\\\\alpha\n \\\\end{pmatrix}\n\n The coordinates of the corner point A can be calculated as:\n\n .. math::\n P_A=\n \\\\begin{pmatrix} x_A \\\\\\\\ y_A\\\\end{pmatrix}\n =\n \\\\begin{pmatrix} x_{center} \\\\\\\\ y_{center}\\\\end{pmatrix} +\n \\\\begin{pmatrix}\\\\cos\\\\alpha & \\\\sin\\\\alpha \\\\\\\\\n -\\\\sin\\\\alpha & \\\\cos\\\\alpha\\\\end{pmatrix}\n \\\\begin{pmatrix} -0.5w \\\\\\\\ -0.5h\\\\end{pmatrix} \\\\\\\\\n =\n \\\\begin{pmatrix} x_{center}-0.5w\\\\cos\\\\alpha-0.5h\\\\sin\\\\alpha\n \\\\\\\\\n y_{center}+0.5w\\\\sin\\\\alpha-0.5h\\\\cos\\\\alpha\\\\end{pmatrix}\n\n Args:\n boxes1 (torch.Tensor): rotated bboxes 1. It has shape (N, 5),\n indicating (x, y, w, h, theta) for each row. Note that theta is in\n radian.\n boxes2 (torch.Tensor): rotated bboxes 2. 
It has shape (M, 5),\n indicating (x, y, w, h, theta) for each row. Note that theta is in\n radian.\n mode (str): \"iou\" (intersection over union) or iof (intersection over\n foreground).\n clockwise (bool): flag indicating whether the positive angular\n orientation is clockwise. default True.\n `New in version 1.4.3.`\n\n Returns:\n torch.Tensor: Return the ious betweens boxes. If ``aligned`` is\n ``False``, the shape of ious is (N, M) else (N,).\n \"\"\"\n assert mode in ['iou', 'iof']\n mode_dict = {'iou': 0, 'iof': 1}\n mode_flag = mode_dict[mode]\n rows = bboxes1.size(0)\n cols = bboxes2.size(0)\n if aligned:\n ious = bboxes1.new_zeros(rows)\n else:\n if bboxes1.device.type == 'mlu':\n ious = bboxes1.new_zeros([rows, cols])\n else:\n ious = bboxes1.new_zeros(rows * cols)\n if not clockwise:\n flip_mat = bboxes1.new_ones(bboxes1.shape[-1])\n flip_mat[-1] = -1\n bboxes1 = bboxes1 * flip_mat\n bboxes2 = bboxes2 * flip_mat\n if bboxes1.device.type == 'npu':\n scale_mat = bboxes1.new_ones(bboxes1.shape[-1])\n scale_mat[-1] = 1.0 / 0.01745329252\n bboxes1 = bboxes1 * scale_mat\n bboxes2 = bboxes2 * scale_mat\n bboxes1 = bboxes1.contiguous()\n bboxes2 = bboxes2.contiguous()\n ext_module.box_iou_rotated(\n bboxes1, bboxes2, ious, mode_flag=mode_flag, aligned=aligned)\n if not aligned:\n ious = ious.view(rows, cols)\n return ious\n","repo_name":"open-mmlab/mmcv","sub_path":"mmcv/ops/box_iou_rotated.py","file_name":"box_iou_rotated.py","file_ext":"py","file_size_in_byte":5527,"program_lang":"python","lang":"en","doc_type":"code","stars":5327,"dataset":"github-code","pt":"75"} +{"seq_id":"14606470679","text":"from discord.ext import tasks, commands\nfrom datetime import datetime\nimport discord\nimport json\nfrom db.db import db\nimport random\nfrom db.sql import *\n\nclass Loops(commands.Cog):\n\tdef __init__(self, bot: commands.Bot):\n\t\tself.bot = bot\n\t\tself.status.start()\n\n\tdef cog_unload(self):\n\t\tself.status.cancel()\n\n\t@tasks.loop(seconds=13.0)\n\tasync def status(self):\n\t\t# change status\n\t\tdata = db.read()\n\t\tstatuslist = data[\"status\"]\n\t\tif len(statuslist) != 0:\n\t\t\t# pick random from statuslist\n\t\t\tstatusname = random.choice(statuslist)\n\n\t\t\t# rich presence\n\t\t\tactivity = discord.Activity(\n\t\t\t\tname=statusname,\n\t\t\t\ttype=discord.ActivityType.watching\n\t\t\t)\n\t\telse:\n\t\t\tactivity = None\n\t\tawait self.bot.change_presence(activity=activity)\n\t\n\t@status.before_loop\n\tasync def before_status(self):\n\t\tawait self.bot.wait_until_ready()\n\nasync def setup(bot: commands.Bot):\n\tawait bot.add_cog(Loops(bot))","repo_name":"writeblankspace/pytree","sub_path":"cogs/_loop.py","file_name":"_loop.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"6698959983","text":"import os\nfrom functools import wraps\nfrom inspect import isawaitable\n\nimport nanoid\nfrom itsdangerous import BadSignature, TimedSerializer\nfrom sanic.exceptions import SanicException\nfrom sanic.exceptions import Unauthorized as SanicUnauthorized\n\nfrom oauthpy import defaults\n\nSECRET = os.environ[\"SECRET\"]\nmax_age_seconds = 120\n\n\nclass WebAuthFailed(SanicException):\n status_code = 401\n quiet = True\n\n def __init__(self, message=\"Authentication failed.\", **kwargs):\n super().__init__(message, **kwargs)\n\n\ndef sessionid_factory(salt=\"cookies\"):\n s = TimedSerializer(SECRET, salt=salt)\n session = s.dumps(nanoid.generate(size=6))\n return 
session\n\n\ndef validate_session(cookie, max_age_seconds=30, salt=\"cookies\"):\n s = TimedSerializer(SECRET, salt=salt)\n return s.loads(cookie, max_age=max_age_seconds)\n\n\ndef cookie_protected(max_age=max_age_seconds):\n \"\"\"verify a token from a request.\n Optionally if a list of scopes is given then will check that scopes\n with the scopes provided by the token.\n :param scopes: a list of scopes\n :param required_all: if true it will check that the all the names provided\n match with the required.\n \"\"\"\n\n def decorator(f):\n @wraps(f)\n async def decorated_function(request, *args, **kwargs):\n cookie = request.cookies.get(defaults.COOKIE_SESSION_KEY)\n if not cookie:\n raise WebAuthFailed()\n try:\n sess = validate_session(cookie,\n max_age_seconds=max_age,\n salt=\"cookies\")\n request.ctx.sessionid = sess\n response = f(request, *args, **kwargs)\n if isawaitable(response):\n response = await response\n\n except BadSignature as e:\n raise WebAuthFailed() from e\n return response\n\n return decorated_function\n\n return decorator\n","repo_name":"nuxion/oauthpy","sub_path":"oauthpy/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1971,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"39152553823","text":"#!/usr/bin/env python3\n\nimport argparse\nfrom os.path import isfile\nimport sys\n\nimport gpxpy\nimport gpxpy.gpx\n\ndef parse_arguments(args):\n parser = argparse.ArgumentParser(description='Reverse tracks, segments and/or points on a gpx file.')\n \n parser.add_argument(\n \"-i\", \"--input\", nargs='+', required=True, \n help=\"Input GPX file\"\n )\n parser.add_argument(\n \"-o\", \"--output\", nargs='+', required=True, \n help=\"Output GPX file (reversed)\"\n )\n parser.add_argument(\"-t\", \"--tracks\", help=\"Reverse tracks\", action=\"store_true\")\n parser.add_argument(\"-s\", \"--segments\", help=\"Reverse segments\", action=\"store_true\")\n parser.add_argument(\"-p\", \"--points\", help=\"Reverse points\", action=\"store_true\")\n \n return parser.parse_args()\n\ndef validate(args):\n if not args.tracks and not args.segments and not args.points:\n print(\"You must select at least one entity to revert (--tracks, --segments and/or --points)\")\n exit(0)\n\n if len(args.input) != 1:\n print(\"Only one input file its required.\")\n exit(0)\n\n if len(args.output) != 1:\n print(\"Only one output file its required.\")\n exit(0)\n\n if not isfile(args.input[0]):\n print(\"Input file %s doesn't exists.\" % args.input[0])\n exit(0)\n\n if isfile(args.output[0]):\n print(\"Output file %s already exists.\" % args.output[0])\n exit(0)\n\ndef main(args):\n validate(args)\n cur_gpx = None\n\n with open(args.input[0], 'r') as input_file:\n cur_gpx = gpxpy.parse(input_file)\n \n new_gpx = gpxpy.gpx.GPX()\n\n if args.tracks:\n cur_gpx.tracks.reverse()\n\n for cur_track in cur_gpx.tracks:\n new_track = gpxpy.gpx.GPXTrack()\n new_track.name = cur_track.name\n \n if args.segments:\n cur_track.segments.reverse()\n\n for cur_segment in cur_track.segments:\n new_segment = gpxpy.gpx.GPXTrackSegment()\n\n if args.points:\n cur_segment.points.reverse()\n\n new_segment.points = cur_segment.points\n new_track.segments.append(new_segment)\n \n new_gpx.tracks.append(new_track)\n\n with open(args.output[0], 'w') as output_file:\n output_file.write(new_gpx.to_xml())\n\n print('All done!!!')\n\nif __name__ == '__main__':\n arguments = parse_arguments(sys.argv[1:])\n 
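    # example invocation, using the flags defined in parse_arguments() above
    # (track.gpx / reversed.gpx are placeholder file names):
    #   python gpxreverse.py -i track.gpx -o reversed.gpx --tracks --segments --points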
main(arguments)\n","repo_name":"moz667/gpxreverse","sub_path":"gpxreverse.py","file_name":"gpxreverse.py","file_ext":"py","file_size_in_byte":2390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"21113473005","text":"stock_purchase_price = 40\nnum_shares = 2000\ncommission_fee_rate = .03\nstock_selling_price = 42.75\n\namount_paid = stock_purchase_price * num_shares\ncommission_purchase_fee = amount_paid * commission_fee_rate\namount_sold = num_shares * stock_selling_price\ncommission_sold_fee = amount_sold * commission_fee_rate\namount_profit = amount_sold - amount_paid - commission_purchase_fee - commission_sold_fee\n\nprint('Initial stock cost:','$',format(amount_paid))\nprint('Commission paid for buying:','$',format(commission_purchase_fee))\nprint('Stock sold return:','$',format(amount_sold))\nprint('Commission paid for selling:','$',format(commission_sold_fee))\nprint('Profit:','$',format(amount_profit))\n","repo_name":"scottsheble/python-assignment-1","sub_path":"stock.py","file_name":"stock.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"31174519546","text":"# \"paraparaparadise\"と\"paragraph\"に含まれる文字bi-gramの集合を,\n# それぞれ, XとYとして求め,XとYの和集合,積集合,差集合を求めよ.さらに,\n# 'se'というbi-gramがXおよびYに含まれるかどうかを調べよ.\n\ndef makeNgram(words, n):\n return [words[i:i+n] for i in range(len(words)-n+1)]\n\nstr = \"paraparaparadise\"\nstr2 = \"paragraph\"\n\nX = set(makeNgram(str, 2))\nY = set(makeNgram(str2, 2))\n\nwa = X | Y\nseki = X & Y\nsa = X - Y\n\nprint(X)\nprint(Y)\n\nprint(wa)\nprint(seki)\nprint(sa)\n","repo_name":"ryu022304/NLP_100knocks","sub_path":"src/chap.01/06.py","file_name":"06.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"ja","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"36417509697","text":"import engpy.tools.exprs as exprs\nfrom engpy.misc.abilities import numable\nfrom engpy.misc.internals import unnest_with_powers\nfrom engpy.misc.miscs import _isinstance\nfrom engpy.misc.gen import getter\nfrom engpy.misc.scan.scan_expr import scan_MD\nfrom engpy.misc.assist import get_den, get_num, number_generator\nfrom engpy.errors.exceptions import *\nfrom copy import copy, deepcopy\n\nfrom engpy.misc.vars import variable_generator\n\n\nclass Fraction:\n def __init__(self, num, den = ''):\n self.name = 'Fraction'\n if isinstance(num,list):\n self.num, self.den = num\n return None\n elif isinstance(num,dict):\n self.expr = num\n return\n if not den:\n den = exprs.Expr('1')\n if not getter(num,'name') =='Expr' or not getter(den,'name') =='Expr':\n raise UnacceptableToken(f'Both {num} and {den} must be an Expr object')\n self.expr = {num: den}\n\n def __str__(self):\n if not self.den:\n return 'inf'\n elif not self.num:\n self.den = exprs.Expr('1')\n return '0'\n num = f'({self.num})' if len(self.num) > 1 else f'{self.num}'\n den = f'({self.den})' if len(self.den) > 1 else f'{self.den}'\n return f'{num}/{den}'\n\n @property\n def num(self):\n return list(self.expr)[0]\n\n @property\n def den(self):\n return self.expr[self.num]\n\n @num.setter\n def num(self, value):\n self.expr = {value: self.den}\n\n @den.setter\n def den(self, value):\n self.expr = {self.num: value}\n \n def __mul__(self, other):\n \n if not getter(other, 'name') == 'Fraction':\n if '/' in format(other):\n num2 = get_num(other)\n den2 = get_den(other)\n else:\n if numable(other):\n return Fraction(self.num 
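# numeric factor: scale the numerator only; the denominator stays unchanged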
*other, self.den)\n\n num2 = exprs.Expr(other)\n if num2 == 1:\n return copy(self)\n den2 = 1;\n else: den2 = other.den; num2 = other.num\n self_, pas = self, False\n den1, num1 = self_.den, self_.num\n if den2 != 1:\n if num1.isdivisible(den2):\n _num = num1 / den2\n if not '/' in format(_num):\n pas = True\n num1 = _num; den2 = 1\n if not pas and ((len(num1) == 1 and den2.isfactor(num1)) or (len(num1) > 1 and den2.isdivisible(num1))):\n _num = den2 / num1\n if not '/' in format(_num):\n num1 = 1; den2 = _num\n pas = False\n \n if num2.isdivisible(den1):\n _num = num2 / den1\n if not '/' in format(_num):\n pas = True\n num2 = _num; den1 = 1\n if not pas and ((len(num2) == 1 and den1.isfactor(num2)) or (len(num2) > 1 and den1.isdivisible(num2))):\n _num = den1 / num2\n if not '/' in format(_num):\n pas = True\n num2 = 1; den1 = _num\n num = num1 * num2; den = den1 * den2\n return num / den if numable(den) or len(den) == 1 else Fraction(exprs.Expr(num), den)\n if num2.isdivisible(den1):\n _num = num2 / den1\n if '/' not in format(_num):\n return num1 * _num\n elif den1.isdivisible(num2):\n _den = den1 / num2\n _den_ = format(_den)\n if '/' not in _den_ and not '^-' in _den_:\n den1 = _den; num2 = 1\n\n num = num1 * num2; den = den1 * den2\n return num / den if len(den) == 1 else Fraction(num, den)\n \n return copy(self)\n\n \n def partial(self):\n new_den = self.den.factorize(level=1)\n print(new_den.expr, format(new_den), '................')\n new_den = unnest_with_powers(new_den)\n print(format(new_den), 'trthwt', new_den.expr)\n try:\n\n coeff, products = new_den.__extract__\n var_list, var_gen, working_vars = self.vars, variable_generator(self.vars), []\n partials, variable = self.den.new, self.vars[0]\n print(products,'ggbbrg')\n for var, index in products.items():\n print(format(partials),'hdth')\n if index == 1:\n sub_expr = self.den.new\n for i in range(var.deg):\n sub_expr_var = f'{variable}^{i}' if i else ''\n sub_expr += next(var_gen) + sub_expr_var\n partials += sub_expr / self.den.form()({1: [{var: index}]})\n continue\n\n for i in range(index):\n product = self.den.form()({1: [{var: i + 1}]})\n print(product.expr, '2122212121')\n v = self.den.form()(next(var_gen)) / product\n print(v.expr, format(v))\n partials += v\n print(partials.expr,'rr3rgvr3',format(partials))\n\n except OperationNotAllowed:\n return self\n from engpy.tools.exprs import Eqns, Eqn\n equations, number_gen = Eqns(), number_generator()\n for i in range(len(products)):\n while True:\n try:\n value = next(number_gen)\n print('value', value)\n equations.add(Eqn((partials.cal(**{variable: value}) + self.cal({variable: value})).simp(), 0))\n break\n except (ZeroDivisionError, InvalidOperation) as e:\n pass\n print(equations)\n return self.cal(equations.solve())\n\n @property\n def vars(self):\n return self.num.vars + self.den.vars\n\n @property\n def variables(self):\n return self.num.variables + self.den.variables\n \n \n def __truediv__(self,other):\n if not isinstance(other,Fraction):\n other = Fraction(other) \n return self * ~other\n\n def __len__(self):\n return 2\n \n def cal(self, values, desolve='', desolved=False):\n return self.num.cal(values, desolve=desolve, desolved=desolved)/self.den.cal(values, desolve=desolve, desolved=desolved)\n \n def __add__(self, other):\n \n if not isinstance(other,Fraction):\n other = Fraction(other)\n if self.num == other.num == 0:\n self.den = exprs.Expr('1')\n return self\n \n elif self.num == 0:\n return other\n elif other.num == 0:\n return self\n self_ = copy(self); 
st = 0\n if '/' not in format(self.den / other.den) and (not '^-' in format(self) and not '^-' in format(self.den / other.den)):\n den = self.den\n elif not '/' in format(other.den / self.den) and (not '^-' in format(other) and not '^-' in format(other.den / self.den)):\n den = other.den\n else:\n st = 1; den = self.den * other.den\n if st:\n self_.num = self.num * other.den + other.num * self.den\n \n else:\n self_.num = self.num * (den/self.den) + other.num * (den/other.den)\n self_.den = den\n return self_\n \n def __sub__(self,other):\n if not isinstance(other,Fraction):\n other = Fraction(other)\n if self.num == other.num == 0:\n self.den = exprs.Expr('1')\n return self\n \n elif self.num == 0:\n return other\n elif other.num == 0:\n return self\n self_ = copy(self); st = 0\n if '/' not in format(self.den / other.den) and (not '^-' in format(self) and not '^-' in format(self.den / other.den)):\n den = self.den\n elif not '/' in format(other.den / self.den) and (not '^-' in format(other) and not '^-' in format(other.den / self.den)):\n den = other.den\n else:\n st = 1; den = self.den * other.den\n if st:\n self_.num = self.num * other.den + other.num * self.den\n \n else:\n self_.num = self.num * (den/self.den) - other.num * (den/other.den)\n self_.den = den\n return self_\n \n def __invert__(self):\n self_ = copy(self)\n self_.num = self.den\n self_.den = self.num\n return self_\n\n def __hash__(self):\n return hash((self.num.__hash__(),hash(self.den)))\n \n def lin_diff(self,var = 'x'):\n \n return ((self.den * self.num.lin_diff(var) - self.num * self.den.lin_diff(var))/ self.den**2).simp()\n\n def simp(self):\n \n return Fraction(self.num.simp(),self.den.simp())\n\n @property\n def struct(self):\n return [self]\n \n def __pow__(self,index):\n exchange = 0\n if isinstance(index, (int, float)) and index < 0:\n exchange = 1\n index = abs\n if index == 1:\n return self\n num = self.num ** index; den = self.den ** index\n if not num:\n return exprs.Expr('0')\n if not den:\n raise ZeroDivisionError\n \n return Fraction(exprs.Expr(str(num)) if isinstance(num,(int,\n float)) else num ,\n exprs.Expr(str(den)) if isinstance(den,(float,\n int)) else den) if exchange else Fraction(exprs.Expr(str(num)) if isinstance(num,(int,\n float)) else num ,\n exprs.Expr(str(den)) if isinstance(den,(float,\n int)) else den)\n\n def __bool__(self):\n if not self.num or not self.den:\n return False\n return True\n \n @classmethod\n def form(cls):\n return cls\n \n @property\n def recreate(self):\n return self.form()\n\n def __lt__(self, other):\n return format(self) < format(other)\n\n def __gt__(self, other):\n return format(self) > format(other)\n\n def __copy__(self):\n return Fraction(self.num.duplicate(), self.den.duplicate())\n\n def __repr__(self):\n return self.__str__()\n\n def __rmul__(self, other):\n return self * exprs.Expr(other)\n\n def __radd__(self, other):\n other = exprs.Expr(str(other))\n return self + other\n\n def __rsub__(self, other):\n other = exprs.Expr(str(other))\n return self - other\n\n def __round__(self, fix):\n return Fraction(round(self.num, fix), round(self.den, fix))\n\n","repo_name":"Blueprime-C/engpy-beta","sub_path":"engpy/tools/_fractions.py","file_name":"_fractions.py","file_ext":"py","file_size_in_byte":10773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"72241658161","text":"from typing import (\n List,\n Dict,\n Callable,\n Type,\n Any,\n Tuple,\n Union,\n Container,\n Optional,\n)\nimport typing\nimport 
inspect\nfrom dataclasses import dataclass\nimport argparse\nfrom argparse import ArgumentParser, Namespace\nimport sys\nfrom .exceptions import UserError\nfrom .registry import Context, DeviceRegistry, Device\n\n\n@dataclass\nclass ParameterInfo:\n parameter: inspect.Parameter\n help: Optional[str] = None\n\n @property\n def name(self):\n return self.parameter.name\n\n\n# - param has no default -> positional, default ignored\n# - param has default -> optional, default used\n\n\nclass ArgumentBase:\n def __init__(self, param_info: ParameterInfo):\n self.param_info = param_info\n self.param = param_info.parameter\n\n def get_name(self) -> str:\n if self.param.default is inspect.Parameter.empty:\n return self.param.name\n else:\n return \"--{name}\".format(name=self.param.name.replace(\"_\", \"-\"))\n\n def get_type(self) -> Callable:\n if is_optional(self.param.annotation):\n return typing.get_args(self.param.annotation)[0]\n else:\n return self.param.annotation\n\n def add_to_parser(self, parser: argparse._ActionsContainer, **kwargs):\n parser.add_argument(\n self.get_name(), type=self.get_type(), default=self.param.default, **kwargs\n )\n\n def get_argument(self, parsed_args) -> Any:\n return getattr(parsed_args, self.param.name)\n\n\nclass BoolArg(ArgumentBase):\n def add_to_parser(self, parser: argparse._ActionsContainer, **kwargs):\n assert self.param.default is not inspect.Parameter.empty\n assert isinstance(self.param.default, bool)\n\n name = self.param.name.replace(\"_\", \"-\")\n if self.param.default:\n name = f\"--no-{name}\"\n action = \"store_false\"\n else:\n name = f\"--{name}\"\n action = \"store_true\"\n\n parser.add_argument(\n name,\n action=action,\n default=self.param.default,\n dest=self.param.name,\n **kwargs,\n )\n\n\ndef is_optional(t: Type[Any]) -> bool:\n return (\n typing.get_origin(t) == Union\n and len(typing.get_args(t)) == 2\n and issubclass(typing.get_args(t)[1], type(None))\n )\n\n\ndef is_optional_t(t: Type[Any], types: Container[Type[Any]]) -> bool:\n return is_optional(t) and typing.get_args(t)[0] in types\n\n\ndef handle_arg_base(param: ParameterInfo) -> ArgumentBase:\n base_types = (int, float, str)\n if param.parameter.annotation in base_types:\n return ArgumentBase(param)\n elif is_optional_t(param.parameter.annotation, base_types):\n return ArgumentBase(param)\n elif param.parameter.annotation is bool:\n return BoolArg(param)\n else:\n assert False\n\n\nKwargs = Dict[str, Any]\n\n\ndef add_args_to_parser(\n parser: argparse._ActionsContainer, args: List[ParameterInfo]\n) -> Callable[[Namespace], Kwargs]:\n arg_handlers = {arg.name: handle_arg_base(arg) for arg in args}\n\n for handler in arg_handlers.values():\n handler.add_to_parser(parser)\n\n def get_args(parsed_args: Namespace) -> Dict[str, Any]:\n return {\n name: handler.get_argument(parsed_args)\n for name, handler in arg_handlers.items()\n }\n\n return get_args\n\n\n@dataclass\nclass Step:\n name: str\n func: Callable\n context_args: List[inspect.Parameter]\n option_args: List[ParameterInfo]\n\n def __init__(self, name: str, func: Callable):\n self.name = name\n self.func = func # type: ignore\n\n self.context_args = []\n self.option_args = []\n\n parameters = inspect.signature(func).parameters\n\n for param in parameters.values():\n if type(param.annotation) is type and issubclass(param.annotation, Context):\n self.context_args.append(param)\n else:\n self.option_args.append(ParameterInfo(param))\n\n def make_parser(self) -> Tuple[ArgumentParser, Callable[[Namespace], Kwargs]]:\n p = 
ArgumentParser(\n prog=self.name, description=getattr(self.func, \"__help__\", None)\n )\n\n get_args = add_args_to_parser(p, self.option_args)\n\n return p, get_args\n\n\n@dataclass\nclass CLIDevice:\n device: Device\n steps: Dict[str, Step]\n\n\nclass ContextInfo:\n def __init__(self, name: str, type: Type[Context]):\n self.name = name\n self.type = type\n\n self.option_args = [\n ParameterInfo(p) for p in inspect.signature(type).parameters.values()\n ]\n\n\nclass Runner:\n def __init__(self, registry: DeviceRegistry):\n self.devices = {\n device.name: CLIDevice(\n device=device,\n steps={\n step_fn.__name__: Step(step_fn.__name__, step_fn)\n for step_fn in device.steps\n },\n )\n for device in registry.devices\n }\n\n context_types = set(\n arg.annotation\n for device in self.devices.values()\n for step in device.steps.values()\n for arg in step.context_args\n )\n\n self.context_types = [\n ContextInfo(context_type.__name__, context_type)\n for context_type in context_types\n ]\n\n def list_steps(self, device: CLIDevice):\n print(f\"available steps for {device.device.name}:\")\n for step in device.steps.values():\n print(f\" {step.name}\")\n sys.exit(0)\n\n def list_devices(self):\n for device in self.devices.values():\n print(device.device.architecture, device.device.name)\n sys.exit(0)\n\n def parse_step_args(self, device: CLIDevice, step_args: List[str]):\n if step_args == []:\n return self.list_steps(device)\n\n steps_and_args = []\n\n while step_args:\n step_name = step_args.pop(0)\n\n if step_name == \"list\":\n return self.list_steps(device)\n\n if step_name not in device.steps:\n raise UserError(f\"expected step name, got {step_name}\")\n step = device.steps[step_name]\n\n step_parser, get_args = step.make_parser()\n step_parser.add_argument(\n \"commands\", metavar=\"command [arg ...] ...\", nargs=\"...\"\n )\n\n parsed_step_args = step_parser.parse_args(step_args)\n step_kwargs = get_args(parsed_step_args)\n steps_and_args.append((step, step_kwargs))\n\n step_args = parsed_step_args.commands\n\n return steps_and_args\n\n def add_context_args(self, parser: ArgumentParser):\n ctx_with_args = []\n for ctx_type in self.context_types:\n group = parser.add_argument_group(ctx_type.name)\n get_args = add_args_to_parser(group, ctx_type.option_args)\n ctx_with_args.append((ctx_type, get_args))\n return ctx_with_args\n\n def get_device(self, device_name):\n if device_name in self.devices:\n return self.devices[device_name]\n elif device_name == \"list\":\n self.list_devices()\n else:\n raise UserError(\n f\"device {device_name} not known; use 'list' to show known devices\"\n )\n\n def parse_and_run(self, args: List[str]):\n main_parser = ArgumentParser()\n\n context_args = self.add_context_args(main_parser)\n\n main_parser.add_argument(\n \"device\", help=\"device name; use 'list' to show known devices\"\n )\n main_parser.add_argument(\n \"commands\", metavar=\"command [arg ...] 
...\", nargs=\"...\"\n )\n\n main_args = main_parser.parse_args(args)\n\n context_args_parsed = {\n ctx.type: get_args(main_args) for ctx, get_args in context_args\n }\n\n device = self.get_device(main_args.device)\n\n steps_and_args = self.parse_step_args(device, main_args.commands)\n\n self.run(steps_and_args, context_args_parsed)\n\n def run(self, steps_and_args, context_args_parsed):\n required_contexts = set(\n arg.annotation\n for step, _kwargs in steps_and_args\n for arg in step.context_args\n )\n\n contexts = {ctx: ctx(**context_args_parsed[ctx]) for ctx in required_contexts}\n\n for step, kwargs in steps_and_args:\n for ctx_arg in step.context_args:\n kwargs[ctx_arg.name] = contexts[ctx_arg.annotation]\n\n for ctx in contexts.values():\n ctx.__enter__()\n\n for step, kwargs in steps_and_args:\n step.func(**kwargs)\n\n for ctx in contexts.values():\n ctx.__exit__()\n\n\ndef main():\n from .devices import registry\n import sys\n import logging\n\n logging.basicConfig(level=logging.DEBUG)\n\n r = Runner(registry)\n try:\n r.parse_and_run(sys.argv[1:])\n except UserError as e:\n print(str(e), file=sys.stderr)\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"tomjnixon/autoflash","sub_path":"autoflash/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":9043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"71865182641","text":"import json\n\n\nclass ExtractedComponents(object):\n def __init__(self, comps_names, comps_positions):\n \"\"\"\n\n :param comps_names: list\n :param comps_positions: list\n \"\"\"\n self.comps_names = comps_names\n self.comps_positions = comps_positions\n\n\nclass JavaCallGraphMetricExtractor(object):\n\n def __init__(self, java_call_graph_file, test_2_components, experiment_instance):\n \"\"\"\n\n :param java_call_graph_file: generated file using the caller graph tool\n :param test_2_components: dictionary\n :param experiment_instance: ExperimentInstance\n \"\"\"\n self._call_graph_file = java_call_graph_file\n self.test_2_interest_methods = test_2_components # dict>\n self.experiment_instance = experiment_instance\n\n def get_connected_methods(self):\n \"\"\"\n :return: dictionary >\n \"\"\"\n # initialize result\n result = {}\n for test in self.test_2_interest_methods.keys():\n result[test] = []\n\n with open(self._call_graph_file, 'r+') as f:\n while True:\n line = f.readline()\n if not line:\n break\n # M:class1:(arg_types) (typeofcall)class2:(arg_types)\n if line.startswith(\"M:\"):\n pair = line.split()\n f1_struct = pair[0].split(\":\")\n func1 = self.shorten_parameters(f1_struct[2])\n f1 = f1_struct[1] + \".\" + func1\n\n f2_struct = pair[1].split(\":\")\n class2 = f2_struct[0].split(\")\")[1]\n func2 = self.shorten_parameters(f2_struct[1])\n f2 = class2 + \".\" + func2\n\n for test, method_2_params in self.test_2_interest_methods.items():\n f1_lower = f1.lower()\n f2_lower = f2.lower()\n if f1_lower in method_2_params:\n if f2_lower in method_2_params:\n ec = self.get_extracted_components([f1_lower, f2_lower])\n result[test].append(ec)\n\n return result\n\n def shorten_parameters(self, func):\n import re\n split1 = re.split('\\(|\\)', func)\n method_name = split1[0]\n method_params = split1[1]\n if not method_params:\n return method_name + '()'\n result = method_name + '('\n full_named_parameters = method_params.split(',')\n counter = 1\n total_params = len(full_named_parameters)\n for full_param in full_named_parameters:\n all_param_names = 
full_param.split('.')\n param_name = all_param_names[len(all_param_names) - 1].lower()\n result += param_name\n if counter < total_params:\n result += ';'\n counter += 1\n\n return result + ')'\n\n def get_extracted_components(self, comps_list):\n resulting_positions = []\n for comp in comps_list:\n for position, comp_name in self.experiment_instance.components.items():\n if comp == str(comp_name):\n resulting_positions.append(position)\n return ExtractedComponents(comps_list, resulting_positions)\n\n\nclass JpeekMetricExtractor(object):\n\n def __init__(self, jpeek_res_file, test_2_components, experiment_instance):\n \"\"\"\n\n :param java_call_graph_file: generated file using the caller graph tool\n :param test_2_components: dictionary\n :param experiment_instance: ExperimentInstance\n :param jpeek_data: json dictionary where keys: class nams, value: dictionary of methods in class and LCOM distance\n \"\"\"\n self._call_graph_file = jpeek_res_file\n self.test_2_interest_methods = test_2_components # dict>\n self.experiment_instance = experiment_instance\n with open(jpeek_res_file) as json_file:\n self.jpeek_data = json.load(json_file)\n print('jpeekMetricExtractor initilized')\n\n def get_connected_methods(self):\n \"\"\"\n :return: dictionary >\n \"\"\"\n # initialize result\n result = {}\n\n for test, method_2_params in self.test_2_interest_methods.items():\n result[test] = []\n for m in method_2_params:\n m = m.lower()\n C = self.get_class_of_method(m)\n if C:\n class_methods = set(self.jpeek_data[C][\"methods\"])\n test_methods = set(method_2_params)\n mutual_methods = list(class_methods & test_methods - set(m))\n if self.jpeek_data[C][\"distance\"]>0.3:\n for mutual_method in mutual_methods:\n ec = self.get_extracted_components([m, mutual_method])\n result[test].append((ec,self.jpeek_data[C][\"distance\"]))\n # result[test].append(ec)\n return result\n\n\n def get_class_of_method(self, method_name):\n for class_name, class_data in self.jpeek_data.items():\n if method_name in class_data[\"methods\"]:\n return class_name\n return ''\n\n def get_extracted_components(self, comps_list):\n resulting_positions = []\n for comp in comps_list:\n for position, comp_name in self.experiment_instance.components.items():\n if comp == str(comp_name):\n resulting_positions.append(position)\n return ExtractedComponents(comps_list, resulting_positions)","repo_name":"cshablin/Metric_SFL","sub_path":"sfl/MetricExtractor.py","file_name":"MetricExtractor.py","file_ext":"py","file_size_in_byte":5775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"71251766643","text":"import six\nfrom logging import getLogger\nimport traceback\nlogger = getLogger(__name__)\n\nfrom Products.PageTemplates.unicodeconflictresolver \\\n import PreferredCharsetResolver\n\ndef PreferredCharsetResolver_resolve(context, text, expression):\n # Since we use UTF-8 only in PageTemplate, it is enough here. 
It is\n # faster than the original implementation, and it is compatible with\n # requests that do not contain Accept-Charset header.\n try:\n result = six.text_type(text, 'utf-8')\n except UnicodeDecodeError as e:\n tb_info = ''.join(traceback.format_stack())\n logger.warn('UnicodeDecodeError(%s: %s): %s\\ntext: %r\\nat:\\n%s' %\n (e.start, e.end, e.reason, repr(text), tb_info))\n result = six.text_type(text, 'utf-8', 'ignore')\n return result\nPreferredCharsetResolver.resolve = PreferredCharsetResolver_resolve\n","repo_name":"Nexedi/erp5","sub_path":"product/ERP5Type/patches/unicodeconflictresolver.py","file_name":"unicodeconflictresolver.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":171,"dataset":"github-code","pt":"75"} +{"seq_id":"27990081328","text":"\"\"\" This file contatins functions for writing output files\"\"\"\nfrom tree_tools import *\nfrom asciitree import LeftAligned\nimport numpy as np\nimport pickle\n\ndef table_expression(remheight, remwidth, border, orient, tree, node, vafs, colors):\n \"\"\" generates the table html node expression for the sample tiling output\"\"\"\n bgcolors = ['white']+list(colors[1:])\n res =''\n if len(get_childs(tree,node))==0:\n res += r''\n res += '\\n'\n return res\n else:\n res += r'\"\"\"\n res += '\\n'\n childs = get_childs(tree,node)\n proportion = vafs[node]- sum([vafs[child] for child in childs])\n proportions = np.array([0.0,proportion]+[vafs[child] for child in childs])\n proportions = proportions/sum(proportions)\n cumprops = np.cumsum(proportions)\n if orient == 'h':\n res += r''\n effwidth = remwidth - border*(len(childs)+1)\n effheight = remheight - 2*border\n remwidths = np.diff(np.around(effwidth*cumprops,0)).astype(int)\n if remwidths[0]> 2*border:\n res +=r'\"\"\"\n res +='\\n'\n else:\n remwidths[1] += border\n# nodes_past =1\n\n for ind in range(len(childs)):\n# print nodes_past\n res += table_expression(effheight, remwidths[ind+1], border, 'v', tree, childs[ind], vafs, colors)\n# nodes_past = nodes_past+1+len(get_desc(tree, childs[ind]))\n res +='\\n'+r''\n if orient =='v':\n effheight = remheight - border*(len(childs)+1)\n effwidth = remwidth - 2*border\n remheights = np.diff(np.around(effheight*cumprops,0)).astype(int)\n if remheights[0]>2*border:\n res += r''\n res +=r'\"\"\"\n res +='\\n'\n else:\n remheights[1] += border\n# nodes_past=1\n for ind in range(len(childs)):\n res += r''\n res += table_expression(remheights[ind+1], effwidth, border, 'h', tree, childs[ind], vafs,colors)\n# nodes_past = nodes_past+1+len(get_desc(tree, childs[ind]))\n res +=r''+'\\n'\n res +='\\n'\n res += '
'\n return res\n \n\ndef put_sample_in_node(tree, vafs_tr, sample_names, sample_colors, cluster_colors, height=100, width=100, border=2, orient='v'):\n res=''\n for ind in range(len(sample_names)):\n otree =[-1]+list((np.array(tree)+1).astype(int))\n colors = tuple([\"black\"]+list(cluster_colors))\n vafs = [1.0]+list(vafs_tr[ind])\n existing_clones = [0]+[x for x in range(1,len(vafs)) if (vafs[x]>0.04)]\n absent_clones = [x for x in range(len(vafs)) if x not in existing_clones]\n for x in absent_clones:\n otree[x] = -5\n \n res += sample_names[ind]+' [\\n shape=plaintext\\n xlabel =\"'+sample_names[ind]+ '\"\\n label=<\\n' + table_expression(height, width, border, 'v', otree, 0, vafs,colors)[4:-5]+ '\\n >];'\n return res\n \n\n\ndef write_dot_files(dat,sample_colors, cluster_colors,treeoutfile,samplesoutfile):\n res = put_sample_in_node(dat['tree'],dat['vaf'].transpose(),dat['sample_names'],sample_colors,cluster_colors,height=150,width=150)\n res = 'digraph {\\n'+res+'}\\n'\n with open(samplesoutfile,'w') as fi:\n fi.write(res)\n res=''\n clone_letters = [chr(x) for x in range(65,len(dat['tree'])+65)]\n for x in range(len(dat['tree'])):\n num_muts = len([y for y in dat['assign'] if y==x])\n res += chart_plot(clone_letters[x],cluster_colors[x],dat['vaf'].transpose()[:,x],dat['sample_names'], sample_colors, num_muts)\n res +='\\n'\n for x in range(len(dat['tree'])):\n if not x==dat['root']:\n res += 'Subclone'+clone_letters[int(dat['tree'][x])]+' -> Subclone'+clone_letters[x]+'\\n'\n res = 'digraph {\\n'+res+'}\\n'\n with open(treeoutfile,'w') as fi:\n fi.write(res)\n\n\ndef chart_plot(subclone_letter,color,percentages,sample_names, sample_colors, num_muts, bar_width=30,border=3):\n res = ''\n res += 'Subclone'+subclone_letter+' [\\n shape=plaintext\\n label=<\\n'\n res +='\\n'\n for ind in range(len(sample_names)):\n percent = int(100*percentages[ind])\n if percent <100:\n res +=''\n res +=''\n for x in percentages:\n res += ''\n res +=''\n for x in sample_names:\n res += ''\n res +='
'+subclone_letter+'-'+str(num_muts)+' '\n if percent > 2:\n res +=' '\n res +='
'+str(int(np.around(100*x,0)))+'
'+x+'
\\n>]'\n\n return res\n\ndef get_asciitree_dict(tree,labels,node):\n return {labels[x]:get_asciitree_dict(tree,labels,x) for x in get_childs(tree,node)}\n\ndef get_ascii_tree(tree,labels):\n root = list(tree).index(-1)\n tree_dict = {labels[root]:get_asciitree_dict(tree,labels,root)}\n tr = LeftAligned()\n return tr(tree_dict)\n\ndef write_text_output(data,destfile):\n with open(destfile,'w') as fi:\n for i,tree in enumerate(data):\n fi.write('Solution Number '+str(i+1)+'\\n')\n fi.write('tree = ' + str(tree['tree']) + '\\n')\n depth = get_tree_depth(tree['tree'])\n print_depth = max(depth) - np.array(depth) + 1\n labels = [chr(x+65)+' '*4*print_depth[x] + \\\n str(np.around(tree['vaf'][x,:],2)) for x in range(len(tree['tree']))]\n fi.write(get_ascii_tree(tree['tree'],labels)+'\\n')\n fi.write('logscore = '+ str(tree['totalscore']) + '\\n')\n fi.write('ML Subclone Fractions = \\n' + str(tree['clone_proportions']) + '\\n' )\n fi.write ('Mutations Subclone Membership = ' + str([chr(x+65) for x in tree['assign']]) + '\\n')\n fi.write('----------------------------------------------------------------')\n return\n","repo_name":"HoseinT/BAMSE","sub_path":"tree_io.py","file_name":"tree_io.py","file_ext":"py","file_size_in_byte":7308,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"36052242667","text":"from UM.Mesh.MeshReader import MeshReader #The class we're extending/implementing.\nfrom UM.PluginRegistry import PluginRegistry\nfrom UM.Message import Message\nfrom UM.i18n import i18nCatalog\nfrom UM.MimeTypeDatabase import MimeTypeDatabase, MimeType\n\ncatalog = i18nCatalog(\"cura\")\n\nimport zipfile\nimport tempfile\nimport json\nimport os\n\nTMP_FOLDER = tempfile.gettempdir()\n## zaxe file reader\n#\nclass ZaxeCodeReader(MeshReader):\n\n MIN_VERSION = [1, 0, 2]\n\n def __init__(self) -> None:\n super().__init__()\n MimeTypeDatabase.addMimeType(\n MimeType(\n name = \"application/zaxe\",\n comment=\"Zaxe Code\",\n suffixes=[\"zaxe\"]\n )\n )\n self._supported_extensions = [\".zaxe\"]\n\n def _read(self, fileName):\n zipFile = zipfile.ZipFile(fileName, \"r\")\n\n zipFile.extract(\"info.json\", TMP_FOLDER)\n\n info = json.load(open(os.path.join(TMP_FOLDER, \"info.json\"), \"r\"))\n version = [int(ver) for ver in info[\"version\"].rsplit(\".\")]\n\n deviceModel = PluginRegistry.getInstance().getPluginObject(\"GCodeReader\").getCurrentDeviceModel()\n\n # don't import older versions (hey: no snapshot)\n if version < self.MIN_VERSION:\n infoMessage = Message(catalog.i18nc(\n \"@info:zaxecode\",\n \"This Zaxe file belongs to an older version. Please slice your original mesh file again.\"),\n lifetime=10,\n title = catalog.i18nc(\"@info:title\", \"Zaxe Code Details\"))\n infoMessage.show()\n return None\n elif info[\"model\"].upper() != deviceModel.replace(\"+\", \"PLUS\"):\n infoMessage = Message(catalog.i18nc(\n \"@info:zaxecode\",\n \"This Zaxe file is sliced for Zaxe {0}. 
Please switch to Zaxe {0} before importing this file again.\", info[\"model\"].replace(\"PLUS\", \"+\")),\n lifetime=10,\n title = catalog.i18nc(\"@info:title\", \"Zaxe Code Details\"))\n infoMessage.show()\n return None\n\n zipFile.extract(\"data.zaxe_code\", TMP_FOLDER)\n zipFile.extract(\"snapshot.png\", TMP_FOLDER)\n\n\n zaxeCode = open(os.path.join(TMP_FOLDER, \"data.zaxe_code\"), \"r\").read()\n\n PluginRegistry.getInstance().getPluginObject(\"GCodeReader\").preReadFromStream(zaxeCode)\n result = PluginRegistry.getInstance().getPluginObject(\"GCodeReader\").readFromStream(zaxeCode, info)\n\n return result\n","repo_name":"zaxe3d/xdesktop","sub_path":"cura/ZaxeCodeReader.py","file_name":"ZaxeCodeReader.py","file_ext":"py","file_size_in_byte":2477,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"14152640629","text":"#!/usr/bin/env pybricks-micropython\nfrom pybricks.hubs import EV3Brick\nfrom pybricks.ev3devices import (Motor, TouchSensor, ColorSensor,\n InfraredSensor, UltrasonicSensor, GyroSensor)\nfrom pybricks.parameters import Port, Stop, Direction, Button, Color\nfrom pybricks.tools import wait, StopWatch, DataLog\nfrom pybricks.robotics import DriveBase\nfrom pybricks.media.ev3dev import SoundFile, ImageFile\n\nimport math\n\n\n# CONSTANTS\nROBOT_DIAMETER = 46.49\nWHEEL_CIRCUMFERENCE = 17.28\n\n# [[0, 10], [15, 20], [30, 10]]\n\n# This program requires LEGO EV3 MicroPython v2.0 or higher.\n# Click \"Open user guide\" on the EV3 extension tab for more information.\n\n# Create your objects here.\nev3 = EV3Brick()\n\nleft_motor = Motor(Port.B)\nright_motor = Motor(Port.C)\n# gyro_sensor = GyroSensor(Port.S1)\n\nd = DriveBase(left_motor, right_motor, wheel_diameter=56, axle_track=114.0) #might need to change last 2 params\nd.settings(700, 100, 100, 100)\n\ndef path_mines():\n angles = {'S-A': 90.0, 'A-B': 33.690067525979785, 'B-C': -33.690067525979785, 'C-S': 18.43494882292201}\n dists = {'S-A': 10.0, 'A-B': 18.027756377319946, 'B-C': 18.027756377319946, 'C-S': 31.622776601683793}\n\n angles_list = list(angles.values())\n dists_list = list(dists.values())\n\n print(dists_list)\n\n for i in range(len(angles_list)):\n d.turn(angles_list[i])\n d.straight(dists_list[i])\n\ndef main():\n path_mines()\n\nmain()\n\n","repo_name":"frisoc/Roobart","sub_path":"final_project/traverse_graph.py","file_name":"traverse_graph.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"39879464454","text":"from .. 
import db\nfrom .activity import Activity\nfrom .guest import Guest\nfrom sqlalchemy import Column, Integer, Boolean, DateTime, Text\nfrom datetime import datetime\n\nclass RoomRequest(db.Model):\n __tablename__ = \"room_requests\"\n\n id = db.Column(db.Integer, primary_key=True)\n created_at = db.Column(db.DateTime(), default=datetime.utcnow)\n\n # Requester Personal Information\n first_name = db.Column(db.String())\n last_name = db.Column(db.String())\n relationship_to_patient = db.Column(db.String())\n address_line_one = db.Column(db.String())\n address_line_two = db.Column(db.String())\n city = db.Column(db.String())\n state = db.Column(db.String())\n zip_code = db.Column(db.String())\n country = db.Column(db.String())\n primary_phone = db.Column(db.String())\n secondary_phone = db.Column(db.String())\n email = db.Column(db.String())\n primary_language = db.Column(db.String())\n secondary_language = db.Column(db.String())\n previous_stay = db.Column(db.Boolean())\n\n # Patient Information\n patient_first_name = db.Column(db.String())\n patient_last_name = db.Column(db.String())\n patient_dob = db.Column(db.Date)\n patient_gender = db.Column(db.String())\n patient_hospital = db.Column(db.String())\n patient_hospital_department = db.Column(db.String())\n patient_treatment_description = db.Column(db.String())\n patient_diagnosis = db.Column(db.String())\n patient_first_appt_date = db.Column(db.Date)\n patient_check_in = db.Column(db.Date)\n patient_check_out = db.Column(db.Date)\n patient_treating_doctor = db.Column(db.String())\n patient_doctors_phone = db.Column(db.String())\n patient_social_worker = db.Column(db.String())\n patient_social_worker_phone = db.Column(db.String())\n inpatient = db.Column(db.String())\n inpatient_prior = db.Column(db.Boolean())\n vaccinated = db.Column(db.Boolean())\n comments = db.Column(db.Text)\n\n # Special Needs\n wheelchair_access = db.Column(db.Boolean())\n full_bathroom = db.Column(db.Boolean())\n pack_n_play = db.Column(db.Boolean())\n\n # Relationship\n guests = db.relationship('Guest', backref='room_request')\n activity = db.relationship('Activity', backref='activity')\n\n def __repr__(self):\n return (''\n f'Patient Last Name: {self.patient_last_name}\\n>')\n\n def __str__(self):\n return self.__repr__()\n\n def print_info(self):\n return ('')\n\n @staticmethod\n def generate_fake(count=5, **kwargs):\n \"\"\"Generate fake room requests for testing.\"\"\"\n from sqlalchemy.exc import IntegrityError\n from faker import Faker\n from random import seed, choice\n\n fake = Faker()\n seed()\n for _ in range(count):\n request = RoomRequest(\n first_name=fake.first_name(),\n last_name=fake.last_name(),\n relationship_to_patient=choice([\"Father\", \"Mother\", \"Parent\"]),\n address_line_one=fake.street_address(),\n address_line_two=fake.secondary_address(),\n city=fake.city(),\n state=fake.state(),\n zip_code=fake.zipcode(),\n country=fake.country(),\n primary_phone=fake.phone_number(),\n secondary_phone=fake.phone_number(),\n email=fake.email(),\n primary_language=choice([\"English\", \"Spanish\"]),\n secondary_language=choice([\"English\", \"Spanish\", \"Japanese\", \"ASL\"]),\n previous_stay=fake.boolean(),\n patient_first_name=fake.first_name(),\n patient_last_name=fake.last_name(),\n patient_dob=fake.past_date(),\n patient_gender=choice([\"Male\", \"Female\", \"Non Binary\"]),\n patient_hospital=choice([\"Children's Hospital of Pennsylvania\", \"Hospital of the University of Pennsylvania\", \"St. 
Christopher's\", \"Shriners\"]),\n patient_hospital_department=choice([\"Pediatrics\",\"Oncology\",\"General\"]),\n patient_treatment_description=fake.word(),\n patient_diagnosis=fake.word(),\n patient_first_appt_date=fake.future_date(),\n patient_check_in=fake.future_date(),\n patient_check_out=fake.future_date(),\n patient_treating_doctor=fake.name(),\n patient_doctors_phone=fake.phone_number(),\n patient_social_worker=fake.name(),\n patient_social_worker_phone=fake.phone_number(),\n inpatient=choice(['Inpatient', 'Outpatient']),\n inpatient_prior=fake.boolean(),\n vaccinated=fake.boolean(),\n comments=fake.sentence(),\n wheelchair_access=fake.boolean(),\n full_bathroom=fake.boolean(),\n pack_n_play=fake.boolean(),\n **kwargs)\n db.session.add(request)\n try:\n db.session.commit()\n Guest.generate_fake(request)\n Activity.generate_fake(request)\n except IntegrityError:\n db.session.rollback()\n\n @staticmethod\n def delete(id):\n room_request = RoomRequest.query.get(id)\n if room_request is None:\n print(f\"No room request in database with id {id}.\")\n else:\n try:\n db.session.delete(room_request)\n db.session.commit()\n except IntegrityError:\n db.session.rollback()\n","repo_name":"hack4impact-upenn/ronald-mcdonald-house","sub_path":"app/models/room_request.py","file_name":"room_request.py","file_ext":"py","file_size_in_byte":8190,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"75"} +{"seq_id":"20369558964","text":"import sys\nimport pcl\n\n\nif __name__ == '__main__':\n\n if len(sys.argv)>1:\n LEAF_SIZE = float(sys.argv[1])\n else:\n # Default \n LEAF_SIZE = 0.01\n\n ### Load Point Cloud file\n cloud = pcl.load_XYZRGB('tabletop.pcd')\n\n\n ### Voxel Grid filter\n # Create a VoxelGrid filter object\n vox = cloud.make_voxel_grid_filter()\n # Set the voxel grid size (leaf)\n vox.set_leaf_size(LEAF_SIZE, LEAF_SIZE, LEAF_SIZE)\n\n # Apply the filter to the loaded point cloud\n cloud_filtered = vox.filter()\n filename = 'voxel_downsampled_{}m.pcd'.format(LEAF_SIZE)\n\n\n # Save pcd for table\n pcl.save(cloud_filtered, filename)\n\n\n\n","repo_name":"AntoBrandi/Robotics-ND","sub_path":"code/perception/Voxel_Grid_Downsampling.py","file_name":"Voxel_Grid_Downsampling.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"25231376037","text":"def Menu():\r\n print('======================================')\r\n print('''(1)caculate the sum of five numbers''')\r\n print('''(2)caculate the average of five numbers''')\r\n print('''(X)exit''')\r\n\r\ndef Sum():\r\n list = [1,2,3,4,5]\r\n i = 0\r\n sum = 0\r\n while i < len(list):\r\n list[i] = float(input('Please input the %dth number:' %(i + 1)))\r\n sum = sum + list[i]\r\n i += 1\r\n return sum\r\n\r\ndef Aver():\r\n sum = Sum()\r\n aver = float(sum / 5)\r\n return aver\r\n\r\nwhile True:\r\n Menu()\r\n option = str(input('Please choose an option:'))\r\n if option == '1':\r\n result = Sum()\r\n print(result)\r\n continue\r\n elif option == '2':\r\n result = Aver()\r\n print(result)\r\n continue\r\n elif option == 'X':\r\n print('system exit')\r\n break\r\n else:\r\n print('wrong option')\r\n continue\r\n","repo_name":"Waynelee94/python","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"26426892889","text":"#T8Q1\n# Initialize dictionary \"contactinfo\" with the 
values \n# as shown in above examples. Hint: The key is a string \n# literal while the value is a dictionary type.\n\ncontactinfo = {\"Sally\" : {'Email':'sally@hotmail.com', 'Phone':67654321}, \"Tom\" : {'Email':'tom@gmail.com', 'Phone':61234567} }\n\n\n#T8Q5\n#In gene expression, mRNA is transcribed from a DNA template. The 4\n#nucleotide bases of A, T, C, G corresponds to the U, A, G, C bases\n#of the mRNA. Write a function that returns the mRNA transcript given\n#the sequence of a DNA strand.\n#Use a dictionary to provide the mapping of DNA to RNA bases.\n\n# Use a dictionary to provide the mapping of DNA to RNA bases.\ndef mRNAtranscription(dna_template):\n dna2rna = {'A' : 'U', 'T' : 'A', 'C' : 'G', 'G' : 'C'}\n mRNA = ''\n for base in dna_template: \n mRNA += dna2rna[base]\n\n return mRNA\n\t\n\t\n#T8Q6\n#A DNA strand consisting of the 4 nucleotide bases is usually represented\n#with a string of letters: A,T, C, G. Write a function that computes the base\n#composition of a given DNA sequence. \n\n\ndef baseComposition(dna_seq): \n composition = {'A' : 0, 'C' : 0, 'T' : 0, 'G' : 0}\n for element in dna_seq:\n if element in composition:\n composition[element] += 1\n \n return composition\n \n \n\t\t\n#T8Q7\n#Write a function countLetters(word) that takes in a word as argument and\n#returns a dictionary that counts the number of times each letter appears. \n\ndef countLetters(word):\n dict = {}\n for base in word:\n if base in dict:\n dict[base] += 1\n else:\n dict[base] = 1\n \n return dict\n \n\n\n#T8Q8\n#Write a function reverseLookup(dictionary, value) that takes in a dictionary\n#and a value as arguments and returns a sorted list of all keys that contains\n#the value. The function will return an empty list if no match is found.\n\n#T8Q9\n#Write a function invertDictionary(d) that takes in a dictionary as argument\n#and return a dictionary that inverts the keys and the values of the original\n#dictionary. \n\ndef invertDictionary(d):\n inv = {}\n for k, v in d.items():\n if v in inv:\n inv[v].append(k)\n else:\n inv[v] = [k]\n return inv\n \n\t\n#T8Q10\n#A sparse vector is a vector whose entries are almost all zero, like\n#[1, 0, 0, 0, 0, 0, 0, 2, 0]. Storing all those zeros wastes memory and\n#dictionaries are commonly used to keep track of just the nonzero entries.\n#For example, the vector shown earlier can be represented as {0:1, 7:2}, since\n#the vector it is meant to represent has the value 1 at index 0 and the value\n#2 at index 7. Write a function that converts a sparse vector into a dictionary\n#as described above. \n\n#T8Q11\n#A sparse vector is a vector whose entries are almost all zero, like\n#[1, 0, 0, 0, 0, 0, 0, 2, 0]. Storing all those zeros wastes memory and dictionaries\n#are commonly used to keep track of just the nonzero entries. For example, the vector\n#shown earlier can be represented as {0:1, 7:2}, since the vector it is meant to represent\n#has the value 1 at index 0 and the value 2 at index 7. Write a function that converts a\n#dictionary back to its sparese vector representation. 
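
# Sketch solutions for T8Q8, T8Q10 and T8Q11, added for illustration (they
# are not part of the original exercise file). reverseLookup is the name
# given in T8Q8; sparseToDict and dictToSparse are illustrative names.

def reverseLookup(dictionary, value):
    # T8Q8: sorted list of all keys that map to the given value ([] if none)
    return sorted(k for k, v in dictionary.items() if v == value)

def sparseToDict(vector):
    # T8Q10: keep only the nonzero entries, keyed by their index
    return {i: v for i, v in enumerate(vector) if v != 0}

def dictToSparse(d, length):
    # T8Q11: rebuild the full vector; the target length must be supplied,
    # since the dictionary form does not record trailing zeros
    vector = [0] * length
    for i, v in d.items():
        vector[i] = v
    return vector

# e.g. sparseToDict([1, 0, 0, 0, 0, 0, 0, 2, 0]) returns {0: 1, 7: 2}, and
# dictToSparse({0: 1, 7: 2}, 9) restores the original vector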
","repo_name":"singhbhupender1/Python_excercises","sub_path":"Topic8_Dictionaries.py","file_name":"Topic8_Dictionaries.py","file_ext":"py","file_size_in_byte":3206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"69906193522","text":"import random\nfrom collections import defaultdict\n\n\n\nclass NeuralNetwork(object):\n\n def train(self, inputs, expected):\n raise NotImplementedError(\"This method is not yet implemented.\")\n\n def feedforward(self, inputs):\n raise NotImplementedError(\"This method is not yet implemented.\")\n\n\nclass StepPerceptronWithBias(NeuralNetwork):\n def __init__(self, n, c=0.1):\n self.n = n\n self.rate = c\n self.weights = [random.random()*1.0 - 0.5 for i in range(n)]\n self.weights.append(random.random()*1.0 - 0.5)\n\n def _activate(self, s):\n return 1 if s > 0 else -1\n\n def feedforward(self, inputs):\n s = sum([float(inputs[i]) * self.weights[i] for i in range(self.n)])\n return self._activate(s + self.weights[-1])\n\n def train(self, inputs, desired):\n guess = self.feedforward(inputs)\n error = desired - guess\n for i in range(self.n):\n self.weights[i] += self.rate * error * inputs[i]\n self.weights[-1] += self.rate * error\n\n def update_weights(aggregate_error, inputs):\n deltas = [i*aggregate_error*rate for i in inputs]\n self.weights = [w+dw for d, dw in zip(self.weights, deltas)]\n\n return [i*aggregate_error * w for i,w in zip(inputs, self.weights)]\n\n def __str__(self):\n return str(self.weights)\n\nTRUE, FALSE = 1, -1\n\nTRUTH_TABLES = {\n 'NOT': [\n ([FALSE], TRUE),\n ([TRUE], FALSE),\n ],\n\n 'AND': [\n ([FALSE, FALSE], FALSE),\n ([FALSE, TRUE ], FALSE),\n ([TRUE , FALSE], FALSE),\n ([TRUE , TRUE ], TRUE ),\n ],\n\n 'OR': [\n ([FALSE, FALSE], FALSE),\n ([FALSE, TRUE ], TRUE ),\n ([TRUE , FALSE], TRUE ),\n ([TRUE , TRUE ], TRUE ),\n ],\n 'NOR':[\n ([FALSE, FALSE], TRUE),\n ([FALSE, TRUE ], FALSE ),\n ([TRUE , FALSE], FALSE ),\n ([TRUE , TRUE ], FALSE ),\n ],\n\n 'NAND': [\n ([FALSE, FALSE], TRUE ),\n ([FALSE, TRUE ], TRUE ),\n ([TRUE , FALSE], TRUE ),\n ([TRUE , TRUE ], FALSE ),\n ],\n\n 'XOR': [\n ([FALSE, FALSE], FALSE ),\n ([FALSE, TRUE ], TRUE ),\n ([TRUE , FALSE], TRUE ),\n ([TRUE , TRUE ], FALSE ),\n ]\n}\n\n\n\ndef test (p, data):\n results = [p.feedforward(inputs) for inputs, expected in data]\n #print( results )\n tests = [1 if result == d[1] else 0 for result, d in zip(results, data)]\n return sum(tests) / (1.0 * len(tests))\n\n\ndef train (p, data, iterations):\n for i in range(iterations):\n sample = random.choice(data)\n p.train(sample[0], sample[1])\n return p\n\n\ndef run_test(per, data):\n print(\"\")\n # display and test the perceptron without training\n print(\"before training {} success rate {}\".format(per, test(per, data)))\n\n train(per, data, 100)\n\n print(\"after training {} success rate {}\".format(per, test(per, data)))\n\n\nclass XORMLP(NeuralNetwork):\n def __init__(self):\n self.input_layer = [PerceptronWithBias(2), PerceptronWithBias(2)]\n self.output_layer = [PerceptronWithBias(2)]\n\n def feedforward(self, inputs):\n o1 = i1.feedforward(inputs)\n o2 = i2.feedforward(inputs)\n return o.feedforward([o1,o2])\n\n def train(self, inputs, target):\n hidden_values = [neuron.feedforward(i) for neuron, i in zip(input_layer, inputs)]\n outputs = [neuron.feedforward(hidden_values) for neuron in self.output_layer]\n difference = target - output\n output_error = output * difference\n\n hidden_errors = o.update_weights(output_error, hidden_values)\n\n 
[neuron.update_weights(e,i) for neuron, e, i in zip(input_layer, hidden_errors, inputs)]\n\n\n\n\n\n\nrun_test(StepPerceptronWithBias(2, c=0.02), TRUTH_TABLES[\"OR\"])\nrun_test(StepPerceptronWithBias(2, c=0.02), TRUTH_TABLES[\"AND\"])\nrun_test(StepPerceptronWithBias(2, c=0.02), TRUTH_TABLES[\"NAND\"])\nrun_test(StepPerceptronWithBias(2, c=0.02), TRUTH_TABLES[\"NOR\"])\nrun_test(StepPerceptronWithBias(2, c=0.02), TRUTH_TABLES[\"XOR\"])\n\n\n\n","repo_name":"CambridgeProgrammerStudyGroup/machine-learning","sub_path":"02-The-Perceptron/python-simple/ann.py","file_name":"ann.py","file_ext":"py","file_size_in_byte":3908,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"75"} +{"seq_id":"29320878133","text":"import socket\nimport py_eureka_client.http_client as http_client\n\n\n# the method below is copied from eureka python sdk.\n# https://github.com/keijack/python-eureka-client\ndef get_instance_ip(server):\n url_obj = http_client.parse_url(server)\n target_ip = url_obj[\"host\"]\n target_port = url_obj[\"port\"]\n if target_port is None:\n if url_obj[\"schema\"] == \"http\":\n target_port = 80\n else:\n target_port = 443\n\n if url_obj[\"ipv6\"] is not None:\n target_ip = url_obj[\"ipv6\"]\n socket_family = socket.AF_INET6\n else:\n socket_family = socket.AF_INET\n\n s = socket.socket(socket_family, socket.SOCK_DGRAM)\n s.connect((target_ip, target_port))\n ip = s.getsockname()[0]\n s.close()\n return ip\n","repo_name":"aliyun/algorithm-base","sub_path":"src/ab/utils/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"75"} +{"seq_id":"20418054135","text":"\"\"\"\nSupport for retrieving status info from Google Wifi/OnHub routers.\n\nFor more details about this platform, please refer to the documentation at\nhttps://home-assistant.io/components/sensor.google_wifi/\n\"\"\"\nimport logging\nfrom datetime import timedelta\n\nimport voluptuous as vol\nimport requests\n\nimport homeassistant.util.dt as dt\nimport homeassistant.helpers.config_validation as cv\nfrom homeassistant.components.sensor import PLATFORM_SCHEMA\nfrom homeassistant.const import (\n CONF_NAME, CONF_HOST, CONF_MONITORED_CONDITIONS, STATE_UNKNOWN)\nfrom homeassistant.helpers.entity import Entity\nfrom homeassistant.util import Throttle\n\n_LOGGER = logging.getLogger(__name__)\n\nATTR_CURRENT_VERSION = 'current_version'\nATTR_LAST_RESTART = 'last_restart'\nATTR_LOCAL_IP = 'local_ip'\nATTR_NEW_VERSION = 'new_version'\nATTR_STATUS = 'status'\nATTR_UPTIME = 'uptime'\n\nDEFAULT_HOST = 'testwifi.here'\nDEFAULT_NAME = 'google_wifi'\n\nENDPOINT = '/api/v1/status'\n\nMIN_TIME_BETWEEN_UPDATES = timedelta(seconds=1)\n\nMONITORED_CONDITIONS = {\n ATTR_CURRENT_VERSION: [\n ['software', 'softwareVersion'],\n None,\n 'mdi:checkbox-marked-circle-outline'\n ],\n ATTR_NEW_VERSION: [\n ['software', 'updateNewVersion'],\n None,\n 'mdi:update'\n ],\n ATTR_UPTIME: [\n ['system', 'uptime'],\n 'days',\n 'mdi:timelapse'\n ],\n ATTR_LAST_RESTART: [\n ['system', 'uptime'],\n None,\n 'mdi:restart'\n ],\n ATTR_LOCAL_IP: [\n ['wan', 'localIpAddress'],\n None,\n 'mdi:access-point-network'\n ],\n ATTR_STATUS: [\n ['wan', 'online'],\n None,\n 'mdi:google'\n ]\n}\n\nPLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({\n vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,\n vol.Optional(CONF_MONITORED_CONDITIONS,\n default=list(MONITORED_CONDITIONS)):\n vol.All(cv.ensure_list, 
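        # every requested condition must be one of the known sensor keys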
[vol.In(MONITORED_CONDITIONS)]),\n vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,\n})\n\n\ndef setup_platform(hass, config, add_devices, discovery_info=None):\n \"\"\"Set up the Google Wifi sensor.\"\"\"\n name = config.get(CONF_NAME)\n host = config.get(CONF_HOST)\n conditions = config.get(CONF_MONITORED_CONDITIONS)\n\n api = GoogleWifiAPI(host, conditions)\n dev = []\n for condition in conditions:\n dev.append(GoogleWifiSensor(api, name, condition))\n\n add_devices(dev, True)\n\n\nclass GoogleWifiSensor(Entity):\n \"\"\"Representation of a Google Wifi sensor.\"\"\"\n\n def __init__(self, api, name, variable):\n \"\"\"Initialize a Google Wifi sensor.\"\"\"\n self._api = api\n self._name = name\n self._state = STATE_UNKNOWN\n\n variable_info = MONITORED_CONDITIONS[variable]\n self._var_name = variable\n self._var_units = variable_info[1]\n self._var_icon = variable_info[2]\n\n @property\n def name(self):\n \"\"\"Return the name of the sensor.\"\"\"\n return '{}_{}'.format(self._name, self._var_name)\n\n @property\n def icon(self):\n \"\"\"Icon to use in the frontend, if any.\"\"\"\n return self._var_icon\n\n @property\n def unit_of_measurement(self):\n \"\"\"Return the unit the value is expressed in.\"\"\"\n return self._var_units\n\n @property\n def available(self):\n \"\"\"Return availability of Google Wifi API.\"\"\"\n return self._api.available\n\n @property\n def state(self):\n \"\"\"Return the state of the device.\"\"\"\n return self._state\n\n def update(self):\n \"\"\"Get the latest data from the Google Wifi API.\"\"\"\n self._api.update()\n if self.available:\n self._state = self._api.data[self._var_name]\n else:\n self._state = STATE_UNKNOWN\n\n\nclass GoogleWifiAPI(object):\n \"\"\"Get the latest data and update the states.\"\"\"\n\n def __init__(self, host, conditions):\n \"\"\"Initialize the data object.\"\"\"\n uri = 'http://'\n resource = \"{}{}{}\".format(uri, host, ENDPOINT)\n self._request = requests.Request('GET', resource).prepare()\n self.raw_data = None\n self.conditions = conditions\n self.data = {\n ATTR_CURRENT_VERSION: STATE_UNKNOWN,\n ATTR_NEW_VERSION: STATE_UNKNOWN,\n ATTR_UPTIME: STATE_UNKNOWN,\n ATTR_LAST_RESTART: STATE_UNKNOWN,\n ATTR_LOCAL_IP: STATE_UNKNOWN,\n ATTR_STATUS: STATE_UNKNOWN\n }\n self.available = True\n self.update()\n\n @Throttle(MIN_TIME_BETWEEN_UPDATES)\n def update(self):\n \"\"\"Get the latest data from the router.\"\"\"\n try:\n with requests.Session() as sess:\n response = sess.send(self._request, timeout=10)\n self.raw_data = response.json()\n self.data_format()\n self.available = True\n except (ValueError, requests.exceptions.ConnectionError):\n _LOGGER.warning(\"Unable to fetch data from Google Wifi\")\n self.available = False\n self.raw_data = None\n\n def data_format(self):\n \"\"\"Format raw data into easily accessible dict.\"\"\"\n for attr_key in self.conditions:\n value = MONITORED_CONDITIONS[attr_key]\n try:\n primary_key = value[0][0]\n sensor_key = value[0][1]\n if primary_key in self.raw_data:\n sensor_value = self.raw_data[primary_key][sensor_key]\n # Format sensor for better readability\n if (attr_key == ATTR_NEW_VERSION and\n sensor_value == '0.0.0.0'):\n sensor_value = 'Latest'\n elif attr_key == ATTR_UPTIME:\n sensor_value = round(sensor_value / (3600 * 24), 2)\n elif attr_key == ATTR_LAST_RESTART:\n last_restart = (\n dt.now() - timedelta(seconds=sensor_value))\n sensor_value = last_restart.strftime(\n '%Y-%m-%d %H:%M:%S')\n elif attr_key == ATTR_STATUS:\n if sensor_value:\n sensor_value = 'Online'\n else:\n 
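The `GoogleWifiAPI.update` method above is wrapped in Home Assistant's `Throttle`, which drops calls arriving sooner than `MIN_TIME_BETWEEN_UPDATES` after the last real call and returns None for them. A dependency-free sketch of that behaviour (a simplified stand-in, not the real `homeassistant.util.Throttle`):

import time
from datetime import timedelta

def throttle(min_interval: timedelta):
    def wrap(func):
        last = [float("-inf")]  # time of the last call that went through
        def inner(*args, **kwargs):
            now = time.monotonic()
            if now - last[0] < min_interval.total_seconds():
                return None  # throttled call: mirror Throttle's None return
            last[0] = now
            return func(*args, **kwargs)
        return inner
    return wrap

@throttle(timedelta(seconds=1))
def update():
    return "fetched"

print(update(), update())  # fetched None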
sensor_value = 'Offline'\n elif attr_key == ATTR_LOCAL_IP:\n if not self.raw_data['wan']['online']:\n sensor_value = STATE_UNKNOWN\n\n self.data[attr_key] = sensor_value\n except KeyError:\n _LOGGER.error(\"Router does not support %s field. \"\n \"Please remove %s from monitored_conditions\",\n sensor_key, attr_key)\n self.data[attr_key] = STATE_UNKNOWN\n","repo_name":"jest-community/jest-pytest","sub_path":"src/__tests__/integration/home-assistant/homeassistant/components/sensor/google_wifi.py","file_name":"google_wifi.py","file_ext":"py","file_size_in_byte":6748,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"75"} +{"seq_id":"71251750963","text":"from inspect import getargspec, isfunction\nfrom Products.MailHost.MailHost import MailBase\nimport six\n\nfor f in six.itervalues(MailBase.__dict__):\n if isfunction(f):\n args, _, _, defaults = getargspec(f)\n try:\n i = args.index('immediate') - len(args)\n except ValueError:\n continue\n f.__defaults__ = defaults[:i] + (True,) + defaults[i+1 or len(args):]\n\nfrom App.special_dtml import DTMLFile\nMailBase.manage = MailBase.manage_main = DTMLFile('dtml/manageMailHost', globals())\nMailBase.smtp_socket_timeout = 16.\n\nfrom functools import partial\nMailBase__makeMailer = MailBase._makeMailer\ndef _makeMailer(self):\n \"\"\" Create a SMTPMailer \"\"\"\n smtp_mailer = MailBase__makeMailer(self)\n smtp_mailer.smtp = partial(smtp_mailer.smtp, timeout=self.smtp_socket_timeout)\n return smtp_mailer\n\nMailBase._makeMailer = _makeMailer\n","repo_name":"Nexedi/erp5","sub_path":"product/ERP5Type/patches/MailHost.py","file_name":"MailHost.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":171,"dataset":"github-code","pt":"75"} +{"seq_id":"15894832729","text":"'''\nDocumentation, License etc.\n\n@package techfugees_python\n'''\nimport json\nimport pprint\nimport Levenshtein\n\nmentor_json=open(\"mentor_data.json\").read()\nmentee_json=open(\"mentee_data.json\").read()\n\nmentor_data = json.loads(mentor_json)\nmentee_data = json.loads(mentee_json)\n\n#pprint.pprint(mentor_data[0])\n#pprint.pprint(mentee_data)\n\nfor mentee in mentee_data:\n print(mentee)\n# print(mentee[\"Goal\"])\n best_mentor = dict()\n best_mentor_score = 999\n for mentor in mentor_data:\n# print(mentor)\n help_score = Levenshtein.distance(mentee[\"Help Need\"],mentor[\"Help Offered\"]);\n # print(\"Goal Match: \",100-Levenshtein.distance(mentee[\"Goal\"],mentor[\"Goal\"]))\n #print(\"Help Match:\\t\",100-help_score)\n #print(\"Language Match:\\t\",100-Levenshtein.distance(mentee[\"Language\"],mentor[\"Language\"]),)\n# print(\"Strengths Match: \",100-Levenshtein.distance(mentee[\"Strengths\"],mentor[\"Strengths\"]),)\n# print(\"Interests Match: \",100-Levenshtein.distance(mentee[\"Interests\"],mentor[\"Interests\"]),)\n if help_score < best_mentor_score:\n best_mentor = mentor\n best_mentor_score = help_score\n print (best_mentor)\n print(\"Help Match:\\t\",100-best_mentor_score,\"\\n\")\n\n \n","repo_name":"reubot/techfugees_ths","sub_path":"techfugees_python.py","file_name":"techfugees_python.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"14162650197","text":"from os import initgroups\nimport matplotlib.pyplot as plt\nimport torch\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader\nfrom torchvision.datasets import MNIST\nfrom 
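The MailHost patch in this record rewrites each function's `__defaults__` tuple in place; the index is negative because defaults line up with the last parameters of the signature, and the original's `i+1 or len(args)` guard covers the last-parameter case where `i+1` would be 0. A self-contained sketch of the technique, using `getfullargspec` since `getargspec` was removed in Python 3.11:

from inspect import getfullargspec

def send(msg, immediate=False, retries=3):
    return immediate

args = getfullargspec(send).args         # ['msg', 'immediate', 'retries']
i = args.index('immediate') - len(args)  # negative offset into __defaults__
defaults = send.__defaults__             # (False, 3)
send.__defaults__ = defaults[:i] + (True,) + defaults[i+1:]

print(send("hello"))  # True: the default flipped without editing the source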
torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nfrom network_ex import Net\n\n# データセットをダウンロード(download=True)\n# 学習データを用意\ntrain_data = MNIST('~/Desktop/rikulab_rinkou/AI_rinkou/dataset_scaleup/', train=True, download=True, transform=transforms.ToTensor())\ntrain_loader = DataLoader(train_data,\n batch_size=4,\n shuffle=True)\n\n\nnet = Net() # ネットワーク定義\nnet = net.to(\"cuda\")\ncriterion = nn.CrossEntropyLoss() # 損失関数(loss)を定義\noptimizer = optim.SGD(net.parameters(), lr=0.01) # 最適化手法\n\nsave_path = \"/home/ericlab/Desktop/rikulab_rinkou/AI_rinkou/chackpoints/net_ex.pth\" # 重みファイルの保存\n\n# 学習スタート\nfor epoch in range(20): # 何回繰り返すか(epoch数指定)\n running_loss = 0.0\n for i, data in enumerate(train_loader): # dataloaderで取り込んだデータセットを一つ一つネットワークに入力(学習)\n inputs, labels = data # 入力データと真値を格納\n inputs, labels = inputs.to(\"cuda\"), labels.to(\"cuda\")\n optimizer.zero_grad () # 勾配情報をリセット\n outputs = net(inputs) # 順伝播\n loss = criterion(outputs, labels) # ロスの計算\n loss.to(\"cuda\")\n loss.backward() # 逆伝播\n optimizer.step() # パラメータの更新\n running_loss += loss.item() # lossの足し算\n\n # print(\"loss: %.3f\" %(loss.item()))\n\n print(i)\n torch.save(net.state_dict(), save_path) # 重みファイルの保存\n\n # 最後にlossの出力\n if i % 1000 == 0:\n if i == 0:\n continue\n print('epoch:%d iter:%d loss:%.3f' % (epoch + 1, i, running_loss / 1000))\n running_loss = 0.0\n\nprint('Finished Training')\n","repo_name":"ERiC-Labo/AI_rinkou","sub_path":"week_3/next_step/train_ex.py","file_name":"train_ex.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"8131683570","text":"import re\nfrom mongodb_operation import database\n\n\ndef user_save_mongodb():\n # 获取用户列表\n users = []\n with open(\"./ml-100k/u.user\") as file_object:\n for line in file_object:\n line_s = re.split('\\|', line)\n users.append([line_s[0], line_s[1], line_s[2], line_s[3], line_s[4].strip()])\n\n collection_u = database('kg', 'user')\n\n # 存入mongodb\n for item in users:\n user = {}\n user['user_id'] = item[0]\n user['user_age'] = item[1]\n user['user_gender'] = item[2]\n user['user_occupation'] = item[3]\n collection_u.insert_dict(user)\n\ndef rating_save_mongodb():\n # 获取评分列表\n ratings = []\n with open(\"./ml-100k/u.data\") as file_object:\n for line in file_object:\n line_s = re.split('\\t', line)\n ratings.append([line_s[0], line_s[1], line_s[2], line_s[3].strip()])\n\n collection_u = database('kg', 'rating')\n\n # 存入mongodb\n for item in ratings:\n rating = {}\n rating['rating_user_id'] = item[0]\n rating['rating_movie_key'] = item[1]\n rating['rating_r'] = item[2]\n rating['rating_time'] = item[3]\n collection_u.insert_dict(rating)","repo_name":"ranlongyu/movie_knowledge_graph","sub_path":"KG/user_mongodb.py","file_name":"user_mongodb.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"75"} +{"seq_id":"69899116084","text":"import socket\nimport threading\n\nHOST = 'localhost'\nPORT = 9999\n\nname = input('Enter your name ')\n\nclient = socket.socket()\nclient.connect((HOST,PORT))\n\ndef recieve():\n while True:\n try:\n message = client.recv(1024).decode('utf-8')\n if message == 'NAME':\n client.send(name.encode('utf-8'))\n else:\n print(message)\n\n except:\n print('ERROR!!! 
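The MNIST loop above follows the standard five-step PyTorch pattern its Japanese comments describe: reset gradients, forward pass, loss, backward pass, optimizer step (the `loss.to("cuda")` line is redundant, the loss already lives on the model's device). A minimal CPU-only sketch with fake tensors so it runs without a dataset or GPU:

import torch
import torch.nn as nn
import torch.optim as optim

model = nn.Linear(4, 3)                    # toy stand-in for the Net() above
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01)

inputs = torch.randn(8, 4)                 # fake mini-batch of 8 samples
labels = torch.randint(0, 3, (8,))         # fake class labels

optimizer.zero_grad()                      # 1. clear accumulated gradients
outputs = model(inputs)                    # 2. forward pass
loss = criterion(outputs, labels)          # 3. compute the loss
loss.backward()                            # 4. backpropagate
optimizer.step()                           # 5. update the parameters
print(loss.item())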
Closing the connection :(')\n client.close()\n break\n\ndef write():\n while True:\n message = name + \":\" + input(' ')\n client.send(message.encode('utf-8'))\n\n\nrecieve_thread = threading.Thread(target=recieve)\nrecieve_thread.start()\n\nwrite_thread = threading.Thread(target=write)\nwrite_thread.start()","repo_name":"balachander712/Lab-Main","sub_path":"Sem4/Networking/assignment-4/2/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"11693614794","text":"from Push2.browser_component import BrowserComponent, FolderBrowserItem\nfrom ableton.v2.base import listens\n\nfrom .PushMonitoringComponent import PushMonitoringComponent\n\n\nclass PushMonitoringBrowser(PushMonitoringComponent):\n def __init__(self, *a, **k):\n self._args = a\n self._kwargs = k\n\n def enter(self):\n super(PushMonitoringBrowser, self).__init__(BrowserComponent, *self._args, **self._kwargs)\n super(PushMonitoringBrowser, self).enter()\n\n self._on_focused_item_changed.subject = self.component\n\n\n self._on_prehear_enabled_changed.subject = self.component\n\n def update_controls(self):\n pass\n\n @listens(\"focused_item\")\n def _on_focused_item_changed(self):\n index = self.component._focused_list_index\n _list = self.component._lists[index]\n selected_index = _list._selected_index\n focused_item = _list._items[selected_index]\n msg = focused_item.name\n if focused_item.is_loadable:\n msg += \", press bottom button 8 to load.\"\n if isinstance(focused_item, FolderBrowserItem):\n msg += \" press right arrow to explore.\"\n self.send_midi(msg)\n\n @listens(\"prehear_enabled\")\n def _on_prehear_enabled_changed(self):\n self.send_midi(\"preview on\" if self.component.prehear_button.is_toggled else \"preview off\")\n\n","repo_name":"jbflow/LiveReader","sub_path":"MIDI Remote Script/PushMonitoring/PushMonitoringBrowser.py","file_name":"PushMonitoringBrowser.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"75"} +{"seq_id":"28420632123","text":"from LinkList.helper import ListNode\n\nclass Solution:\n def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:\n fast_ptr = head\n slow_ptr = head\n for i in range(n):\n fast_ptr = fast_ptr.next\n if fast_ptr is None:\n return head.next\n while(fast_ptr.next):\n fast_ptr = fast_ptr.next\n slow_ptr = slow_ptr.next\n slow_ptr.next = slow_ptr.next.next\n return head\n\nif __name__ == '__main__':\n n1 = ListNode(1)\n n2 = ListNode(2)\n n3 = ListNode(3)\n n4 = ListNode(4)\n n1.next = n2\n n2.next = n3\n n3.next = n4\n s = Solution()\n cur = s.removeNthFromEnd(n1,1)\n while(cur):\n print(cur.val)\n cur = cur.next\n\n\n","repo_name":"rorschach-xiao/LeetcodeLearning","sub_path":"LinkList/19. 
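`removeNthFromEnd` above relies on the gap-of-n invariant: advance the fast pointer n nodes, then move both pointers in lockstep until the fast one reaches the tail, which leaves the slow one just before the node to unlink. The same logic in a self-contained form (node class and names are mine) with a worked run:

class Node:
    def __init__(self, val, nxt=None):
        self.val, self.next = val, nxt

def remove_nth_from_end(head, n):
    fast = slow = head
    for _ in range(n):              # open a gap of n between the pointers
        fast = fast.next
    if fast is None:                # gap ran off the end: remove the head
        return head.next
    while fast.next:                # advance together; the gap is preserved
        fast, slow = fast.next, slow.next
    slow.next = slow.next.next      # slow sits just before the victim
    return head

head = Node(1, Node(2, Node(3, Node(4, Node(5)))))
head = remove_nth_from_end(head, 2)
while head:
    print(head.val, end=" ")        # 1 2 3 5
    head = head.next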
删除链表的倒数第N个节点/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"2877500246","text":"import datetime\nimport email\nimport imaplib\n\nfrom pymongo import HASHED\n\nfrom ._base import BaseSpider\n\n\nclass EmailSpider(BaseSpider):\n name = 'email'\n\n # DB specs\n collections_config = {\n 'Scraper_covidscholar_receiever_gmail': [\n [('MessageId', HASHED)],\n 'MessageId',\n 'last_updated'\n ]\n }\n\n def start_requests(self):\n mail = imaplib.IMAP4_SSL('imap.gmail.com')\n mail.login(\n self.settings['COVIDSCHOLAR_RECEIVER_EMAIL'],\n self.settings['COVIDSCHOLAR_RECEIVER_PASSWORD'])\n mail.list()\n mail.select('inbox')\n\n _, message_ids = mail.uid('search', None, \"ALL\")\n message_ids = message_ids[0].decode('utf8').split()\n\n for message_uid in message_ids:\n if self.has_duplicate(\n where='Scraper_covidscholar_receiever_gmail',\n query={'MessageId': message_uid}):\n continue\n _, email_data = mail.uid('fetch', message_uid, '(RFC822)')\n raw_email = email_data[0][1].decode('utf-8')\n\n email_message = email.message_from_string(raw_email)\n\n date_tuple = email.utils.parsedate_tz(email_message['Date'])\n if date_tuple:\n time_received = datetime.datetime.fromtimestamp(email.utils.mktime_tz(date_tuple))\n else:\n time_received = None\n\n email_from = str(email.header.make_header(email.header.decode_header(email_message['From'])))\n email_to = str(email.header.make_header(email.header.decode_header(email_message['To'])))\n subject = str(email.header.make_header(email.header.decode_header(email_message['Subject'])))\n\n body = []\n for part in email_message.walk():\n if part.get_content_type() == \"text/plain\":\n try:\n body.append(part.get_payload(decode=True).decode('utf8'))\n except UnicodeEncodeError:\n pass\n else:\n continue\n\n item = {\n 'MessageId': message_uid,\n 'From': email_from,\n 'To': email_to,\n 'Subject': subject,\n 'TimeReceived': time_received,\n 'Body': body,\n 'Raw': raw_email,\n }\n self.save_article(item, to='Scraper_covidscholar_receiever_gmail')\n\n yield from ()\n","repo_name":"COVID-19-Text-Mining/scrapers","sub_path":"covidscholar_scraper/spiders/gmail_receiever.py","file_name":"gmail_receiever.py","file_ext":"py","file_size_in_byte":2485,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"8436388018","text":"# Taobao data crawler\n\nimport json, threading\nfrom Download import dl\nfrom urllib.parse import urlencode\n\ndef getUrl(page, keyword):\n start_url = 'https://s.taobao.com/api?'\n add_dict = {\n '_ksTS': '1492391041115_231',\n 'ajax': 'true',\n 'm': 'customized',\n 'rn': 'fcb53e078ddd635d41aa048a8a671207',\n 'q': keyword,\n 'ie': 'utf8',\n 's': page,\n 'bcoffset': 0\n }\n return start_url + urlencode(add_dict)\n\ndef getDetail(page, keyword, output):\n print('[Catching]。。。 page: ', page)\n data = json.loads(dl.GetHtml(getUrl(page, keyword)))\n if data and data['API.CustomizedApi']['itemlist']['auctions']:\n for item in data['API.CustomizedApi']['itemlist']['auctions']:\n detail_url = 'http:' + item['detail_url']\n detail_title = item['raw_title']\n store_name = item['nick']\n if item['icon']:\n store = item['icon'][0]['innerText']\n else:\n store = '普通淘宝'\n sales_volume = item['view_sales']\n price = item['view_price']\n output.write(store+'\\n'+store_name+'\\n'+detail_title+'\\n'+price+'元\\n'+sales_volume+'\\n'+detail_url+'\\n\\n')\n\ndef get(keyword):\n 
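The spider's Gmail logic boils down to a small imaplib recipe: UID-based search and fetch (UIDs are stable across sessions, which is what makes deduplicating on `MessageId` safe) followed by `email` parsing. A minimal sketch; HOST, USER and PASSWORD are placeholders you would substitute:

import email
import imaplib

HOST, USER, PASSWORD = "imap.example.com", "user@example.com", "app-password"

mail = imaplib.IMAP4_SSL(HOST)
mail.login(USER, PASSWORD)
mail.select("inbox")

_, data = mail.uid("search", None, "ALL")     # returns UIDs, not sequence numbers
for uid in data[0].split():
    _, parts = mail.uid("fetch", uid, "(RFC822)")
    msg = email.message_from_bytes(parts[0][1])
    print(uid.decode(), msg["Subject"])

mail.logout()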
out_put_file = open(keyword + '.txt', 'w', encoding='utf8')\n for page in range(0, 1000, 10):\n th = threading.Thread(target=getDetail, args=(page, keyword, out_put_file))\n th.start()\n\nif __name__ == '__main__':\n get(input('input keyword: '))\n","repo_name":"Aston5128/TaoBao_Crawler","sub_path":"TaoBao_crawler.py","file_name":"TaoBao_crawler.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"18873410401","text":"# coding: utf-8\n\n\"\"\"\n Cloud Speech-to-Text API\n\n Converts audio to text by applying powerful neural network models.
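The Taobao crawler builds each page URL by url-encoding a parameter dict and stepping the `s` offset in tens. Just that mechanism, with the parameter set trimmed down (the real request needs the extra fields shown above):

from urllib.parse import urlencode

def search_url(keyword, offset):
    params = {"q": keyword, "ie": "utf8", "s": offset}
    return "https://s.taobao.com/api?" + urlencode(params)

for offset in range(0, 30, 10):   # the crawler pages in steps of 10
    print(search_url("phone", offset))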
**PLEASE NOTE**: This API is provided by Google, beside the documentation provide below, you can find Google API documentation [here](https://cloud.google.com/speech-to-text/docs/reference/rest). You can refer to the Google documentation as well except by the URLs needed to call the API and that are documented here below. # noqa: E501\n\n OpenAPI spec version: v3.3\n \n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\n\nclass SpeechRecognitionAlternative(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'confidence': 'float',\n 'transcript': 'str',\n 'words': 'list[WordInfo]'\n }\n\n attribute_map = {\n 'confidence': 'confidence',\n 'transcript': 'transcript',\n 'words': 'words'\n }\n\n def __init__(self, confidence=None, transcript=None, words=None): # noqa: E501\n \"\"\"SpeechRecognitionAlternative - a model defined in Swagger\"\"\" # noqa: E501\n self._confidence = None\n self._transcript = None\n self._words = None\n self.discriminator = None\n if confidence is not None:\n self.confidence = confidence\n if transcript is not None:\n self.transcript = transcript\n if words is not None:\n self.words = words\n\n @property\n def confidence(self):\n \"\"\"Gets the confidence of this SpeechRecognitionAlternative. # noqa: E501\n\n The confidence estimate between 0.0 and 1.0. A higher number indicates an estimated greater likelihood that the recognized words are correct. This field is set only for the top alternative of a non-streaming result or, of a streaming result where `is_final=true`. This field is not guaranteed to be accurate and users should not rely on it to be always provided. The default of 0.0 is a sentinel value indicating `confidence` was not set. # noqa: E501\n\n :return: The confidence of this SpeechRecognitionAlternative. # noqa: E501\n :rtype: float\n \"\"\"\n return self._confidence\n\n @confidence.setter\n def confidence(self, confidence):\n \"\"\"Sets the confidence of this SpeechRecognitionAlternative.\n\n The confidence estimate between 0.0 and 1.0. A higher number indicates an estimated greater likelihood that the recognized words are correct. This field is set only for the top alternative of a non-streaming result or, of a streaming result where `is_final=true`. This field is not guaranteed to be accurate and users should not rely on it to be always provided. The default of 0.0 is a sentinel value indicating `confidence` was not set. # noqa: E501\n\n :param confidence: The confidence of this SpeechRecognitionAlternative. # noqa: E501\n :type: float\n \"\"\"\n\n self._confidence = confidence\n\n @property\n def transcript(self):\n \"\"\"Gets the transcript of this SpeechRecognitionAlternative. # noqa: E501\n\n Transcript text representing the words that the user spoke. # noqa: E501\n\n :return: The transcript of this SpeechRecognitionAlternative. # noqa: E501\n :rtype: str\n \"\"\"\n return self._transcript\n\n @transcript.setter\n def transcript(self, transcript):\n \"\"\"Sets the transcript of this SpeechRecognitionAlternative.\n\n Transcript text representing the words that the user spoke. 
# noqa: E501\n\n :param transcript: The transcript of this SpeechRecognitionAlternative. # noqa: E501\n :type: str\n \"\"\"\n\n self._transcript = transcript\n\n @property\n def words(self):\n \"\"\"Gets the words of this SpeechRecognitionAlternative. # noqa: E501\n\n A list of word-specific information for each recognized word. Note: When `enable_speaker_diarization` is true, you will see all the words from the beginning of the audio. # noqa: E501\n\n :return: The words of this SpeechRecognitionAlternative. # noqa: E501\n :rtype: list[WordInfo]\n \"\"\"\n return self._words\n\n @words.setter\n def words(self, words):\n \"\"\"Sets the words of this SpeechRecognitionAlternative.\n\n A list of word-specific information for each recognized word. Note: When `enable_speaker_diarization` is true, you will see all the words from the beginning of the audio. # noqa: E501\n\n :param words: The words of this SpeechRecognitionAlternative. # noqa: E501\n :type: list[WordInfo]\n \"\"\"\n\n self._words = words\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(SpeechRecognitionAlternative, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, SpeechRecognitionAlternative):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","repo_name":"r3versi/gcloud-speech-to-text","sub_path":"gcloudspeechtotext/models/speech_recognition_alternative.py","file_name":"speech_recognition_alternative.py","file_ext":"py","file_size_in_byte":6504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"17755102076","text":"import os\nimport sys\n\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql import functions as psf\n\n\ndef main(filepath, value):\n \"\"\"Read in data, add a column `added_column`.\"\"\"\n spark = SparkSession.builder.getOrCreate()\n\n databricks_check = os.environ.get('DATABRICKS_HOST', None)\n if databricks_check is not None:\n cwd = \"/\"\n else:\n cwd = \"\"\n\n print(cwd, databricks_check)\n\n return (\n spark.read\n .option('inferSchema', True)\n .csv(cwd+filepath, header=True)\n .withColumn('added_column', psf.lit(value))\n )\n\n\nif __name__ == \"__main__\":\n value = sys.argv[1]\n main('mnt/demo/*.csv', value).show()\n","repo_name":"pdemeulenaer/demo-project","sub_path":"src/demo/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"38905111564","text":"import gzip\nfrom datetime import datetime, timedelta\n\nimport 
chardet\nimport requests\n\nimport amazon_sp_api_clients\nfrom . import client_config\n\nreports_client = amazon_sp_api_clients.Reports20210630Client(**client_config.client_config)\norders_client = amazon_sp_api_clients.OrdersV0Client(**client_config.client_config)\n\n\ndef test_get_orders():\n for order in orders_client.getOrders(\n [client_config.marketplace.market_place],\n CreatedAfter=(datetime.now() - timedelta(days=1000)).isoformat()\n ).payload.Orders:\n print(order.AmazonOrderId)\n\n\ndef test_get_inventory_report():\n report_id = reports_client.createReport(\n amazon_sp_api_clients.reports_2021_06_30.CreateReportSpecification({\n 'reportType': 'GET_MERCHANT_LISTINGS_ALL_DATA',\n 'marketplaceIds': [client_config.marketplace.market_place],\n })).reportId\n while True:\n response = reports_client.getReport(report_id)\n if response.processingStatus not in ('IN_QUEUE', 'IN_PROGRESS'):\n break\n document_id = response.reportDocumentId\n response = reports_client.getReportDocument(reportDocumentId=document_id)\n assert response.compressionAlgorithm == 'GZIP'\n data = requests.get(response.url).content\n data = gzip.decompress(data)\n encoding = chardet.detect(data)['encoding']\n lines = data.decode(encoding).splitlines()\n lines = [line.split('\\t') for line in lines]\n columns = ('asin1', 'item-name', 'seller-sku', 'listing-id', 'price', 'quantity', 'product-id', 'status')\n indexes = [lines[0].index(f) for f in columns]\n data = [[line[i] for i in indexes] for line in lines[1:]]\n for line in data:\n print(line)\n\n\ndef test_get_and_delete_report():\n report_id = reports_client.createReport(\n amazon_sp_api_clients.reports_2021_06_30.CreateReportSpecification({\n 'reportType': 'GET_MERCHANT_LISTINGS_ALL_DATA',\n 'marketplaceIds': [client_config.marketplace.market_place],\n })).reportId\n print(report_id)\n response = reports_client.cancelReport(report_id)\n assert response is None\n response = reports_client.getReport(report_id)\n print(response.data)\n","repo_name":"panhaoyu/amazon-sp-api-clients","sub_path":"tests/test_get_post_delete.py","file_name":"test_get_post_delete.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"75"} +{"seq_id":"13202088934","text":"# coding=utf-8\n# This is a sample Python script.\n\n# Press ⌃R to execute it or replace it with your code.\n# Press Double ⇧ to search everywhere for classes, files, tool windows, actions, and settings.\nimport sys\nfrom Repository import repo\nfrom myDAOs import vaccines\nfrom myDTOs import Vaccine\nfrom myDAOs import suppliers\nfrom myDTOs import Supplier\nfrom myDAOs import clinics\nfrom myDTOs import Clinic\nfrom myDAOs import logistics\nfrom myDTOs import Logistic\n\n\ndef main():\n repo.create_tables()\n summery = [0, 0, 0, 0, 0]\n summery = initTable(summery)\n numOfVec = summery[4]\n summery = summery[0:4]\n repo.setSum(summery, numOfVec)\n makeOrders()\n\n\ndef makeOrders():\n with open(sys.argv[2]) as ordersFile:\n ordersFileLines = ordersFile.readlines()\n for line in ordersFileLines:\n y = line.split(\",\")\n if len(y) == 3:\n repo.receiveShipment(y[0], int(y[1]), y[2])\n else:\n repo.sendShipment(y[0], int(y[1]))\n with open(sys.argv[3], \"a\") as output: # check 'w+'\n output.write('\\n')\n\ndef initTable(summery):\n # read and use repo\n with open(sys.argv[1]) as configFile:\n configFileLines = configFile.readlines()\n numOfVaccinesLines = int(configFileLines[0].split(',')[0])\n numOfSupplierLines = 
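The inventory-report test decompresses the downloaded document, sniffs the encoding with chardet instead of assuming UTF-8, and selects TSV columns by header name so the column order does not matter. The same pipeline on synthetic bytes (the report content here is invented for the demo):

import gzip
import chardet

raw = "seller-sku\tprice\tquantity\nABC-1\t9.99\t3\n".encode("utf-8")
blob = gzip.compress(raw)                 # stands in for the downloaded file

data = gzip.decompress(blob)
encoding = chardet.detect(data)["encoding"]
lines = [line.split("\t") for line in data.decode(encoding).splitlines()]

header, rows = lines[0], lines[1:]
indexes = [header.index(c) for c in ("seller-sku", "quantity")]
for row in rows:
    print([row[i] for i in indexes])      # ['ABC-1', '3']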
int(configFileLines[0].split(',')[1])\n numOfClinicsLines = int(configFileLines[0].split(',')[2])\n numOfLogisticLines = int(configFileLines[0].split(',')[3])\n total_inventory = 0\n for i in range(1, numOfVaccinesLines+1):\n myVecLine = configFileLines[i].split(',')\n repo.vaccines.insert(Vaccine(*myVecLine))\n total_inventory = total_inventory + (int(myVecLine[3]))\n\n summery[0] = total_inventory\n summery[4] = numOfVaccinesLines\n point = 1 + numOfVaccinesLines\n\n for i in range(point, point + numOfSupplierLines):\n mySupLine = configFileLines[i].split(',')\n repo.suppliers.insert(Supplier(*mySupLine))\n\n point = point + numOfSupplierLines\n total_demand = 0\n for i in range(point, point + numOfClinicsLines):\n myCliLine = configFileLines[i].split(',')\n repo.clinics.insert(Clinic(*myCliLine))\n total_demand = total_demand + (int(myCliLine[2]))\n\n summery[1] = total_demand\n point = point + numOfClinicsLines\n\n for i in range(point, point + numOfLogisticLines):\n myLogLine = configFileLines[i].split(',')\n repo.logistics.insert(Logistic(*myLogLine))\n\n return summery\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"YonatanNechmad/vaccine_distribution_center","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"34113694325","text":"from __future__ import annotations\n\nfrom typing import Optional, Tuple\n\nfrom sqlalchemy import BigInteger, Column, Enum, ForeignKey, and_, or_, select, update, delete\nfrom sqlalchemy.ext.asyncio import AsyncSession\n\nfrom Quadrant.models.db_init import Base\nfrom Quadrant.models.users_package.relations_types import UsersRelationType\nfrom Quadrant.models.users_package.users_status import UsersStatus\nfrom .user import User\n\nUSERS_RELATIONS_PER_PAGE = 50\n\n\nclass UsersRelations(Base):\n relation_id = Column(BigInteger, primary_key=True)\n initiator_id = Column(ForeignKey('users.id'), nullable=False, index=True)\n relation_with_id = Column(ForeignKey('users.id'), nullable=False, index=True)\n relation_status = Column(Enum(UsersRelationType), default=UsersRelationType.none, nullable=False)\n\n __tablename__ = \"users_relations\"\n\n @staticmethod\n def any_user_initialized_relationship(user_id: User.id, with_user_id: User.id):\n \"\"\"\n Query part that will give True when any of users pair had initialized relation with other user\n\n :param user_id: user id of someone who asks for this.\n :param with_user_id: user id of someone with whom we look for any relations.\n :return: sqlalchemy query.\n \"\"\"\n return or_(\n and_(UsersRelations.initiator_id == user_id, UsersRelations.relation_with_id == with_user_id),\n and_(UsersRelations.initiator_id == with_user_id, UsersRelations.relation_with_id == user_id)\n )\n\n @staticmethod\n async def get_any_relationships_status_with(\n user_id: User.id, with_user_id: User.id, *, session\n ) -> UsersRelationType:\n \"\"\"\n Gives relationship status between any of two users.\n\n :param user_id: user id of someone who asks for this.\n :param with_user_id: user id of someone with whom we look for any relations.\n :param session: sqlalchemy session.\n :return: one of UsersRelationType.\n \"\"\"\n relation_result = await session.execute(\n select(UsersRelations.relation_status).filter(\n UsersRelations.any_user_initialized_relationship(user_id, with_user_id)\n )\n )\n relation = relation_result.scalar()\n\n if relation is None:\n return UsersRelationType.none\n\n 
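`any_user_initialized_relationship` above composes a symmetric pair filter: an OR over the two AND-ed orderings of (initiator, relation_with), so a single predicate matches the pair whichever side initiated it. A minimal sketch against a stand-in model (the model is hypothetical, not the Quadrant one):

from sqlalchemy import Column, Integer, and_, or_, select
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Relation(Base):
    __tablename__ = "relations_sketch"
    id = Column(Integer, primary_key=True)
    initiator_id = Column(Integer)
    relation_with_id = Column(Integer)

def either_direction(a_id, b_id):
    # True for the (a, b) pair regardless of who initiated the relation.
    return or_(
        and_(Relation.initiator_id == a_id, Relation.relation_with_id == b_id),
        and_(Relation.initiator_id == b_id, Relation.relation_with_id == a_id),
    )

print(select(Relation).where(either_direction(1, 2)))  # renders the OR of two ANDs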
return relation\n\n @staticmethod\n async def get_exact_relationship_with_user(\n user: User, with_user_id: User.id, *, session: AsyncSession\n ) -> Tuple[UsersRelations, User]:\n \"\"\"\n Gives exact relationship depending of who is requester. Needed for cases when we want unblock other user\n or send him an friend request.\n\n :param user: user instance of someone who asks for this.\n :param with_user_id: user id of someone with whom we look for relation.\n :param session: sqlalchemy session.\n :return: UsersRelations instance and instance of User with whom requester have some relationship.\n \"\"\"\n query = select(UsersRelations, User).filter(\n UsersRelations.initiator_id == user.id,\n UsersRelations.relation_with_id == with_user_id\n ).join(User, User.id == with_user_id)\n result = await session.execute(query)\n relation, relation_with = result.scalar_one()\n\n return relation, relation_with\n\n @classmethod\n async def get_exact_relationship(\n cls, user_id: User.id, with_user_id: User.id, *, session\n ) -> Optional[UsersRelations]:\n \"\"\"\n Gives exact relationship depending on requester id but without instance of User with whom we have it.\n In case found nothing - returns None.\n\n :param user_id: requester id.\n :param with_user_id: user id of someone with whom we look for relation.\n :param session: sqlalchemy session.\n :return: returns exact relationship instance of\n \"\"\"\n query = select(cls).filter(\n UsersRelations.initiator_id == user_id,\n UsersRelations.relation_with_id == with_user_id\n )\n result = await session.execute(query)\n relation = result.scalar_one_or_none()\n\n return relation\n\n @staticmethod\n async def get_exact_relationship_status(user_id: User.id, with_user_id: User.id, *, session) -> UsersRelationType:\n \"\"\"\n Gives exact relation status with user depending on requester id.\n\n :param user_id: requester id.\n :param with_user_id: user id of someone with whom we look for relation.\n :param session: sqlalchemy session.\n :return: one of UsersRelationType.\n \"\"\"\n query = select(UsersRelations.relation_status).filter(\n UsersRelations.initiator_id == user_id,\n UsersRelations.relation_with_id == with_user_id\n )\n query_result = await session.execute(query)\n relation = query_result.scalar_one_or_none()\n\n if relation is None:\n return UsersRelationType.none\n\n return relation\n\n @staticmethod\n async def get_relationships_page(\n user: User, page: int, relationship_type: UsersRelationType, *, session\n ) -> Tuple[Tuple[UsersRelations.relation_status, User]]:\n \"\"\"\n Gives page of relationships ordered by username and users with who Users instances with whom has relations.\n\n :param user: user instance of someone who asks for this.\n :param page: page number.\n :param relationship_type: filter by type of requester to other user relation.\n :param session: sqlalchemy session.\n :return: relationship status and User instance with whom we have it.\n \"\"\"\n if page < 0:\n raise ValueError(\"Invalid page\")\n\n query = select(UsersRelations.relation_status, User).join(User, User.id != user.id) \\\n .filter(\n UsersRelations.initiator_id == user.id,\n UsersRelations.relation_status == relationship_type\n ).limit(USERS_RELATIONS_PER_PAGE).offset(USERS_RELATIONS_PER_PAGE * page) \\\n .order_by(\n User.status == UsersStatus.online,\n User.status == UsersStatus.away,\n User.status == UsersStatus.asleep,\n User.status == UsersStatus.offline,\n User.username\n )\n\n result = await session.execute(query)\n relations = result.all()\n\n 
packed_relations = []\n for relation, user in relations:\n packed_relations.append((relation, user))\n\n return packed_relations\n\n @staticmethod\n async def send_friend_request(request_sender: User, request_receiver: User, *, session) -> None:\n \"\"\"\n Sends a friend request to someone.\n\n :param request_sender: user who authored the request.\n :param request_receiver: user who will receive it.\n :param session: sqlalchemy session.\n :return: nothing (raises exception if something's wrong).\n \"\"\"\n relationships_status: UsersRelationType = await UsersRelations.get_any_relationships_status_with(\n request_sender.id, request_receiver.id, session=session\n )\n\n # TODO: check if receiver user allowed to send friend requests\n if request_receiver.is_bot:\n raise request_receiver.exc.UserIsBot(\"Bot users can not receive friend requests\")\n\n if request_receiver.id == request_sender.id:\n raise ValueError(\"User can not become friend with himself\")\n\n if relationships_status == UsersRelationType.none:\n friend_request_outgoing = UsersRelations(\n initiator_id=request_sender.id, relation_with_id=request_receiver.id,\n relation_status=UsersRelationType.friend_request_sender\n )\n friend_request_incoming = UsersRelations(\n initiator_id=request_receiver.id, relation_with_id=request_sender.id,\n relation_status=UsersRelationType.friend_request_receiver\n )\n\n session.add_all([friend_request_outgoing, friend_request_incoming])\n await session.commit()\n\n else:\n raise UsersRelations.exc.RelationshipsException(\"Invalid relationship type\")\n\n @staticmethod\n async def cancel_friend_request(canceller: User, friend_request_to: User, *, session) -> None:\n \"\"\"\n Cancels a sent friend request.\n\n :param canceller: user that sent the friend request.\n :param friend_request_to: user who received the request.\n :param session: sqlalchemy session.\n :return: nothing (raises exception if something's wrong).\n \"\"\"\n relationships_status: UsersRelationType = await UsersRelations.get_exact_relationship_status(\n canceller.id, friend_request_to.id, session=session\n )\n\n if relationships_status != UsersRelationType.friend_request_sender:\n raise UsersRelations.exc.RelationshipsException(\"You can not cancel a friend request that does not exist\")\n\n users_relations_query = UsersRelations.any_user_initialized_relationship(canceller.id, friend_request_to.id)\n query = delete(UsersRelations).where(\n and_(\n users_relations_query,\n UsersRelations.relation_status.in_(\n [UsersRelationType.friend_request_receiver, UsersRelationType.friend_request_sender]\n )\n )\n ).execution_options(synchronize_session=\"fetch\")\n\n await session.execute(query)\n await session.commit()\n\n @staticmethod\n async def respond_on_friend_request(\n request_receiver: User, request_sender: User, accept_request: bool, *, session\n ) -> None:\n \"\"\"\n Responds to a friend request by adding a new friend or rejecting the request.\n\n :param request_receiver: user that received the friend request.\n :param request_sender: user who authored the request.\n :param accept_request: bool flag that shows that request was accepted or not.\n :param session: sqlalchemy session.\n :return: nothing (raises exception if something's wrong).\n \"\"\"\n relationships_status: UsersRelationType = await UsersRelations.get_exact_relationship_status(\n request_sender.id, request_receiver.id, session=session\n )\n\n if relationships_status == UsersRelationType.friend_request_receiver:\n users_relations_query = UsersRelations.any_user_initialized_relationship(request_receiver.id, 
request_sender.id)\n\n if accept_request:\n query = update(UsersRelations).where(users_relations_query).values(\n relation_status=relationships_status.friends\n ).execution_options(synchronize_session=\"fetch\")\n await session.execute(query)\n\n else:\n query = delete(UsersRelations).where(\n and_(\n users_relations_query,\n UsersRelations.relation_status.in_(\n [UsersRelationType.friend_request_receiver, UsersRelationType.friend_request_sender]\n )\n )\n ).execution_options(synchronize_session=\"fetch\")\n await session.execute(query)\n\n await session.commit()\n\n else:\n raise UsersRelations.exc.RelationshipsException(\"Invalid relationships to become friends\")\n\n @staticmethod\n async def remove_user_from_friends(removed_by: User, friend: User, *, session) -> None:\n \"\"\"\n Removes friend from friend list if he's in or if user isn't in - raises error.\n\n :param removed_by: user that wants to remove someone from friends.\n :param friend: user that will be removed from friends.\n :param session: sqlalchemy session.\n :return: nothing (raises exception if something's wrong).\n \"\"\"\n relationships_status: UsersRelationType = await UsersRelations.get_any_relationships_status_with(\n friend.id, removed_by.id, session=session\n )\n\n if relationships_status == UsersRelationType.friends:\n users_relations_query = UsersRelations.any_user_initialized_relationship(removed_by.id, friend.id)\n query = delete(UsersRelations).where(\n and_(\n users_relations_query,\n UsersRelations.relation_status == UsersRelationType.friends\n )\n ).execution_options(synchronize_session=\"fetch\")\n\n await session.execute(query)\n await session.commit()\n\n else:\n raise UsersRelations.exc.RelationshipsException(\"Invalid relationships to become friends\")\n\n @staticmethod\n async def block_user(blocking_by: User, blocking_user: User, *, session) -> UsersRelations:\n \"\"\"\n Destroys friends relationship, requester or receiver of friend request and sets relationship status, that\n initiated by blocking_by user, to UsersRelationType.blocked.\n Must not remove all relationships to be able keep other's user\n possible block relationship still existing.\n\n :param blocking_by: user who blocks other user.\n :param blocking_user: user who blocking_by wants to block.\n :param session: sqlalchemy session.\n :return: updated or relationship, initialized by blocking_by.\n \"\"\"\n # There only max of two relations and we should make sure that we're not unblocking someone unintentionally\n if blocking_user.id == blocking_by.id:\n raise ValueError(\"Can not block yourself\")\n\n initialized_by_blocker: UsersRelations = await UsersRelations.get_exact_relationship(\n blocking_by.id, blocking_user.id, session=session\n )\n initialized_by_blocking_user: UsersRelations = await UsersRelations.get_exact_relationship(\n blocking_user.id, blocking_by.id, session=session\n )\n\n if initialized_by_blocker is None:\n initialized_by_blocker = UsersRelations(\n initiator_id=blocking_by.id,\n relation_with_id=blocking_user.id,\n relation_status=UsersRelationType.blocked\n )\n session.add(initialized_by_blocker)\n\n elif initialized_by_blocker.relation_status == UsersRelationType.blocked:\n raise UsersRelations.exc.AlreadyBlockedException(\"User is already blocked\")\n\n else:\n initialized_by_blocker.relation_status = UsersRelationType.blocked\n\n if (\n (initialized_by_blocking_user is not None) and\n (initialized_by_blocking_user.relation_status != UsersRelationType.blocked)\n ):\n await 
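The bulk `update()`/`delete()` statements in this class all carry `execution_options(synchronize_session="fetch")`: with that strategy the ORM first selects the rows the statement will touch, so objects already loaded in the session are expired to agree with the bulk operation instead of going stale. A tiny illustration with a stand-in model:

from sqlalchemy import Column, Integer, delete
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Rel(Base):
    __tablename__ = "rel_sketch"
    id = Column(Integer, primary_key=True)
    initiator_id = Column(Integer)

stmt = (
    delete(Rel)
    .where(Rel.initiator_id == 1)
    .execution_options(synchronize_session="fetch")  # pre-select affected rows
)
print(stmt)  # DELETE FROM rel_sketch WHERE rel_sketch.initiator_id = :initiator_id_1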
session.delete(initialized_by_blocking_user)\n\n await session.commit()\n return initialized_by_blocker\n\n @staticmethod\n async def unblock_user(user_unblock_initializer: User, unblocking_user: User, *, session) -> None:\n \"\"\"\n Unblocks user.\n\n :param user_unblock_initializer: user who wants to unblock someone.\n :param unblocking_user: user whom he wants to unblock.\n :param session: sqlalchemy session.\n :return: nothing (may raise exceptions).\n \"\"\"\n relation = await UsersRelations.get_exact_relationship_status(\n user_unblock_initializer.id, unblocking_user.id, session=session\n )\n\n if relation != UsersRelationType.blocked:\n raise UsersRelations.exc.RelationshipsException(\"Invalid relation ship to unblock user\")\n\n await session.execute(\n delete(UsersRelations).where(\n UsersRelations.initiator_id == user_unblock_initializer.id,\n UsersRelations.relation_with_id == unblocking_user.id,\n UsersRelations.relation_status.is_(UsersRelationType.blocked)\n )\n )\n await session.commit()\n\n class exc:\n class RelationshipsException(Exception):\n \"\"\"\n Base exception for relationships class.\n \"\"\"\n pass\n\n class BlockedRelationshipException(RelationshipsException):\n \"\"\"\n One user blocked another and so anything beside blocking each other can not be performed.\n \"\"\"\n pass\n\n class AlreadyBlockedException(BlockedRelationshipException):\n \"\"\"\n Special exception that represents that user is already blocked\n \"\"\"\n pass\n","repo_name":"Rud356/Quadrant","sub_path":"Quadrant/models/users_package/users_relation.py","file_name":"users_relation.py","file_ext":"py","file_size_in_byte":16430,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"75"} +{"seq_id":"18345589362","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\n# preorder = [15,9,20, 3,7]\n# inorder = [ 9,3,15,20,7]\n# idx = [ 0,1, 2, 3,4]\n\nclass Solution:\n def buildTree(self, preorder: List[int], inorder: List[int]) -> TreeNode:\n if len(preorder) == 0 or len(inorder) == 0:\n return None\n root = TreeNode(preorder[0])\n idx = inorder.index(preorder[0])\n root.left = self.buildTree(preorder[1:1 + idx], inorder[:idx])\n root.right = self.buildTree(preorder[idx + 1:], inorder[idx + 1:])\n return root\n# class Solution:\n# def buildTree(self, preorder: List[int], inorder: List[int]) -> TreeNode:\n# if not preorder or not inorder: # 递归终止条件\n# return\n# root = TreeNode(preorder[0]) # 先序为“根左右”,所以根据preorder可以确定root\n# idx = inorder.index(preorder[0]) # 中序为“左根右”,根据root可以划分出左右子树\n# # 下面递归对root的左右子树求解即可\n# root.left = self.buildTree(preorder[1:1 + idx], inorder[:idx])\n# root.right = self.buildTree(preorder[1 + idx:], inorder[idx + 1:])\n# return root\n\n\n# if len(preorder) == 0:\n# return None\n# if len(preorder) == 1:\n# return TreeNode(preorder.pop(0))\n\n# def buildTreedfs(inorder, begin, end):\n# if end - begin == 0:\n# return None\n# elif end-begin == 1:\n# return TreeNode(preorder.pop(0))\n# i = preorder.pop(0)\n# newNode = TreeNode(i)\n# rootindex = inorder.index(i)\n# newNode.left = buildTreedfs(inorder, begin, rootindex)\n# newNode.right = buildTreedfs(inorder, rootindex+1, end)\n# return newNode\n\n# begin = 0\n# end = len(preorder)-1\n# i = preorder.pop(0)\n# res = TreeNode(i)\n# rootindex = inorder.index(i)\n# res.left = buildTreedfs(inorder, begin, rootindex)\n# res.right = buildTreedfs(inorder, rootindex+1, end+1)\n# return 
res\n","repo_name":"llliuer/my-leetcode","sub_path":"leetcode-python/buildTree.py","file_name":"buildTree.py","file_ext":"py","file_size_in_byte":2026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"43409309353","text":"from rest_framework.views import APIView\nfrom rest_framework.generics import ListAPIView, CreateAPIView, RetrieveAPIView\nfrom .serializers import UserSerializer, FileDataSerializer, PostSerializer\nfrom rest_framework.pagination import PageNumberPagination\nfrom rest_framework.response import Response\nfrom .models import User, Post\nfrom rest_framework.exceptions import AuthenticationFailed\nimport jwt\nimport datetime\nimport json\nfrom django.http import JsonResponse\n\n\nclass RegisterView(APIView):\n def post(self, request):\n serializer = UserSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response(serializer.data)\n\n\nclass LoginView(APIView):\n def post(self, request):\n email = request.data[\"email\"]\n password = request.data[\"password\"]\n\n user = User.objects.filter(email=email).first()\n\n if user is None:\n raise AuthenticationFailed('User Not Found')\n\n if not user.check_password(password):\n raise AuthenticationFailed(\"Incorrect Password\")\n\n payload = {\n 'id': user.id,\n 'exp': datetime.datetime.utcnow()+datetime.timedelta(minutes=60),\n 'iat': datetime.datetime.utcnow()\n }\n\n token = jwt.encode(payload, 'secret',\n algorithm='HS256')\n\n response = Response()\n\n response.set_cookie(key='jwt', value=token, httponly=True)\n\n response.data = {\n 'jwt': token,\n 'expiry': payload['exp']\n }\n\n return response\n\n\nclass UserView(APIView):\n def get(self, request):\n token = request.COOKIES.get('jwt')\n\n if not token:\n raise AuthenticationFailed('Unauthenticated!')\n\n try:\n payload = jwt.decode(token, \"secret\", algorithms=[\"HS256\"])\n\n except jwt.ExpiredSignatureError:\n raise AuthenticationFailed('Unauthenticated!')\n\n user = User.objects.filter(id=payload['id']).first()\n\n serializer = UserSerializer(user)\n\n expiryData = {\n 'expiry': payload['exp']*1000\n }\n\n responseData = expiryData.copy()\n responseData.update(serializer.data)\n\n return Response(responseData)\n\n\nclass LogoutView(APIView):\n def get(self, request):\n response = Response()\n response.delete_cookie('jwt')\n response.data = {\n 'status': 'Success'\n }\n return response\n\n\nclass FileUploadView(APIView):\n\n def post(self, request, format=\"json\"):\n file_obj = request.FILES['file']\n file_data = file_obj.read()\n try:\n file_json = json.loads(file_data)\n except Exception:\n # print(f\"excetion {traceback.format_exc()}\")\n return JsonResponse({'status': \"error\", 'message': 'Error in json loads!'}, status=400)\n if not isinstance(file_json, list):\n return JsonResponse({'status': \"error\", 'message': 'Invalid Json Data!'}, status=400)\n\n # serilize each each_entry\n for each_entry in file_json:\n serialized_data = FileDataSerializer(data=each_entry)\n if not serialized_data.is_valid():\n return JsonResponse({'status': \"error\", 'message': 'Invalid Json Data!'}, status=400)\n\n # print(f\"{serialized_data.data}\")\n # print(\"is valid\", serialized_data.is_valid())\n serialized_data.create(serialized_data.data)\n\n # return Response(status=200)\n return JsonResponse({'status': \"success\", 'message': 'File Upload Success'}, status=200)\n # do some stuff with uploaded file\n\n\nclass StandardResultsSetPagination(PageNumberPagination):\n 
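`LoginView` and `UserView` above implement a bare JWT cookie flow with PyJWT: encode `{id, exp, iat}` under HS256, decode on every request, and treat `ExpiredSignatureError` as unauthenticated. The core round trip in isolation (the hard-coded "secret" mirrors the view and is not a production practice):

import datetime
import jwt  # PyJWT

payload = {
    "id": 42,
    "exp": datetime.datetime.utcnow() + datetime.timedelta(minutes=60),
    "iat": datetime.datetime.utcnow(),
}
token = jwt.encode(payload, "secret", algorithm="HS256")

decoded = jwt.decode(token, "secret", algorithms=["HS256"])  # verifies exp too
print(decoded["id"])  # 42

try:
    jwt.decode(token, "wrong-secret", algorithms=["HS256"])
except jwt.InvalidSignatureError:
    print("tampered or wrong key")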
page_size = 12\n page_size_query_param = 'page_number'\n max_page_size = 12\n\n\nclass ListPost(ListAPIView):\n serializer_class= PostSerializer\n queryset = Post.objects.all()\n pagination_class = StandardResultsSetPagination\n\n #this gets all the posts\n\n","repo_name":"AniketUndalekar1997/financepeers_assign_backend","sub_path":"backend/post/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"5743310583","text":"from django.conf import settings\nfrom django.contrib.gis import forms\nfrom django.template import loader\n\nfrom geoportal import utils\n\n\nclass BaseWidget(forms.Textarea):\n template = 'geoportal/widget.html'\n is_point = False\n is_linestring = False\n is_polygon = False\n is_collection = False\n collection_type = 'None'\n\n def __init__(self, *args, **kwargs):\n super(BaseWidget, self).__init__(*args, **kwargs)\n attrs = kwargs.pop('attrs', {})\n self.options = {\n 'width': attrs.pop('width', utils.DEFAULT_WIDTH),\n 'height': attrs.pop('height', utils.DEFAULT_HEIGHT),\n 'color': attrs.pop('color', utils.DEFAULT_COLOR),\n 'opacity': attrs.pop('opacity', utils.DEFAULT_OPACITY),\n 'default_zoom': attrs.pop('default_zoom', utils.DEFAULT_ZOOM),\n 'default_lon': attrs.pop('default_lon', utils.DEFAULT_LON),\n 'default_lat': attrs.pop('default_lat', utils.DEFAULT_LAT),\n 'layers': utils.get_layers(attrs.pop('layers', (('maps', 1),))),\n 'srid': attrs.pop('srid', 4326),\n }\n\n def render(self, name, value, attrs=None):\n if value is None:\n value = ''\n\n context = {\n 'map_var': 'map_' + name,\n 'is_polygon': self.is_polygon,\n 'is_linestring': self.is_linestring,\n 'is_point': self.is_point,\n 'is_collection': self.is_collection,\n 'collection_type': self.collection_type,\n 'api_key': settings.GEOPORTAL_API_KEY,\n 'wms_url': utils.WMS_URL,\n 'field_name': name,\n 'wkt': value,\n 'point_zoom': utils.POINT_ZOOM,\n 'admin_media_url': settings.ADMIN_MEDIA_PREFIX,\n }\n context.update(self.options)\n\n return loader.render_to_string(self.template, context)\n\n\nclass PointWidget(BaseWidget):\n is_point = True\n\n\nclass MultiPointWidget(PointWidget):\n is_collection = True\n collection_type = 'MultiPoint'\n\n\nclass LineStringWidget(BaseWidget):\n is_linestring = True\n\n\nclass MultiLineStringWidget(LineStringWidget):\n is_collection = True\n collection_type = 'MultiLineString'\n\n\nclass PolygonWidget(BaseWidget):\n is_polygon = True\n\n\nclass MultiPolygonWidget(PolygonWidget):\n is_collection = True\n collection_type = 'MultiPolygon'\n","repo_name":"theCISProject/cis","sub_path":"geoportal/forms/widgets.py","file_name":"widgets.py","file_ext":"py","file_size_in_byte":2349,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"75"} +{"seq_id":"37058768115","text":"import dash\nimport time\nimport uuid\nfrom dash import dcc\nfrom dash.dependencies import Input, Output, State, ALL\nfrom dash.exceptions import PreventUpdate\nimport feffery_antd_components as fac\nimport feffery_utils_components as fuc\n\nfrom server import app\nfrom api.role import get_role_list_api, get_role_detail_api, add_role_api, edit_role_api, delete_role_api, export_role_list_api\nfrom api.menu import get_menu_tree_api\n\n\n@app.callback(\n output=dict(\n role_table_data=Output('role-list-table', 'data', allow_duplicate=True),\n role_table_pagination=Output('role-list-table', 'pagination', allow_duplicate=True),\n 
role_table_key=Output('role-list-table', 'key'),\n role_table_selectedrowkeys=Output('role-list-table', 'selectedRowKeys'),\n api_check_token_trigger=Output('api-check-token', 'data', allow_duplicate=True)\n ),\n inputs=dict(\n search_click=Input('role-search', 'nClicks'),\n refresh_click=Input('role-refresh', 'nClicks'),\n pagination=Input('role-list-table', 'pagination'),\n operations=Input('role-operations-store', 'data')\n ),\n state=dict(\n role_name=State('role-role_name-input', 'value'),\n role_key=State('role-role_key-input', 'value'),\n status_select=State('role-status-select', 'value'),\n create_time_range=State('role-create_time-range', 'value'),\n button_perms=State('role-button-perms-container', 'data')\n ),\n prevent_initial_call=True\n)\ndef get_role_table_data(search_click, refresh_click, pagination, operations, role_name, role_key, status_select, create_time_range, button_perms):\n \"\"\"\n 获取角色表格数据回调(进行表格相关增删查改操作后均会触发此回调)\n \"\"\"\n\n create_time_start = None\n create_time_end = None\n if create_time_range:\n create_time_start = create_time_range[0]\n create_time_end = create_time_range[1]\n query_params = dict(\n role_name=role_name,\n role_key=role_key,\n status=status_select,\n create_time_start=create_time_start,\n create_time_end=create_time_end,\n page_num=1,\n page_size=10\n )\n triggered_id = dash.ctx.triggered_id\n if triggered_id == 'role-list-table':\n query_params = dict(\n role_name=role_name,\n role_key=role_key,\n status=status_select,\n create_time_start=create_time_start,\n create_time_end=create_time_end,\n page_num=pagination['current'],\n page_size=pagination['pageSize']\n )\n if search_click or refresh_click or pagination or operations:\n table_info = get_role_list_api(query_params)\n if table_info['code'] == 200:\n table_data = table_info['data']['rows']\n table_pagination = dict(\n pageSize=table_info['data']['page_size'],\n current=table_info['data']['page_num'],\n showSizeChanger=True,\n pageSizeOptions=[10, 30, 50, 100],\n showQuickJumper=True,\n total=table_info['data']['total']\n )\n for item in table_data:\n if item['status'] == '0':\n item['status'] = dict(checked=True, disabled=item['role_id'] == 1)\n else:\n item['status'] = dict(checked=False, disabled=item['role_id'] == 1)\n item['key'] = str(item['role_id'])\n if item['role_id'] == 1:\n item['operation'] = []\n else:\n item['operation'] = fac.AntdSpace(\n [\n fac.AntdButton(\n '修改',\n id={\n 'type': 'role-operation-table',\n 'operation': 'edit',\n 'index': str(item['role_id'])\n },\n type='link',\n icon=fac.AntdIcon(\n icon='antd-edit'\n ),\n style={\n 'padding': 0\n }\n ) if 'system:role:edit' in button_perms else [],\n fac.AntdButton(\n '删除',\n id={\n 'type': 'role-operation-table',\n 'operation': 'delete',\n 'index': str(item['role_id'])\n },\n type='link',\n icon=fac.AntdIcon(\n icon='antd-delete'\n ),\n style={\n 'padding': 0\n }\n ) if 'system:role:remove' in button_perms else [],\n fac.AntdPopover(\n fac.AntdButton(\n '更多',\n type='link',\n icon=fac.AntdIcon(\n icon='antd-more'\n ),\n style={\n 'padding': 0\n }\n ),\n content=fac.AntdSpace(\n [\n fac.AntdButton(\n '数据权限',\n id={\n 'type': 'role-operation-table',\n 'operation': 'datascope',\n 'index': str(item['role_id'])\n },\n type='text',\n block=True,\n icon=fac.AntdIcon(\n icon='antd-check-circle'\n ),\n style={\n 'padding': 0\n }\n ),\n fac.AntdButton(\n '分配用户',\n id={\n 'type': 'role-operation-table',\n 'operation': 'allocation',\n 'index': str(item['role_id'])\n },\n type='text',\n block=True,\n icon=fac.AntdIcon(\n 
icon='antd-user'\n ),\n style={\n 'padding': 0\n }\n ),\n ],\n direction='vertical'\n ),\n placement='bottomRight'\n ) if 'system:role:edit' in button_perms else []\n ]\n )\n\n return dict(\n role_table_data=table_data,\n role_table_pagination=table_pagination,\n role_table_key=str(uuid.uuid4()),\n role_table_selectedrowkeys=None,\n api_check_token_trigger={'timestamp': time.time()}\n )\n\n return dict(\n role_table_data=dash.no_update,\n role_table_pagination=dash.no_update,\n role_table_key=dash.no_update,\n role_table_selectedrowkeys=dash.no_update,\n api_check_token_trigger={'timestamp': time.time()}\n )\n\n raise PreventUpdate\n\n\n# 重置角色搜索表单数据回调\napp.clientside_callback(\n '''\n (reset_click) => {\n if (reset_click) {\n return [null, null, null, null, {'type': 'reset'}]\n }\n return window.dash_clientside.no_update;\n }\n ''',\n [Output('role-role_name-input', 'value'),\n Output('role-role_key-input', 'value'),\n Output('role-status-select', 'value'),\n Output('role-create_time-range', 'value'),\n Output('role-operations-store', 'data')],\n Input('role-reset', 'nClicks'),\n prevent_initial_call=True\n)\n\n\n# 隐藏/显示角色搜索表单回调\napp.clientside_callback(\n '''\n (hidden_click, hidden_status) => {\n if (hidden_click) {\n return [\n !hidden_status,\n hidden_status ? '隐藏搜索' : '显示搜索'\n ]\n }\n return window.dash_clientside.no_update;\n }\n ''',\n [Output('role-search-form-container', 'hidden'),\n Output('role-hidden-tooltip', 'title')],\n Input('role-hidden', 'nClicks'),\n State('role-search-form-container', 'hidden'),\n prevent_initial_call=True\n)\n\n\n@app.callback(\n Output({'type': 'role-operation-button', 'operation': 'edit'}, 'disabled'),\n Input('role-list-table', 'selectedRowKeys'),\n prevent_initial_call=True\n)\ndef change_role_edit_button_status(table_rows_selected):\n \"\"\"\n 根据选择的表格数据行数控制编辑按钮状态回调\n \"\"\"\n outputs_list = dash.ctx.outputs_list\n if outputs_list:\n if table_rows_selected:\n if len(table_rows_selected) > 1 or '1' in table_rows_selected:\n return True\n\n return False\n\n return True\n\n return dash.no_update\n\n\n@app.callback(\n Output({'type': 'role-operation-button', 'operation': 'delete'}, 'disabled'),\n Input('role-list-table', 'selectedRowKeys'),\n prevent_initial_call=True\n)\ndef change_role_delete_button_status(table_rows_selected):\n \"\"\"\n 根据选择的表格数据行数控制删除按钮状态回调\n \"\"\"\n outputs_list = dash.ctx.outputs_list\n if outputs_list:\n if table_rows_selected:\n if '1' in table_rows_selected:\n return True\n\n return False\n\n return True\n\n return dash.no_update\n\n\n@app.callback(\n Output('role-menu-perms', 'expandedKeys', allow_duplicate=True),\n Input('role-menu-perms-radio-fold-unfold', 'checked'),\n State('role-menu-store', 'data'),\n prevent_initial_call=True\n)\ndef fold_unfold_role_menu(fold_unfold, menu_info):\n \"\"\"\n 新增和编辑表单中展开/折叠checkbox回调\n \"\"\"\n if menu_info:\n default_expanded_keys = []\n for item in menu_info:\n if item.get('parent_id') == 0:\n default_expanded_keys.append(str(item.get('menu_id')))\n \n if fold_unfold:\n return default_expanded_keys\n else:\n return []\n \n return dash.no_update\n\n\n@app.callback(\n Output('role-menu-perms', 'checkedKeys', allow_duplicate=True),\n Input('role-menu-perms-radio-all-none', 'checked'),\n State('role-menu-store', 'data'),\n prevent_initial_call=True\n)\ndef all_none_role_menu_mode(all_none, menu_info):\n \"\"\"\n 新增和编辑表单中全选/全不选checkbox回调\n \"\"\"\n if menu_info:\n default_expanded_keys = []\n for item in menu_info:\n if item.get('parent_id') == 0:\n 
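The whole role page leans on Dash pattern-matching callback ids: component ids are dicts such as `{'type': 'role-operation-table', 'operation': ..., 'index': ...}`, `ALL` subscribes a callback to the whole family, and `ctx.triggered_id` reveals which member fired. A minimal runnable app showing the mechanism (assumes a recent Dash 2.x; ids simplified):

from dash import ALL, Dash, Input, Output, ctx, html

app = Dash(__name__)
app.layout = html.Div(
    [html.Button(f"row {i}", id={"type": "op", "index": str(i)}) for i in range(3)]
    + [html.Div(id="out")]
)

@app.callback(
    Output("out", "children"),
    Input({"type": "op", "index": ALL}, "n_clicks"),
    prevent_initial_call=True,
)
def which(_):
    # ctx.triggered_id is the dict id of the button that actually fired
    return f"clicked index {ctx.triggered_id['index']}"

if __name__ == "__main__":
    app.run(debug=True)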
default_expanded_keys.append(str(item.get('menu_id')))\n \n if all_none:\n return [str(item.get('menu_id')) for item in menu_info]\n else:\n return []\n \n return dash.no_update\n\n\n@app.callback(\n [Output('role-menu-perms', 'checkStrictly'),\n Output('role-menu-perms', 'checkedKeys', allow_duplicate=True)],\n Input('role-menu-perms-radio-parent-children', 'checked'),\n State('current-role-menu-store', 'data'),\n prevent_initial_call=True\n)\ndef change_role_menu_mode(parent_children, current_role_menu):\n \"\"\"\n 新增和编辑表单中父子联动checkbox回调\n \"\"\"\n checked_menu = []\n if parent_children:\n if current_role_menu:\n for item in current_role_menu:\n has_children = False\n for other_item in current_role_menu:\n if other_item['parent_id'] == item['menu_id']:\n has_children = True\n break\n if not has_children:\n checked_menu.append(str(item.get('menu_id')))\n return [False, checked_menu]\n else:\n if current_role_menu:\n checked_menu = [str(item.get('menu_id')) for item in current_role_menu if item] or []\n return [True, checked_menu]\n\n\n@app.callback(\n output=dict(\n modal_visible=Output('role-modal', 'visible', allow_duplicate=True),\n modal_title=Output('role-modal', 'title'),\n form_value=Output({'type': 'role-form-value', 'index': ALL, 'required': ALL}, 'value'),\n form_label_validate_status=Output({'type': 'role-form-label', 'index': ALL, 'required': True}, 'validateStatus', allow_duplicate=True),\n form_label_validate_info=Output({'type': 'role-form-label', 'index': ALL, 'required': True}, 'help', allow_duplicate=True),\n menu_perms_tree=Output('role-menu-perms', 'treeData'),\n menu_perms_expandedkeys=Output('role-menu-perms', 'expandedKeys', allow_duplicate=True),\n menu_perms_checkedkeys=Output('role-menu-perms', 'checkedKeys', allow_duplicate=True),\n menu_perms_halfcheckedkeys=Output('role-menu-perms', 'halfCheckedKeys', allow_duplicate=True),\n role_menu=Output('role-menu-store', 'data'),\n current_role_menu=Output('current-role-menu-store', 'data'),\n api_check_token_trigger=Output('api-check-token', 'data', allow_duplicate=True),\n edit_row_info=Output('role-edit-id-store', 'data'),\n modal_type=Output('role-operations-store-bk', 'data')\n ),\n inputs=dict(\n operation_click=Input({'type': 'role-operation-button', 'operation': ALL}, 'nClicks'),\n button_click=Input({'type': 'role-operation-table', 'operation': ALL, 'index': ALL}, 'nClicks')\n ),\n state=dict(\n selected_row_keys=State('role-list-table', 'selectedRowKeys')\n ),\n prevent_initial_call=True\n)\ndef add_edit_role_modal(operation_click, button_click, selected_row_keys):\n \"\"\"\n 显示新增或编辑角色弹窗回调\n \"\"\"\n trigger_id = dash.ctx.triggered_id\n if trigger_id.operation in ['add', 'edit']:\n # 获取所有输出表单项对应value的index\n form_value_list = [x['id']['index'] for x in dash.ctx.outputs_list[2]]\n # 获取所有输出表单项对应label的index\n form_label_list = [x['id']['index'] for x in dash.ctx.outputs_list[3]]\n menu_params = dict(menu_name='', type='role')\n tree_info = get_menu_tree_api(menu_params)\n if tree_info.get('code') == 200:\n tree_data = tree_info['data']\n if trigger_id.type == 'role-operation-button' and trigger_id.operation == 'add':\n role_info = dict(role_name=None, role_key=None, role_sort=None, status='0', remark=None)\n return dict(\n modal_visible=True,\n modal_title='新增角色',\n form_value=[role_info.get(k) for k in form_value_list],\n form_label_validate_status=[None] * len(form_label_list),\n form_label_validate_info=[None] * len(form_label_list),\n menu_perms_tree=tree_data[0],\n menu_perms_expandedkeys=[],\n 
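# --- Note on the leaf test used in change_role_menu_mode above: it rescans the
# whole menu list for every node (O(n^2)). A one-pass equivalent (hypothetical
# helper, same result) collects the parent ids first and keeps only the nodes
# that never appear as anyone's parent:
def find_leaf_menu_keys(menu_items):
    parent_ids = {item['parent_id'] for item in menu_items}
    return [str(item['menu_id']) for item in menu_items if item['menu_id'] not in parent_ids]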
menu_perms_checkedkeys=None,\n menu_perms_halfcheckedkeys=None,\n role_menu=tree_data[1],\n current_role_menu=None,\n api_check_token_trigger={'timestamp': time.time()},\n edit_row_info=None,\n modal_type={'type': 'add'}\n )\n elif trigger_id.operation == 'edit':\n if trigger_id.type == 'role-operation-button':\n role_id = int(','.join(selected_row_keys))\n else:\n role_id = int(trigger_id.index)\n role_info_res = get_role_detail_api(role_id=role_id)\n if role_info_res['code'] == 200:\n role_info = role_info_res['data']\n checked_menu = []\n checked_menu_all = []\n if role_info.get('menu')[0]:\n for item in role_info.get('menu'):\n checked_menu_all.append(str(item.get('menu_id')))\n has_children = False\n for other_item in role_info.get('menu'):\n if other_item['parent_id'] == item['menu_id']:\n has_children = True\n break\n if not has_children:\n checked_menu.append(str(item.get('menu_id')))\n half_checked_menu = [x for x in checked_menu_all if x not in checked_menu]\n return dict(\n modal_visible=True,\n modal_title='编辑角色',\n form_value=[role_info.get('role').get(k) for k in form_value_list],\n form_label_validate_status=[None] * len(form_label_list),\n form_label_validate_info=[None] * len(form_label_list),\n menu_perms_tree=tree_data[0],\n menu_perms_expandedkeys=[],\n menu_perms_checkedkeys=checked_menu,\n menu_perms_halfcheckedkeys=half_checked_menu,\n role_menu=tree_data[1],\n current_role_menu=role_info.get('menu'),\n api_check_token_trigger={'timestamp': time.time()},\n edit_row_info=role_info.get('role') if role_info else None,\n modal_type={'type': 'edit'}\n )\n\n return dict(\n modal_visible=dash.no_update,\n modal_title=dash.no_update,\n form_value=[dash.no_update] * len(form_value_list),\n form_label_validate_status=[dash.no_update] * len(form_value_list),\n form_label_validate_info=[dash.no_update] * len(form_value_list),\n menu_perms_tree=dash.no_update,\n menu_perms_expandedkeys=dash.no_update,\n menu_perms_checkedkeys=dash.no_update,\n menu_perms_halfcheckedkeys=dash.no_update,\n role_menu=dash.no_update,\n current_role_menu=dash.no_update,\n api_check_token_trigger={'timestamp': time.time()},\n edit_row_info=None,\n modal_type=None\n )\n\n raise PreventUpdate\n\n\n@app.callback(\n output=dict(\n form_label_validate_status=Output({'type': 'role-form-label', 'index': ALL, 'required': True}, 'validateStatus',\n allow_duplicate=True),\n form_label_validate_info=Output({'type': 'role-form-label', 'index': ALL, 'required': True}, 'help',\n allow_duplicate=True),\n modal_visible=Output('role-modal', 'visible'),\n operations=Output('role-operations-store', 'data', allow_duplicate=True),\n api_check_token_trigger=Output('api-check-token', 'data', allow_duplicate=True),\n global_message_container=Output('global-message-container', 'children', allow_duplicate=True)\n ),\n inputs=dict(\n confirm_trigger=Input('role-modal', 'okCounts')\n ),\n state=dict(\n modal_type=State('role-operations-store-bk', 'data'),\n edit_row_info=State('role-edit-id-store', 'data'),\n form_value=State({'type': 'role-form-value', 'index': ALL, 'required': ALL}, 'value'),\n form_label=State({'type': 'role-form-value', 'index': ALL, 'required': True}, 'placeholder'),\n menu_checked_keys=State('role-menu-perms', 'checkedKeys'),\n menu_half_checked_keys=State('role-menu-perms', 'halfCheckedKeys'),\n parent_checked=State('role-menu-perms-radio-parent-children', 'checked')\n ),\n prevent_initial_call=True\n)\ndef role_confirm(confirm_trigger, modal_type, edit_row_info, form_value, form_label, menu_checked_keys, 
menu_half_checked_keys, parent_checked):\n \"\"\"\n 新增或编辑角色弹窗确认回调,实现新增或编辑操作\n \"\"\"\n if confirm_trigger:\n # 获取所有输出表单项对应label的index\n form_label_output_list = [x['id']['index'] for x in dash.ctx.outputs_list[0]]\n # 获取所有输入表单项对应的value及label\n form_value_state = {x['id']['index']: x.get('value') for x in dash.ctx.states_list[2]}\n form_label_state = {x['id']['index']: x.get('value') for x in dash.ctx.states_list[3]}\n if all([form_value_state.get(k) for k in form_label_output_list]):\n menu_half_checked_keys = menu_half_checked_keys if menu_half_checked_keys else []\n menu_checked_keys = menu_checked_keys if menu_checked_keys else []\n if parent_checked:\n menu_perms = menu_half_checked_keys + menu_checked_keys\n else:\n menu_perms = menu_checked_keys\n params_add = form_value_state\n params_add['menu_id'] = ','.join(menu_perms) if menu_perms else None\n params_edit = params_add.copy()\n params_edit['role_id'] = edit_row_info.get('role_id') if edit_row_info else None\n api_res = {}\n modal_type = modal_type.get('type')\n if modal_type == 'add':\n api_res = add_role_api(params_add)\n if modal_type == 'edit':\n api_res = edit_role_api(params_edit)\n if api_res.get('code') == 200:\n if modal_type == 'add':\n return dict(\n form_label_validate_status=[None] * len(form_label_output_list),\n form_label_validate_info=[None] * len(form_label_output_list),\n modal_visible=False,\n operations={'type': 'add'},\n api_check_token_trigger={'timestamp': time.time()},\n global_message_container=fuc.FefferyFancyMessage('新增成功', type='success')\n )\n if modal_type == 'edit':\n return dict(\n form_label_validate_status=[None] * len(form_label_output_list),\n form_label_validate_info=[None] * len(form_label_output_list),\n modal_visible=False,\n operations={'type': 'edit'},\n api_check_token_trigger={'timestamp': time.time()},\n global_message_container=fuc.FefferyFancyMessage('编辑成功', type='success')\n )\n\n return dict(\n form_label_validate_status=[None] * len(form_label_output_list),\n form_label_validate_info=[None] * len(form_label_output_list),\n modal_visible=dash.no_update,\n operations=dash.no_update,\n api_check_token_trigger={'timestamp': time.time()},\n global_message_container=fuc.FefferyFancyMessage('处理失败', type='error')\n )\n\n return dict(\n form_label_validate_status=[None if form_value_state.get(k) else 'error' for k in form_label_output_list],\n form_label_validate_info=[None if form_value_state.get(k) else form_label_state.get(k) for k in form_label_output_list],\n modal_visible=dash.no_update,\n operations=dash.no_update,\n api_check_token_trigger=dash.no_update,\n global_message_container=fuc.FefferyFancyMessage('处理失败', type='error')\n )\n\n raise PreventUpdate\n\n\n@app.callback(\n [Output('role-operations-store', 'data', allow_duplicate=True),\n Output('api-check-token', 'data', allow_duplicate=True),\n Output('global-message-container', 'children', allow_duplicate=True)],\n [Input('role-list-table', 'recentlySwitchDataIndex'),\n Input('role-list-table', 'recentlySwitchStatus'),\n Input('role-list-table', 'recentlySwitchRow')],\n prevent_initial_call=True\n)\ndef table_switch_role_status(recently_switch_data_index, recently_switch_status, recently_switch_row):\n \"\"\"\n 表格内切换角色状态回调\n \"\"\"\n if recently_switch_data_index:\n if recently_switch_status:\n params = dict(role_id=int(recently_switch_row['key']), status='0', type='status')\n else:\n params = dict(role_id=int(recently_switch_row['key']), status='1', type='status')\n edit_button_result = edit_role_api(params)\n if 
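# --- The required-field feedback in role_confirm above reduces to this pattern
# (sketch with illustrative names): a missing value gets an 'error' status and
# reuses the field's placeholder text as the inline help message.
def required_field_feedback(values, placeholders, required_keys):
    status = [None if values.get(k) else 'error' for k in required_keys]
    help_text = [None if values.get(k) else placeholders.get(k) for k in required_keys]
    return status, help_text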
edit_button_result['code'] == 200:\n\n return [\n {'type': 'switch-status'},\n {'timestamp': time.time()},\n fuc.FefferyFancyMessage('修改成功', type='success')\n ]\n\n return [\n {'type': 'switch-status'},\n {'timestamp': time.time()},\n fuc.FefferyFancyMessage('修改失败', type='error')\n ]\n\n raise PreventUpdate\n\n\n@app.callback(\n [Output('role-delete-text', 'children'),\n Output('role-delete-confirm-modal', 'visible'),\n Output('role-delete-ids-store', 'data')],\n [Input({'type': 'role-operation-button', 'operation': ALL}, 'nClicks'),\n Input({'type': 'role-operation-table', 'operation': ALL, 'index': ALL}, 'nClicks')],\n State('role-list-table', 'selectedRowKeys'),\n prevent_initial_call=True\n)\ndef role_delete_modal(operation_click, button_click, selected_row_keys):\n \"\"\"\n 显示删除角色二次确认弹窗回调\n \"\"\"\n trigger_id = dash.ctx.triggered_id\n if trigger_id.operation == 'delete':\n\n if trigger_id.type == 'role-operation-button':\n role_ids = ','.join(selected_row_keys)\n else:\n if trigger_id.type == 'role-operation-table':\n role_ids = trigger_id.index\n else:\n raise PreventUpdate\n\n return [\n f'是否确认删除角色编号为{role_ids}的角色?',\n True,\n {'role_ids': role_ids}\n ]\n\n raise PreventUpdate\n\n\n@app.callback(\n [Output('role-operations-store', 'data', allow_duplicate=True),\n Output('api-check-token', 'data', allow_duplicate=True),\n Output('global-message-container', 'children', allow_duplicate=True)],\n Input('role-delete-confirm-modal', 'okCounts'),\n State('role-delete-ids-store', 'data'),\n prevent_initial_call=True\n)\ndef role_delete_confirm(delete_confirm, role_ids_data):\n \"\"\"\n 删除角色弹窗确认回调,实现删除操作\n \"\"\"\n if delete_confirm:\n\n params = role_ids_data\n delete_button_info = delete_role_api(params)\n if delete_button_info['code'] == 200:\n return [\n {'type': 'delete'},\n {'timestamp': time.time()},\n fuc.FefferyFancyMessage('删除成功', type='success')\n ]\n\n return [\n dash.no_update,\n {'timestamp': time.time()},\n fuc.FefferyFancyMessage('删除失败', type='error')\n ]\n\n raise PreventUpdate\n\n\n@app.callback(\n [Output('role_to_allocated_user-modal', 'visible'),\n Output({'type': 'allocate_user-search', 'index': 'allocated'}, 'nClicks'),\n Output('allocate_user-role_id-container', 'data')],\n Input({'type': 'role-operation-table', 'operation': ALL, 'index': ALL}, 'nClicks'),\n State({'type': 'allocate_user-search', 'index': 'allocated'}, 'nClicks'),\n prevent_initial_call=True\n)\ndef role_to_allocated_user_modal(allocated_click, allocated_user_search_nclick):\n \"\"\"\n 显示角色分配用户弹窗回调\n \"\"\"\n trigger_id = dash.ctx.triggered_id\n if trigger_id.operation == 'allocation':\n return [\n True,\n allocated_user_search_nclick + 1 if allocated_user_search_nclick else 1,\n trigger_id.index\n ]\n\n raise PreventUpdate\n\n\n@app.callback(\n [Output('role-export-container', 'data', allow_duplicate=True),\n Output('role-export-complete-judge-container', 'data'),\n Output('api-check-token', 'data', allow_duplicate=True),\n Output('global-message-container', 'children', allow_duplicate=True)],\n Input('role-export', 'nClicks'),\n prevent_initial_call=True\n)\ndef export_role_list(export_click):\n \"\"\"\n 导出角色信息回调\n \"\"\"\n if export_click:\n export_role_res = export_role_list_api({})\n if export_role_res.status_code == 200:\n export_role = export_role_res.content\n\n return [\n dcc.send_bytes(export_role, f'角色信息_{time.strftime(\"%Y%m%d%H%M%S\", time.localtime())}.xlsx'),\n {'timestamp': time.time()},\n {'timestamp': time.time()},\n fuc.FefferyFancyMessage('导出成功', type='success')\n ]\n\n return [\n 
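# --- How the dict-pattern ids above resolve: dash.ctx.triggered_id exposes the
# components of the id that fired, so a click on
# {'type': 'role-operation-table', 'operation': 'delete', 'index': '5'}
# yields trigger_id.operation == 'delete' and trigger_id.index == '5'.
# This is what lets one callback serve the delete/allocation buttons of every
# table row at once instead of registering a callback per row.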
dash.no_update,\n dash.no_update,\n {'timestamp': time.time()},\n fuc.FefferyFancyMessage('导出失败', type='error')\n ]\n\n raise PreventUpdate\n\n\n@app.callback(\n Output('role-export-container', 'data', allow_duplicate=True),\n Input('role-export-complete-judge-container', 'data'),\n prevent_initial_call=True\n)\ndef reset_role_export_status(data):\n \"\"\"\n 导出完成后重置下载组件数据回调,防止重复下载文件\n \"\"\"\n time.sleep(0.5)\n if data:\n\n return None\n\n raise PreventUpdate\n","repo_name":"insistence/Dash-FastAPI-Admin","sub_path":"dash-fastapi-frontend/callbacks/system_c/role_c/role_c.py","file_name":"role_c.py","file_ext":"py","file_size_in_byte":30335,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"75"} +{"seq_id":"41737000836","text":"from PyQt5.QtCore import pyqtSignal\nfrom PyQt5.QtWidgets import (QWidget, QPushButton, QDesktopWidget, QTreeWidget,\n QGridLayout, QLabel, QHeaderView, QLineEdit)\n\n\nclass UI(QWidget):\n \"\"\"\n Main window class. Include QPushButtons and QTreeViewWidgets.\n \"\"\"\n def __init__(self):\n super().__init__()\n\n self.remote_tree = None\n self.local_tree = None\n\n self.grid = None\n\n self.initUI()\n\n def initUI(self):\n\n self.resize(1000, 500)\n self.center()\n self.setWindowTitle('Database redactor')\n\n self.grid = QGridLayout()\n self.grid.setSpacing(10)\n\n self.init_buttons()\n self.init_tree_views()\n\n self.setLayout(self.grid)\n\n self.show()\n\n def init_buttons(self):\n # DOWNLOAD\n self.download_btn = QPushButton('<<<', self)\n self.grid.addWidget(self.download_btn, 1, 6)\n\n # PLUS\n self.plus_btn = QPushButton('+', self)\n self.grid.addWidget(self.plus_btn, 10, 1)\n\n # MINUS\n self.minus_btn = QPushButton('-', self)\n self.grid.addWidget(self.minus_btn, 10, 2)\n\n # RENAME(a)\n self.rename_btn = QPushButton('a', self)\n self.grid.addWidget(self.rename_btn, 10, 3)\n\n # APPLY\n self.apply_btn = QPushButton('Apply', self)\n self.grid.addWidget(self.apply_btn, 10, 4)\n\n # RESET\n self.reset_btn = QPushButton('Reset', self)\n self.grid.addWidget(self.reset_btn, 10, 5)\n\n def init_tree_views(self):\n\n self.remote_tree = QTreeWidget(self)\n self.remote_tree.clear()\n self.remote_tree.setHeaderLabels(['Remote database view', 'Id', 'Value'])\n self.remote_tree.setColumnCount(3)\n self.remote_tree.setColumnWidth(0, 180)\n self.remote_tree.setColumnWidth(1, QHeaderView.ResizeToContents)\n self.remote_tree.setColumnWidth(2, QHeaderView.ResizeToContents)\n self.grid.addWidget(self.remote_tree, 0, 7, 5, 5)\n\n self.local_tree = QTreeWidget(self)\n self.local_tree.clear()\n self.local_tree.setHeaderLabels(['Local cache view', 'Id', 'Value'])\n self.local_tree.setColumnCount(3)\n self.local_tree.setColumnWidth(0, 180)\n self.local_tree.setColumnWidth(1, QHeaderView.ResizeToContents)\n self.local_tree.setColumnWidth(2, QHeaderView.ResizeToContents)\n self.grid.addWidget(self.local_tree, 0, 1, 5, 5)\n\n def center(self):\n qr = self.frameGeometry()\n cp = QDesktopWidget().availableGeometry().center()\n qr.moveCenter(cp)\n self.move(qr.topLeft())\n\n\nclass NameValDialog(QWidget):\n \"\"\"\n Dialog window class. 
This dialog can change name and value.\n    It shows the default values in the lineEdits when it starts.\n    It has two lineEdit widgets and two buttons - OK and CANCEL.\n    \"\"\"\n    resultOk = pyqtSignal(str, str)\n\n    def __init__(self, default_name='', default_val=''):\n        super().__init__()\n        self.default_val = default_val\n        self.default_name = default_name\n        self.grid = None\n        self.initUI()\n        self.cancel_btn.clicked.connect(self.close)\n        self.ok_btn.clicked.connect(self.return_msg)\n\n    def initUI(self):\n        self.resize(200, 100)\n        self.center()\n        self.setWindowTitle('Change name/value')\n\n        self.grid = QGridLayout()\n        self.grid.setSpacing(5)\n\n        self.init_buttons()\n        self.init_line_edit()\n\n        self.setLayout(self.grid)\n\n        self.show()\n\n    def init_buttons(self):\n        self.ok_btn = QPushButton('OK', self)\n        self.grid.addWidget(self.ok_btn, 4, 0)\n\n        self.cancel_btn = QPushButton('Cancel', self)\n        self.grid.addWidget(self.cancel_btn, 4, 1)\n\n    def init_line_edit(self):\n        self.qle1 = QLineEdit(self.default_name, self)\n        self.grid.addWidget(self.qle1, 1, 0, 1, 2)\n        self.grid.addWidget(QLabel('Enter name:'), 0, 0)\n\n        self.qle2 = QLineEdit(self.default_val, self)\n        self.grid.addWidget(self.qle2, 3, 0, 1, 2)\n        self.grid.addWidget(QLabel('Enter value:'), 2, 0)\n\n    def center(self):\n        qr = self.frameGeometry()\n        cp = QDesktopWidget().availableGeometry().center()\n        qr.moveCenter(cp)\n        self.move(qr.topLeft())\n\n    def return_msg(self):\n        name = self.qle1.text()\n        value = self.qle2.text()\n\n        self.resultOk.emit(name, value)\n        self.close()\n\n","repo_name":"ChachkovAnt/db_terminal","sub_path":"src/app_UI.py","file_name":"app_UI.py","file_ext":"py","file_size_in_byte":4425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"2634980606","text":"import logging\nfrom typing import Union\n\nimport modal\nfrom numpy import dot, ndarray\nfrom numpy.linalg import norm\n\nfrom account.models import Profile\nfrom activity.models import Post\n\nlogger = logging.getLogger(__name__)\n\n\ndef save_posts_vectors(profile: Profile):\n    POSTS_LIMIT = 100\n    posts = (\n        Post.objects.filter(acct__account__profile=profile)\n        .only(\"text_content\")\n        .order_by(\"-created_at\")[0:POSTS_LIMIT]\n    )\n\n    if not posts:\n        return\n\n    post_texts = [p.text_content for p in posts]\n\n    logger.debug(f\"Generate vectors for {profile}\")\n\n    try:\n        vectors = get_text_embeddings(post_texts)\n\n        logger.debug(f\"Save post vectors for {profile}\")\n        profile.posts_vectors = vectors\n        profile.save()\n    except Exception as e:\n        logger.exception(e)\n\n\ndef get_text_embeddings(text: Union[list[str], str]) -> ndarray:\n    modal_get_text_embeddings_fn = modal.Function.lookup(\n        \"text-embeddings\", \"Roberta.get_text_embeddings\"\n    )\n\n    vectors = modal_get_text_embeddings_fn.call(text)\n\n    if vectors is None:\n        raise Exception(\"Invalid vectors\")\n\n    return vectors\n\n\ndef cosine_similarity(\n    vectors_one: ndarray, vectors_two: ndarray, eps: float = 1e-5\n) -> float:\n    # cosine similarity between two vectors\n\n    return dot(vectors_one, vectors_two) / (norm(vectors_one) * norm(vectors_two) + eps)\n\n\ndef get_similarity_to_posts_vectors(profile: Profile, text: str):\n    vectors = get_text_embeddings(text)\n\n    return cosine_similarity(profile.posts_vectors, 
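# --- Sanity check for the corrected cosine_similarity above (numpy-only
# sketch): parallel vectors should score ~1.0 and orthogonal vectors ~0.0;
# eps only guards against a zero denominator. Note the original divided by
# norm(vectors_two) twice, which this check would have caught.
import numpy as np
assert abs(cosine_similarity(np.array([1.0, 0.0]), np.array([2.0, 0.0])) - 1.0) < 1e-3
assert abs(cosine_similarity(np.array([1.0, 0.0]), np.array([0.0, 1.0]))) < 1e-3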
vectors)\n","repo_name":"adamghill/fediview","sub_path":"activity/text_embeddings_retriever.py","file_name":"text_embeddings_retriever.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"75"} +{"seq_id":"26393041850","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 22 10:37:38 2019\n\n@author: kmpoo\n\"\"\"\n\nimport csv\nimport sys\nif \"C:\\\\Users\\\\kmpoo\\\\Dropbox\\\\HEC\\\\Python\\\\CustomLib\\\\PooLIB\" not in sys.path:\n sys.path.append('C:\\\\Users\\kmpoo\\Dropbox\\HEC\\Python\\CustomLib\\PooLIB')\n print(sys.path)\nfrom poo_ghmodules import getGitHubapi\nfrom poo_ghmodules import ghpaginate\nfrom poo_ghmodules import ghparse_row\norg_list = ['apple']\n\nPW_CSV = 'C:/Users/kmpoo/Dropbox/HEC/Python/PW/PW_GitHub.csv'\nLOG_CSV = 'C:\\\\Users\\kmpoo\\Dropbox\\HEC\\Project 6 - MS Acquire Github Allies and Competitors\\Data\\AppleLog_OrgRepo_20190518.csv'\n\ndef getcommitinfo(repoid,write_handle):\n commit_url = \"https://api.github.com/repositories/\"+str(repoid)+\"/commits?per_page=100\"\n while commit_url:\n commit_req = getGitHubapi(commit_url,PW_CSV,LOG_CSV)\n if commit_req:\n commit_json = commit_req.json()\n for commit in commit_json:\n commit_row = ghparse_row(commit,\"sha\", \"commit*author*name\",\"commit*author*email\",\"commit*author*date\", \"commit*committer*name\",\"commit*committer*email\",\"commit*committer*date\",\"commit*message\",\"commit*comment_count\",\"commit*verification\",\"url\",\"parents\", prespace = 1)\n write_handle.writerow(commit_row) \n commit_url = ghpaginate(commit_req)\n else:\n print(\"Error getting commit info \",commit_url)\n with open(LOG_CSV, 'at', encoding = 'utf-8', newline =\"\") as loglist:\n log_handle = csv.writer(loglist)\n log_handle.writerow([\"Error getting commit\",commit_url,\"UNKNOWN\"])\n return\n\ndef getrepoinfo(NEWREPO_CSV):\n \"\"\"Update the repo inforation with, PUSHED,STARS, SUBSCRIBERS, FORKS, SIZE, LICENCE \"\"\"\n with open(NEWREPO_CSV, 'wt', encoding = 'utf-8', newline='') as writelist:\n write_handle = csv.writer(writelist)\n for org in org_list:\n repo_url = 'https://api.github.com/orgs/'+org+'/repos?per_page=100&page=1' \n while repo_url:\n repoid_req = getGitHubapi(repo_url,PW_CSV,LOG_CSV)\n# print(repoid_req.headers['link'])\n if repoid_req:\n repo_json = repoid_req.json()\n for repo in repo_json:\n repo_row = ghparse_row(repo,\"id\", \"full_name\",\"description\",\"fork\",\"url\",\"created_at\",\"updated_at\",\"pushed_at\",\"homepage\",\"size\",\"stargazers_count\",\"watchers_count\",\"language\",\"has_issues\",\"has_projects\",\"has_downloads\",\"has_wiki\",\"has_pages\",\"forks_count\",\"mirror_url\",\"archived\",\"disabled\",\"open_issues_count\",\"license*name\",\"forks\",\"open_issues\",\"watchers\",\"default_branch\",\"permissions\")\n write_handle.writerow(repo_row)\n # get commits\n #getcommitinfo(repo['id'],write_handle)\n #end get commits\n repo_url = ghpaginate(repoid_req)\n else: break\n print(repo_url)\n# write_handle.writerow(row)\n \n \n\ndef main():\n \n # For WINDOWS \n NEWREPO_CSV = 'C:\\\\Users\\kmpoo\\Dropbox\\HEC\\Project 6 - MS Acquire Github Allies and Competitors\\Data\\AppleRepo_20190518.csv'\n getrepoinfo(NEWREPO_CSV) \n \nif __name__ == '__main__':\n main()\n ","repo_name":"km-Poonacha/GitHubDataAnalysis","sub_path":"Xitong 
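# --- Standalone sketch of the pagination loop above: ghpaginate follows
# GitHub's Link header from page to page. With the plain `requests` library the
# same walk looks like this (no auth, logging, or rate-limit handling, unlike
# getGitHubapi):
import requests

def iter_github_pages(url):
    while url:
        resp = requests.get(url)
        resp.raise_for_status()
        yield resp.json()
        url = resp.links.get('next', {}).get('url')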
Prog/OrgData.py","file_name":"OrgData.py","file_ext":"py","file_size_in_byte":3321,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"12834274469","text":"import numpy as np\nfrom numpy.typing import DTypeLike\nfrom ot import emd2\nfrom scipy.sparse.csgraph import floyd_warshall\nfrom sklearn.preprocessing import normalize\nfrom typing import Callable\n\n\ndef MakeDiscreteRicciFlowIterator(\n N: int,\n alpha: float = 0.5,\n exp_power: float = 0.0,\n eps: float = 1.0,\n ElementType: DTypeLike = np.float64,\n inplace: bool = True\n) -> Callable:\n MatrixType = np.ndarray[(N, N), ElementType]\n alpha = ElementType(alpha)\n\n # storages for intermediate calculation results\n graph_shortest_distances: MatrixType = np.zeros((N, N), dtype=ElementType)\n neighbor_probability_distribution: MatrixType = np.zeros((N, N), dtype=ElementType)\n olivier_ricci_curvatures: MatrixType = np.zeros((N, N), dtype=ElementType)\n non_neighbor_mask: np.ndarray[(N, N), np.bool8] = np.zeros((N, N), dtype=np.bool8)\n need_to_calculate_mask = True\n n_edges = 0\n\n def CalcShortestDistances(adj_matrix: MatrixType) -> MatrixType:\n nonlocal need_to_calculate_mask, n_edges\n np.copyto(\n dst=graph_shortest_distances,\n src=adj_matrix\n )\n if need_to_calculate_mask:\n np.equal(adj_matrix, ElementType(0), out=non_neighbor_mask)\n need_to_calculate_mask = False\n n_edges = N * N - np.sum(non_neighbor_mask)\n # print(\"n edges:\", n_edges)\n\n graph_shortest_distances[non_neighbor_mask] = ElementType(np.Inf)\n floyd_warshall(\n graph_shortest_distances,\n directed=False,\n overwrite=True\n )\n return graph_shortest_distances\n\n def CalcEdgeOlivierRicciCurvatures(shortest_distances: MatrixType) -> MatrixType:\n probability_distribution = neighbor_probability_distribution\n np.copyto(\n dst=probability_distribution,\n src=shortest_distances\n )\n probability_distribution[non_neighbor_mask] = ElementType(0)\n # renorm\n probability_distribution *= n_edges / np.sum(probability_distribution)\n np.power(\n shortest_distances,\n exp_power,\n out=probability_distribution\n )\n probability_distribution *= -1\n np.exp(\n probability_distribution,\n out=probability_distribution\n )\n probability_distribution[non_neighbor_mask] = ElementType(0)\n normalize(probability_distribution, norm=\"l1\", axis=1, copy=False)\n probability_distribution *= ElementType(1) - alpha\n np.fill_diagonal(probability_distribution, alpha)\n normalize(probability_distribution, norm=\"l1\", axis=1, copy=False)\n\n for u in range(N):\n for v in range(u + 1, N):\n if not non_neighbor_mask[u, v]: # u and v are neighbors\n wasserstein_distance_uv = emd2(\n probability_distribution[u],\n probability_distribution[v],\n shortest_distances\n )\n olivier_ricci_curvatures[u, v] = olivier_ricci_curvatures[v, u] = \\\n ElementType(1) - wasserstein_distance_uv / shortest_distances[u, v]\n # print(\"Wasserstein distance between {} and {}: {}\"\n # .format(u, v, wasserstein_distance_uv)\n # )\n # if u == 0 and v < 6:\n # print(\"Olivier-Ricci curvature between {} and {}: {}\"\n # .format(u, v, olivier_ricci_curvatures[u, v])\n # )\n\n return olivier_ricci_curvatures\n\n def DoDiscreteRicciFlowIteration(adj_matrix: MatrixType) -> MatrixType:\n shortest_distances: MatrixType = CalcShortestDistances(adj_matrix)\n olivier_ricci_curvatures = CalcEdgeOlivierRicciCurvatures(shortest_distances)\n olivier_ricci_curvatures *= eps\n one_minus_edge_curvatures = np.subtract(\n ElementType(1),\n olivier_ricci_curvatures,\n 
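# --- CalcShortestDistances above in isolation (sketch): zero entries in the
# adjacency matrix mean "no edge", so they are masked to infinity before the
# all-pairs shortest-path pass, exactly as the closure does.
import numpy as np
from scipy.sparse.csgraph import floyd_warshall

adj = np.array([[0.0, 1.0, 0.0],
                [1.0, 0.0, 2.0],
                [0.0, 2.0, 0.0]])
dist = adj.copy()
dist[adj == 0.0] = np.inf
dist = floyd_warshall(dist, directed=False)  # dist[0, 2] == 3.0 via node 1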
out=olivier_ricci_curvatures\n )\n one_minus_edge_curvatures[non_neighbor_mask] = ElementType(0)\n new_adj_matrix = np.multiply(\n shortest_distances,\n one_minus_edge_curvatures,\n out=adj_matrix if inplace else None\n )\n # renorm\n new_adj_matrix *= n_edges / np.sum(new_adj_matrix)\n return new_adj_matrix\n\n DoDiscreteRicciFlowIteration.non_neighbor_mask = non_neighbor_mask\n DoDiscreteRicciFlowIteration.olivier_ricci_curvatures = olivier_ricci_curvatures\n\n return DoDiscreteRicciFlowIteration","repo_name":"NechkaP/ricci-flow","sub_path":"discrete_ricci_flow.py","file_name":"discrete_ricci_flow.py","file_ext":"py","file_size_in_byte":4625,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"27471253032","text":"import random\nimport numbers\nfrom itertools import repeat\nfrom collections.abc import Sequence\nfrom geom.mesh.op.cpu.base import vertex_normals, vertex_moments, \\\n padded_part_by_mask, flip_vertex_mask, normalize_to_l2_ball, estimate_vertex_normals\nfrom geom.mesh.op.cpu.descriptor import heat_kernel_signature\nfrom geom.matrix.cpu import random_sign_vector\nimport numpy as np\nimport math\n\n# ----------------------------------------------------------------------------------------------------------------------\n# Transforms- Abstract\n# ----------------------------------------------------------------------------------------------------------------------\nfrom geom.mesh.vis.base import plot_mesh_montage\n\n\nclass Transform:\n def name(self):\n return self.__class__.__name__\n\n def __repr__(self):\n return self.__class__.__name__ + '()'\n\n\nclass PostCompilerTransform(Transform):\n pass\n\n\nclass PreCompilerTransform(Transform):\n pass\n\n\nclass SystemUtilTransform(Transform):\n pass\n\n\nclass Compose(object):\n def __init__(self, transforms):\n self.transforms = transforms\n\n def __call__(self, x):\n for t in self.transforms:\n x = t(x)\n return x\n\n def append(self, new_transform):\n self.transforms.append(new_transform)\n\n def insert(self, index, new_transform):\n self.transforms.insert(index, new_transform)\n\n def __repr__(self):\n string_t = [str(t) for t in self.transforms]\n return '(' + \",\".join(string_t) + ')'\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# Special Transforms\n# ----------------------------------------------------------------------------------------------------------------------\n\nclass AlignChannels(SystemUtilTransform):\n def __init__(self, keys, n_channels):\n self.keys = keys\n self.n_channels = n_channels\n\n def __call__(self, x):\n for k in self.keys:\n x[k] = align_channels(x[k], x[f'{k}_f'], self.n_channels)\n return x\n\n\nclass PartCompiler(SystemUtilTransform):\n def __init__(self, keys):\n self.keys = keys\n\n def __call__(self, x):\n # Done last, since we might transform the mask\n for (part_key, mask_key, full_key) in self.keys:\n x[part_key] = padded_part_by_mask(x[mask_key], x[full_key])\n return x\n\n\nclass RemoveFaces(SystemUtilTransform):\n def __call__(self, x):\n x.pop('gt_f', None)\n x.pop('tp_f', None)\n return x\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# Data Normalization Transforms\n# ----------------------------------------------------------------------------------------------------------------------\n\nclass AppendHKS(PreCompilerTransform):\n def __init__(self, keys=('gt', 'tp'), 
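# --- Usage sketch for the Compose container defined above (the sample dict and
# the particular transforms are illustrative; the classes referenced here are
# defined later in this module and follow its 'gt'/'tp' key convention):
# pipeline = Compose([Center(), UniformVertexScale(2.0), RandomMaskFlip(prob=0.5)])
# sample = pipeline(sample)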
t=(5e-3, 1e1, 10), k=200):\n self._keys = keys\n self._t = t\n self._k = k\n\n def __call__(self, x):\n for k in self._keys:\n x[f'{k}_hks'] = heat_kernel_signature(x[k][:, 0:3], x[f'{k}_f'], t=self._t, k=self._k)\n return x\n\n def __repr__(self):\n return self.__class__.__name__ + f'(keys={self._keys},t={self._t},k={self._k})'\n\n\nclass NormalizeScale(PreCompilerTransform):\n r\"\"\"Centers and normalizes node positions to the interval :math:`(-1, 1)`.\n \"\"\"\n\n def __init__(self, slicer=slice(0, 3), keys=('gt', 'tp')):\n self._slicer = slicer\n self._keys = keys\n self.center = Center(slicer, keys)\n\n def __call__(self, x):\n x = self.center(x)\n\n for k in self._keys:\n x[k] *= (1 / x[k].abs().max()) * 0.999999\n\n return x\n\n def __repr__(self):\n return self.__class__.__name__ + f'(channels={self._slicer},keys={self._keys})'\n\n\nclass Center(PreCompilerTransform):\n def __init__(self, slicer=slice(0, 3), keys=('gt', 'tp')):\n self._slicer = slicer\n self._keys = keys\n\n def __call__(self, x):\n for k in self._keys:\n center_offset = x[k][:, self._slicer].mean(axis=0, keepdims=True)\n x[k][:, self._slicer] -= center_offset\n if k == 'gt' and 'gt_world_joints' in x.keys():\n joint_trans = []\n for trans in x['gt_world_joints']:\n trans[0:3, 3] -= center_offset.squeeze()\n joint_trans += [trans]\n x['gt_world_joints'] = np.array(joint_trans)\n return x\n\n def __repr__(self):\n return self.__class__.__name__ + f'(channels={self._slicer},keys={self._keys})'\n\n\nclass L2BallNormalize(PreCompilerTransform):\n def __init__(self, keys=('gt', 'tp')):\n self._keys = keys\n\n def __call__(self, x):\n for k in self._keys:\n x[k][:, 0:3] = normalize_to_l2_ball(x[k][:, 0:3])\n return x\n\n def __repr__(self):\n return self.__class__.__name__ + f'(keys={self._keys})'\n\n\nclass UniformVertexScale(PreCompilerTransform):\n def __init__(self, scale, keys=('gt', 'tp')):\n self._scale = scale\n self._keys = keys\n\n def __call__(self, x):\n for k in self._keys:\n x[k][:, 0:3] = x[k][:, 0:3] * self._scale\n return x\n\n def __repr__(self):\n return self.__class__.__name__ + f'(scale={self._scale},keys={self._keys})'\n\n\nclass AlignPose(PreCompilerTransform):\n pass\n # TODO - using PCA\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# Data Augmentation Transforms - Post Compiler\n# ----------------------------------------------------------------------------------------------------------------------\nclass RandomizeNormalDirections(PostCompilerTransform):\n def __init__(self, keys=('gt_part',)):\n self._keys = keys\n\n def __call__(self, x):\n for k in self._keys:\n assert x[k].shape[1] >= 6, \"Could not find normal field\"\n rs = random_sign_vector(x[k].shape[0])[:, np.newaxis]\n x[k][:, 3:6] *= rs\n return x\n\n def __repr__(self):\n return self.__class__.__name__ + f'(keys={self._keys})'\n\n\nclass ReplaceNormalsWithApproximation(PostCompilerTransform):\n def __init__(self, keys=('gt_part',), k=7, smoothing_iter=0): # Params as taken from the grid search for scans\n self._keys = keys\n self._smoothing_iter = smoothing_iter\n self._k = k\n\n def __call__(self, x):\n for k in self._keys:\n dims = x[k].shape[1]\n v = x[k][0:3]\n if dims > 6:\n #Oshri Patch to estimate normalson up-padded vertices\n #x[part_key] = padded_part_by_mask(x[mask_key], x[full_key])\n x[k] = np.concatenate([v, estimate_vertex_normals(v), x[k][6:]], axis=1)\n else:\n x[k] = np.concatenate([v, estimate_vertex_normals(v)], axis=1)\n return x\n\n 
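# --- Note on ReplaceNormalsWithApproximation.__call__ above: `x[k][0:3]` and
# `x[k][6:]` slice the first/last *rows*, not columns; the intended column
# slices are `x[k][:, 0:3]` and `x[k][:, 6:]`. Corrected sketch (standalone
# helper, reusing this module's estimate_vertex_normals import):
def replace_normals(arr):
    v = arr[:, 0:3]
    extras = [arr[:, 6:]] if arr.shape[1] > 6 else []
    return np.concatenate([v, estimate_vertex_normals(v), *extras], axis=1)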
def __repr__(self):\n return self.__class__.__name__ + f'(keys={self._keys},k={self._k},smoothing_iter={self._smoothing_iter})'\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# Data Augmentation Transforms\n# ----------------------------------------------------------------------------------------------------------------------\n\nclass RandomMaskFlip(PreCompilerTransform):\n def __init__(self, prob, keys=('gt',)): # Probability of mask flip\n self._prob = prob\n self._keys = keys\n self._mask_keys = [k + '_mask' for k in keys] # TODO - Review for more than one mask\n\n def __call__(self, x):\n for (k, mk) in zip(self._keys, self._mask_keys):\n if random.random() < self._prob:\n nv = x[k].shape[0]\n x[mk] = flip_vertex_mask(nv, x[mk])\n return x\n\n def __repr__(self):\n return self.__class__.__name__ + f'(prob={self._prob},keys={self._keys})'\n\n\nclass RandomMaskDecimation(PreCompilerTransform):\n def __init__(self, frac, keys=('gt',)): # Probability of mask flip\n if isinstance(frac,(tuple,list)):\n self._low = frac[0]\n self._high = frac[1]\n else:\n self._low = 0\n self._high = frac\n\n self._keys = keys\n self._mask_keys = [k + '_mask' for k in keys] # TODO - Review for more than one mask\n\n def __call__(self, x):\n for (k, mk) in zip(self._keys, self._mask_keys):\n num_mask_vi = x[mk].shape[0]\n frac = np.random.uniform(self._low,self._high)\n num_to_remove = int(num_mask_vi * frac)\n assert num_to_remove < num_mask_vi, \"Decimated the entire mask - trying upping decimation frac\"\n if num_to_remove > 0:\n keepers_i = sorted(np.random.choice(range(num_mask_vi), size=num_mask_vi - num_to_remove, replace=False))\n x[mk] = x[mk][keepers_i]\n return x\n\n def __repr__(self):\n return self.__class__.__name__ + f'(frac=({self._low},{self._high}),keys={self._keys})'\n\n\nclass RandomScale(PreCompilerTransform):\n \"\"\"\n scales (tuple): scaling factor interval, e.g. :obj:`(a, b)`, then scale\n is randomly sampled from the range\n \"\"\"\n\n def __init__(self, scales, keys=('gt', 'tp')):\n assert isinstance(scales, (tuple, list)) and len(scales) == 2\n self._scales = scales\n self._keys = keys\n\n def __call__(self, x):\n for k in self._keys:\n scale = np.random.uniform(*self._scales)\n x[k][:, :3] *= scale\n return x\n\n def __repr__(self):\n return self.__class__.__name__ + f'(scales={self._scales},keys={self._keys})'\n\n\nclass RandomRotate(PreCompilerTransform):\n r\"\"\"Rotates node positions around a specific axis by a randomly sampled\n factor within a given interval.\n\n Args:\n degrees (tuple or float): Rotation interval from which the rotation\n angle is sampled. If :obj:`degrees` is a number instead of a\n tuple, the interval is given by :math:`[-\\mathrm{degrees},\n \\mathrm{degrees}]`.\n axis (int, optional): The rotation axis. 
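# --- Rotation convention used by RandomRotate below (sketch): matrices are
# right-multiplied via np.matmul(points, rot), so a quarter turn about the
# z-axis maps the x unit vector onto y:
import math
import numpy as np
degree = math.pi / 2
sin, cos = math.sin(degree), math.cos(degree)
rot_z = np.array([[cos, sin, 0], [-sin, cos, 0], [0, 0, 1]])
# np.matmul(np.array([[1.0, 0.0, 0.0]]), rot_z) ~= [[0.0, 1.0, 0.0]]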
(default: :obj:`0`)\n \"\"\"\n\n def __init__(self, degrees, axis=0, keys=('gt', 'tp')):\n if not isinstance(degrees, tuple):\n degrees = (-abs(degrees), abs(degrees))\n assert isinstance(degrees, (tuple, list)) and len(degrees) == 2\n assert axis in [0, 1, 2]\n self._degrees = degrees\n self._axis = axis\n self._keys = keys\n\n def __call__(self, x):\n for k in self._keys:\n degree = math.pi * random.uniform(*self._degrees) / 180.0\n sin, cos = math.sin(degree), math.cos(degree)\n\n if self._axis == 0:\n rot = np.array([[1, 0, 0], [0, cos, sin], [0, -sin, cos]])\n elif self._axis == 1:\n rot = np.array([[cos, 0, -sin], [0, 1, 0], [sin, 0, cos]])\n else:\n rot = np.array([[cos, sin, 0], [-sin, cos, 0], [0, 0, 1]])\n x[k][:, :3] = np.matmul(x[k][:, :3], rot)\n if x[k].shape[1] >= 6:\n x[k][:, 3:6] = np.matmul(x[k][:, 3:6], rot) # Rotate normals as well\n\n def __repr__(self):\n return self.__class__.__name__ + f'(degrees={self._degrees},axis={self._axis},keys={self._keys})'\n\n\nclass RandomTranslate(PreCompilerTransform):\n r\"\"\"Translates node positions by randomly sampled translation values\n within a given interval. In contrast to other random transformations,\n translation is applied separately at each position.\n\n Args:\n translate (sequence or float or int): Maximum translation in each\n dimension, defining the range\n :math:`(-\\mathrm{translate}, +\\mathrm{translate})` to sample from.\n If :obj:`translate` is a number instead of a sequence, the same\n range is used for each dimension.\n WARNING: After this operation, vertex normals are not going to fit- TODO\n \"\"\"\n\n def __init__(self, translate, keys=('gt', 'tp')):\n self._translate = translate\n self._keys = keys\n\n def __call__(self, x):\n for k in self._keys:\n nv, t = x[k].shape[0], self._translate\n if isinstance(t, numbers.Number):\n t = list(repeat(t, times=3))\n assert len(t) == 3\n\n ts = []\n for d in range(3):\n ts.append(np.random.uniform(low=-abs(t[d]), high=abs(t[d]), size=(nv,)))\n # ts.append(x[k].empty_like(n).uniform_(-abs(t[d]), abs(t[d])))\n\n x[k][:, :3] += np.stack(ts, axis=1)\n return x\n\n def __repr__(self):\n return self.__class__.__name__ + f'(translate={self._translate},keys={self._keys})'\n\n\nclass RandomGaussianNoise(PreCompilerTransform):\n # WARNING: After this operation, vertex normals are not going to fit- TODO\n # STD - Either a range or a float\n # Output keys must be same as keys or completely new\n def __init__(self, std, keys=('gt',), okeys=('gt',), slicer=slice(0, 3)):\n if isinstance(std, Sequence):\n std = tuple(std)\n self._std = std\n self._keys = keys\n self._okeys = okeys\n self._slicer = slicer\n\n def __call__(self, x):\n for ok, k in zip(self._okeys, self._keys):\n arr = x[k][:, self._slicer]\n std = np.random.uniform(low=self._std[0], high=self._std[1]) if isinstance(self._std, tuple) else self._std\n noise = (std * np.random.standard_normal(arr.shape)).astype(arr.dtype)\n if ok == k:\n x[k][:, self._slicer] = arr + noise\n else:\n x[ok] = arr + noise\n return x\n\n def __repr__(self):\n return self.__class__.__name__ + \\\n f'(std={self._std},keys={self._keys},okeys={self._okeys},slicer={self._slicer})'\n\n\nclass RandomInputDropout(PreCompilerTransform):\n # TODO - Correct this\n def __init__(self, max_dropout_ratio=0.875):\n assert 0 <= max_dropout_ratio < 1\n self.max_dropout_ratio = max_dropout_ratio\n\n def __call__(self, points):\n pc = points.numpy()\n\n dropout_ratio = np.random.random() * self.max_dropout_ratio # 0~0.875\n drop_idx = 
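# --- Usage sketch for RandomGaussianNoise above: with `okeys` different from
# `keys`, the noisy copy lands in a new entry and the source array is left
# untouched (`sample` is an illustrative data dict; the same call appears in
# this module's test code):
# noise_t = RandomGaussianNoise(std=(0.0, 0.05), keys=('gt',), okeys=('gt_noise',))
# sample = noise_t(sample)  # adds sample['gt_noise'], leaves sample['gt'] as-is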
np.where(np.random.random((pc.shape[0])) <= dropout_ratio)[0]\n if len(drop_idx) > 0:\n pc[drop_idx] = pc[0] # set to the first point\n\n return pc\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# Data Augmentation Utils\n# ----------------------------------------------------------------------------------------------------------------------\n\ndef align_channels(v, f, req_in_channels):\n available_in_channels = v.shape[1]\n if available_in_channels > req_in_channels:\n return v[:, 0:req_in_channels]\n else:\n combined = [v]\n if req_in_channels >= 6 > available_in_channels:\n combined.append(vertex_normals(v, f))\n if req_in_channels >= 12 > available_in_channels:\n combined.append(vertex_moments(v))\n\n return np.concatenate(combined, axis=1)\n\n# ----------------------------------------------------------------------------------------------------------------------\n#\n# ----------------------------------------------------------------------------------------------------------------------\n\ndef _test_transforms():\n from data.sets import DatasetMenu\n from geom.mesh.vis.base import plot_mesh\n ds = DatasetMenu.order('FaustPyProj')\n # single_ldr = ds.loaders(s_nums=1000, s_shuffle=True,\n # s_transform=[RandomGaussianNoise((0, 0.05), okeys=('gt_noise',))],\n # n_channels=6, method='rand_f2f', batch_size=1, device='cpu-single')\n single_ldr = ds.loaders(s_nums=1000, s_shuffle=True, s_transform=[AppendHKS()], n_channels=6, method='rand_f2p',\n batch_size=1,\n device='cpu-single')\n\n for dp in single_ldr:\n dp['gt'] = dp['gt'].squeeze()\n gt = dp['gt']\n # mask = dp['gt_mask'][0]\n # gt_part = gt[mask, :]\n trans = RandomTranslate(0.01, keys=['gt'])\n # print(trans)\n # v = gt_part[:, :3]\n # n = gt_part[:, 3:6]\n # _, f = trunc_to_vertex_mask(gt[:, :3], ds.faces(), mask)\n plot_mesh_montage(vb=[gt[:, :3], dp['gt_noise']], fb=ds.faces(), strategy='spheres')\n # dp = trans(dp)\n v = gt[:, :3]\n n = gt[:, 3:6]\n plot_mesh(v=v, n=n, f=ds.faces(), strategy='mesh')\n break\n\n\nif __name__ == '__main__':\n _test_transforms()\n","repo_name":"Project1HY/Ramp","sub_path":"shape_completion-main/src/core/data/transforms.py","file_name":"transforms.py","file_ext":"py","file_size_in_byte":16894,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"33791733522","text":"from sys import stdin\n\ncolor = ['black', 'brown', 'red', 'orange', 'yellow', 'green', 'blue', 'violet', 'grey', 'white']\nvalues = {color[x]: x for x in range(10)}\ng = {color[x]: 10 ** x for x in range(10)}\n\nres = ''\nfor _ in range(2):\n data = values[stdin.readline().rstrip()]\n if res != '' and data != 0:\n res += str(data)\n else:\n res += str(data)\n\nres = int(res)\nprint(res * g[stdin.readline().rstrip()])","repo_name":"hunnam5220/Solved_ac","sub_path":"01. Bronze/Bronze II/solutions/1076. 저항.py","file_name":"1076. 
저항.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"16598716966","text":"from rest_framework import serializers\nfrom backend.api.product.models import Product, Category, ProductImage\nfrom rest_framework.reverse import reverse\nfrom django.utils.text import slugify\nfrom backend.api.comment.api.serializers import CommentSerializer\n\n\nclass ProductImageSerializer(serializers.ModelSerializer):\n class Meta:\n model = ProductImage\n fields = ['image']\n\n\nclass ProductSerializer(serializers.HyperlinkedModelSerializer):\n category = serializers.SerializerMethodField('category_details')\n url = serializers.HyperlinkedIdentityField(\n view_name='product-detail',\n lookup_field='slug'\n )\n image = serializers.SerializerMethodField()\n\n class Meta:\n model = Product\n fields = ['id', 'url', 'slug', 'name', 'image', 'category',\n 'price', 'description', 'quantity_avialable']\n\n # def get_description(self, obj):\n # return obj.description[:30] + '...' if len(obj.description) > 30 else obj.description\n\n def get_image(self, obj):\n image_obj = obj.images.first()\n serializer = ProductImageSerializer(image_obj)\n return serializer.data\n\n def category_details(self, obj):\n request = self.context.get('request')\n data = {\n 'url': reverse(\"category-detail\", kwargs={'slug': obj.category.slug}, request=request),\n 'slug': obj.category.slug,\n 'name': obj.category.name\n }\n return data\n\n\nclass DetailCategorySerialiser(serializers.ModelSerializer):\n product = ProductSerializer(many=True, read_only=True)\n\n class Meta:\n model = Category\n fields = ['slug', 'name', 'description', 'product']\n\n\nclass CategorySerialiser(serializers.HyperlinkedModelSerializer):\n # product = serializers.SlugRelatedField(\n # slug_field='name',\n # many=True,\n # read_only=True\n # )\n url = serializers.HyperlinkedIdentityField(\n view_name='category-detail',\n lookup_field='slug'\n )\n\n class Meta:\n model = Category\n fields = ['id', 'url', 'name', 'slug', 'description']\n\n def update(self, instance, validated_data):\n \"\"\"\n Update and return an existing `Snippet` instance, given the validated data.\n \"\"\"\n instance.name = validated_data.get('name', instance.name)\n instance.slug = slugify(validated_data.get('name', instance.slug))\n instance.description = slugify(\n validated_data.get('description', instance.description))\n instance.save()\n return instance\n\n\nclass DetialedProductSerializer(serializers.ModelSerializer):\n category = serializers.SerializerMethodField('category_details')\n\n # comments = CommentSerializer(many=True, read_only=True)\n comments = serializers.SerializerMethodField()\n images = ProductImageSerializer(many=True)\n\n class Meta:\n model = Product\n fields = ['id', 'name', 'slug', 'images', 'category', 'description',\n 'price', 'quantity_avialable', 'comments']\n\n def category_details(self, obj):\n request = self.context.get('request')\n data = {\n 'url': reverse(\"category-detail\", kwargs={'slug': obj.category.slug}, request=request),\n 'slug': obj.category.slug,\n 'name': obj.category.name\n }\n return data\n\n def get_comments(self, obj):\n serializer = CommentSerializer(obj.comments.all_comments(), many=True)\n return serializer.data\n\n def update(self, instance, validated_data):\n \"\"\"\n Update and return an existing `Snippet` instance, given the validated data.\n \"\"\"\n instance.name = validated_data.get('name', instance.name)\n instance.slug = 
slugify(validated_data.get('name', instance.slug))\n instance.price = validated_data.get('price', instance.price)\n instance.description = validated_data.get(\n 'description', instance.description)\n instance.quantity_avialable = validated_data.get(\n 'quantity_avialable', instance.quantity_avialable)\n instance.save()\n return instance\n","repo_name":"0mri/GStore","sub_path":"backend/api/product/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":4124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"2654628369","text":"def timeconversion(time):\n time = time.split(\":\")\n H, M, AP = time[0], time[1][:2], time[1][2:]\n AM = {1: 'one', 2: 'two', 3: 'three', 4: 'four', 5: 'five',\n 6: 'six', 7: 'seven', 8: 'eight', 9: 'nine', 10: 'ten',\n 11: 'eleven', 12: 'zero'}\n PM = {1: 'thirteen', 2: 'fourteen', 3: 'fifteen', 4: 'sixteen', 5: 'seventeen', 6: 'eighteen',\n 7: 'nineteen', 8: 'twenty', 9: 'twenty-one', 10: 'twenty-two', 11: 'twenty-three', 12: 'Twelve'}\n Mins = {1: 'one', 2: 'two'\t, 3: 'three'\t, 4: 'four'\t, 5: 'five'\t, 6: 'six', 7: 'seven'\t, 8: 'eight'\t, 9: 'nine'\t, 10: 'ten', 11: 'eleven'\t, 12: 'twelve'\t, 13: 'thirteen'\t, 14: 'fourteen'\t, 15: 'fifteen'\t, 16: 'sixteen'\t, 17: 'seventeen'\t, 18: 'eighteen'\t, 19: 'nineteen', 20: 'twenty', 21: 'twenty-one', 22: 'twenty-two'\t, 23: 'twenty-three'\t, 24: 'twenty-four', 25: 'twenty-five', 26: 'twenty-six', 27: 'twenty-seven', 28: 'twenty-eight'\t, 29: 'twenty-nine', 30: 'thirty', 31: 'thirty-one'\t, 32: 'thirty-two', 33: 'thirty-three', 34: 'thirty-four',\n35: 'thirty-five', 36: 'thirty-six', 37: 'thirty-seven'\t, 38: 'thirty-eight', 39: 'thirty-nine', 40: 'forty',\n41: 'forty-one', 42: 'forty-two'\t, 43: 'forty-three', 44: 'forty-four', 45: 'forty-five', 46: 'forty-six', 47: 'forty-seven', 48: 'forty-eight', 49: 'forty-nine', 50: 'fifty', 51: 'fifty-one', 52: 'fifty-two'\t, 53: 'fifty-three', 54: 'fifty-four', 55: 'fifty-five', 56: 'fifty-six', 57: 'fifty-seven', 58: 'fifty-eight', 59: 'fifty-nine', 60: 'sixty'\n}\n if M == \"00\":\n if AP == \"AM\":\n hours = AM[int(H)]\n else:\n hours = PM[int(H)]\n print(hours + \" hundred hours\")\n else:\n mins = Mins[int(M)]\n if AP == \"AM\":\n hours = AM[int(H)]\n if int(H) < 10:\n hours = \"zero \" + hours\n else:\n hours = PM[int(H)]\n if int(M) < 10:\n mins = \"zero \" + mins\n print(hours + \" \" + mins)\n\nwhile(1):\n time = input(\"Enter the timing:(press q to quit)\")\n if time == \"q\":\n break\n timeconversion(time)\n","repo_name":"zytan98/--Military-Time-Conversion","sub_path":"military.py","file_name":"military.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"44684054702","text":"# Write a program to simulate the library and customer to borrow the book and return the book.\r\n\r\nclass Library:\r\n\r\n def __init__(self, listofbooks):\r\n self.availablebooks = listofbooks\r\n\r\n def displayavailablebooks(self):\r\n print(\"Available Books :\")\r\n for book in self.availablebooks:\r\n print(book)\r\n\r\n def lendBook(self, requestedBook):\r\n if requestedBook in self.availablebooks:\r\n print(\"The Book is Borrowed successfully\")\r\n self.availablebooks.remove(requestedBook)\r\n else:\r\n print(\"Sorry !!! the Book is not available\")\r\n\r\n def addBook(self, returnedbook):\r\n print(\"Book is now added to the library.. 
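# --- Numeric core of the timeconversion logic above (sketch): fold the
# 12-hour clock into 24-hour numbers first, then look the words up. This also
# makes the 12 AM -> 0 and 12 PM -> 12 special cases explicit.
def to_24h(hour12, ampm):
    hour = hour12 % 12
    return hour + 12 if ampm == 'PM' else hour
# to_24h(12, 'AM') == 0, to_24h(12, 'PM') == 12, to_24h(3, 'PM') == 15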
thanks for returning\")\r\n self.availablebooks.append(returnedbook)\r\n\r\n\r\nclass Customer:\r\n\r\n def requestBook(self):\r\n print(\"Enter the book you want to Borrow:\")\r\n self.requestedBook = input()\r\n print(\"thanks for requesting a Book\")\r\n return self.requestedBook\r\n\r\n def returnBook(self):\r\n print()\r\n print(\"Enter the BOOK's name you would like to return:\")\r\n self.returnedbook = input()\r\n return self.returnedbook\r\n\r\n\r\nlibrary = Library(['Think like a monk', 'success','Secret','Power'])\r\ncustomer =Customer()\r\n\r\n## Menu starts Here\r\nwhile 1:\r\n print(\"Enter 1 to Display the Available BOOKs in the library\")\r\n print(\"Enter 2 to request the BOOK from the library\")\r\n print(\"Enter 3 to return the BOOK to the library\")\r\n print(\"Enter 4 to exit\")\r\n print()\r\n userchoice = int(input())\r\n if userchoice == 1:\r\n library.displayavailablebooks()\r\n elif userchoice == 2:\r\n requestedbook = customer.requestBook()\r\n library.lendBook(requestedbook)\r\n elif userchoice == 3:\r\n returnedbook = customer.returnBook()\r\n library.addBook(returnedbook)\r\n elif userchoice == 4:\r\n print(\"Thanks for Visiting...Bye !!!\")\r\n quit()","repo_name":"Madhuchigri/Python_work_New","sub_path":"OOPs_notes_Programs/Library_customer.py","file_name":"Library_customer.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"74944606642","text":"import icmparser.postprocessors as postprocessors\n\n\nclass Detector(object):\n pass\n\n\nclass CurvyLine(Detector):\n def __init__(self, color):\n self.color = color\n\n def __call__(self, img):\n data = {}\n for x in range(img.width):\n col = []\n for y in range(img.height):\n if img(x, y) == self.color:\n col.append(y)\n data[x] = col\n return data\n\n\nclass VerticalScale(Detector):\n def __init__(self, ocr):\n self.ocr = ocr\n\n def _read_scale(self, img_scale):\n data = {}\n for y in range(img_scale.height - self.ocr.glyph_height):\n clip = img_scale.clip(0, y, img_scale.width-1, y+ self.ocr.glyph_height-1)\n decoded = self.ocr.decode_rtl(clip)\n if len(decoded):\n label = float(''.join(decoded))\n data[y+self.ocr.glyph_height/2] = label\n return data\n\n DOTTED_LINE_COLOR = 60\n def _detect_dotted_lines(self, img_chart):\n ys = []\n for y in range(img_chart.height):\n count = 0\n for x in range(img_chart.width):\n if max(img_chart(x, y)) <= self.DOTTED_LINE_COLOR:\n count += 1\n if count*3 >= img_chart.width:\n ys.append(y)\n return ys\n\n def __call__(self, img_scale, img_chart):\n assert(img_scale.height == img_chart.height)\n scale = self._read_scale(img_scale)\n lines = self._detect_dotted_lines(img_chart)\n nearest = postprocessors.Nearest(lines)\n def as_nearest(item):\n approx_y, label = item\n return nearest(approx_y), label\n scale = dict(map(as_nearest, scale.items()))\n return scale\n","repo_name":"aszady/icmparser","sub_path":"icmparser/charts.py","file_name":"charts.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"6548204310","text":"# This function serves as a image cutter for RTI images to smaller pieces so that the labelling process is easier\n\nimport numpy as np\nimport pandas as pd\nimport os\nimport rasterio\nfrom data.data_utils import patch_tile_single\nfrom mrs_utils import misc_utils\nimport gc\n\nfrom multiprocessing import Pool\n\npatch_size = 8000\ntest_file = 
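# --- Standalone stand-in for icmparser.postprocessors.Nearest as used by
# VerticalScale above (sketch): snap each approximate scale-label y coordinate
# to the closest detected dotted-line y.
def nearest(candidates, y):
    return min(candidates, key=lambda c: abs(c - y))
# nearest([10, 40, 90], 37) == 40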
'/scratch/sr365/RTI_Rwanda_full/rti_rwanda_crop_type_raw/rti_rwanda_crop_type_raw_Kinyaga_Processed_Phase3/image.tif'\ndest_dir = '/home/sr365/Gaia/rti_rwanda_cut_tiles' + '_ps_{}'.format(patch_size)\n# test_file = '/scratch/sr365/RTI_Rwanda_full/rti_rwanda_crop_type_raw/rti_rwanda_crop_type_raw_Kinyaga_Processed_Phase3/image.tif'\n# dest_dir = '/scratch/sr365/RTI_Rwanda_full/cut_tiles' + '_ps_{}'.format(patch_size)\n\n\n\ndef read_tiff(tiff_file):\n \"\"\"\n Read the tiff file into a big numpy array, meanwhile printing the overall information about this tile\n :param: tiff_file: The file name\n \"\"\"\n # Open the tiff file\n dataset = rasterio.open(tiff_file)\n # output some basic information\n print('The tiff file opened is :{}\\n There are {} channels in this file, width = {}, height = {} pixels\\n'.format(tiff_file, len(dataset.indexes), \n dataset.width, dataset.height))\n # Get the number of channels and initialize the big array\n num_channels = len(dataset.indexes) - 1 # The last layer is just the valid map, which should be discarded\n img_big = np.zeros([dataset.height, dataset.width, num_channels])\n # Loop over the number of channels\n for i in range(num_channels):\n print('reading channel {}'.format(i))\n img = dataset.read(i+1)\n img_big[:, :, i] = img\n # for real-application, no subsampling of image\n # img_subsampled = img_big[::sample_rate, ::sample_rate, :]\n \n # Delete the variable so that memory consumption is kept low\n del img\n gc.collect() # Call the garbage collecter\n \n return img_big\n\n\ndef cut_img_big_into_tiles(tiff_file, dest_dir, ps, pad=0, overlap=0):\n \"\"\"\n This function cut the big image RTI tiles that read from the read_tiff() into small tiles\n :param: tiff_file: The file name\n :param patch_size: The size of the output patch [ps, ps, 3]\n :param dest_dir: The destination directory to save the cut image tiles\n :param pad, overlap: The padding on the peripheral of the image, overlap between patches on the original image\n :return None: (It saves the tiles in the dest_dir)\n \"\"\"\n patch_size = (ps, ps)\n #########################\n # Step 1: read the tiff #\n #########################\n img_big = read_tiff(tiff_file)\n \n ##########################################\n # Step 2: Cut the images into small tiles#\n ##########################################\n # Set destination folder name first\n RTI_img_label = tiff_file.split('/')[-2]\n patch_dir = os.path.join(dest_dir, RTI_img_label)\n print('saving your patch to patch_dir {}'.format(patch_dir))\n \n # Create the directory if that does not exist\n if not os.path.isdir(patch_dir):\n os.makedirs(patch_dir)\n \n # Loop over the patches cutted\n for rgb_patch, y, x in patch_tile_single(img_big, patch_size, pad, overlap):\n print('saving patch {} {} now'.format(x, y))\n img_patchname = '{}_y{}x{}.png'.format(RTI_img_label, int(y), int(x))\n misc_utils.save_file(os.path.join(\n patch_dir, img_patchname), rgb_patch.astype(np.uint8))\n \n\ndef cut_all(data_dir):\n \"\"\"\n The master function that cuts all the RTI image data\n :param data_dir: The master diectory that contains the RTI imagery\n \"\"\"\n # Loop over all the sub-directories\n for folder in os.listdir(data_dir):\n cur_folder = os.path.join(data_dir, folder)\n img_tif = os.path.join(cur_folder, 'image.tif')\n # Skip if either this is not a folder or the image.tif does not exist\n if not os.path.isdir(cur_folder) or not os.path.exists(img_tif):\n continue\n try:\n # Start cutting\n cut_img_big_into_tiles(img_tif, ps=patch_size, 
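# --- Grid arithmetic for the cut above (sketch): assuming patch_tile_single
# walks the tile in plain ps-sized steps, matching the pad=0, overlap=0 call,
# an H x W image yields ceil(H/ps) * ceil(W/ps) patches.
import math

def n_patches(height, width, ps):
    return math.ceil(height / ps) * math.ceil(width / ps)
# n_patches(20000, 17000, 8000) == 9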
dest_dir=dest_dir, pad=0, overlap=0)\n except:\n print('The cutting for {} folder failed!!! Continuing now'.format(img_tif))\n continue\n \n\nif __name__ == '__main__':\n #read_tiff(test_file)\n #cut_img_big_into_tiles(test_file, ps=8000, dest_dir=dest_dir, pad=0, overlap=0)\n\n cut_all('/home/sr365/Gaia/rti_rwanda_crop_type_raw/Cyampirita')\n #cut_all('/scratch/sr365/RTI_Rwanda_full/rti_rwanda_crop_type_raw')\n\n \n","repo_name":"BensonRen/Drone_based_solar_PV_detection","sub_path":"solar_PV_utils/cut_Rwanda.py","file_name":"cut_Rwanda.py","file_ext":"py","file_size_in_byte":4547,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"75"} +{"seq_id":"10069616065","text":"def main():\n text = 'Input: '\n user_input = str(input(text))\n print(shorten(user_input))\n\n\ndef shorten(user_input):\n s = list(user_input)\n outcome = ''\n list_of_vowels = ['e', 'u', 'i', 'o', 'a']\n for i in s:\n if i.lower() in list_of_vowels:\n outcome = outcome\n elif i.lower() not in list_of_vowels:\n outcome += i\n return outcome\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Anpilogov-V/code50","sub_path":"twttr/twttr.py","file_name":"twttr.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"31805009817","text":"# https://www.rfc-editor.org/rfc/rfc1945.html\nimport typing, enum\nimport logging, sys, platform\nimport asyncio\nimport datetime\n\nserver_name = f'YukiHTTP/1.0 ({platform.python_implementation()}/{platform.python_version()})'\n\n\n# TODO: POST, additional methods?\n# TODO: More convenient headers editor?\n# TODO: Additional headers support\n# TODO: proper CRLF handle in response headers (should never occur, 500?)\n# TODO: unescape of URL query's kv (and reverse...)\n# TODO: more clear interface for 3xx[Redirect]. 
What if every status will be a class?\n# TODO: HEAD, If-Modified-Since\n\n\n@enum.unique\nclass Method(enum.Enum):\n GET = 1\n HEAD = 2\n POST = 3\n\n def __str__(self) -> str:\n return self.name\n\n\n@enum.unique\nclass Status(enum.Enum):\n OK = 200\n Created = 201\n Accepted = 202\n NoContent = 204\n\n MovedPermamently = 301\n Found = 302\n NotModified = 304\n\n BadRequest = 400\n Unauthorized = 401\n Forbidden = 403\n NotFound = 404\n\n InternalServerError = 500\n NotImplemented = 501\n BadGateway = 502\n ServiceUnavailable = 503\n\n def __str__(self) -> str:\n return self.name\n\n\n# URI: [protocol://network_addr[:port]][/path][?query_args]\n# known protocols: HTTP\n# Parse(str) -> { addr: Optional[str], port: int|80, path: str, query_args: dict[str, str] }\nclass URI(typing.NamedTuple):\n path: str = '/'\n query_args: dict[str, str] = {}\n\n\n# TODO undo URL-escape\ndef unescape(s: bytes) -> bytes:\n return s\n\n\ndef to_key_value(args: bytes, argsep: bytes = b'\\r\\n', kvsep: bytes = b':', escaped: bool = False, lstrip: bool = True) -> dict[str, str]:\n res: dict[str, str] = {}\n if args:\n for kv in args.split(argsep):\n pos = kv.find(kvsep)\n if pos == -1:\n continue\n k, v = kv[:pos], kv[pos+1:]\n if lstrip:\n v = v.lstrip()\n if escaped:\n k, v = unescape(k), unescape(v)\n ks, vs = k.decode('utf8'), v.decode('utf8')\n res[ks] = vs\n return res\n\n\ndef to_path(s: bytes) -> URI:\n qpos = s.find(b'?')\n if qpos == -1:\n qpos = len(s)\n path, args = s[:qpos], s[qpos+1:]\n\n path_s = unescape(path).decode('utf8')\n query_args = to_key_value(args, b'&', b'=', escaped=True, lstrip=False) \n return URI(path=path_s, query_args=query_args)\n\n\n# REQUEST\n# Request Line:\n# Method URI [Protocol|default:HTTP/0.9]\n# Examples:\n# GET /index.html\n# -- HTTP/0.9 \"GET\" request to /index.html\n# POST https://example.com/index.html HTTP/1.0\n# -- HTTP/1.0 \"POST\" request to /index.html in https://example.com/\n# Headers:\n# General: Date, Pragma\n# Request: Authorization, From, If-Modified-Since, Referer, User-Agent\n# Response: Location, Server, WWW-Authenticate\n# Entity: Allow, Content-Encoding, Content-Length, Content-Type, Expires, Last-Modified, \nclass Request(typing.NamedTuple):\n method: Method\n path: URI\n proto: str\n headers: dict[str, str] = {}\n reader: typing.Optional[asyncio.StreamReader] = None\n\n\nclass Response(typing.NamedTuple):\n proto: str\n code: Status\n code_str: typing.Optional[str]\n headers: dict[str, typing.Any]\n body: typing.Any # TODO\n\n\ndef date_to_str(dt: datetime.datetime) -> str:\n # RFC1123\n weekday = [\"Mon\", \"Tue\", \"Wed\", \"Thu\", \"Fri\", \"Sat\", \"Sun\"][dt.weekday()]\n month = [\"\", \"Jan\", \"Feb\", \"Mar\", \"Apr\", \"May\", \"Jun\", \"Jul\", \"Aug\", \"Sep\", \"Oct\", \"Nov\", \"Dec\"][dt.month]\n return f\"{weekday}, {dt.day:02} {month} {dt.year} {dt.hour:02}:{dt.minute:02}:{dt.second:02} GMT\"\n\n\ndef make_response(proto: str, code: Status, expl: typing.Optional[str], head: dict[str, typing.Any], body: typing.Any) -> Response:\n return Response(proto, code, expl, head | { 'Date': date_to_str(datetime.datetime.utcnow()), 'Server': server_name }, body)\n\n\ndef make_html(proto: str, code: Status, expl: typing.Optional[str], head: dict[str, typing.Any], body: typing.Any) -> Response:\n body_enc = body.encode('utf8')\n return make_response(proto, code, expl, head | { 'Content-Type': 'text/html', 'Content-Length': len(body_enc) }, body_enc)\n\n\ndef error(code: Status, expl: typing.Optional[str] = None, proto: str = 'HTTP/1.0') -> 
Response:\n    title = f\"{code.value} {code}\"\n    stub = f\"<html><head><title>{title}</title></head><body><h1>{title}</h1><p>{expl or ''}</p><hr><i>{server_name}</i></body></html>\"\n    return make_html(proto, code, expl, dict(), stub)\n\n\nasync def parse_request(reader: asyncio.StreamReader) -> typing.Union[Request, Response]:\n    try:\n        req = await reader.readuntil(b'\\r\\n')\n        meth, uri, *ver = req[:-2].split()\n        method = Method[meth.decode('utf8').upper()]\n        path = to_path(uri)\n        if len(ver) > 1:\n            return error(Status.BadRequest, 'Wrong request line format: too many arguments')\n        proto = ver[0].decode('utf8').upper() if ver else 'HTTP/0.9'\n        if not proto.startswith('HTTP/'):\n            return error(Status.BadRequest, 'Unknown protocol')\n        if proto == 'HTTP/0.9':\n            if method != Method.GET:\n                return error(Status.BadRequest, 'Protocol unsupported method')\n            if not path.path.startswith('/'):\n                return error(Status.BadRequest, 'HTTP/0.9 does not support full paths')\n            return Request(method, path, proto)\n    except asyncio.LimitOverrunError:\n        return error(Status.BadRequest, 'Request line too long')\n    except asyncio.IncompleteReadError:\n        return error(Status.BadRequest, 'Connection break')\n    except UnicodeError:\n        return error(Status.BadRequest, 'UTF-8 decode error in request line')\n    except ValueError:\n        return error(Status.BadRequest, 'Wrong request line format: not enough arguments')\n    except KeyError:\n        return error(Status.BadRequest, 'Server unsupported method')\n\n    try:\n        head = await reader.readuntil(b'\\r\\n\\r\\n')\n        fixed_head = head[:-4].replace(b'\\r\\n ', b' ').replace(b'\\r\\n\\t', b' ')\n        headers = to_key_value(fixed_head)\n    except asyncio.LimitOverrunError:\n        return error(Status.BadRequest, 'Headers too long')\n    except asyncio.IncompleteReadError:\n        return error(Status.BadRequest, 'Connection break')\n    except UnicodeError:\n        return error(Status.BadRequest, 'UTF-8 decode error in headers')\n\n    return Request(method, path, proto, headers, reader)\n\n\nasync def request_handler(req: Request) -> Response:\n    res = ''.join([\n        f\"<html><head><title>{req.path}</title></head><body>\",\n        \"<table><tr><th>Key</th><th>Value</th></tr>\",\n        f\"<tr><td>Server name</td><td>{server_name}</td></tr>\",\n        f\"<tr><td>Protocol version</td><td>{req.proto}</td></tr>\",\n        f\"<tr><td>Method</td><td>{req.method}</td></tr>\",\n        f\"<tr><td>Path</td><td>{req.path}</td></tr>\",\n        *[f\"<tr><td>(Header) {kv[0]}</td><td>{kv[1]}</td></tr>\" for kv in req.headers.items()],\n        \"</table></body></html>
\",\n ])\n return make_html(req.proto, Status.OK, None, dict(), res)\n\n\ndef get_status_line(resp: Response) -> str:\n add = f\" - {resp.code_str}\" if resp.code_str else ''\n proto = resp.proto if resp.proto <= 'HTTP/1.0' else 'HTTP/1.0'\n return f\"{proto} {resp.code.value} {resp.code}{add}\\r\\n\"\n\n\nasync def response_sender(writer: asyncio.StreamWriter, resp: Response) -> None:\n data: list[bytes] = []\n if resp.proto != 'HTTP/0.9':\n # TODO proper CRLF encode!!\n data.append(get_status_line(resp).encode('utf8'))\n data.extend(\n f\"{key}: {value}\\r\\n\".encode('utf8') for key, value in resp.headers.items()\n )\n data.append(b'\\r\\n')\n\n if isinstance(resp.body, str):\n data.append(resp.body.encode('utf8'))\n else:\n data.append(resp.body)\n\n writer.write(b''.join(data))\n await writer.drain()\n\n\nasync def server(\n host: str = '0.0.0.0',\n port: int = 8008,\n request_handler: typing.Callable[[Request], typing.Awaitable[Response]] = request_handler\n) -> None:\n async def server_handler(reader: asyncio.StreamReader, writer: asyncio.StreamWriter) -> None:\n req = await parse_request(reader)\n if isinstance(req, Request):\n resp = await request_handler(req)\n logging.info(f\"{req.method} {req.path.path} {req.proto} -> {get_status_line(resp).strip()}\")\n else:\n resp = req\n logging.info(f\"-> {get_status_line(resp).strip()}\")\n\n await response_sender(writer, resp)\n\n writer.close()\n await writer.wait_closed()\n\n server = await asyncio.start_server(server_handler, host, port)\n\n addrs = ', '.join(str(sock.getsockname()) for sock in server.sockets)\n logging.info(f'{server_name} is serving on {addrs} with port {port}')\n\n async with server:\n await server.serve_forever()\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=(logging.DEBUG if 'debug' in sys.argv else logging.INFO))\n asyncio.get_event_loop().run_until_complete(server('0.0.0.0', 8008))\n\n","repo_name":"yuki0iq/http0","sub_path":"http1.py","file_name":"http1.py","file_ext":"py","file_size_in_byte":9085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"2919063593","text":"from dash import dcc, html, Input, Output, callback\nimport pandas as pd\nimport plotly.graph_objects as go\nimport pathlib\n\nPATH = pathlib.Path(__file__).parent\nDATA_PATH = PATH.joinpath(\"../datasets\").resolve()\nmethod_quality = pd.read_csv(DATA_PATH.joinpath(\"method_quality.csv\"), index_col='n_clusters', sep=\";\")\nmethod_quality_norm = pd.read_csv(DATA_PATH.joinpath(\"method_quality_norm.csv\"), index_col='n_clusters', sep=\";\")\nmethod_quality_corr = pd.read_csv(DATA_PATH.joinpath(\"method_quality_corr.csv\"), index_col='n_clusters', sep=\";\")\nmethod_quality_corr_norm = pd.read_csv(DATA_PATH.joinpath(\"method_quality_corr_norm.csv\"), index_col='n_clusters', sep=\";\")\n\nlayout = html.Div([\n html.H3('Measuring the Quality of Clustering Methods'),\n html.H6('Metric:'),\n dcc.RadioItems(\n options=[\n {'label': 'Silhouette score', 'value': 'silhouette score'},\n {'label': 'Mean of correlation', 'value': 'mean of correlation'},\n ],\n value='silhouette score', inline=False, id='radio_methods'\n ),\n html.H6('Preprocess data:'),\n dcc.RadioItems(\n options=[\n {'label': \"Not normalized 'page impressions' data\", 'value': 'not normalized'},\n {'label': \"Normalized 'page impressions' data\", 'value': 'normalized'},\n ],\n value='not normalized', inline=False, id='radio_methods_norm'\n ),\n dcc.Graph(id=\"graph_methods\"),\n])\n\n@callback(\n 
Output(component_id='graph_methods', component_property='figure'),\n Input(component_id='radio_methods_norm', component_property='value'),\n Input(component_id='radio_methods', component_property='value')\n)\ndef update_chart(norm, metric):\n if norm == 'normalized':\n if metric == 'silhouette score':\n df = method_quality_norm\n else: \n df = method_quality_corr_norm\n else: \n if metric == 'silhouette score':\n df = method_quality\n else:\n df = method_quality_corr\n\n fig = go.Figure()\n plot_title = f\"Clustering quality measurement with {metric} ({norm} data)\"\n # Loop df columns and plot columns to the figure\n for i in range(0, len(df.columns)):\n col_name = df.columns.values[i]\n fig.add_trace(go.Scatter(x=df.index, y=df[col_name], mode='lines', name=col_name))\n fig.update_xaxes(title_text=\"n_clusters\", tickmode='linear', tick0=1, dtick=1)\n fig.update_yaxes(title_text=\"silhouette score\")\n fig.update_layout(xaxis_rangeslider_visible=False)\n fig.update_layout(title_text=plot_title, template=\"plotly_dark\", height=600)\n return fig\n\n","repo_name":"to-schi/seasonality-clustering","sub_path":"dash_app/pages/method_quality.py","file_name":"method_quality.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"8570212906","text":"import cv2\nimport numpy as np\nimport math\nimport os\n\n\n\n\ndef main():\n img_path = '/mnt/misk/misk/lplate/data/data_rt/1'\n for file in os.listdir(img_path):\n img = cv2.imread(os.path.join(img_path, file))\n if img is None:\n continue\n img = cv2.resize(img, (152, 34))\n # img_bw = 255 * (cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) > 5).astype('uint8')\n # #\n # se1 = cv2.getStructuringElement(cv2.MORPH_RECT, (8, 8))\n # se2 = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))\n # mask = cv2.morphologyEx(img_bw, cv2.MORPH_CLOSE, se1)\n # mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, se2)\n # #\n # mask = np.dstack([mask, mask, mask]) / 255\n # out = img * mask\n # canvas = np.zeros(img.shape, np.uint8)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img[img.shape[0] - 4: img.shape[0], 0:img.shape[1]] = 255\n img[0: 4, 0:img.shape[1]] = 255\n ret, img = cv2.threshold(img, 100, 255, 0)\n im2, contours, hierarchy = cv2.findContours(img, cv2.RETR_LIST, cv2.CHAIN_APPROX_TC89_KCOS)\n for cnt in contours:\n if cv2.contourArea(cnt) < 60:\n cv2.fillPoly(img, pts=[cnt], color=(255, 255, 255))\n im2, contours, hierarchy = cv2.findContours(img, cv2.RETR_LIST, cv2.CHAIN_APPROX_TC89_KCOS)\n img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n for cnt in contours:\n epsilon = 0.05 * cv2.arcLength(cnt, True)\n approx = cv2.approxPolyDP(cnt, epsilon, True)\n approx = np.reshape(approx, (approx.shape[0], 2))\n min_x, min_y = np.min(approx, axis=0)\n max_x, max_y = np.max(approx, axis=0)\n if (max_x - min_x) > 0:\n koeff = math.fabs((max_y - min_y) / (max_x - min_x))\n if 0.5 < koeff < 2.2 and cv2.contourArea(cnt) > 80:\n print(koeff, max_x - min_x)\n cv2.rectangle(img, (min_x, min_y), (max_x, max_y), (0, 0, 255), 1)\n #cv2.drawContours(canvas, approx, -1, (0, 255, 0), 1)\n cv2.imshow(\"Contour\", cv2.resize(img, (img.shape[1] * 3, img.shape[0] * 3)))\n cv2.waitKey(2000)\n\n # cv2.imshow('Output', out)\n\n cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n 
main()\n\n","repo_name":"BEugen/Rslp","sub_path":"DataGenerate/test-clear-img.py","file_name":"test-clear-img.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"72875774322","text":"from flask import Blueprint\n\nfrom models import User, Activity\nfrom models import session\n\nviews = Blueprint('views', __name__)\n\n\n@views.route('/')\ndef home():\n return f'
<html><body>Hello from home</body></html>'\n\n\n@views.route('/<name>/')\ndef show(name):\n    user = session.query(User).filter_by(name=name).first()\n    return f'<html><body>{user.name} with role {user.role}. Created at {user.date}</body></html>'\n\n\n@views.route('/id/<user_id>/')\ndef by_id(user_id):\n    user = session.query(User).filter_by(id=user_id).first()\n    return f'<html><body>{user.name} with role {user.role}. Created at {user.date}</body></html>'\n\n\n@views.route('/update/<user_id>/<n>/<r>/')\ndef update(user_id, n, r):\n    user = session.query(User).filter_by(id=user_id).first()\n    user.name = n\n    user.role = r\n    return f'<html><body>{user.name} with role {user.role}</body></html>'\n\n\n@views.route('/<n>/<r>')\ndef create(n, r):\n    user = User(name=n, role=r)\n    session.add(user)\n    session.commit()\n    print(f'{user.id}\\t{user.name}\\t{user.role}\\t{user.date}')\n    return f'<html><body>Added user {user.name} with role {user.role}</body></html>'\n\n\n@views.route('/activity/add/<user_id>/<distance>')\ndef create_activity(user_id, distance):\n    activity = Activity(user_id=user_id, distance=distance)\n    session.add(activity)\n    session.commit()\n    s = f'<html><body>Added activity<br>{activity.user_id}<br>{activity.distance}</body></html>'\n    return s\n\n\n@views.route('/activity/show/<activity_id>')\ndef show_activity(activity_id):\n    activity = session.query(Activity).filter_by(id=activity_id).first()\n    s = f'<html><body>Showing activity<br>Activity ID = {activity.id}<br>\\\n        User id = {activity.user_id}<br>Distance = {activity.distance}</body></html>'\n    return s\n\n\n@views.route('/activity/show_all/<user_id>')\ndef show_activity_all(user_id):\n    user = session.query(User).filter_by(id=user_id).first()\n    activity = session.query(Activity).filter_by(user_id=user.id).all()\n    s = f'<html><body>Showing all activities of user {user.name}</body></html>'\n    for a in activity:\n        s = s + f'<p>{a.id}  |  {a.distance}</p>'\n    return s\n\n\n@views.route('/activity/show_all_user/<user_id>')\ndef show_activity_all_from_user(user_id):\n    user = session.query(User).filter_by(id=user_id).first()\n    s = f'<html><body>Showing all activities of user {user.name}</body></html>'\n    for a in user.activity:\n        s = s + f'<p>{a.id}  |  {a.distance}</p>
'\n return s\n","repo_name":"h4sski-programming/Flask_SQLAlchemy_testing","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"28086364572","text":"import sys\nsys.path.append('../')\nfrom autocross.AutoCross import *\nimport multiprocessing as mp\nnp.random.seed(998244353)\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\n\ndef get_model(name, shape):\n return LogisticRegressionModel(name, shape)\n\n\ndef loss_func(pred, y):\n l = 0.\n for p, v in zip(pred, y):\n if v==1: l-=p.log()\n elif v==0: l-=(1-p).log()*2\n else: print('Error, unknow label',v)\n return l/len(y)\n\n\ndef get_trainer(model):\n return LogisticRegressionTrainer(model, batchsize=32, optim=(lambda x: torch.optim.Adam(x.parameters(), lr=1e-3)),\n l1_reg=1e-4, l2_reg=1e-4, loss_func=loss_func)\n\n\ndef next_func(x):\n return -1\n\ndef run_autocross(train_x, val_x, test_x, train_y, val_y, n_jobs, n_new_feats):\n ctx = mp.get_context('spawn') # parallel torch needs 'spawn' $@^@#$^$%*&%@^$%%@\n pool = ctx.Pool(n_jobs) # Set n_jobs\n # pool = None\n df = pd.concat([train_x, val_x], axis=0).reset_index(drop=True)\n label = pd.concat([train_y, val_y], axis=0).reset_index(drop=True)\n point = len(train_x.index.tolist())\n idxT = df.index[:point]\n idxV = df.index[point:]\n print(\"idxT: {}\".format(idxT))\n print(\"idxV: {}\".format(idxV))\n discretization = MultiGranularityDiscretization(min_glt=100, next_func=next_func)\n crypto = Crypto(special_char='$')\n featgenerator = FeatGenerator(order=2)\n hashingtrick = HashingTrick(mod=47)\n indexmanager = IndexManager(idx_train_iloc=idxT, idx_valid_iloc=idxV, min_block=16)\n featevaluator = FeatEvaluator(get_model=get_model, get_trainer=get_trainer, hashingtrick=hashingtrick,\n indexmanager=indexmanager)\n autocross = AutoCross(df=df, label=label, max_rounds=n_new_feats, pool=pool, # pool=None means single thread\n discretization=discretization, crypto=crypto, featgenerator=featgenerator,\n featevaluator=featevaluator,\n test=None)\n new_df = pd.concat([df, autocross.df.iloc[:,-n_new_feats:]], axis=1)\n train_x = new_df.iloc[idxT]\n val_x = new_df.iloc[idxV]\n super_print('Start discretization convert.')\n test_x_new = autocross.discretization.convert(test_x)\n super_print('Start featgenerator convert.')\n test_x_new = autocross.featgenerator.convert(test_x_new, autocross.df.columns)\n test_x = pd.concat([test_x, test_x_new.iloc[:,-n_new_feats:]], axis=1)\n for c in train_x.columns[-n_new_feats:]: train_x[c] = train_x[c].astype('category')\n for c in val_x.columns[-n_new_feats:]: val_x[c] = val_x[c].astype('category')\n for c in test_x.columns[-n_new_feats:]: test_x[c] = test_x[c].astype('category')\n return train_x, val_x, test_x\n\n\ndef process_cat(data, categorical_features):\n for feature in categorical_features:\n data[feature] = data[feature].astype('category')\n data[feature] = data[feature].cat.codes\n data[feature] = data[feature].astype('category')\n return data\n\n\nif __name__ == '__main__':\n data = pd.read_csv('./train.csv', nrows=12800, usecols=['target', 'v1', 'v2', 'v3', 'v4', 'v110'])\n categorical_features = list(data.select_dtypes(exclude=np.number).columns)\n numerical_features = []\n for feature in data.columns:\n if (feature == 'target') or (feature in categorical_features):\n continue\n else:\n numerical_features.append(feature)\n\n print(categorical_features)\n data = 
process_cat(data, categorical_features)\n train_x = data[:6400]\n test_x = data[6400:]\n train_index = train_x[:int(len(train_x) * 0.75)].index\n val_index = train_x[int(len(train_x) * 0.75):].index\n train_y = train_x[['target']]\n test_y = test_x[['target']]\n del train_x['target']\n del test_x['target']\n train_x, val_x, test_x = run_autocross(train_x.loc[train_index], train_x.loc[val_index], test_x,\n train_y.loc[train_index], train_y.loc[val_index], test_y)\n","repo_name":"ZhangTP1996/OpenFE_reproduce","sub_path":"runs/baseline/autocross/run_autocross.py","file_name":"run_autocross.py","file_ext":"py","file_size_in_byte":4009,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"5500158788","text":"from argparse import ArgumentParser\nimport os\n\nimport torch\nfrom tqdm.auto import tqdm\n\nfrom training_pipeline import calculate_epoch_metrics, do_phase, get_dataloaders, initialize_model\nfrom models import *\n\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\nif __name__ == '__main__':\n psr = ArgumentParser()\n psr.add_argument(\"--model_file\", type=str, required=True)\n psr.add_argument(\"--model_name\", type=str, required=True)\n psr.add_argument(\"--dataset\", type=str, required=True, choices=['mnist', 'camelyon17', 'iwildcam'])\n psr.add_argument(\"--corr\", type=float, required=True)\n psr.add_argument(\"--batch_size\", type=int, default=32)\n psr.add_argument(\"--num_workers\", type=int, default=os.cpu_count() // 2)\n psr.add_argument(\"--seed\", type=int, default=42)\n psr.add_argument(\"--root_dir\", type=str, default='/scratch/eecs542f21_class_root/eecs542f21_class/shared_data/dssr_datasets/WildsData/')\n psr.add_argument(\"--no-state-dict\", action='store_true')\n psr.add_argument(\"--augment\", type=str, choices=['none','rand_augment'], default = 'none')\n psr.add_argument(\"--ra_n\",type=int,default = 0)\n psr.add_argument(\"--ra_m\",type=int,default = 0)\n args = psr.parse_args()\n\n print(\"Loading model...\")\n model = initialize_model(\n args.model_name,\n 2, # num classes is always 2\n True, # set this feature extraction flag to true to freeze stuff\n use_pretrained=False\n )\n if not args.no_state_dict:\n weights = torch.load(args.model_file)\n else: # need to extract the state dict from the model file\n weights = torch.load(args.model_file).state_dict() \n model.load_state_dict(weights)\n model.to(device)\n model.eval()\n\n print(\"Loading dataset...\")\n _, test_dl = get_dataloaders(\n args.dataset,\n args.root_dir,\n args.corr,\n args.seed,\n args.batch_size,\n args.num_workers,\n args.augment,\n (args.ra_n, args.ra_m),\n test_only=True,\n )\n\n pbar = tqdm(enumerate(test_dl), total=len(test_dl))\n _, _, all_y, all_preds, all_scores, _, all_domains, all_domain_preds, all_domain_scores = do_phase('val', model, pbar)\n _, acc_cls, f1_cls, auc_cls = calculate_epoch_metrics(0, all_y, all_preds, all_scores)\n _, acc_dom, f1_dom, auc_dom = calculate_epoch_metrics(0, all_domains, all_domain_preds, all_domain_scores)\n print('C-Acc: {:.4f} C-F1: {:.4f} C-AUC: {:.4f}'.format(acc_cls, f1_cls, auc_cls))\n print('D-Acc: {:.4f} D-F1: {:.4f} D-AUC: {:.4f}'.format(acc_dom, f1_dom, auc_dom))\n \n","repo_name":"tchang1997/eecs542_final","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":2595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"18516221440","text":"\"\"\"clbased URL 
Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/4.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\n\nfrom clapp import views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path(\"\", views.myview.as_view()),\n path(\"register\", views.registration.as_view(), name=\"register\"),\n # we can give template name here also and point this views also in another url\n path(\"sample\", views.testingurl.as_view(\n template=\"clapp/mysample.html\"), name=\"sample\"),\n # here we give diff template name\n path(\"sample2\", views.testingurl.as_view(\n template=\"clapp/mysample2.html\"), name=\"sample2\"),\n\n #######################################################\n # here we can give dynamic tempalte name in our views in function based views also\n path(\"myfunview\", views.myfunc1, {\n \"template\": \"clapp/funcview.html\"}, name=\"myfunview\"),\n\n]\n","repo_name":"Gauravraj1141/Django-100-Days","sub_path":"Pr17/clbased/clbased/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"38318018984","text":"from typing import List\n\nimport webviz_core_components as wcc\nfrom dash import html\nfrom dash.development.base_component import Component\nfrom webviz_config.utils import StrEnum\nfrom webviz_config.webviz_plugin_subclasses import SettingsGroupABC\n\n\nclass SensitivityFilter(SettingsGroupABC):\n class Ids(StrEnum):\n SENSITIVITY_FILTER = \"sensitivity-filter\"\n SENSITIVITY_FILTER_LINK = \"sensitivity-filter-link\"\n\n def __init__(self, sensitivities: List[str]) -> None:\n super().__init__(\"Sensitivity Filter\")\n self._sensitivities = sensitivities\n\n def layout(self) -> List[Component]:\n return [\n html.Div(\n style={\"margin-bottom\": \"10px\"},\n children=[\n wcc.Checklist(\n id=self.register_component_unique_id(\n self.Ids.SENSITIVITY_FILTER_LINK\n ),\n options=[\n {\n \"label\": \"Apply filter only on timeseries\",\n \"value\": \"no link\",\n }\n ],\n value=[],\n ),\n ],\n ),\n wcc.SelectWithLabel(\n id=self.register_component_unique_id(self.Ids.SENSITIVITY_FILTER),\n options=[{\"label\": i, \"value\": i} for i in self._sensitivities],\n value=self._sensitivities,\n size=min(20, len(self._sensitivities)),\n ),\n ]\n","repo_name":"equinor/webviz-subsurface","sub_path":"webviz_subsurface/plugins/_simulation_time_series_onebyone/_views/_onebyone_view/_settings/_sensitivity_filter.py","file_name":"_sensitivity_filter.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"75"} +{"seq_id":"15209579362","text":"import pandas as pd\nimport os\nimport re\nimport shutil\n\n# Specify the path to your CSV file\nfolder = '/Users/darrenmp/Documents/vscode/db-ppdm-copy/permen_csv'\ndata_types = []\ntable_names = []\ncur_tables = {}\n\n\n# Loop through files in the folder and get file names\nfor filename in 
os.listdir(folder):\n # Create the full file path by joining the folder path and filename\n file_path = os.path.join(folder, filename)\n\n\n # Check if the path corresponds to a file (not a subfolder)\n if os.path.isfile(file_path):\n # Extract the file name from the full file path\n file_name = os.path.basename(file_path)\n\n # Process the file name\n data_types.append(file_name)\n\n\ndata_types.remove(\".DS_Store\")\n\nfor file in data_types:\n cur_file = f\"/Users/darrenmp/Documents/vscode/db-ppdm-copy/permen_csv/{file}\"\n # Read the CSV file\n df = pd.read_csv(cur_file)\n\n # Extract the first column\n first_column = df.iloc[:, 0]\n\n second_column = df.iloc[:, 1]\n\n\n cur_fields = []\n field_types = []\n\n count_col1 = 0\n count_col2 = 0\n data_type_check = 0\n\n for value in first_column:\n if pd.isna(value):\n pass\n else:\n field = value.split('_')\n\n go_field = \"\"\n\n for i in range(len(field)):\n if i == 0 and len(field) > 1:\n go_field += field[i].title()+\"_\"\n elif i == 0 and len(field) == 1:\n go_field+= field[i].title()\n elif i == len(field)-1:\n go_field+= field[i].lower()\n else:\n go_field+= field[i].lower()+\"_\"\n\n cur_fields.append(go_field)\n count_col1 += 1\n # print(cur_fields)\n\n for value in second_column:\n if isinstance(value, str):\n if re.match(r'^\\s*Contoh\\s*:\\s*.*$', value):\n if \"Tabel Isian Parameter Casing Sumur (Well Casing) Yang Digunakan \" in cur_fields:\n cur_fields.remove(\"Tabel Isian Parameter Casing Sumur (Well Casing) Yang Digunakan \")\n\n checker = False\n get_data_type = value.split(\":\")\n get_data_type[0] = get_data_type[0].replace(\" \", \"\")\n\n # if file == \"WELL DATA.csv\":\n # print(f'{get_data_type[0]} {cur_fields[count_col2]} :{get_data_type[-1]}')\n\n\n if re.search(r'\\d',get_data_type[-1]) or re.search(r'[a-zA-Z]', get_data_type[-1]) or \"-\" and checker == False:\n field_types.append(\"*string\")\n data_type_check += 1\n # if file == \"WELL DATA.csv\":\n # print(f'{get_data_type[0]} {cur_fields[count_col2]} :{get_data_type[-1]}')\n checker = True\n\n if get_data_type[-1].isdigit() and checker == False:\n field_types.append(\"*int\")\n data_type_check += 1\n # if file == \"WELL DATA.csv\":\n # print(f'{get_data_type[0]} {cur_fields[count_col2]} :{get_data_type[-1]}')\n checker = True\n\n \n if re.match(r'^[\\d+\\-*/.]+$', get_data_type[-1]) and checker == False:\n field_types.append(\"*int\")\n data_type_check += 1\n # if file == \"WELL DATA.csv\":\n # print(f'{get_data_type[0]} {cur_fields[count_col2]} :{get_data_type[-1]}')\n checker = True\n\n count_col2 += 1\n\n print(f\"\\n{file.split('.')[0]}\\nThe 1st column contains: {len(cur_fields)}\\nThe 2nd column contains: {len(field_types)}\\nThe amount of data types found column contains: {data_type_check}\\n\")\n\n # table_name = f\"\\n{file.split('.')[0]}\"\n # with open(\"field.txt\", 'a') as file:\n # file.write(table_name)\n # file.write(str(cur_fields))\n\n get_struct_name = file.split('.')[0]\n\n table_names.append(get_struct_name)\n cur_tables[get_struct_name] = cur_fields\n\n get_struct_name = get_struct_name.split(\"_\")\n\n\n struct_name = \"\"\n\n for word in range(len(get_struct_name)):\n if len(get_struct_name) == 1:\n struct_name += get_struct_name[word].title()\n elif len(get_struct_name) > 1:\n if word == 0:\n struct_name += get_struct_name[word].title()+\"_\"\n elif word == len(get_struct_name)-1:\n struct_name += get_struct_name[word].lower()\n else:\n struct_name += get_struct_name[word].lower()+\"_\"\n\n\n\n print(struct_name)\n\n opener = 
\"package dto\\n\\n\"+\"type \"+struct_name+\" struct{\\n\\n\"\n\n\n content = \"\"\n\n for field in range(len(cur_fields)):\n if cur_fields[field] not in content:\n content += f'{cur_fields[field].replace(\" \", \"\")} {field_types[field]} `json:\"{cur_fields[field].replace(\" \", \"\").lower()}\" default:\"\"`\\n'\n\n closer = \"}\"\n\n # abbreviation_list = struct_name.split(\"_\")\n\n # abbreviation = \"\"\n\n # for word in range(len(abbreviation_list)):\n # if len(abbreviation_list) == 1:\n # abbreviation += abbreviation_list[0]\n # else:\n # get_letter = abbreviation_list[word][0]\n # abbreviation += get_letter\n\n # abbreviation = abbreviation.title()\n foreign_id = struct_name+\"_id\".lower()\n\n file_name_list = struct_name.split(\"_\")\n file_name = \"\"\n\n for name in file_name_list:\n file_name += name.title()\n\n\n \n\n\n workspace_field = [\"Id\", \"Afe_number\"]\n\n workspace_field.append(foreign_id)\n print(workspace_field)\n\n\n content = \"\"\n\n for field in range(len(cur_fields)):\n if cur_fields[field] not in content:\n content += f'{cur_fields[field].replace(\" \", \"\")} {field_types[field]} `json:\"{cur_fields[field].replace(\" \", \"\").lower()}\" default:\"\"`\\n'\n\n\n\n closer = \"}\"\n\n\n opener_workspace = \"package dto\\n\\n\"+\"type Workspace struct{\\n\\n\"\n\n content_workspace = \"\"\n\n\n for field in range(len(workspace_field)):\n if field == 0:\n content_workspace += f'{workspace_field[field].replace(\" \", \"\")} int `json:\"{workspace_field[field].replace(\" \", \"\").lower()}\" default:\"\"`\\n'\n else:\n content_workspace += f'{workspace_field[field]} *int `json:\"{workspace_field[field].replace(\" \", \"\").lower()}\" default:\"\"`\\n'\n \n\n\n # os.makedirs(f\"permen_workspace_dto/{file_name}\")\n\n\n with open(f\"permen_workspace_dto/{file_name}/workspace.go\", 'w') as file:\n file.write(opener_workspace+content_workspace+closer)\n\n\n with open(f\"permen_dto/{file_name}.go\", 'w') as file:\n file.write(opener+content+closer)\n\n\n afe = \"/Users/darrenmp/Documents/vscode/db-ppdm-copy/temp_dto/Afe.go\"\n submission = \"/Users/darrenmp/Documents/vscode/db-ppdm-copy/temp_dto/submission.go\"\n token = \"/Users/darrenmp/Documents/vscode/db-ppdm-copy/temp_dto/Token.go\"\n credential = \"/Users/darrenmp/Documents/vscode/db-ppdm-copy/temp_dto/Credential.go\"\n dto = f\"/Users/darrenmp/Documents/vscode/db-ppdm-copy/permen_dto/{file_name}.go\"\n folder = f\"permen_workspace_dto/{file_name}/\"\n\n\n\n shutil.copy(afe, folder)\n shutil.copy(submission, folder)\n shutil.copy(dto, folder)\n shutil.copy(token, folder)\n shutil.copy(credential, folder)\n\n\n\n\nwith open(f\"table_names.txt\", 'w') as file:\n file.write(str(table_names))\n\n\nwith open(f\"table_field_names.txt\", 'w') as file:\n file.write(str(cur_tables))\n\n\n# print(opener+content+closer)\n\n\n\n\n\n","repo_name":"DarrenMannuela/db-ppdm","sub_path":"permen_csv.py","file_name":"permen_csv.py","file_ext":"py","file_size_in_byte":7522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"41110658705","text":"#!/usr/bin/env python3\n\nfrom os.path import dirname, realpath\nfrom re import compile\n\ndir_path = dirname(realpath(__file__))\n\nwith open(f'{dir_path}/input') as f:\n puzzle_input = f.read().split('\\n')[:-1]\n\ndef parse_input(line_fn=str):\n for line in puzzle_input:\n yield line_fn(line)\n\ndef part1():\n pattern = compile(r'd (.)|n (.)|p (.)')\n\n pi = parse_input(lambda line: [int(n) if n else 0 for n in 
pattern.search(line).groups()])\n\n x = 0\n y = 0\n\n for dx, dd, du in pi:\n x += dx\n y += dd - du\n\n return x*y\n\n\ndef part2():\n pattern = compile(r'd (.)|n (.)|p (.)')\n\n pi = parse_input(lambda line: [int(n) if n else 0 for n in pattern.search(line).groups()])\n\n aim = 0\n x = 0\n y = 0\n\n for dx, dd, du in pi:\n x += dx\n aim += dd - du\n y += dx * aim\n\n return x*y\n\ndef main():\n part1_res = part1()\n print(f'Part 1: {part1_res}')\n\n part2_res = part2()\n print(f'Part 2: {part2_res}')\n\nif __name__ == '__main__':\n main()\n","repo_name":"tannerstephens/advent-of-code","sub_path":"2021/day2/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"36868475","text":"from django.urls import path,re_path\nfrom . import views\nfrom django.conf import settings\n\nurlpatterns = [\n path('api/post/', views.PostList.as_view()),\n path('api/profile/', views.ProfileList.as_view()),\n path('',views.homepage,name='homepage'),\n path('more/',views.more_on_pic,name='more'),\n path('profile/',views.profile,name='profile'),\n # path('rate/',views.rate,name='rate'),\n path('newpost/',views.new_post,name='new_post'),\n path('updateProfile/',views.updateProfile,name='updateProfile'),\n path('search/',views.search,name='search')\n]","repo_name":"kurgatfelo/awwards","sub_path":"awwardpage/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"18579000738","text":"# READ ME\n# aby uruchomic ten bentchamrk nalezy zmienic wersje biblioteki transformers na 2.11.0\n\nfrom transformers import PyTorchBenchmark, PyTorchBenchmarkArguments\nfrom transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments\n\n\ndef bentchmark_pytorch(model, batchsizes, sequencelengths):\n print(\"BENTCHMARK - PYTORCH\")\n args = PyTorchBenchmarkArguments(\n models=model,\n batch_sizes=batchsizes,\n sequence_lengths=sequencelengths,\n )\n benchmark = PyTorchBenchmark(args)\n return benchmark.run()\n\n\ndef bentchmark_tensorflow(model, batchsizes, sequencelengths):\n print(\"BENTCHMARK - TENSORFLOW\")\n args = TensorFlowBenchmarkArguments(\n models=model,\n batch_sizes=batchsizes,\n sequence_lengths=sequencelengths,\n )\n benchmark = TensorFlowBenchmark(args)\n return benchmark.run()\n\n\ndef main():\n model_name = [\"bert-base-uncased\"]\n batch_sizes = [1, 2, 4, 8, 16, 32]\n sequence_lengths = [8, 64, 128, 256, 512]\n result_pytorch = bentchmark_pytorch(model_name, batch_sizes, sequence_lengths)\n # result_tensorflow = bentchmark_tensorflow(model_name, batch_sizes, sequence_lengths)\n\n print(\"-------------- PYTORCH --------------\")\n print(result_pytorch)\n # print(\"-------------- TENSORFLOW --------------\")\n # print(result_tensorflow)\n\n\nmain()\n","repo_name":"RozanskiP/Dyplom","sub_path":"dyplom/4-parameters/bentchmark.py","file_name":"bentchmark.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"30871940275","text":"import numpy as np\nimport pandas as pd\nfrom pathlib import Path\n\npath_common = Path(\n r\"C:\\Users\\a-bibeka\\PycharmProjects\\airport_ei\\data\\processed\\epa_ltos_review\"\n)\npath_iah_out = Path.joinpath(path_common, \"iah_epa_aedt_out.xlsx\")\npath_dfw_out = Path.joinpath(path_common, \"dfw_epa_aedt_out.xlsx\")\npath_epa_ltos = 
Path(\n r\"C:\\Users\\a-bibeka\\PycharmProjects\\airport_ei\\data\" r\"\\external\\LTO for State.xlsx\"\n)\npath_unkn_erg_17 = Path.joinpath(path_common, \"erg_2017_unknown_factors.xlsx\")\nunkn_erg_17 = (\n pd.read_excel(path_unkn_erg_17)\n .loc[lambda df: df.Unit == \"ST\"]\n .drop(columns=[\"Aircraft Type\", \"SCC\"])\n)\nunkn_erg_17_1 = (\n unkn_erg_17.set_index([\"Code\", \"Unit\"])\n .stack()\n .reset_index()\n .rename(columns={\"level_2\": \"pollutants\", 0: \"emis_st_per_op\"})\n)\n\nepa_ltos = pd.read_excel(path_epa_ltos)\nepa_ltos_iah_dfw = epa_ltos.loc[epa_ltos.FacilitySiteIdentifier.isin([\"DFW\", \"IAH\"])]\nepa_ltos_iah_dfw[\n \"FacilitySiteIdentifier\"\n] = epa_ltos_iah_dfw.FacilitySiteIdentifier.str.lower()\nepa_ltos_iah_dfw_fil = (\n epa_ltos_iah_dfw.groupby([\"FacilitySiteIdentifier\", \"AircraftEngineTypeCode\"])\n .EPA_LTO.sum()\n .reset_index()\n)\nepa_ltos_iah_dfw_fil_unknowns = epa_ltos_iah_dfw_fil.loc[\n epa_ltos_iah_dfw_fil.AircraftEngineTypeCode > 999900\n].rename(\n columns={\"AircraftEngineTypeCode\": \"Code\", \"FacilitySiteIdentifier\": \"facility_id\"}\n)\nepa_ltos_iah_dfw_fil_unknowns_1 = epa_ltos_iah_dfw_fil_unknowns.merge(\n unkn_erg_17_1, on=\"Code\", how=\"left\"\n)\nepa_ltos_iah_dfw_fil_unknowns_1[\"emis_st\"] = (\n epa_ltos_iah_dfw_fil_unknowns_1.emis_st_per_op\n * epa_ltos_iah_dfw_fil_unknowns_1.EPA_LTO\n)\nepa_ltos_iah_dfw_fil_unknowns_2 = (\n epa_ltos_iah_dfw_fil_unknowns_1.groupby([\"facility_id\", \"pollutants\"])\n .agg(emis_st=(\"emis_st\", sum), ltos=(\"EPA_LTO\", sum))\n .reset_index()\n)\nepa_ltos_iah_dfw_fil_unknowns_2[\"type\"] = \"unknown\"\n\niah_df_known = pd.read_excel(path_iah_out)\niah_df_known[\"facility_id\"] = \"iah\"\n\ndfw_df_known = pd.read_excel(path_dfw_out)\ndfw_df_known[\"facility_id\"] = \"dfw\"\n\ndf_known = pd.concat([iah_df_known, dfw_df_known])\ndf_known[\"Mode\"] = np.select(\n [\n df_known[\"Mode\"] == \"Climb Below Mixing Height\",\n df_known[\"Mode\"] == \"Descend Below Mixing Height\",\n df_known[\"Mode\"] == \"GSE LTO\",\n df_known[\"Mode\"] == \"APU\",\n ],\n [\"Climb Below Mixing Height\", \"Descend Below Mixing Height\", \"GSE LTO\", \"APU\"],\n np.nan,\n)\ndf_known = df_known.loc[df_known[\"Mode\"] != \"nan\"]\ndf_known = df_known.rename(\n columns={\n \"Equipment Type\": \"equip_type\",\n \"CO (ST)\": \"CO\",\n \"NOx (ST)\": \"NOx\",\n \"PM 10 (ST)\": \"PM10\",\n \"PM 2.5 (ST)\": \"PM25\",\n \"SOx (ST)\": \"SOx\",\n \"VOC (ST)\": \"VOC\",\n \"Num Ops\": \"op\",\n }\n)\ndf_known_1 = df_known.filter(\n items=[\n \"facility_id\",\n \"equip_type\",\n \"Mode\",\n \"CO\",\n \"NOx\",\n \"PM10\",\n \"PM25\",\n \"SOx\",\n \"VOC\",\n \"op\",\n ]\n)\n\ndf_known_2 = (\n df_known_1.set_index([\"facility_id\", \"equip_type\", \"Mode\", \"op\"])\n .stack()\n .reset_index()\n .rename(columns={\"level_4\": \"pollutants\", 0: \"emis_st_per_op\"})\n)\ndf_known_2[\"emis_st\"] = df_known_2.emis_st_per_op * df_known_2.op\ndf_known_3 = (\n df_known_2.groupby([\"facility_id\", \"pollutants\"])\n .agg(emis_st=(\"emis_st\", sum))\n .reset_index()\n)\ndf_known_3[\"type\"] = \"known\"\ndf_known_3.groupby([\"facility_id\", \"pollutants\"]).emis_st.sum()\n\ndf_ltos = (\n df_known_2.loc[lambda df: ~df.Mode.isin([\"GSE LTO\", \"APU\"])]\n .drop_duplicates([\"facility_id\", \"equip_type\"])\n .assign(ltos=lambda df: df.op)\n)\ndf_ltos_agg = df_ltos.groupby([\"facility_id\"]).agg(ltos=(\"ltos\", \"sum\")).reset_index()\n\n\ndf_known_4 = df_known_3.merge(df_ltos_agg, on=\"facility_id\")\n\ndf_tot = pd.concat([epa_ltos_iah_dfw_fil_unknowns_2, 
df_known_4])\ndf_tot = (\n df_tot.groupby([\"facility_id\", \"pollutants\"])\n .agg(emis_st=(\"emis_st\", sum), ltos=(\"ltos\", sum))\n .reset_index()\n)\n\nepa_ltos_iah_dfw.groupby(\"FacilitySiteIdentifier\").EPA_LTO.sum()\nerg_epa_emis_iah_dfw = df_tot\nerg_epa_emis_iah_dfw_1 = erg_epa_emis_iah_dfw.rename(\n columns={\"emis_st\": \"erg_epa_emis_tons\"}\n).drop(columns=\"ltos\")\n\npath_tti_emis = Path(\n r\"C:\\Users\\a-bibeka\\PycharmProjects\\airport_ei\\data\\processed\\report_tables\\emis_ltos_2020_v2.xlsx\"\n)\ndf_tti = pd.read_excel(path_tti_emis, \"cntr_uncntr\", index_col=0)\ndf_tti_emis_iah_dfw = df_tti.loc[df_tti.facility_id.isin([\"iah\", \"dfw\"])]\ndf_tti_emis_iah_dfw_1 = (\n df_tti_emis_iah_dfw.groupby([\"facility_id\", \"eis_pollutant_id\"])\n .cntr_enis_tons.sum()\n .reset_index()\n)\n\npol_tti = [\"CO\", \"NOX\", \"PM10-PRI\", \"PM25-PRI\", \"SO2\", \"VOC\"]\npol_erg = [\"CO\", \"NOx\", \"PM10\", \"PM25\", \"SOx\", \"VOC\"]\npol_map = {i: j for i, j in zip(pol_tti, pol_erg)}\n\ndf_tti_emis_iah_dfw_1[\"pollutants\"] = df_tti_emis_iah_dfw_1.eis_pollutant_id.map(\n pol_map\n)\ndf_tti_emis_iah_dfw_2 = (\n df_tti_emis_iah_dfw_1.loc[lambda df: ~df.pollutants.isna()]\n .drop(columns=\"eis_pollutant_id\")\n .rename(columns={\"cntr_enis_tons\": \"tti_emis_tons\"})\n)\n\ndf_tti_erg_emis = erg_epa_emis_iah_dfw_1.merge(\n df_tti_emis_iah_dfw_2, on=[\"facility_id\", \"pollutants\"]\n)\n\ndf_tti_erg_emis[\"diff1\"] = (\n df_tti_erg_emis.tti_emis_tons - df_tti_erg_emis.erg_epa_emis_tons\n)\ndf_tti_erg_emis[\"per_diff\"] = (\n df_tti_erg_emis.diff1\n) / df_tti_erg_emis.erg_epa_emis_tons\npath_out_emis = Path.joinpath(path_common, \"erg_epa_tti_emis_iah_dfw.xlsx\")\ndf_tti_erg_emis.to_excel(path_out_emis)\n","repo_name":"Apoorb/Airport-Emission-Inventories","sub_path":"analysis/epa_dfw_iah/iii_erg_iah_dfw_pp.py","file_name":"iii_erg_iah_dfw_pp.py","file_ext":"py","file_size_in_byte":5481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"25275804081","text":"\"\"\"Randomly label images (optionally based on a substring).\"\"\"\n\nimport os\nfrom random import shuffle\n\nimport prodigy\nfrom labeling.export import export\nfrom prodigy.components.loaders import Images\nfrom minifigures_model.constants import get_data_folder\nfrom data.data import get_already_labeled, OPTIONS\n\n\n@prodigy.recipe(\n \"minifigures.simple\",\n substring=(\n \"The substring that should be present in the file name when loading in the stream\",\n \"option\",\n \"s\",\n str,\n ),\n)\ndef image_caption(substring: str | None = None):\n \"\"\"Prodigy labeling recipe.\"\"\"\n images_path = get_data_folder() / \"minifigures\"\n \n def load_stream(substring: str | None = None):\n \"\"\"Load in the stream to label.\"\"\"\n stream = list(Images(images_path))\n already_labeled = get_already_labeled()\n stream = [image for image in stream if image[\"text\"] not in already_labeled]\n stream = [image for image in stream if substring in image[\"text\"]] if substring else stream\n shuffle(stream)\n for task in stream:\n task[\"options\"] = OPTIONS\n yield task\n\n def progress(ctrl, update_return_value):\n \"\"\"Display the progress of the labeling session.\"\"\"\n total = len(os.listdir(images_path))\n return ctrl.session_annotated / total\n\n def on_exit(controller):\n examples = controller.db.get_dataset(controller.session_id)\n examples = [eg for eg in examples if eg[\"answer\"] == \"accept\"]\n for option in [option[\"id\"] for option in OPTIONS]:\n count = 
len([eg for eg in examples if option in eg[\"accept\"]])\n print(f\"Annotated {count} {option} examples\")\n export()\n\n return {\n \"dataset\": \"minifigures-db\",\n \"view_id\": \"choice\",\n \"stream\": load_stream(substring=substring),\n \"progress\": progress,\n \"on_exit\": on_exit,\n \"config\": { \"choice_style\": \"multiple\" },\n }\n\n","repo_name":"kuleuven-realization-of-ai/shared-resources","sub_path":"src/prodigy_recipes/recipe-simple.py","file_name":"recipe-simple.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"71833204403","text":"from sklearn import tree\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import KNeighborsClassifier as knn_clf\n\n\nLOGISTIC_REGRESSION = {\n 'estimator': LogisticRegression(),\n 'param_distributions': {\n 'solver': ['lbfgs', 'liblinear', 'saga'],\n 'max_iter': [100, 150, 200],\n 'warm_start': [True, False],\n },\n 'n_iter': 50,\n 'scoring': 'accuracy',\n 'n_jobs': 4,\n}\n\nKNN_CLF = {\n 'estimator': knn_clf(),\n 'param_distributions': {\n 'n_neighbors': range(3, 20),\n 'weights': ['uniform', 'distance'],\n 'p': [1, 2, 3, 4],\n },\n 'n_iter': 50,\n 'scoring': 'accuracy',\n 'n_jobs': 4,\n}\n\nD_TREE = {\n 'estimator': tree.DecisionTreeClassifier(),\n 'param_distributions': {\n 'criterion': ['gini', 'entropy', 'log_loss'],\n 'splitter': ['best', 'random'],\n 'min_samples_split': range(2, 10),\n },\n 'n_iter': 50,\n 'scoring': 'accuracy',\n 'n_jobs': 4,\n}\n","repo_name":"yasser-sulaiman/interactive-data-science-pipeline","sub_path":"model/parameter_search_settings.py","file_name":"parameter_search_settings.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"17566856029","text":"import argparse\nfrom pathlib import Path\nimport logging\nfrom train import main\n\n\ndef parse_args():\n root_dir = Path(\"../dataset_atmaCup11\")\n parser = argparse.ArgumentParser(description=\"Train model with cross validation.\")\n parser.add_argument(\n \"--architecture\",\n type=str,\n default=\"resnet18\",\n help=\"Base architecture.\",\n choices=[\"resnet18\", \"resnet50\", \"efficientnetb0\"],\n )\n parser.add_argument(\n \"--image_dir\", type=Path, default=root_dir / \"photos\", help=\"Image directory.\"\n )\n parser.add_argument(\n \"--folds\",\n type=int,\n default=5,\n help=\"Number of folds.\",\n )\n parser.add_argument(\n \"--train_csv\",\n type=str,\n default=str(root_dir / \"train_cv{fold}.csv\"),\n help=\"Location of train_cvX.csv.\",\n )\n parser.add_argument(\n \"--val_csv\",\n type=str,\n default=str(root_dir / \"val_cv{fold}.csv\"),\n help=\"Location of val_cvX.csv.\",\n )\n parser.add_argument(\n \"--max_epochs\", type=int, default=100, help=\"Max number of epochs.\"\n )\n parser.add_argument(\n \"--logdir\", type=Path, default=\"./logs\", help=\"Path to save logs.\"\n )\n parser.add_argument(\n \"--init_lr\", type=float, default=1e-4, help=\"Learning rate at epoch 0.\"\n )\n parser.add_argument(\"--seed\", type=int, default=2021, help=\"Random seed.\")\n args = parser.parse_args()\n return args\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO)\n args = parse_args()\n for fold in range(args.folds):\n main(\n architecture=args.architecture,\n train_csv=Path(args.train_csv.format(fold=fold)),\n val_csv=Path(args.val_csv.format(fold=fold)),\n image_dir=args.image_dir,\n 
max_epochs=args.max_epochs,\n logdir=args.logdir,\n init_lr=args.init_lr,\n seed=args.seed,\n )\n","repo_name":"Kyoroid/atmacup11","sub_path":"src/train_cv.py","file_name":"train_cv.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"18188065802","text":"import os\nimport urllib.request\nimport zipfile\n\ndata_path = 'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSST-2.zip?alt=media&token=aabc5f6b-e466-44a2-b9b4-cf6337f84ac8'\ndata_file = 'SST-2.zip'\nprint(\"Downloading and extracting %s...\" % data_file)\nurllib.request.urlretrieve(data_path, data_file)\nwith zipfile.ZipFile(data_file) as zip_ref:\n zip_ref.extractall()\nos.remove(data_file)\nprint(\"Completed!\")\n","repo_name":"microsoft/NeuronBlocks","sub_path":"dataset/get_SST-2.py","file_name":"get_SST-2.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":1440,"dataset":"github-code","pt":"75"} +{"seq_id":"15381949261","text":"#coding:utf-8\nimport os\nimport sys\nimport time\nimport cv2\nimport numpy as np\nsys.path.append(os.path.join(os.path.dirname(__file__), '../../..'))\nfrom uarm.wrapper import SwiftAPI\nimport pdb\nimport io\nfrom collections import OrderedDict\nimport serial\n#port = '/dev/ttyACM1'\nport = 'COM19'\nbps = 115200\ntimex = 1\nser = serial.Serial(port, bps, timeout=timex)\nsio = io.TextIOWrapper(io.BufferedRWPair(ser, ser))\n\ndef parse_radio_data(data_string):\n \"\"\"parse the radio data which is in format \n like 'c1:1000 c2:1023 c3:1033 c4:1972\"\"\"\n result = []\n for data in data_string.split():\n result.append(float(data.split(':')[1]))\n return result\ndef main(video_filename):\n swift = SwiftAPI()\n swift.reset()\n swift.set_position(z=75,speed=1000)\n sensor_cap = cv2.VideoCapture(1)\n sensor_cap.set(3, 320)\n sensor_cap.set(4, 240)\n width = sensor_cap.get(cv2.CAP_PROP_FRAME_WIDTH)\n height = sensor_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)\n sensor_writer = cv2.VideoWriter(video_filename + '.avi',\n cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),\n 30,\n (int(width), int(height)))\n count = 0\n mode=1\n while True:\n ret, frame = sensor_cap.read()\n swift.set_position(x=0.6*mode,speed=200,relative=True)\n pos=swift.get_position()\n print(pos)\n count += 1\n print(count)\n if ret:\n\n ret, thresh = cv2.threshold(cv2.cvtColor(frame.copy(), cv2.COLOR_BGR2GRAY), 127, 255, cv2.THRESH_BINARY)\n # findContours函数查找图像里的图形轮廓\n # 函数参数thresh是图像对象\n # 层次类型,参数cv2.RETR_EXTERNAL是获取最外层轮廓,cv2.RETR_TREE是获取轮廓的整体结构\n # 轮廓逼近方法\n # 输出的返回值,image是原图像、contours是图像的轮廓、hier是层次类型\n image, contours, hier = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n cv2.drawContours(frame, contours, -1, (255, 0, 0), 2)\n\n sensor_writer.write(frame)\n cv2.imshow('sensor data', frame)\n sio.write(\"s\")\n sio.flush()\n data = sio.readline()\n print('raw data:', data)\n# data = parse_radio_data(data)\n# print('joystick data:',data)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n time.sleep(0.2)\n if count==100:\n mode=(-1)*mode\n count=0\n sensor_cap.release()\n sensor_writer.release() \n cv2.destroyAllWindows()\n swift.reset()\n\nif __name__ == '__main__':\n 
main('3sensor')","repo_name":"liugz18/joystick-manipulation","sub_path":"uarm/uArm-Python-SDK-2.0/examples/api/single/auto_sample_data_uArm.py","file_name":"auto_sample_data_uArm.py","file_ext":"py","file_size_in_byte":2725,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"35000321009","text":"import uuid\nfrom pathlib import Path\nfrom typing import Union\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import DeclarativeBase, Session\n\nin_memory_db = \"sqlite+pysqlite:///:memory:\"\n\ndefault_db_dir = \"db\"\ndefault_db_filename = \"providers.sqlite\"\ndefault_db_path = f\"{default_db_dir}/{default_db_filename}\"\ndefault_db_uri = f\"sqlite+pysqlite:///{default_db_path}\"\n\nif not Path(default_db_dir).exists():\n Path(default_db_dir).mkdir(parents=True, exist_ok=True)\n\nSQLALCHEMY_DATABASE_URI = default_db_uri\n\n## connect_args only necessary for SQLite database\nengine = create_engine(\n SQLALCHEMY_DATABASE_URI, echo=True, connect_args={\"check_same_thread\": False}\n)\n\nSessionLocal = Session(bind=engine)\n\n\nclass Base(DeclarativeBase):\n pass\n\n\ndef get_db(engine=engine) -> Session:\n db = SessionLocal()\n try:\n yield db\n except Exception as exc:\n raise Exception(\n f\"Unhandled exception getting DB connection. Exception details: {exc}\"\n )\n finally:\n db.close()\n\n\ndef generate_uuid() -> uuid.UUID:\n \"\"\"Generate a UUID. Return string if string=True.\"\"\"\n _uuid = uuid.uuid4()\n\n return _uuid\n\n\ndef generate_uuid_str() -> str:\n _uuid = generate_uuid()\n\n return str(_uuid)\n","repo_name":"redjax/ohio_utility_scraper","sub_path":"ohioenergy/core/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"32460893683","text":"#!/usr/bin/python3\n\n# from advisory_processor.advisory_processor import AdvisoryProcessor\nimport argparse\nimport configparser\nimport logging\nimport os\nimport signal\nimport sys\nfrom pathlib import Path\nfrom typing import Any, Dict\n\npath_root = os.getcwd()\nif path_root not in sys.path:\n sys.path.append(path_root)\n\nimport log # noqa: E402\n\nfrom client.cli.console import ConsoleWriter, MessageStatus # noqa: E402\nfrom client.cli.console_report import report_on_console # noqa: E402\nfrom client.cli.html_report import report_as_html # noqa: E402\nfrom client.cli.json_report import report_as_json # noqa: E402\nfrom client.cli.prospector_client import ( # noqa: E402\n MAX_CANDIDATES, # noqa: E402\n TIME_LIMIT_AFTER, # noqa: E402\n TIME_LIMIT_BEFORE, # noqa: E402\n prospector, # noqa: E402\n)\nfrom git.git import GIT_CACHE # noqa: E402\nfrom stats.execution import execution_statistics # noqa: E402\nfrom util.http import ping_backend # noqa: E402\n\n_logger = log.util.init_local_logger()\n\nDEFAULT_BACKEND = \"http://localhost:8000\"\n# VERSION = '0.1.0'\n# SCRIPT_PATH=os.path.dirname(os.path.realpath(__file__))\n# print(SCRIPT_PATH)\n\n\ndef parseArguments(args):\n parser = argparse.ArgumentParser(description=\"Prospector CLI\")\n parser.add_argument(\n \"vulnerability_id\",\n nargs=\"?\",\n type=str,\n help=\"ID of the vulnerability to analyze\",\n )\n\n parser.add_argument(\"--repository\", default=\"\", type=str, help=\"Git repository\")\n\n parser.add_argument(\n \"--pub-date\", default=\"\", help=\"Publication date of the advisory\"\n )\n\n parser.add_argument(\"--descr\", default=\"\", type=str, help=\"Text of the 
advisory\")\n\n parser.add_argument(\n \"--max-candidates\",\n default=MAX_CANDIDATES,\n type=int,\n help=\"Maximum number of candidates to consider\",\n )\n\n parser.add_argument(\n \"--tag-interval\",\n default=\"\",\n type=str,\n help=\"Tag interval (X,Y) to consider (the commit must be reachable from Y but not from X, and must not be older than X)\",\n )\n\n parser.add_argument(\n \"--version-interval\",\n default=\"\",\n type=str,\n help=\"Version interval (X,Y) to consider (the corresponding tags will be inferred automatically, and the commit must be reachable from Y but not from X, and must not be older than X)\",\n )\n\n parser.add_argument(\n \"--modified-files\",\n default=\"\",\n type=str,\n help=\"Files (partial names are ok, comma separated) that the candidate commits are supposed to touch\",\n )\n\n parser.add_argument(\n \"--filter-extensions\",\n default=\"java\",\n type=str,\n help=\"Filter out commits that do not modify at least one file with this extension\",\n )\n\n parser.add_argument(\n \"--advisory-keywords\",\n default=None,\n type=str,\n help=\"Add the specified keywords to the advisory record\",\n )\n\n parser.add_argument(\n \"--use-nvd\", default=None, action=\"store_true\", help=\"Get data from NVD\"\n )\n\n # FIXME: with python 3.9 we can use the new built-in capabilities of argparse to get rid of this\n parser.add_argument(\n \"--no-use-nvd\", default=None, action=\"store_true\", help=\"Get data from NVD\"\n )\n\n parser.add_argument(\n \"--fetch-references\",\n default=True,\n action=\"store_true\",\n help=\"Fetch content of references linked from the advisory\",\n )\n\n parser.add_argument(\n \"--backend\", default=DEFAULT_BACKEND, type=str, help=\"URL of the backend server\"\n )\n\n parser.add_argument(\n \"--use-backend\",\n default=\"always\",\n choices=[\"always\", \"never\", \"optional\"],\n type=str,\n help=\"Use the backend server\",\n )\n\n parser.add_argument(\n \"--report\",\n default=\"html\",\n choices=[\"html\", \"json\", \"console\"],\n type=str,\n help=\"Format of the report (options: console, json, html)\",\n )\n\n parser.add_argument(\n \"--report-filename\",\n default=\"prospector-report\",\n type=str,\n help=\"File where to save the report\",\n )\n\n parser.add_argument(\"-c\", \"--conf\", help=\"specify configuration file\")\n\n parser.add_argument(\n \"-p\", \"--ping\", help=\"Contact server to check it's alive\", action=\"store_true\"\n )\n\n parser.add_argument(\n \"-l\",\n \"--log-level\",\n dest=\"log_level\",\n choices=[\"DEBUG\", \"INFO\", \"WARNING\", \"ERROR\", \"CRITICAL\"],\n help=\"Set the logging level\",\n )\n\n return parser.parse_args(args[1:])\n\n\ndef getConfiguration(customConfigFile=None):\n # simple is better: only one configuration file is\n # taken into account, no overriding of options from\n # one file to the other!\n\n # the order is (as soon as one is found, the rest is ignored):\n # 1) the file passed as argument to this function\n # 2) ./prospector.conf\n # 3) ~/.prospector/conf\n\n localConfigFile = os.path.join(os.getcwd(), \"prospector.conf\")\n userConfigFile = os.path.join(Path.home(), \".prospector/conf\")\n\n config = configparser.ConfigParser()\n\n if customConfigFile and os.path.isfile(customConfigFile):\n configFile = customConfigFile\n elif os.path.isfile(localConfigFile):\n configFile = localConfigFile\n elif os.path.isfile(userConfigFile):\n configFile = userConfigFile\n else:\n return None\n\n _logger.info(\"Loading configuration from \" + configFile)\n config.read(configFile)\n return 
parse_config(config)\n\n\ndef parse_config(configuration: configparser.ConfigParser) -> Dict[str, Any]:\n \"\"\"Parse the configuration file and return the options as a dictionary.\"\"\"\n options = {}\n for section in configuration.sections():\n for option in configuration.options(section):\n try:\n options[option] = configuration.getboolean(section, option)\n except ValueError:\n options[option] = configuration.get(section, option)\n return options\n\n\ndef main(argv): # noqa: C901\n with ConsoleWriter(\"Initialization\") as console:\n args = parseArguments(argv) # print(args)\n\n if args.log_level:\n log.config.level = getattr(logging, args.log_level)\n\n _logger.info(\n f\"global log level is set to {logging.getLevelName(log.config.level)}\"\n )\n\n if args.vulnerability_id is None:\n _logger.error(\"No vulnerability id was specified. Cannot proceed.\")\n console.print(\n \"No vulnerability id was specified. Cannot proceed.\",\n status=MessageStatus.ERROR,\n )\n return False\n\n configuration = getConfiguration(args.conf)\n\n if configuration is None:\n _logger.error(\"Invalid configuration, exiting.\")\n return False\n\n report = args.report or configuration.get(\"report\")\n\n nvd_rest_endpoint = configuration.get(\"nvd_rest_endpoint\", \"\") # default ???\n\n backend = args.backend or configuration.get(\"backend\", DEFAULT_BACKEND) # ???\n\n use_backend = args.use_backend\n\n if args.ping:\n return ping_backend(backend, log.config.level < logging.INFO)\n\n vulnerability_id = args.vulnerability_id\n repository_url = args.repository\n vuln_descr = args.descr\n filter_extensions = args.filter_extensions.split(\",\")\n\n # if no backend the filters on the advisory do not work\n use_nvd = False\n if args.vulnerability_id.casefold().startswith(\"cve-\"):\n use_nvd = True\n if args.use_nvd is True:\n use_nvd = True\n elif args.no_use_nvd is True:\n use_nvd = False\n\n fetch_references = args.fetch_references or configuration.get(\n \"fetch_references\", False\n )\n\n tag_interval = args.tag_interval\n version_interval = args.version_interval\n time_limit_before = TIME_LIMIT_BEFORE\n time_limit_after = TIME_LIMIT_AFTER\n max_candidates = args.max_candidates\n modified_files = args.modified_files.split(\",\") if args.modified_files else []\n advisory_keywords = (\n args.advisory_keywords.split(\",\") if args.advisory_keywords else []\n )\n\n publication_date = \"\"\n if args.pub_date != \"\":\n publication_date = args.pub_date + \"T00:00Z\"\n # if the date is forced manually, the time interval can\n # be restricted\n # time_limit_before = int(time_limit_before / 5)\n # time_limit_after = int(time_limit_after / 2)\n\n git_cache = os.getenv(\"GIT_CACHE\", default=GIT_CACHE)\n\n git_cache = configuration.get(\"git_cache\", git_cache)\n\n _logger.debug(\"Using the following configuration:\")\n _logger.pretty_log(configuration)\n\n _logger.debug(\"Vulnerability ID: \" + vulnerability_id)\n _logger.debug(\"time-limit before: \" + str(time_limit_before))\n _logger.debug(\"time-limit after: \" + str(time_limit_after))\n\n active_rules = [\"ALL\"]\n\n results, advisory_record = prospector(\n vulnerability_id=vulnerability_id,\n repository_url=repository_url,\n publication_date=publication_date,\n vuln_descr=vuln_descr,\n tag_interval=tag_interval,\n filter_extensions=filter_extensions,\n version_interval=version_interval,\n modified_files=set(modified_files),\n advisory_keywords=set(advisory_keywords),\n time_limit_before=time_limit_before,\n time_limit_after=time_limit_after,\n use_nvd=use_nvd,\n 
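# the remaining keyword arguments wire in the NVD endpoint, reference fetching, backend access, git cache, candidate limit and rules\n        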
nvd_rest_endpoint=nvd_rest_endpoint,\n        fetch_references=fetch_references,\n        backend_address=backend,\n        use_backend=use_backend,\n        git_cache=git_cache,\n        limit_candidates=max_candidates,\n        rules=active_rules,\n    )\n\n    with ConsoleWriter(\"Generating report\") as console:\n        report_file = None\n        if report == \"console\":\n            report_on_console(results, advisory_record, log.config.level < logging.INFO)\n        elif report == \"json\":\n            report_file = report_as_json(\n                results, advisory_record, args.report_filename + \".json\"\n            )\n        elif report == \"html\":\n            report_file = report_as_html(\n                results, advisory_record, args.report_filename + \".html\"\n            )\n        else:\n            _logger.warning(\"Invalid report type specified, using 'console'\")\n            console.set_status(MessageStatus.WARNING)\n            console.print(\n                f\"{report} is not a valid report type, 'console' will be used instead\",\n            )\n            report_on_console(results, advisory_record, log.config.level < logging.INFO)\n\n        _logger.info(\"\\n\" + execution_statistics.generate_console_tree())\n        execution_time = execution_statistics[\"core\"][\"execution time\"][0]\n        console.print(f\"Execution time: {execution_time:.4f} sec\")\n        if report_file:\n            console.print(f\"Report saved in {report_file}\")\n        return True\n\n\ndef signal_handler(signal, frame):\n    _logger.info(\"Exited with keyboard interrupt\")\n    sys.exit(0)\n\n\nif __name__ == \"__main__\":  # pragma: no cover\n    signal.signal(signal.SIGINT, signal_handler)\n    main(sys.argv)\n","repo_name":"hawkjjyy/project-kb","sub_path":"prospector/client/cli/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"75"} +{"seq_id":"26358862712","text":"import pika\nimport os\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\ninput(\"Press [enter] to initialise the RMQ parameters.\")\ncredential = pika.PlainCredentials(os.getenv(\"user\"), os.getenv(\"pass\"))\nconnection = pika.BlockingConnection(pika.ConnectionParameters(\n    host=os.getenv(\"host\"),\n    port=int(os.getenv(\"port\")),\n    virtual_host=os.getenv(\"vhost\"),\n    credentials=credential\n))\nprint(\">> RMQ parameters initialised successfully!!\")\nprint(\"=============================================================\")\n\ninput(\"Press [enter] to open the connection to RMQ.\")\nchannel = connection.channel()\nprint(\">> Connection to RMQ opened successfully!!\")\nprint(\"=============================================================\")\n\nprint(\"Enter the name of the queue channel for sending messages through RMQ.\")\nqueue_name = input(\">> channel: \")\nchannel.queue_declare(\n    queue=queue_name, # sets the queue name\n    durable=True # keeps the queue even if the rabbitMQ server stops\n)\n\n# publish (send) the message\ntujuan = input(f\">> destination: \")\nprint(\"Enter the message to send or type 'exit' to close.\")\nwhile True:\n    message = input(\">> message: \")\n    if message == 'exit':\n        break\n\n    channel.basic_publish(\n        exchange='',\n        routing_key=tujuan, # destination queue name\n        body=message, # body of the message sent to the queue\n        properties=pika.BasicProperties(delivery_mode=pika.spec.PERSISTENT_DELIVERY_MODE) # make the message persistent\n    )\n    print(f\" [x] Sent to {tujuan}\")\nconnection.close() # close the connection after sending\n","repo_name":"twentiecker/chatting-rabbitmq-python","sub_path":"producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"ms","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"2788703349","text":"\"\"\"\n最长回文子串\nhttps://leetcode-cn.com/problems/longest-palindromic-substring\n\n给定一个字符串 s,找到 s 中最长的回文子串。你可以假设 s 的最大长度为 1000。\n\n示例 1:\n\n输入: \"babad\"\n输出: \"bab\"\n注意: \"aba\" 也是一个有效答案。\n\n示例 2:\n\n输入: \"cbbd\"\n输出: \"bb\"\n\"\"\"\n\n\nclass Solution:\n\n def longestPalindrome(self, s: str) -> str:\n \"\"\"\n 解题思路:\n 算法基于一个观察结果:\n 所有的长度超过1的回文字符串都是由比它小的回文字符串两端添加字符得来的,比如ababa是由bab在两端添加a得来的。最小的回文字符串是单个\n 字母组成的回文字符串。\n 其中又分两种,\n - 长度为2的回文字符串是由长度为1的回文字符串添加一个与原来的字母相同的字母构建而来。\n - 长度超过2的回文字符串都是由比它长度小2的回文字符串在两端各添加一个相同字母构建而来。\n 因此,使用动态规划制表,以长度为1和2的回文字符串为起点做表,依次算出长度更长的回文字符串,做表结束后查表即可。\n\n 效率:\n 执行用时 : 3824 ms , 在所有 Python3 提交中击败了 36.71% 的用户\n 内存消耗 : 102.6 MB , 在所有 Python3 提交中击败了 5.01% 的用户\n \"\"\"\n # 处理长度小于2的特殊输入\n if not s:\n return ''\n if len(s) == 1:\n return s[0]\n\n # 初始化一个n行的回文表,并填充上第一行和第二行\n max_index = len(s)-1\n palindromes = [[] for _ in range(len(s))]\n palindromes[0] = [(i, i) for i in range(len(s))]\n for start, end in palindromes[0]:\n if start-1>= 0 and s[start-1] == s[end]:\n palindromes[1].append((start-1, end))\n if end+1 <= max_index and s[start] == s[end+1]:\n palindromes[1].append((start, end+1))\n\n # 从第一行开始,逐行处理在该行回文的基础上生成的新回文。\n for r in range(len(s)):\n for start, end in palindromes[r]:\n if start-1 >= 0 and end+1 < max_index and s[start-1] == s[end+1]:\n palindromes[r+2].append((start-1, end+1))\n\n # 从下向上查表,取出最长的回文\n for p in palindromes[::-1]:\n if p:\n start, end = p[0]\n return s[start: end+1]\n\n def longestPalindromeImproved(self, s: str) -> str:\n \"\"\"\n 解题思路:\n 与之前一样,但是只保留三行,而不再存储整个表\n\n 效率:\n 执行用时 : 2780 ms , 在所有 Python3 提交中击败了 48.14% 的用户\n 内存消耗 : 13.9 MB , 在所有 Python3 提交中击败了 34.52% 的用户\n \"\"\"\n # 处理长度小于2的特殊输入\n if not s:\n return ''\n if len(s) == 1:\n return s[0]\n\n # 初始化前两行回文表\n max_index = len(s)-1\n current_palindromes = [(i, i) for i in range(len(s))]\n next_palindromes = []\n for start, end in current_palindromes:\n if start-1>= 0 and s[start-1] == s[end]:\n next_palindromes.append((start-1, end))\n if end+1 <= max_index and s[start] == s[end+1]:\n next_palindromes.append((start, end+1))\n\n new_palindromes = []\n longest_palindromes = None\n # 从current palindromes生成新的回文,存储在new_palindromes\n # 每次迭代向前递进一个回文字符串长度\n while current_palindromes or next_palindromes or new_palindromes:\n for start, end in current_palindromes:\n if start-1 >= 0 and end+1 <= max_index and s[start-1] == s[end+1]:\n new_palindromes.append((start-1, end+1))\n if current_palindromes:\n longest_palindromes = current_palindromes\n current_palindromes = next_palindromes\n next_palindromes = new_palindromes\n new_palindromes = []\n\n # 取出最长的回文\n start, end = longest_palindromes[0]\n return s[start: end+1]\n","repo_name":"GoogleGu/leetcode","sub_path":"leetcode/dynamic programming/leet_5.py","file_name":"leet_5.py","file_ext":"py","file_size_in_byte":4247,"program_lang":"python","lang":"zh","doc_type":"code","stars":3,"dataset":"github-code","pt":"75"} +{"seq_id":"31978587445","text":"from selenium import webdriver \r\nfrom selenium.webdriver.chrome.service import Service \r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nimport pandas as pd\r\nfrom selenium.webdriver.chrome.options import Options\r\n\r\n# two sections to script which are first getting schedule and game results (under scrape past games) and second scraping lineups and betting odds (under scrape game data and betting lines)\r\n# the schedule and game results must 
be updated before lineups and betting odds can be scraped accurately \r\n# player batting statistics is used for lineups and that csv can be downloaded at https://www.baseball-reference.com/leagues/majors/2022-standard-batting.shtml\r\n# doubleheaders can get messed up if earlier games are placed later in the mlb.com scraping so would recommend checking accuracy of those rows of data manually \r\n# best thing to do is comment out with block comments each section of code and run it separately\r\n\r\nclass Scraper():\r\n \r\n # class-wide variables that set up browser and create dataframe for scraped games\r\n executable_path = Service('C:/Computer Science/chromedriver_win32/chromedriver.exe')\r\n chrome_options = Options()\r\n chrome_options.add_argument('--disable-dev-shm-usage') \r\n browser = webdriver.Chrome(options = chrome_options, service = executable_path)\r\n wait = WebDriverWait(browser, 5)\r\n df = pd.DataFrame(columns=['Week', 'Home_Team', 'Away_Team', 'Result', 'Date', 'Home_Team_Win', 'Home_Team_Loss', 'Away_Team_Win', 'Away_Team_Loss'])\r\n \r\n # turns scraped web elements into a usable list \r\n def turn_list_to_text(self, list_passed):\r\n for i in range(0, len(list_passed)):\r\n list_passed[i] = list_passed[i].text\r\n \r\n # scrapes past mlb games\r\n def scrape_past_games(self):\r\n \r\n # months that mlb games are played and used to navigate through website\r\n months = ['Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct']\r\n \r\n # opens up browser to website and starts count\r\n self.browser.get('http://www.playoffstatus.com/mlb/mlbaprschedule.html')\r\n count = 0\r\n \r\n # loops through months \r\n for month in months:\r\n \r\n # clicks on month by using link text \r\n self.browser.find_element(By.PARTIAL_LINK_TEXT, month).click()\r\n \r\n # loops through table of information \r\n for i in range(2, 1000):\r\n \r\n # gets the week of the game\r\n # try except is needed if the end of table is hit and an error is thrown\r\n try:\r\n week = self.wait.until(EC.presence_of_all_elements_located((By.XPATH, '//*[@id=\"sflx\"]/div/table[3]/tbody/tr[' + str(i) + ']/td[1]')))\r\n self.turn_list_to_text(week)\r\n week = int(week[0])\r\n except:\r\n break\r\n \r\n # prints count\r\n count += 1\r\n print(str(count) + '/' + str(2430))\r\n \r\n # gets the home team and record \r\n home_team = self.wait.until(EC.presence_of_all_elements_located((By.XPATH, '//*[@id=\"sflx\"]/div/table[3]/tbody/tr[' + str(i) + ']/td[2]')))\r\n self.turn_list_to_text(home_team)\r\n home_team_win = ''.join(list(home_team[0].split()[-1].split('‑')[0])[1:])\r\n home_team_loss = ''.join(list(home_team[0].split()[-1].split('‑')[1])[:-1])\r\n home_team = home_team[0].split()[:-1]\r\n home_team = ' '.join(home_team)\r\n \r\n # try except needed because scraping upcoming matches that don't have a score yet and this throws an error\r\n try:\r\n \r\n # gets the score\r\n score = self.wait.until(EC.presence_of_all_elements_located((By.XPATH, '//*[@id=\"sflx\"]/div/table[3]/tbody/tr[' + str(i) + ']/td[3]')))\r\n self.turn_list_to_text(score)\r\n home_score = int(score[0].split()[0].split('‑')[0])\r\n away_score = int(score[0].split()[0].split('‑')[1])\r\n \r\n # determines the winner\r\n if(home_score > away_score):\r\n result = 1\r\n elif(away_score > home_score):\r\n result = 0\r\n else:\r\n continue\r\n \r\n # for future games makes the result column No Result\r\n except:\r\n result = 'No Result'\r\n \r\n # gets the away team and record\r\n away_team = self.wait.until(EC.presence_of_all_elements_located((By.XPATH, 
'//*[@id=\"sflx\"]/div/table[3]/tbody/tr[' + str(i) + ']/td[4]')))\r\n self.turn_list_to_text(away_team)\r\n away_team_win = ''.join(list(away_team[0].split()[-1].split('‑')[0])[1:])\r\n away_team_loss = ''.join(list(away_team[0].split()[-1].split('‑')[1])[:-1])\r\n away_team = away_team[0].split()[:-1]\r\n away_team = ' '.join(away_team)\r\n \r\n # gets the date\r\n date = self.wait.until(EC.presence_of_all_elements_located((By.XPATH, '//*[@id=\"sflx\"]/div/table[3]/tbody/tr[' + str(i) + ']/td[5]')))\r\n self.turn_list_to_text(date)\r\n date = date[0].split(',')[0].split()[1:]\r\n date = ' '.join(date)\r\n \r\n # appends information to dataframe as a row\r\n self.df.loc[len(self.df)] = [week, home_team, away_team, result, date, home_team_win, home_team_loss, away_team_win, away_team_loss]\r\n \r\n # returns the dataframe \r\n return self.df\r\n \r\n # scrapes pitching and batting data from previously scraped games\r\n def scrape_game_data(self, batter_stats_df=pd.DataFrame(), game_date=''):\r\n \r\n # adds new empty columns for pitching data to the end of the dataframe where scraped data will go\r\n self.df['Away_Pitching_Win'] = 'N/A'\r\n self.df['Away_Pitching_Loss'] = 'N/A'\r\n self.df['Away_Pitching_ERA'] = 'N/A'\r\n self.df['Away_Pitching_SO'] = 'N/A'\r\n self.df['Home_Pitching_Win'] = 'N/A'\r\n self.df['Home_Pitching_Loss'] = 'N/A'\r\n self.df['Home_Pitching_ERA'] = 'N/A'\r\n self.df['Home_Pitching_SO'] = 'N/A'\r\n \r\n # loops through columns of batter data and adds columns to dataframe for all 9 batters for home and away \r\n for i in range(1, 10):\r\n for column in list(batter_stats_df.columns):\r\n self.df['Away_B' + str(i) + '_' + column] = 'N/A'\r\n for i in range(1, 10):\r\n for column in list(batter_stats_df.columns):\r\n self.df['Home_B' + str(i) + '_' + column] = 'N/A'\r\n \r\n # if no starting date was provided begin at the start of the season\r\n if(game_date == ''):\r\n \r\n # opens up browser to mlb website \r\n self.browser.get('https://www.mlb.com/starting-lineups/2022-04-06')\r\n else:\r\n \r\n # opens up browser to mlb website but at specific date\r\n self.browser.get('https://www.mlb.com/starting-lineups/' + game_date)\r\n \r\n # loops through dates \r\n for iteration in range(1000):\r\n \r\n # for first date refresh browser to help ensure website loads properly\r\n if(iteration == 0):\r\n self.browser.refresh()\r\n \r\n # clicks the next date\r\n self.browser.find_element(By.CLASS_NAME, 'p-button__button.p-button__button--secondary.p-datepicker__next').click()\r\n \r\n # gets header and refreshes if it is not english\r\n header = self.wait.until(EC.presence_of_all_elements_located((By.XPATH, '//*[@id=\"starting-lineups_index\"]/main/div[2]/div/div/div/section/div[1]/h2')))\r\n self.turn_list_to_text(header) \r\n if(header != 'Starting Lineups'):\r\n self.browser.refresh()\r\n \r\n # gets the date from page and formats it \r\n date = self.wait.until(EC.presence_of_all_elements_located((By.XPATH, '//*[@id=\"starting-lineups_index\"]/main/div[2]/div/div/div/section/div[1]/h2')))\r\n self.turn_list_to_text(date)\r\n date = date[0].split()[:2]\r\n date[0] = ''.join(list(date[0])[:3])\r\n date[1] = ''.join(list(date[1])[:-3])\r\n date = ' '.join(date)\r\n \r\n # prints date \r\n print(date)\r\n \r\n # gets the home teams\r\n home_teams = self.wait.until(EC.presence_of_all_elements_located((By.CLASS_NAME, 'starting-lineups__team-name.starting-lineups__team-name--home')))\r\n self.turn_list_to_text(home_teams)\r\n \r\n # get pitching data, pitcher names, and batter 
lineups for each game\r\n pitcher_data = self.wait.until(EC.presence_of_all_elements_located((By.CLASS_NAME, 'starting-lineups__pitcher-stats-summary')))\r\n pitcher_names = self.wait.until(EC.presence_of_all_elements_located((By.CLASS_NAME, 'starting-lineups__pitcher-name')))\r\n batter_lineups_away = self.wait.until(EC.presence_of_all_elements_located((By.CLASS_NAME, 'starting-lineups__team.starting-lineups__team--away')))\r\n batter_lineups_home = self.wait.until(EC.presence_of_all_elements_located((By.CLASS_NAME, 'starting-lineups__team.starting-lineups__team--home')))\r\n \r\n # formats pitcher data and creates list for pitcher stats\r\n self.turn_list_to_text(pitcher_data)\r\n pitcher_stats = []\r\n \r\n # formats pitcher names \r\n self.turn_list_to_text(pitcher_names)\r\n \r\n # new list for pitcher data and variable to help with further formatting of pitcher data\r\n pitcher_summaries = []\r\n adjust = 0\r\n \r\n # loops through pitcher names \r\n for i in range(0, len(pitcher_names)):\r\n \r\n # if pitcher names is TBD, add 1 to the adjust variable and append N/A to pitcher summaries\r\n if(pitcher_names[i] == 'TBD'):\r\n adjust += 1\r\n pitcher_summaries.append('N/A')\r\n \r\n # else append pitcher data with adjustment so no index error is thrown\r\n else:\r\n pitcher_summaries.append(pitcher_data[i - adjust])\r\n \r\n # formats batter lineups and creates list for stats\r\n self.turn_list_to_text(batter_lineups_away)\r\n self.turn_list_to_text(batter_lineups_home)\r\n batter_stats = []\r\n \r\n # removes empty strings from batter lineup lists\r\n try:\r\n while True:\r\n batter_lineups_away.remove('')\r\n batter_lineups_home.remove('')\r\n except:\r\n pass\r\n \r\n # new combined list for batter lineup which will be used after more formatting \r\n batter_lineups = []\r\n \r\n # loops through away batter lineups \r\n for i in range(0, len(batter_lineups_away)):\r\n \r\n # if TBD append TBD to batter lineups list\r\n if(batter_lineups_away[i] == 'TBD'):\r\n batter_lineups.append('TBD')\r\n \r\n # else do more formatting and then append\r\n else:\r\n batter_lineups_away[i] = batter_lineups_away[i].split('\\n')\r\n for batter in batter_lineups_away[i]:\r\n batter_lineups.append(' '.join(batter.split()[:2]))\r\n \r\n # same code as above but for home instead\r\n if(batter_lineups_home[i] == 'TBD'):\r\n batter_lineups.append('TBD')\r\n else:\r\n batter_lineups_home[i] = batter_lineups_home[i].split('\\n')\r\n for batter in batter_lineups_home[i]:\r\n batter_lineups.append(' '.join(batter.split()[:2])) \r\n \r\n # loops through batter lineups and removes special characters from names that cause issues\r\n for i in range(0, len(batter_lineups)):\r\n batter_lineups[i] = batter_lineups[i].replace('á', 'a')\r\n batter_lineups[i] = batter_lineups[i].replace('í', 'i')\r\n batter_lineups[i] = batter_lineups[i].replace('ñ', 'n')\r\n batter_lineups[i] = batter_lineups[i].replace('é', 'e')\r\n batter_lineups[i] = batter_lineups[i].replace('ó', 'o')\r\n batter_lineups[i] = batter_lineups[i].replace('ú', 'u')\r\n \r\n # if statement for breaking out of for loop if all batting lineups had same value (likely means end of data cause all lineups are TBD)\r\n if(len(set(batter_lineups)) == 1):\r\n break\r\n \r\n # gets all batter names from batter stats dataframe \r\n data_batter_names = list(batter_stats_df['PB_Name'])\r\n \r\n # list of data for the average league player \r\n # will be used if data for some players in a team can't be found\r\n league_average = 
['Average',27,164,600,537,69,129,26,2,16,65,8,3,51,133,.239,.310,.387,.697,75,208,11,6,1,4,1]\r\n        \r\n        # loops through all data and will be switching off between home and away \r\n        for i in range(0, len(pitcher_summaries)):\r\n            \r\n            # pitcher and batter row is each row of pitcher and batter data including home and away\r\n            if(i % 2 == 0):\r\n                \r\n                # list for pitching data for a single game\r\n                pitcher_row = []\r\n                \r\n                # if no pitcher or pitcher stats contains empty data then append N/A\r\n                if(pitcher_summaries[i] == 'N/A' or pitcher_summaries[i].find('-.--') != -1):\r\n                    for _ in range(4):\r\n                        pitcher_row.append('N/A')\r\n                \r\n                # else format and append pitching data\r\n                else:\r\n                    pitcher_row.append(int(pitcher_summaries[i].split()[0].split('-')[0]))\r\n                    pitcher_row.append(int(pitcher_summaries[i].split()[0].split('-')[1].split(',')[0]))\r\n                    \r\n                    # maxes out ERA at 10\r\n                    if(float(pitcher_summaries[i].split()[1])>=10):\r\n                        pitcher_row.append(10)\r\n                    else:\r\n                        pitcher_row.append(float(pitcher_summaries[i].split()[1]))\r\n                    pitcher_row.append(int(pitcher_summaries[i].split()[3]))\r\n                \r\n                # creates batter row and if first value in lineup is TBD then assume no players in lineup\r\n                batter_row = []\r\n                if(batter_lineups[0] != 'TBD'):\r\n                    \r\n                    # if players in lineup then loop through all 9 of them\r\n                    for x in range(0, 9):\r\n                        \r\n                        # if player in data get the index \r\n                        if(batter_lineups[x] in data_batter_names):\r\n                            row_index = batter_stats_df.index[batter_stats_df['PB_Name'] == batter_lineups[x]].tolist()[0]\r\n                            \r\n                            # adds their data to the batter row \r\n                            for value in batter_stats_df.loc[row_index, :].values.tolist():\r\n                                batter_row.append(value)\r\n                        \r\n                        # if player not in data then append average player to the batter row for that player \r\n                        else:\r\n                            for average in league_average:\r\n                                batter_row.append(average) \r\n                    \r\n                    # removes the 9 players looked at from the batting lineup list\r\n                    batter_lineups = batter_lineups[9:]\r\n                \r\n                # if the whole lineup is TBD then add N/A for every player's data (234 comes from 9 * 26)\r\n                else:\r\n                    for x in range(0, 234):\r\n                        batter_row.append('N/A')\r\n                    \r\n                    # remove TBD from batting lineup\r\n                    batter_lineups = batter_lineups[1:] \r\n            \r\n            # for home teams because i % 2 == 1\r\n            # code is very similar to away teams above except at the end each row appends to pitcher and batter stats lists \r\n            else:\r\n                if(pitcher_summaries[i] == 'N/A' or pitcher_summaries[i].find('-.--') != -1):\r\n                    for _ in range(4):\r\n                        pitcher_row.append('N/A')\r\n                else:\r\n                    pitcher_row.append(int(pitcher_summaries[i].split()[0].split('-')[0]))\r\n                    pitcher_row.append(int(pitcher_summaries[i].split()[0].split('-')[1].split(',')[0]))\r\n                    if(float(pitcher_summaries[i].split()[1])>=10):\r\n                        pitcher_row.append(10)\r\n                    else:\r\n                        pitcher_row.append(float(pitcher_summaries[i].split()[1]))\r\n                    pitcher_row.append(int(pitcher_summaries[i].split()[3]))\r\n                \r\n                # append to pitcher stats list\r\n                pitcher_stats.append(pitcher_row)\r\n                if(batter_lineups[0] != 'TBD'):\r\n                    for x in range(0, 9):\r\n                        if(batter_lineups[x] in data_batter_names):\r\n                            row_index = batter_stats_df.index[batter_stats_df['PB_Name'] == batter_lineups[x]].tolist()[0]\r\n                            for value in batter_stats_df.loc[row_index, :].values.tolist():\r\n                                batter_row.append(value)\r\n                        else:\r\n                            for average in league_average:\r\n                                batter_row.append(average)\r\n                    batter_lineups = batter_lineups[9:]\r\n                else:\r\n                    for x in range(0, 234):\r\n                        batter_row.append('N/A')\r\n                    batter_lineups = batter_lineups[1:]\r\n                \r\n                # append to batter stats list\r\n                
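# one combined row per game: the away lineup's 234 values first, then the home lineup's\r\n                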
batter_stats.append(batter_row)\r\n        \r\n        # gets portion of overall dataframe that is from the scraped date \r\n        date_df = self.df.loc[self.df['Date'] == date]\r\n        \r\n        # replaces value of Diamondbacks in the data because it is called D-backs\r\n        home_teams = ['Diamondbacks' if i=='D-backs' else i for i in home_teams]\r\n        \r\n        # loops through home teams seeing if in the played game data\r\n        for i in range(0, len(home_teams)):\r\n            if(home_teams[i] in list(date_df['Home_Team'])):\r\n                \r\n                # if in played game data then gets the row index\r\n                row_index = date_df.index[date_df['Home_Team'] == home_teams[i]].tolist()[0]\r\n                \r\n                # gets the data from current row at row index \r\n                # index of 9 represents the data before the N/A parts of current row and that includes week, home team, away team, team records, etc.\r\n                current_row = date_df.loc[row_index, :].values.tolist()[:9]\r\n                \r\n                # try except is for outliers\r\n                try:\r\n                    \r\n                    # adds pitcher data and batter data to current data and adds back to dataframe\r\n                    new_row = current_row + pitcher_stats[i] + batter_stats[i]\r\n                    date_df.loc[row_index]= new_row\r\n                except:\r\n                    continue \r\n                \r\n                # updates original dataframe \r\n                self.df.update(date_df)\r\n                \r\n                # drops row of data just added to original dataframe from the data dataframe \r\n                # used for doubleheaders\r\n                date_df.drop(row_index, inplace=True)\r\n    \r\n        # drops empty rows and away and home batter name columns\r\n        self.df.drop(self.df.loc[self.df['Home_Pitching_Win']=='N/A'].index, inplace=True)\r\n        for i in range(1, 10):\r\n            self.df.drop('Away_B' + str(i) + '_PB_Name', axis=1, inplace=True)\r\n            self.df.drop('Home_B' + str(i) + '_PB_Name', axis=1, inplace=True)\r\n        \r\n        # returns completed dataframe\r\n        return self.df\r\n    \r\n    # scrapes betting lines for upcoming games \r\n    def scrape_betting_lines(self, mlb_game_df=pd.DataFrame()):\r\n        \r\n        # opens up browser to odds website \r\n        self.browser.get('https://www.oddsshark.com/mlb/consensus-picks')\r\n        \r\n        # inserts odds columns and separates future game data from played game data\r\n        mlb_game_df.insert(5, 'Home_Betting_Odds', 'N/A')\r\n        mlb_game_df.insert(6, 'Away_Betting_Odds', 'N/A')\r\n        future_game_df = mlb_game_df[mlb_game_df['Result']=='No Result']\r\n        \r\n        # gets the betting lines and teams\r\n        odds = self.wait.until(EC.presence_of_all_elements_located((By.CLASS_NAME, 'pick-spread-price')))\r\n        teams = self.wait.until(EC.presence_of_all_elements_located((By.CLASS_NAME, 'pick-teams-desktop')))\r\n        self.turn_list_to_text(odds)\r\n        self.turn_list_to_text(teams)\r\n        \r\n        # the teams need a bit of formatting\r\n        teams_list = []\r\n        for team in teams:\r\n            teams_list.append(team.split('\\n')[0].title())\r\n            teams_list.append(team.rsplit('\\n', 1)[-1].title())\r\n        \r\n        # loops through teams \r\n        for i in range(0, len(teams_list)):\r\n            \r\n            # if away team\r\n            if(i % 2 == 0):\r\n                \r\n                # if away team is in upcoming games get the row index of it \r\n                if(teams_list[i] in list(future_game_df['Away_Team'])):\r\n                    row_index = future_game_df.index[future_game_df['Away_Team'] == teams_list[i]].tolist()[0]\r\n                    \r\n                    # add the team's odds to the dataframe\r\n                    future_game_df.at[row_index, 'Away_Betting_Odds'] = odds[i]\r\n            \r\n            # code for home is the same\r\n            else:\r\n                if(teams_list[i] in list(future_game_df['Home_Team'])):\r\n                    row_index = future_game_df.index[future_game_df['Home_Team'] == teams_list[i]].tolist()[0]\r\n                    future_game_df.at[row_index, 'Home_Betting_Odds'] = odds[i]\r\n        \r\n        # update original dataframe with changed betting lines\r\n        
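# DataFrame.update aligns on the index, so only rows present in future_game_df are changed\r\n        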
mlb_game_df.update(future_game_df)\r\n        \r\n        # drops data from dataframe \r\n        # for doubleheaders\r\n        try:\r\n            future_game_df.drop(row_index, inplace=True)\r\n        except:\r\n            pass\r\n        \r\n        # return dataframe\r\n        return mlb_game_df\r\n\r\n# prepares the batter dataframe for scraping data \r\ndef data_preparation(batter_stats_df = pd.DataFrame()):\r\n    \r\n    # drops empty rows and useless columns \r\n    batter_stats_df = batter_stats_df.dropna()\r\n    batter_stats_df = batter_stats_df.drop('Rk', 1)\r\n    batter_stats_df = batter_stats_df.drop('Tm', 1)\r\n    batter_stats_df = batter_stats_df.drop('Lg', 1)\r\n    batter_stats_df = batter_stats_df.drop('Pos Summary', 1)\r\n    \r\n    # adds PB_ in front of every column\r\n    batter_stats_columns = list(batter_stats_df.columns)\r\n    for i in range(0, len(batter_stats_columns)):\r\n        batter_stats_columns[i] = 'PB_' + batter_stats_columns[i]\r\n    batter_stats_df.columns = batter_stats_columns\r\n    \r\n    # reformats the names of batters because they are read in with some problems\r\n    batter_names = list(batter_stats_df['PB_Name'])\r\n    for i in range(0, len(batter_names)):\r\n        first_name = list(batter_names[i].split()[0])[0]\r\n        last_name = batter_names[i].split()[1]\r\n        last_name = last_name.split('\\\\')[0]\r\n        last_name = last_name.split('*')[0]\r\n        last_name = last_name.split('#')[0]\r\n        batter_names[i] = first_name + ' ' + last_name\r\n    batter_stats_df['PB_Name'] = batter_names\r\n    \r\n    # resets index and drops index column\r\n    batter_stats_df = batter_stats_df.reset_index()\r\n    batter_stats_df = batter_stats_df.drop(batter_stats_df.columns[0], axis=1)\r\n    \r\n    # returns dataframe\r\n    return batter_stats_df\r\n    \r\ndef main():\r\n    \r\n    # scraper object\r\n    Scraper_obj = Scraper()\r\n    \r\n    # scrapes past games and doesn't take any parameters \r\n    df = Scraper_obj.scrape_past_games()\r\n    \r\n    # exports df to csv \r\n    df.to_csv('C:/Computer Science/MLB-Game-Prediction-v1/MLB Game Prediction/Dated MLB Game Data.csv', index = False) \r\n    \r\n    # can read in game data instead of scraping it\r\n    game_data_df = pd.read_csv('C:/Computer Science/MLB-Game-Prediction-v1/MLB Game Prediction/Dated MLB Game Data.csv')\r\n    Scraper_obj.df = game_data_df\r\n    \r\n    # reads in the batter statistics data\r\n    batter_stats_df = pd.read_csv('C:/Computer Science/MLB-Game-Prediction-v1/MLB Game Prediction/Batter Statistics.csv')\r\n    \r\n    # prepares the batter dataframe and takes the batter dataframe as a parameter\r\n    batter_stats_df = data_preparation(batter_stats_df)\r\n    \r\n    # reads in past mlb game data to try and avoid scraping what has already been scraped before \r\n    # try except in case there is no past mlb game data\r\n    try:\r\n        mlb_game_df = pd.read_csv('C:/Computer Science/MLB-Game-Prediction-v1/MLB Game Prediction/MLB Game Data.csv')\r\n        \r\n        # need to rescrape data which didn't have a result yet, so remove that data\r\n        mlb_game_df = mlb_game_df[mlb_game_df['Result']!='No Result']\r\n        \r\n        # prints out the last date of data that was scraped\r\n        print(list(mlb_game_df['Date'])[-1])\r\n        \r\n        # enter the date that was printed in correct format yyyy-mm-dd\r\n        game_date = input('Enter the date that was printed in correct format yyyy-mm-dd: ') \r\n        \r\n    # if couldn't find file set game date to default value\r\n    except:\r\n        game_date = ''\r\n    \r\n    # scrapes pitching and batting data for games and takes the batter stats dataframe and date as parameters\r\n    df = Scraper_obj.scrape_game_data(batter_stats_df, game_date)\r\n    \r\n    # if didn't start at beginning of the season\r\n    if(game_date != 
''):\r\n        \r\n        # concatenate old and new dataframes together\r\n        mlb_game_df = pd.concat([mlb_game_df, df])\r\n    \r\n    # else just set mlb game df to scraped df\r\n    else:\r\n        mlb_game_df = df\r\n    \r\n    # exports df to csv\r\n    mlb_game_df.to_csv('C:/Computer Science/MLB-Game-Prediction-v1/MLB Game Prediction/MLB Game Data.csv', index = False) \r\n    \r\n    # can read in mlb game data instead of scraping it \r\n    mlb_game_df = pd.read_csv('C:/Computer Science/MLB-Game-Prediction-v1/MLB Game Prediction/MLB Game Data.csv')\r\n    \r\n    # scrapes betting lines for upcoming games and takes mlb game data df as a parameter\r\n    complete_game_df = Scraper_obj.scrape_betting_lines(mlb_game_df)\r\n    \r\n    # exports df to csv\r\n    complete_game_df.to_csv('C:/Computer Science/MLB-Game-Prediction-v1/MLB Game Prediction/Complete MLB Game Data.csv', index = False) \r\n    \r\n    # quits browser after done\r\n    Scraper_obj.browser.quit()\r\n    \r\nmain()","repo_name":"AdiB2002/MLB-Game-Prediction-v1","sub_path":"MLB Game Prediction/MLB Game Data Scraper.py","file_name":"MLB Game Data Scraper.py","file_ext":"py","file_size_in_byte":28384,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"28027192640","text":"# For API\nfrom flask import Flask, request\nfrom flask_restful import Resource, Api, reqparse\nimport pandas as pd\n# For Update lamp post csv\nimport threading\nimport time\nimport schedule\nimport requests\nimport os\n\napp = Flask(__name__)\napi = Api(app)\n\n\nclass LampPost(Resource):\n\n    def get(self): \n        data = pd.read_csv('lamppost_en.csv', index_col=0, header=0) # read local CSV\n        args = request.args\n        # query (example: http://127.0.0.1:5000/lp?q=BE1240)\n        if(\"q\" in args): \n            try:\n                # Lamp post ID appears in capital letters only and no space\n                return data.loc[args[\"q\"].upper().replace(\" \", \"\")].to_dict(), 200 \n            except KeyError as err:\n                return {'KeyError': str(err)},200\n\n        elif(\"update\" in args):\n            # refresh when older than 1 month - (1 * 30 * 24 * 60 * 60) seconds\n            if (time.time() - os.path.getmtime('lamppost_en.csv') > (1 * 30 * 24 * 60 * 60)):\n                print(\"File older than 1 month, updating...\")\n                updateCSV()\n            else:\n                print(\"File up to date\")\n\n        else:\n            return 200\n    \n\n# === CSV File updates\ndef updateCSV():\n    # From https://data.gov.hk/en-data/dataset/hk-hyd-plis-\n    try:\n        url = \"http://218.253.203.24/datagovhk/plis/lamppost_en.csv\"\n        lamppostFile = requests.get(url, allow_redirects=True)\n        open('lamppost_en.csv', 'wb').write(lamppostFile.content)\n        print(\"Downloaded\")\n    except:\n        print(\"Download error\")\n# === CSV File updates\n\n\napi.add_resource(LampPost, '/')  # add endpoints\nif __name__ == '__main__':\n    app.run()  # run our Flask app\n    \n","repo_name":"chrisleunglkw/HKLampPostParse","sub_path":"lamppost.py","file_name":"lamppost.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"6833115659","text":"\t\t\t\n\t\t\t\ndef convertToLines():\n\tg = CurrentGlyph()\t\n\tif g and g.selection != []:\n\t\tg.prepareUndo()\n\t\ta_list_x = []\n\t\ta_list_y = []\n\t\tinsertAttributes = []\n\t\t\n\t\tfor c_index in range(len(g.contours)):\n\t\t\tc = g.contours[c_index]\n\t\t\tfor s_index in range(len(c.segments)):\n\t\t\t\ts = c.segments[s_index]\n\t\t\t\tif s.selected and s.type == \"curve\":\n\t\t\t\t s.type = 
\"line\"\n\t\tg.update()\n\n\t\t\t\t\nconvertToLines()\n\n","repo_name":"BlackFoundry/ConvertToLines","sub_path":"ConvertToLines.py","file_name":"ConvertToLines.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"10793435956","text":"import sys;input=sys.stdin.readline\nn = int(input()) # 후보의 수\ns = [int(input()) for _ in range(n)]\nans = 0\nwhile True:\n _max = max(s)\n _idx = s.index(_max)\n _count = s.count(_max)\n\n if _count > 1 and _idx == 0:\n ans += 1\n break\n\n if _idx == 0 :\n break\n \n s[_idx] -= 1\n s[0] += 1\n ans += 1\n\nprint(ans)","repo_name":"hyunjinee/Algorithm","sub_path":"solved.ac/python/1417.py","file_name":"1417.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"75"} +{"seq_id":"41544127842","text":"from django.shortcuts import redirect, render\nfrom .models import Job\nfrom django.core.paginator import Paginator\nfrom .forms import Apply, JobForm\nfrom django.contrib.auth.decorators import login_required\nfrom .filter import JobFilter\n# Create your views here.\n\ndef job_list(request):\n jobs_list = Job.objects.all()\n\n my_filter = JobFilter(request.GET, queryset= jobs_list)\n jobs_list = my_filter.qs\n\n paginator = Paginator(jobs_list, 3) # Show jobs per page.\n\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n\n context = {\n 'jobs': page_obj,\n 'count': jobs_list,\n 'my_filter': my_filter,\n }\n return render(request, 'jobs.html', context)\n\n\ndef job_detail(request, slug):\n jobs_detail = Job.objects.get(slug=slug)\n\n if request.method == 'POST':\n form = Apply(request.POST, request.FILES)\n if form.is_valid():\n my_form = form.save(commit=False)\n my_form.job = jobs_detail\n my_form.user = request.user\n my_form.save()\n\n else:\n form = Apply()\n\n\n context = {\n 'job': jobs_detail,\n 'form': form,\n }\n return render(request, 'job_details.html', context)\n\n@login_required\ndef add_job(request):\n if request.method == 'POST':\n form = JobForm(request.POST, request.FILES)\n if form.is_valid():\n my_form = form.save(commit=False)\n my_form.owner = request.user\n my_form.save()\n return redirect(\"jobs\")\n\n else:\n form = JobForm()\n\n\n return render(request, 'add_job.html', {'form': form})\n\n","repo_name":"OsamaMohammed3332/Django-Job-Board","sub_path":"job-board/project/job/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"25674486362","text":"import codecs\nimport os.path\n\nfrom setuptools import setup, find_packages\n\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\n\ndef read(rel_path):\n here = os.path.abspath(os.path.dirname(__file__))\n with codecs.open(os.path.join(here, rel_path), \"r\") as fp:\n return fp.read()\n\n\ndef get_version(rel_path):\n for line in read(rel_path).splitlines():\n if line.startswith(\"__version__\"):\n delim = '\"' if '\"' in line else \"'\"\n return line.split(delim)[1]\n else:\n raise RuntimeError(\"Unable to find version string.\")\n\n\nsetup(\n name=\"resalloc\",\n version=get_version(\"resalloc/__init__.py\"),\n description=\"Fungible resource allocation\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n setup_requires=[\"setuptools>=18.0\"],\n install_requires=[\n \"numpy >= 1.17.5\",\n 
\"torch\",\n ],\n packages=find_packages(),\n license=\"Apache License, Version 2.0\",\n license_files=[\"LICENSE\"],\n url=\"https://github.com/cvxgrp/resalloc\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n ],\n author=\"Akshay Agrawal\",\n author_email=\"akshayka@cs.stanford.edu\",\n)\n","repo_name":"cvxgrp/resalloc","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"75"} +{"seq_id":"40367879687","text":"# The sequence consists of natural numbers and ends with the number 0. In total, no more than 10,000 numbers are entered (not counting the trailing number 0). \n# Determine how many elements of this sequence are equal to its largest element.\n# Numbers following 0 do not need to be read.\n# A sequence of integers is entered, ending with the number 0 (the number 0 itself is not included in the sequence).\n# Print the answer to the problem\n\nnumber = int(input()) # input natural number\nmax_num = number # this number is assigned as maximum\ncount = 1\nwhile number != 0:\n number = int(input()) # if number = 0, the loop is broken\n # if the number is more than maximum, the number is assigned as maximum\n if number > max_num:\n max_num = number\n count = 1\n # if the number is equal to maximum, the number is maximum\n elif number == max_num:\n count += 1\nprint(count)\n","repo_name":"dyakubowski/Portfolio","sub_path":"Yandex_tasks/Linear search/Number_equal_to_the_maximum.py","file_name":"Number_equal_to_the_maximum.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"71117654002","text":"# -*- coding: utf-8 -*-\n\nfrom django.conf.urls import patterns, url\nimport views\n\nurlpatterns = patterns(\n 'home_application.task.views',\n (r'^$', 'task'),\n (r'^create_task/$', 'create_task'),\n (r'^up_file/$', 'up_file'),\n url(r'^excel_upload/', views.excel_upload, name='uploads'),\n)\n","repo_name":"xuwei13253838782/upload-file","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"29970940989","text":"# Dana jest tabela kursów walut. Dla każdych dwóch walut 'x' oraz 'y' wpis K[x][y] oznacza ile trzeba\n# zapłacić waluty 'x' żeby otrzymać jednostkę waluty 'y'. 
Proszę zaproponować algorytm, który sprawdza\n# czy istnieje taka waluta 'z', że za jednostkę 'z' można uzyskać więcej niż jednostkę 'z' przez serię\n# wymian walut.\n\n\ndef relax(currencies, cost, parent, j):\n if cost[currencies[j][1]] < currencies[j][2] * cost[currencies[j][0]]:\n cost[currencies[j][1]] = currencies[j][2] * cost[currencies[j][0]]\n if currencies[j][0] == parent[currencies[j][1]]:\n parent[currencies[j][1]] = currencies[j][0]\n return True\n parent[currencies[j][1]] = currencies[j][0]\n return False\n\n\ndef currency_exchange(currencies, ):\n max_vertex = 0\n for i in range(len(currencies)):\n max_vertex = max(max_vertex, currencies[i][0], currencies[i][1])\n E = len(currencies)\n cost = [0] * (max_vertex + 1)\n parent = [None] * (max_vertex + 1)\n cost[0] = 1\n for i in range(max_vertex - 1):\n for j in range(E):\n if i != 0:\n if relax(currencies, cost, parent, j):\n return True\n else:\n relax(currencies, cost, parent, j)\n return False\n\n\ncurrencies = [(0, 1, 4.5),\n (0, 2, 4),\n (2, 0, 0.25),\n (1, 2, 0.75),\n (3, 2, 100),\n (0, 3, 0.4),\n (1, 4, 6),\n (3, 4, 2)]\nprint(currency_exchange(currencies))\n","repo_name":"Szymon-Budziak/Algorithms_and_Data_Structures_course_AGH","sub_path":"Exercises/Exercise_10/05_exercise.py","file_name":"05_exercise.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"pl","doc_type":"code","stars":16,"dataset":"github-code","pt":"75"} +{"seq_id":"12365294491","text":"from torch.utils.data import Dataset, DataLoader\n\n\ndef get_dataloader(\n dataset: Dataset,\n data_path: str,\n phase: str,\n transform,\n batch_size: int = 1,\n num_workers: int = 4,\n):\n \"\"\"Build an iterable data loader\n\n Args:\n dataset (Dataset): Dataset.\n data_path (str): The path to store the image.\n phase (str): Train or valulation.\n transform (bool): Data augmentation techniques.\n batch_size (int, optional): Batches of the training set or valudation set. Defaults to 1.\n num_workers (int, optional): Multithreading. 
Defaults to 4.\n\n    Returns:\n        DataLoader: The constructed data loader.\n    \"\"\"\n    dataset = dataset(data_path, phase, transform)\n    dataloader = DataLoader(\n        dataset,\n        batch_size=batch_size,\n        num_workers=num_workers,\n        pin_memory=True,\n        shuffle=True,\n    )\n    return dataloader\n","repo_name":"yezichu/undergraduate-thesis","sub_path":"src/Dataset/get_dataloader.py","file_name":"get_dataloader.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"75"} +{"seq_id":"71907074482","text":"from skimage import data, color, measure,filters,io\nimport matplotlib.pyplot as plt\nimport skimage.io\nimport numpy as np\n\nimage = data.coins()   # Load the \"coins\" image\n\ndef filterImage(img):\n    filtered_img = filters.gaussian(img)\n    return filtered_img\n\ndef findContours(img):\n    return measure.find_contours(img)\n\nimages = {\n    'planes/samolot02.jpg',\n    'planes/samolot17.jpg',\n    'planes/samolot08.jpg',\n    'planes/samolot09.jpg',\n    'planes/samolot10.jpg',\n    'planes/samolot11.jpg',\n}\n\nplt.figure(figsize=(10, 4))\n\nfor i,img in enumerate(images,start=1):\n    plt.subplot(2,3,i)\n    loaded_img = skimage.io.imread(img, as_gray=True)\n    filtered_img = filterImage(loaded_img)\n    contours = findContours(filtered_img)\n    black_img = np.zeros((loaded_img.shape[0],loaded_img.shape[1])) # shape[0] is the height, shape[1] the width\n    plt.imshow(black_img, cmap='gray')\n    \n    # hide the axes\n    plt.axis('off')\n\n    for contour in contours:\n        plt.plot(contour[:, 1], contour[:, 0], linewidth=2, c='white')\n\nplt.subplots_adjust(wspace=0,hspace=0)\nplt.savefig('contours.pdf')\nplt.show()\n","repo_name":"SzramStaR/img_extracting","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"1918165699","text":"from django.forms import ModelForm, DateInput\n\nfrom .models import HoraMedica\n\n\nclass HoraMedicaForm(ModelForm):\n    class Meta:\n        model = HoraMedica\n        fields = ['especialidad','centroMedico','medico','fechaAtencion','horaAtencion']\n        widgets = {'fechaAtencion': DateInput(attrs={'type':'date'})}\n\nclass EstadoHoraMedicaForm(ModelForm):\n    class Meta:\n        model = HoraMedica\n        fields = ['estadoHora']\n        labels = {\n            'estadoHora':'Anular', \n        }\n        \n","repo_name":"stivenmoreta/Proyecto-CentroMedicoGalenos","sub_path":"CentroMedicoGalenos/core/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"3729041421","text":"# write a function that calculates the change\ndef solution(price, money):\n    sum = 0\n    for i in range(len(price)): # loop over the length of the price list\n        sum += price[i] # add up every element of the list\n\n    if sum <= money: # if the money on hand covers the total,\n        answer = money - sum # pay and compute the change\n        return answer\n    else: # if the money on hand is less than the total,\n        return -1 # return -1\n\nprice = [2100, 3200, 2100, 800]\nmoney = 1000\nret = solution(price, money)\nprint(ret)","repo_name":"choigabin/Certificate_Cospro","sub_path":"cospro_winter2/day02/m01_01.py","file_name":"m01_01.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"33174585578","text":"#!/usr/bin/python2\n\n\"\"\"Trains a CNN using Keras. 
Includes a bunch of fancy stuff so that you don't\nlose your work when you Ctrl + C.\"\"\"\n\nfrom argparse import ArgumentParser, ArgumentTypeError\nimport datetime\nfrom itertools import groupby\nfrom json import dumps\nimport logging\nfrom logging import info, warn\nfrom os import path, makedirs, environ\nfrom multiprocessing import Process, Queue, Event, Lock\nfrom Queue import Empty, Full\nfrom random import randint\nfrom sys import stdout, argv\nfrom time import time\n\nimport h5py\n\nfrom keras.models import Graph\nfrom keras.optimizers import SGD\nfrom keras.utils.generic_utils import Progbar\n\nimport numpy as np\n\nfrom scipy.io import loadmat\n\nimport models\nfrom utils import get_model_lr\n\n\nINIT = 'he_normal'\n\n\ndef mkdir_p(dir_path):\n    try:\n        makedirs(dir_path)\n    except OSError as e:\n        # 17 means \"already exists\"\n        if e.errno != 17:\n            raise e\n\n\nclass TimedContext(object):\n    def __init__(self, name=None):\n        self.name = name\n\n    def __enter__(self):\n        self.start = time()\n\n    def __exit__(self, *args, **kwargs):\n        elapsed = time() - self.start\n        name_str = self.name or 'Unnamed timed context'\n        print('%s took %fs' % (name_str, elapsed))\n\n\nclass NumericLogger(object):\n    def __init__(self, dest):\n        self.dest = dest\n        self.lock = Lock()\n\n    def append(self, data):\n        if 'time' not in data:\n            data['time'] = datetime_str()\n        json_data = dumps(data)\n        # No file-level locking because YOLO\n        with self.lock:\n            with open(self.dest, 'a') as fp:\n                # Should probably use a binary format or something. Oh well.\n                fp.write(json_data + '\\n')\n\n# Needs to be set up in main function\nnumeric_log = None\n\n\ndef group_sort_dedup_indices(indices):\n    \"\"\"Takes a list of the form `[(major_index, minor_index)]` and returns a\n    list of the form `[(major_index, [minor_indices])]`, where major indices\n    and minor indices are still grouped as before, but both major and minor\n    indices are sorted. Minor indices are also deduplicated, so `p[1]` will be\n    
Minor indices are also deduplicated, so `p[1]` will be\n a sorted, deduplicated list for each `p` in the return value.\n\n This function is very useful for generating h5py slices, since h5py\n requires sorted, deduplicated indices to batch-select specific elements\n from a dataset.\"\"\"\n sorted_ins = sorted(indices)\n keyfun = lambda indices: indices[0]\n rv = []\n for k, g in groupby(sorted_ins, keyfun):\n sorted_minors = [t[1] for t in g]\n # Deduplicate as well\n minors = []\n for idx, val in enumerate(sorted_minors):\n if idx == 0 or sorted_minors[idx - 1] != val:\n minors.append(val)\n rv.append((k, minors))\n return rv\n\n\nclass BatchReader(object):\n def __init__(self, h5_paths, inputs, outputs, batch_size, mark_epochs,\n shuffle, mean_pixels):\n \"\"\"Initialise the worker.\n\n :param list h5_paths: List of paths to HDF5 files to read.\n :param list inputs: List of input names, corresponding to HDF5\n datasets.\n :param list outputs: List of output names, again corresponding to HDF5\n datasets.\n :param int batch_size: Number of datums in batches to return.\n :param bool mark_epochs: Whether to push None to the queue at the end\n of an epoch.\n :param bool shuffle: Should data be shuffled?\n :param dict mean_pixels: Dictionary giving mean pixels for each\n channel.\"\"\"\n self.h5_files = [h5py.File(path, 'r') for path in h5_paths]\n self.inputs = inputs\n self.outputs = outputs\n for filename in inputs + outputs:\n assert not filename.startswith('/')\n assert len(inputs) > 0, \"Need at least one input\"\n assert len(outputs) > 0, \"Need at least one output\"\n self.batch_size = batch_size\n self.mark_epochs = mark_epochs\n self.shuffle = shuffle\n self.mean_pixels = mean_pixels\n self._index_pool = []\n\n def _refresh_index_pool(self):\n self._index_pool = []\n\n for idx, fp in enumerate(self.h5_files):\n some_output = self.outputs[0]\n label_set = fp[some_output]\n data_size = len(label_set)\n self._index_pool.extend(\n (idx, datum_idx) for datum_idx in xrange(data_size)\n )\n\n if self.shuffle:\n np.random.shuffle(self._index_pool)\n\n def _pop_n_indices(self, n):\n rv = self._index_pool[:n]\n del self._index_pool[:n]\n\n return rv\n\n def _get_batch_indices(self):\n if self.mark_epochs and not self._index_pool:\n # The index pool has been exhausted, so we need to return [] and\n # refresh the index pool\n self._refresh_index_pool()\n\n indices = self._pop_n_indices(self.batch_size)\n\n while len(indices) < self.batch_size:\n start_len = len(indices)\n self._refresh_index_pool()\n indices.extend(self._pop_n_indices(self.batch_size - start_len))\n # Just check that the number of indices we have is actually\n # increasing\n assert len(indices) - start_len > 0, \\\n \"Looks like we ran out of indices :/\"\n\n return group_sort_dedup_indices(indices)\n\n def _get_ds(self, ds_name, indices):\n sub_batch = None\n for fp_idx, data_indices in indices:\n fp = self.h5_files[fp_idx]\n # This line used to yield a cryptic internal error whenever\n # data_indices contained duplicates.\n fp_data = fp[ds_name][data_indices].astype('float32')\n if sub_batch is None:\n sub_batch = fp_data\n else:\n assert fp_data.shape[1:] == sub_batch.shape[1:]\n sub_batch = np.concatenate((sub_batch, fp_data), axis=0)\n assert sub_batch is not None\n mean_pixel = self.mean_pixels.get(ds_name)\n\n if mean_pixel is not None:\n # The .reshape() allows Numpy to broadcast it\n assert sub_batch.ndim == 4, \"Can only mean-subtract images\"\n sub_batch -= mean_pixel.reshape(\n (1, sub_batch.shape[1], 1, 1)\n )\n 
elif sub_batch.ndim > 2:\n warn(\"There's no mean pixel for dataset %s\" % ds_name)\n\n return sub_batch\n\n def _get_sub_batches(self, ds_fields, indices):\n sub_batch = {}\n\n for ds_name in ds_fields:\n sub_batch[ds_name] = self._get_ds(ds_name, indices)\n\n return sub_batch\n\n def get_batch(self):\n # First, fetch a batch full of data\n batch_indices = self._get_batch_indices()\n\n assert(batch_indices or self.mark_epochs)\n\n if self.mark_epochs and not batch_indices:\n return None\n\n inputs = self._get_sub_batches(self.inputs, batch_indices)\n outputs = self._get_sub_batches(self.outputs, batch_indices)\n assert inputs.viewkeys() != outputs.viewkeys(), \\\n \"Can't mix inputs and outputs\"\n # 'inputs' is just going to be our batch now\n inputs.update(outputs)\n return inputs\n\n def close(self):\n for fp in self.h5_files:\n fp.close()\n\n\ndef h5_read_worker(\n out_queue, end_evt, h5_paths, inputs, outputs, mean_pixels,\n batch_size, mark_epochs, shuffle\n ):\n \"\"\"This function is designed to be run in a multiprocessing.Process, and\n communicate using a ``multiprocessing.Queue``. It will just keep reading\n batches and pushing them (complete batches!) to the queue in a tight loop;\n obviously this will block as soon as the queue is full, at which point this\n process will wait until it can read again. Note that ``end_evt`` is an\n Event which should be set once the main thread wants to exit.\n\n At the end of each epoch, ``None`` will be pushed to the output queue iff\n ``mark_epochs`` is True. This notifies the training routine that it should\n perform validation or whatever it is training routines do nowadays.\"\"\"\n # See https://bugs.python.org/issue8426\n # mp.Queues work by pickling objects to be enqueued and then sending them\n # across a Unix pipe. By default, Queue installs an atexit handler which\n # waits for all enqueued data to be written to the Unix pipe. This seems to\n # hang when the receiver has exited, possibly due to limits on the amount\n # of data which can be buffered in a pipe. cancel_join_thread() either\n # removes that atexit handler or makes it a noop. 
This is not the default\n # behaviour because it results in unwritten data being irrevocably lost\n # (which is okay for us!).\n out_queue.cancel_join_thread()\n\n reader = BatchReader(\n h5_paths=h5_paths, inputs=inputs, outputs=outputs,\n batch_size=batch_size, mark_epochs=mark_epochs, shuffle=shuffle,\n mean_pixels=mean_pixels\n )\n # Outer loop is to keep pushing forever, inner loop just polls end_event\n # periodically if we're waiting to push to the queue\n try:\n while True:\n batch = reader.get_batch()\n while True:\n if end_evt.is_set():\n return\n\n try:\n out_queue.put(batch, timeout=0.05)\n break\n except Full:\n # Queue.Full (for Queue = the module in stdlib, not the\n # class) is raised when we time out\n pass\n finally:\n print('Worker hit finally block; exiting')\n\n\ndef get_sample_weight(data, classname, masks):\n # [:] is for benefit of h5py\n class_data = data[classname][:].astype('int')\n assert class_data.ndim == 2\n # Quick checks to ensure it's valid one-hot data\n assert class_data.shape[1] > 1\n assert (np.sum(class_data, axis=1) == 1).all()\n assert np.logical_or(class_data == 1, class_data == 0).all()\n # The data is one-hot, so we need to change it to be just integer class\n # labels\n classes = np.argmax(class_data, axis=1)\n assert classes.ndim == 1\n # num_classes = class_data.shape[1]\n # Make sure that the number of masks is the number of classes or the number\n # of classes + 1\n mask_names = set(masks.keys())\n\n # XXX: I've commented out these assertions because they only made sense\n # when we needed a 1:1 mapping between classes and outputs.\n # assert len(mask_names) == len(masks)\n # assert num_classes - 1 <= len(mask_names) <= num_classes\n # mask_vals = {val for name, val in masks}\n # assert(len(mask_vals) == len(mask_names))\n # Masks are in [0, num_classes); assert that this is the case\n # Note that if the number of classes is one fewer than the number of input\n # masks, then we assume that the zeroth class does not control any external\n # loss.\n # assert max(mask_vals) == num_classes - 1\n # assert len(mask_names) < num_classes or min(mask_vals) == 0\n\n sample_weight = {}\n\n for mask_name, mask_vals in masks.iteritems():\n sample_weight[mask_name] = np.in1d(classes, mask_vals).astype('float32')\n\n assert len(sample_weight) == len(mask_names)\n\n return sample_weight\n\n\ndef horrible_get(q):\n \"\"\"Logically equivalent to ``q.get()``, but (possibly!) works around a\n horrible, undocumented Python bug: https://bugs.python.org/issue1360. 
I say\n    \"possibly\" because I don't know whether this is actually fixing anything.\n    Might have to remove it later to find out.\"\"\"\n    while True:\n        try:\n            return q.get(timeout=60)\n        except Empty:\n            # Queue.get raises Empty (not Full) on timeout; catching Full here\n            # would let the timeout propagate and kill the retry loop. This\n            # assumes Empty is imported alongside Full from the Queue module.\n            continue\n\n\ndef train(model, queue, iterations, mask_class_name, masks):\n    \"\"\"Perform a fixed number of training iterations.\"\"\"\n    assert (mask_class_name is None) == (masks is None)\n\n    info('Training for %i iterations', iterations)\n    p = Progbar(iterations)\n    loss = 0.0\n    p.update(0)\n    mean_loss = 0\n\n    for iteration in xrange(iterations):\n        # First, get some data from the queue\n        start_time = time()\n        data = horrible_get(queue)\n        fetch_time = time() - start_time\n\n        if mask_class_name is not None:\n            sample_weight = get_sample_weight(data, mask_class_name, masks)\n        else:\n            sample_weight = {}\n\n        # Next, do some backprop\n        start_time = time()\n        loss, = model.train_on_batch(data, sample_weight=sample_weight)\n        loss = float(loss)\n        bp_time = time() - start_time\n        learning_rate = get_model_lr(model)\n\n        # Finally, write some debugging output\n        extra_info = [\n            ('loss', loss),\n            ('lr', learning_rate),\n            ('fetcht', fetch_time),\n            ('bpropt', bp_time)\n        ]\n        p.update(iteration + 1, extra_info)\n\n        # Update mean loss\n        mean_loss += float(loss) / iterations\n\n        # Log results to numeric log\n        numlog_data = dict(extra_info)\n        numlog_data['type'] = 'train'\n        numlog_data['bsize'] = len(data[data.keys()[0]])\n        numeric_log.append(numlog_data)\n\n    learning_rate = model.optimizer.get_config()['lr']\n    info('Finished {} training batches, mean loss-per-batch {}, LR {}'.format(\n        iterations, mean_loss, learning_rate\n    ))\n\n\ndef validate(model, queue, batches, mask_class_name, masks):\n    \"\"\"Perform one epoch of validation.\"\"\"\n    info('Testing on validation set')\n    samples = 0\n    weighted_loss = 0.0\n\n    for batch_num in xrange(batches):\n        data = horrible_get(queue)\n        assert data is not None\n\n        if mask_class_name is not None:\n            sample_weight = get_sample_weight(data, mask_class_name, masks)\n        else:\n            sample_weight = {}\n\n        loss, = model.test_on_batch(data, sample_weight=sample_weight)\n        loss = float(loss)\n\n        # Update stats\n        sample_size = len(data[data.keys()[0]])\n        samples += sample_size\n        weighted_loss += sample_size * loss\n\n        numeric_log.append({\n            'type': 'val_batch',\n            'loss': loss,\n            'w_loss': weighted_loss,\n            'bsize': sample_size\n        })\n\n        if (batch_num % 10) == 0:\n            info('%i validation batches tested', batch_num)\n\n    mean_loss = weighted_loss / max(1, samples)\n    info(\n        'Finished %i batches (%i samples); mean loss-per-sample %f',\n        batches, samples, mean_loss\n    )\n    numeric_log.append({\n        'type': 'val_done',\n        'mean_loss': mean_loss\n    })\n\n\ndef save(model, iteration_no, dest_dir):\n    \"\"\"Save the model to a checkpoint file.\"\"\"\n    # 10**6 (rather than the old 1e6) keeps randint's arguments integers\n    filename = 'model-iter-{}-r{:06}.h5'.format(iteration_no, randint(1, 10**6 - 1))\n    full_path = path.join(dest_dir, filename)\n    # Note that save_weights will prompt if the file already exists. 
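#\n# --- Added illustration (not part of the original file) ---\n# validate() above computes a sample-weighted mean: each batch contributes\n# batch_size * loss, and the running sum is divided by the total sample count.\n# With made-up numbers:\n#\n#     losses, sizes = [0.5, 0.2], [16, 8]\n#     weighted = sum(s * l for s, l in zip(sizes, losses))  # 16*0.5 + 8*0.2 = 9.6\n#     mean_loss = weighted / sum(sizes)                     # 9.6 / 24 = 0.4\n# 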
This is\n    # why I've added a small random number to the end; hopefully this will\n    # allow unattended optimisation runs to complete even when Keras decides\n    # that it doesn't want to play nicely.\n    info(\"Saving model to %s\", full_path)\n    model.save_weights(full_path)\n    return full_path\n\n\ndef read_mean_pixels(mat_path):\n    if mat_path is None:\n        # No mean pixel\n        return {}\n    mat = loadmat(mat_path)\n    mean_pixels = {\n        k: v.flatten() for k, v in mat.iteritems() if not k.startswith('_')\n    }\n    return mean_pixels\n\n\ndef sub_mean_pixels(mean_pixels, all_data):\n    rv = {}\n\n    for name, data in all_data.iteritems():\n        mean_pixel = mean_pixels.get(name)\n\n        if mean_pixel is not None:\n            assert data.ndim == 4, \"Can only mean-subtract images\"\n            rv[name] = data - mean_pixel.reshape(\n                (1, data.shape[1], 1, 1)\n            )\n        elif data.ndim > 2:\n            # Only warn about image-like things\n            warn(\"There's no mean pixel for dataset %s\" % name)\n\n    return rv\n\n\ndef infer_sizes(h5_path):\n    \"\"\"Just return shapes of all datasets, assuming that different samples are\n    indexed along the first dimension.\"\"\"\n    rv = {}\n\n    with h5py.File(h5_path, 'r') as fp:\n        for key in fp.keys():\n            rv[key] = fp[key].shape[1:]\n\n    return rv\n\n\ndef _model_io_map(config):\n    return {\n        cfg['name']: cfg for cfg in config\n    }\n\n\ndef get_model_io(model):\n    assert isinstance(model, Graph)\n    inputs = _model_io_map(model.input_config)\n    outputs = _model_io_map(model.output_config)\n    return (inputs, outputs)\n\n\ndef h5_parser(h5_string):\n    # parse strings,like,this without worrying about ,,stuff,like,,this,\n    rv = [p for p in h5_string.split(',') if p]\n    if not rv:\n        raise ArgumentTypeError('Expected at least one path')\n    return rv\n\ndef loss_mask_parser(arg):\n    \"\"\"Takes as input a string dictating which losses should be enabled by\n    which class values, and returns a dictionary with the same information in a\n    more malleable form.\n\n    :param arg: String of form\n        ``<classname>:<output1>=<class1>,<output2>=<class2>,...``\n    :returns: Dictionary with keys corresponding to seen outputs, each with an\n        array value indicating which class values should unmask it; if\n        the class holds any of the values in the set associated with an\n        output, then that output should count towards the total loss of\n        the model.\"\"\"\n    classname, rest = arg.split(':', 1)\n    nosep = rest.split(',')\n    pairs = [s.split('=', 1) for s in nosep if '=' in s]\n    set_dict = {}\n    for label, clas in pairs:\n        set_dict.setdefault(label, set()).add(int(clas))\n    # We return a dictionary mapping output names to *Numpy arrays* of\n    # corresponding classes. 
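#\n# --- Added usage example (not part of the original file) ---\n# loss_mask_parser above accepts '<classname>:<output>=<class>,...' and merges\n# repeated output labels into one set. For instance (output names are made up;\n# array ordering may vary because sets are unordered):\n#\n#     classname, masks = loss_mask_parser('gt_class:joints=1,joints=2,flow=0')\n#     # classname == 'gt_class'\n#     # masks == {'joints': np.array([1, 2]), 'flow': np.array([0])}\n# 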
This lets us use np.in1d to test for membership in\n # a vectorised way.\n rv_dict = {\n k: np.fromiter(v, dtype='int') for k, v in set_dict.iteritems()\n }\n return classname, rv_dict\n\n\ndef datetime_str():\n return datetime.datetime.now().isoformat()\n\n\ndef setup_logging(work_dir):\n global numeric_log\n logging.basicConfig(level=logging.DEBUG)\n log_dir = path.join(work_dir, 'logs')\n mkdir_p(log_dir)\n t = datetime_str()\n num_log_fn = path.join(log_dir, 'numlog-' + t + '.log')\n log_file = path.join(log_dir, 'log-' + t + '.log')\n file_handler = logging.FileHandler(log_file, mode='a')\n logging.getLogger().addHandler(file_handler)\n numeric_log = NumericLogger(num_log_fn)\n info('=' * 80)\n info('Logging started at ' + datetime_str())\n info('Human-readable log path: {}'.format(log_file))\n info('Numeric log path: {}'.format(num_log_fn))\n\n\ndef load_model(args):\n ds_shape = infer_sizes(args.train_h5s[0])\n solver = SGD(\n lr=args.learning_rate, decay=args.decay, momentum=0.9, nesterov=True\n )\n model_to_load = getattr(models, args.model_name)\n info('Using loader %s' % args.model_name)\n model = model_to_load(\n ds_shape, solver, INIT\n )\n info('Loaded model of type {}'.format(type(model)))\n opt_cfg = model.optimizer.get_config()\n info('Solver data: decay={decay}, lr={lr}, momentum={momentum}, '\n 'nesterov={nesterov}'.format(**opt_cfg))\n if args.finetune_path is not None:\n info(\"Loading weights from '%s'\", args.finetune_path)\n model.load_weights(args.finetune_path)\n return model\n\n\ndef get_parser():\n \"\"\"Grab the ``argparse.ArgumentParser`` for this application. For some\n reason ``argparse`` needs access to ``sys.argv`` to build an\n ``ArgumentParser`` (not just to actually evaluate it), so I've put this\n into its own function so that it doesn't get executed when running from\n environments with no ``sys.argv`` (e.g. 
Matlab)\"\"\"\n parser = ArgumentParser(description=\"Train a CNN to regress joints\")\n\n # Mandatory arguments\n parser.add_argument(\n 'train_h5s', metavar='TRAINDATA', type=h5_parser,\n help='h5 files in which training samples are stored (comma separated)'\n )\n parser.add_argument(\n 'val_h5s', metavar='VALDATA', type=h5_parser,\n help='h5 file in which validation samples are stored (comma separated)'\n )\n parser.add_argument(\n 'working_dir', metavar='WORKINGDIR', type=str,\n help='directory in which to store checkpoint files and logs'\n )\n\n # Optargs\n parser.add_argument(\n '--queued-batches', dest='queued_batches', type=int, default=32,\n help='number of unused batches stored in processing queue (in memory)'\n )\n parser.add_argument(\n '--batch-size', dest='batch_size', type=int, default=16,\n help='batch size for both training (backprop) and validation'\n )\n parser.add_argument(\n '--checkpoint-epochs', dest='checkpoint_epochs', type=int, default=5,\n help='training intervals to wait before writing a checkpoint file'\n )\n parser.add_argument(\n '--train-interval-batches', dest='train_interval_batches', type=int,\n default=256, help='number of batches to train for between validation'\n )\n parser.add_argument(\n '--mean-pixel-mat', dest='mean_pixel_path', type=str, default=None,\n help='.mat containing mean pixel'\n )\n parser.add_argument(\n '--learning-rate', dest='learning_rate', type=float, default=0.0001,\n help='learning rate for SGD'\n )\n parser.add_argument(\n '--decay', dest='decay', type=float, default=1e-6,\n help='decay for SGD'\n )\n parser.add_argument(\n '--finetune', dest='finetune_path', type=str, default=None,\n help='finetune from these weights instead of starting again'\n )\n parser.add_argument(\n '--write-fc-weights', dest='fc_weight_path', type=str, default=None,\n help='use this to write fully convolutional net weights to some path'\n )\n parser.add_argument(\n '--write-fc-json', dest='fc_json_path', type=str, default=None,\n help='use this to write fully convolutional net spec to some path'\n )\n parser.add_argument(\n '--max-iter', dest='max_iter', type=int, default=None,\n help='maximum number of iterations to run for'\n )\n # TODO: Add configuration option to just run through the entire validation set\n # like I was doing before. That's a lot faster than using randomly sampled\n # stuff. Edit: I think I pushed down the validaiton block size, so now random\n # selection should be relatively fast.\n parser.add_argument(\n '--val-batches', dest='val_batches', type=int, default=50,\n help='number of batches to run during each validation step'\n )\n parser.add_argument(\n # Syntax proposal: 'class:out1=1,out2=2,out3=3', where 'class' is the name\n # of the class output (we only look at the ground truth) and out1,out2,out3\n # are names of outputs to be masked. In this case, out1's loss is only\n # enabled when the GT class is 1 (or [0 1 0 0 ...] 
in one-hot notation),\n # out2's loss is only enabled whne the GT class is 2 ([0 0 1 0 ...]), etc.\n '--cond-losses', dest='loss_mask', type=loss_mask_parser, default=None,\n help=\"use given GT class to selectively enable losses\"\n )\n parser.add_argument(\n '--model-name', dest='model_name', type=str,\n default='vggnet16_joint_reg_class_flow',\n help='name of model to use'\n )\n\n return parser\n\n\nif __name__ == '__main__':\n # Start by parsing arguments and setting up logger\n parser = get_parser()\n args = parser.parse_args()\n\n # Set up checkpointing and logging\n work_dir = args.working_dir\n checkpoint_dir = path.join(work_dir, 'checkpoints')\n mkdir_p(checkpoint_dir)\n setup_logging(work_dir)\n info('argv: {}'.format(argv))\n info('THEANO_FLAGS: {}'.format(environ.get('THEANO_FLAGS')))\n\n # Model-building\n model = load_model(args)\n inputs, outputs = [d.keys() for d in get_model_io(model)]\n\n mod_json = model.to_json()\n model_json_path = path.join(checkpoint_dir, 'train_model.json')\n with open(model_json_path, 'w') as fp:\n info('Saving model definition to ' + model_json_path)\n fp.write(mod_json)\n\n # Prefetching stuff\n end_event = Event()\n mean_pixels = read_mean_pixels(args.mean_pixel_path)\n # Training data prefetch\n train_queue = Queue(args.queued_batches)\n # We supply everything as kwargs so that I know what I'm passing in\n train_kwargs = dict(\n h5_paths=args.train_h5s, batch_size=args.batch_size,\n out_queue=train_queue, end_evt=end_event, mark_epochs=False,\n shuffle=True, mean_pixels=mean_pixels, inputs=inputs, outputs=outputs\n )\n train_worker = Process(target=h5_read_worker, kwargs=train_kwargs)\n\n # Validation data prefetch\n val_queue = Queue(args.queued_batches)\n val_kwargs = dict(\n h5_paths=args.val_h5s, batch_size=args.batch_size, out_queue=val_queue,\n end_evt=end_event, mark_epochs=False, shuffle=True,\n mean_pixels=mean_pixels, inputs=inputs, outputs=outputs\n )\n val_worker = Process(target=h5_read_worker, kwargs=val_kwargs)\n\n if args.loss_mask is not None:\n mask_class_name, masks = args.loss_mask\n else:\n warn('No masks supplied for conditional regression!')\n mask_class_name, masks = None, None\n\n try:\n # Protect this in a try: for graceful cleanup of workers\n train_worker.start()\n val_worker.start()\n\n # Stats\n epochs_elapsed = 0\n batches_used = 0\n\n try:\n while True:\n # Train and validate\n validate(\n model, val_queue, args.val_batches, mask_class_name, masks\n )\n train(\n model, train_queue, args.train_interval_batches,\n mask_class_name, masks\n )\n\n # Update stats\n epochs_elapsed += 1\n batches_used += args.train_interval_batches\n is_checkpoint_epoch = epochs_elapsed % args.checkpoint_epochs == 0\n if epochs_elapsed > 0 and is_checkpoint_epoch:\n save(model, batches_used, checkpoint_dir)\n if args.max_iter is not None \\\n and epochs_elapsed > args.max_iter:\n info(\n 'Maximum iterations (%i) exceeded; terminating',\n args.max_iter\n )\n break\n finally:\n # Always save afterwards, even if we get KeyboardInterrupt'd or\n # whatever\n stdout.write('\\n')\n save(model, batches_used, checkpoint_dir)\n\n # Convert to fully convolutional net if necessary\n if args.fc_json_path or args.fc_weight_path:\n info('Upgrading to fully convolutional network')\n upgraded = models.upgrade_multipath_poselet_vggnet(model)\n if args.fc_json_path:\n info('Saving model spec as JSON to %s', args.fc_json_path)\n mod_json = upgraded.to_json()\n with open(args.fc_json_path, 'w') as fp:\n fp.write(mod_json)\n if args.fc_weight_path:\n 
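                # Note (added): overwrite=True below skips the interactive\n                # prompt that older Keras save_weights() raises when the file\n                # already exists - the same prompt worked around in save().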
info('Saving weights to %s', args.fc_weight_path)\n upgraded.save_weights(args.fc_weight_path, overwrite=True)\n finally:\n # Make sure workers shut down gracefully\n end_event.set()\n stdout.write('\\n')\n info('Waiting for workers to exit (could take some time)')\n # XXX: My termination scheme (with end_event) is not working, and I\n # can't tell where the workers are getting stuck. It seems to be in a\n # cleanup function somewhere (maybe attach GDB to it?)\n train_worker.join()\n val_worker.join()\n","repo_name":"qxcv/joint-regressor","sub_path":"keras/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":27660,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"26202358444","text":"import os\nimport urllib\nimport pydub\nimport speech_recognition as sr\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom browser import driver\n\ndef bypass():\n def delay(waiting_time=5):\n driver.implicitly_wait(waiting_time)\n\n # switch to recaptcha frame\n frames = driver.find_elements_by_tag_name(\"iframe\")\n driver.switch_to.frame(frames[0])\n delay()\n\n # click on checkbox to activate recaptcha\n driver.find_element_by_class_name(\"recaptcha-checkbox-border\").click()\n\n # switch to recaptcha audio control frame\n driver.switch_to.default_content()\n frames = driver.find_element_by_xpath(\"/html/body/div[2]/div[4]\").find_elements_by_tag_name(\"iframe\")\n driver.switch_to.frame(frames[0])\n delay()\n\n # click on audio challenge\n driver.find_element_by_id(\"recaptcha-audio-button\").click()\n\n # switch to recaptcha audio challenge frame\n driver.switch_to.default_content()\n frames = driver.find_elements_by_tag_name(\"iframe\")\n driver.switch_to.frame(frames[-1])\n delay()\n\n # get the mp3 audio file\n src = driver.find_element_by_id(\"audio-source\").get_attribute(\"src\")\n print(\"[INFO] Audio src: %s\" % src)\n\n # download the mp3 audio file from the source\n urllib.request.urlretrieve(src, os.path.normpath(os.getcwd() + \"\\\\sample.mp3\"))\n delay()\n\n # load downloaded mp3 audio file as .wav\n try:\n sound = pydub.AudioSegment.from_mp3(os.path.normpath(os.getcwd() + \"\\\\sample.mp3\"))\n sound.export(os.path.normpath(os.getcwd() + \"\\\\sample.wav\"), format=\"wav\")\n sample_audio = sr.AudioFile(os.path.normpath(os.getcwd() + \"\\\\sample.wav\"))\n except Exception:\n print(\"[-] Please run program as administrator or download ffmpeg manually, \"\n \"http://blog.gregzaal.com/how-to-install-ffmpeg-on-windows/\")\n\n # translate audio to text with google voice recognition\n r = sr.Recognizer()\n with sample_audio as source:\n audio = r.record(source)\n key = r.recognize_google(audio)\n print(\"[INFO] Recaptcha Passcode: %s\" % key)\n\n # key in results and submit\n driver.find_element_by_id(\"audio-response\").send_keys(key.lower())\n driver.find_element_by_id(\"audio-response\").send_keys(Keys.ENTER)\n driver.switch_to.default_content()\n delay()\n driver.find_element_by_id(\"recaptcha-demo-submit\").click()\n delay()","repo_name":"daremau/setautoval","sub_path":"bypass.py","file_name":"bypass.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"22783667181","text":"import asyncio\nimport datetime\nfrom functools import cached_property\nfrom itertools import chain\nfrom typing import Union\n\nimport altair as alt\nimport httpx\nimport pandas as pd\n\n\nclass onePA:\n 
FACILITIES = (\n \"BADMINTON COURTS\",\n \"BASKETBALL COURT\",\n \"BBQ PIT (CC)\",\n \"BBQ PIT (RC)\",\n \"FUTSAL COURT\",\n \"SOCCER FIELD\",\n \"SQUASH COURT\",\n \"STUDY & WORKSPACES\",\n \"TABLE TENNIS ROOM\",\n \"TENNIS COURT\",\n )\n\n BASE_URL = \"https://www.onepa.gov.sg/pacesapi\"\n FACILITY_SEARCH_ENDPOINT = \"/facilitysearch/searchjson\"\n FACILITY_SLOTS_ENDPOINT = \"/facilityavailability/GetFacilitySlots\"\n\n def __init__(self, facility: str):\n assert facility in self.FACILITIES\n self.facility = facility\n\n @cached_property\n def outlet_names(self):\n outlet_names = []\n page_num = 1\n while True:\n params = {\"facility\": self.facility, \"page\": page_num}\n r = httpx.get(self.BASE_URL + self.FACILITY_SEARCH_ENDPOINT, params=params)\n\n results = r.json()[\"data\"][\"results\"]\n\n page_outlet_names = [result[\"outlet\"] for result in results]\n outlet_names.extend(page_outlet_names)\n\n page_num += 1\n if len(page_outlet_names) < 10:\n break\n outlet_names = sorted(outlet_names)\n return outlet_names\n\n async def _batch_available_outlets(self, client, batch_outlets, date):\n date_str = date.strftime(\"%d/%m/%Y\")\n params = {\n \"outlet\": batch_outlets,\n \"facility\": self.facility,\n \"date\": date_str,\n \"time\": \"all\",\n }\n r = await client.get(\n self.BASE_URL + self.FACILITY_SEARCH_ENDPOINT, params=params\n )\n results = r.json()[\"data\"][\"results\"]\n results = [dict(data, **{\"date\": date}) for data in results]\n return results\n\n async def available_outlets(self, dates: Union[datetime.date, list[datetime.date]]):\n if isinstance(dates, datetime.date):\n dates = [dates]\n\n PAGE_SIZE = 10\n async with httpx.AsyncClient(timeout=60 * 2) as client:\n tasks = []\n for date in dates:\n outlet_names = self.outlet_names\n\n for i in range(0, len(outlet_names), PAGE_SIZE):\n batch_outlets = \",\".join(\n outlet_names[i : i + PAGE_SIZE]\n ) # batch of 10 outlets\n result = self._batch_available_outlets(client, batch_outlets, date)\n tasks.append(asyncio.ensure_future(result))\n outlet_availability_nested = await asyncio.gather(*tasks)\n return list(chain.from_iterable(outlet_availability_nested))\n\n async def available_outlets_df(\n self, dates: Union[datetime.date, list[datetime.date]]\n ):\n outlet_availability = await self.available_outlets(dates)\n expanded_df = pd.json_normalize(outlet_availability)\n if \"outlet\" not in expanded_df.columns:\n outlet_df = pd.DataFrame(\n columns=[\"outlet\", \"count\", \"bookingUrl\", \"publicPrice\", \"membersPrice\"]\n )\n return outlet_df\n outlet_df = (\n expanded_df[\n [\n \"outlet\",\n \"count\",\n \"productUrl\",\n \"price.publicPrice\",\n \"price.membersPrice\",\n \"date\",\n ]\n ]\n .assign(\n productUrl=lambda row: \"https://www.onepa.gov.sg\" + row[\"productUrl\"]\n )\n .rename(\n columns={\n \"productUrl\": \"bookingUrl\",\n \"price.publicPrice\": \"publicPrice\",\n \"price.membersPrice\": \"membersPrice\",\n }\n )\n )\n outlet_df[\"date\"] = pd.to_datetime(outlet_df[\"date\"])\n return outlet_df\n\n async def _batch_available_times_per_outlet(self, client, outlet, date):\n\n date_str = date.strftime(\"%d/%m/%Y\")\n selected_facility = (\n f\"{outlet.replace(' ' , '')}_{self.facility.replace(' ', '')}\"\n )\n\n date_slot_info = []\n params = {\"selectedFacility\": selected_facility, \"selectedDate\": date_str}\n\n r = await client.get(\n self.BASE_URL + self.FACILITY_SLOTS_ENDPOINT,\n params=params,\n timeout=60,\n )\n resource_statuses = r.json()[\"response\"][\"resourceList\"]\n\n if resource_statuses is not 
None:\n for resource_status in resource_statuses:\n slot_info_list = resource_status[\"slotList\"]\n # Add link to page\n\n date_slot_info.extend(slot_info_list)\n\n date_slot_info = [\n dict(\n slot_info,\n **{\n \"bookingUrl\": \"https://www.onepa.gov.sg/facilities/availability?facilityId=\"\n + selected_facility\n },\n )\n for slot_info in date_slot_info\n ]\n return date_slot_info\n\n async def available_times_per_outlet(\n self, outlet, dates: Union[datetime.date, list[datetime.date]]\n ):\n async with httpx.AsyncClient(timeout=60 * 2) as client:\n tasks = []\n for date in dates:\n date_slot_info = self._batch_available_times_per_outlet(\n client, outlet, date\n )\n tasks.append(asyncio.ensure_future(date_slot_info))\n date_slot_info_nested = await asyncio.gather(*tasks)\n return list(chain.from_iterable(date_slot_info_nested))\n\n async def available_times_per_outlet_df(\n self, outlet, dates: Union[datetime.date, list[datetime.date]]\n ):\n responses = await self.available_times_per_outlet(outlet, dates)\n\n df = (\n pd.DataFrame(responses)\n .groupby(\n [\n \"timeRangeId\",\n \"timeRangeName\",\n \"startTime\",\n \"endTime\",\n \"isPeak\",\n \"bookingUrl\",\n ]\n )[[\"isAvailable\"]]\n .sum()\n .reset_index()\n )\n return df\n\n\ndef availability_plot_times(df):\n base_chart = alt.Chart(df).encode(\n x=alt.X(\n \"date(startTime):O\",\n title=\"Day of Month\",\n sort=alt.EncodingSortField(field=\"startTime\", order=\"ascending\"),\n ),\n y=alt.Y(\"hoursminutes(startTime):O\", title=\"Start Time\"),\n tooltip=[\n \"timeRangeName\",\n \"isPeak\",\n \"isAvailable\",\n alt.Tooltip(\"day(startTime)\", title=\"day\"),\n alt.Tooltip(\"startTime:T\", title=\"date\"),\n ],\n href=\"bookingUrl:N\",\n )\n\n available_slots = base_chart.transform_filter(alt.datum.isAvailable > 0).encode(\n color=alt.Color(\n \"isAvailable:O\",\n scale=alt.Scale(\n scheme=\"yellowgreen\"\n ), # https://vega.github.io/vega/docs/schemes/\n legend=alt.Legend(title=\"Available slots\"),\n )\n )\n\n chart = available_slots.mark_rect() + base_chart.mark_rect(opacity=0)\n\n chart[\"usermeta\"] = {\"embedOptions\": {\"loader\": {\"target\": \"_blank\"}}}\n return chart.properties(title=\"No. of available slots by time, date\")\n\n\ndef availability_plot_dates(df):\n chart = (\n alt.Chart(df)\n .mark_rect()\n .encode(\n x=alt.X(\n \"date(date):O\",\n title=\"Day of Month\",\n sort=alt.EncodingSortField(field=\"date\", order=\"ascending\"),\n ),\n y=alt.Y(\"outlet:N\"),\n color=alt.Color(\n \"count:O\",\n scale=alt.Scale(\n scheme=\"yellowgreen\"\n ), # https://vega.github.io/vega/docs/schemes/\n legend=alt.Legend(title=\"Available slots\"),\n ),\n href=\"bookingUrl:N\",\n tooltip=[\n \"outlet\",\n \"count\",\n \"publicPrice\",\n \"membersPrice\",\n alt.Tooltip(\"day(date)\", title=\"day\"),\n \"date\",\n ],\n )\n )\n chart[\"usermeta\"] = {\"embedOptions\": {\"loader\": {\"target\": \"_blank\"}}}\n return chart.properties(title=\"No. 
of slots by location, date\")\n","repo_name":"kayeschong/onepa-facilities-checker","sub_path":"checkers.py","file_name":"checkers.py","file_ext":"py","file_size_in_byte":8517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"26747070144","text":"\"\"\"Generic functions tests.\"\"\"\nimport os\nimport sys\nfrom pathlib import Path, PosixPath, WindowsPath\nfrom unittest import mock\n\nimport pytest\nfrom furl import furl\nfrom testfixtures import compare\n\nfrom nitpick.constants import EDITOR_CONFIG, TOX_INI\nfrom nitpick.generic import _url_to_posix_path, _url_to_windows_path, relative_to_current_dir\n\n\n@mock.patch.object(Path, \"cwd\")\n@mock.patch.object(Path, \"home\")\ndef test_relative_to_current_dir(home, cwd):\n \"\"\"Mock the home and current dirs, and test relative paths to them (testing Linux-only).\"\"\"\n if sys.platform == \"win32\":\n home_dir = \"C:\\\\Users\\\\john\"\n project_dir = f\"{home_dir}\\\\project\"\n else:\n home_dir = \"/home/john\"\n project_dir = f\"{home_dir}/project\"\n home.return_value = Path(home_dir)\n cwd.return_value = Path(project_dir)\n sep = os.path.sep\n\n examples = {\n None: \"\",\n project_dir: \"\",\n Path(project_dir): \"\",\n f\"{home_dir}{sep}another\": f\"{home_dir}{sep}another\",\n Path(f\"{home_dir}{sep}bla{sep}bla\"): f\"{home_dir}{sep}bla{sep}bla\",\n f\"{project_dir}{sep}{TOX_INI}\": TOX_INI,\n f\"{project_dir}{sep}{EDITOR_CONFIG}\": EDITOR_CONFIG,\n Path(f\"{project_dir}{sep}apps{sep}manage.py\"): f\"apps{sep}manage.py\",\n f\"{home_dir}{sep}another{sep}one{sep}bites.py\": f\"{home_dir}{sep}another{sep}one{sep}bites.py\",\n Path(f\"{home_dir}{sep}bla{sep}bla.txt\"): f\"{home_dir}{sep}bla{sep}bla.txt\",\n }\n if sys.platform == \"win32\":\n examples.update(\n {\n \"d:\\\\Program Files\\\\MyApp\": \"d:\\\\Program Files\\\\MyApp\",\n Path(\"d:\\\\Program Files\\\\AnotherApp\"): \"d:\\\\Program Files\\\\AnotherApp\",\n \"C:\\\\System32\\\\win32.dll\": \"C:\\\\System32\\\\win32.dll\",\n Path(\"E:\\\\network\\\\file.txt\"): \"E:\\\\network\\\\file.txt\",\n }\n )\n else:\n examples.update(\n {\n \"/usr/bin/some\": \"/usr/bin/some\",\n Path(\"/usr/bin/awesome\"): \"/usr/bin/awesome\",\n \"/usr/bin/something/wicked/this-way-comes.cfg\": \"/usr/bin/something/wicked/this-way-comes.cfg\",\n Path(\"/usr/bin/.awesome\"): \"/usr/bin/.awesome\",\n }\n )\n\n for path, expected in examples.items():\n compare(actual=relative_to_current_dir(path), expected=expected, prefix=f\"Path: {path}\")\n\n\n@pytest.mark.skipif(os.name != \"nt\", reason=\"Windows-only test\")\n@pytest.mark.parametrize(\n \"test_path\",\n [\n \"C:\\\\\",\n r\"C:\\path\\file.toml\",\n r\"//server/share/path/file.toml\",\n ],\n)\ndef test_url_to_windows_path(test_path):\n \"\"\"Verify that Path to URL to Path conversion preserves the path.\"\"\"\n path = WindowsPath(test_path)\n url = furl(path.as_uri())\n assert _url_to_windows_path(url) == path\n\n\n@pytest.mark.skipif(os.name == \"nt\", reason=\"POSIX-only test\")\n@pytest.mark.parametrize(\n \"test_path\",\n [\n \"/\",\n \"/path/to/file.toml\",\n \"//double/slash/path.toml\",\n ],\n)\ndef test_url_to_posix_path(test_path):\n \"\"\"Verify that Path to URL to Path conversion preserves the path.\"\"\"\n path = PosixPath(test_path)\n url = furl(path.as_uri())\n assert _url_to_posix_path(url) == 
path\n","repo_name":"andreoliwa/nitpick","sub_path":"tests/test_generic.py","file_name":"test_generic.py","file_ext":"py","file_size_in_byte":3222,"program_lang":"python","lang":"en","doc_type":"code","stars":369,"dataset":"github-code","pt":"75"} +{"seq_id":"29629426513","text":"MAX_N = 100000\nsieve = [0] * (MAX_N+1)\nprimes = [[] for _ in range(MAX_N+1)]\nfor i in range(2, MAX_N+1):\n if sieve[i] == 0:\n for j in range(i, MAX_N+1, i):\n sieve[j] += 1\n primes[j].append(i)\n\ndp = [[0] * (MAX_N+1) for _ in range(6)]\nfor k in range(1, 6):\n for n in range(2, MAX_N+1):\n if sieve[n] == k:\n dp[k][n] = dp[k][n-1] + 1\n else:\n dp[k][n] = dp[k][n-1]\n\n# read input and process test cases\nT = int(input())\nfor _ in range(T):\n A, B, K = map(int, input().split())\n count = dp[K][B] - dp[K][A-1]\n print(count)\n","repo_name":"MrBaten/problemsloving","sub_path":"count k-prime.py","file_name":"count k-prime.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"38237760146","text":"\n\nfrom django.urls import path \nfrom . import views\n\nurlpatterns = [\n path('', views.index, name=\"index\"),\n path('catalogo/', views.catalogo, name=\"catalogoMas\"),\n path('registro/', views.registro, name=\"registro\"),\n path('inicioSesion/', views.signin, name=\"inicioSesion\"),\n path('logout/', views.signout, name='logout'),\n path('registroMascota/', views.registromascota, name=\"registroMascota\"),\n path('mensajes/', views.mensajes, name=\"mensajes\"),\n path('consultarSolicitudes/', views.consultarSolicitudes, name=\"consultarSolicitudes\"),\n path('misMascotas/', views.misMascotas, name=\"misMascotas\"),\n path('perfilUsuario/', views.perfilUsuario, name=\"perfilUsuario\"),\n path('perfilMascota/', views.perfilMascota, name=\"perfilMascota\"),\n path('solicitudMascota/', views.solicitudMascota, name=\"solicitudMascota\"),\n \n]","repo_name":"MarcoMR4/Mascotas","sub_path":"PettyQuestApp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"37200367020","text":"import re\n\n# word = input('Enter a word')\n# pattern = r'ne'\n# if re.match(pattern, word):\n# print('Match')\n# else:\n# print('No match')\n#\n\n# email = 'nelsong@mail.com'\n# pattern = r'@'\n# if re.search(pattern, email):\n# print('Match')\n# else:\n# print('No match')\n\n# pattern = r'c..n'\n# if re.match(pattern, 'coin'):\n# print('match')\n\n# emails = '''\n# nelson@gmail.com\n# jae-net@edu.net.\n# Text.good_1234@gov.ng\n# '''\n# pattern = re.compile(r'[a-zA-Z0-9_.-]+@[a-zA-Z]+\\.[a-z]+')\n# matches = pattern.finditer(emails)\n# for match in matches:\n# print(match)\nfrom threading import *\nfrom time import sleep\n\nclass Nelson(Thread):\n def run(self):\n for i in range(10):\n print('nelson')\n sleep(1)\n\nclass Wisdom(Thread):\n def run(self):\n for i in range(10):\n print('wisdom')\n sleep(1)\n\n\nn = Nelson()\nw = Wisdom()\nn.start()\nsleep(0.2)\nw.start()\n","repo_name":"peternelson22/python-tutorials","sub_path":"even.py","file_name":"even.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"17453660759","text":"from typing import Dict, List\nfrom project_library.user import User\n\n\nclass Library:\n\n def __init__(self):\n self.user_records: List[User] = [] # User objects\n self.books_available: 
Dict[str, List[str]] = {} # {'author': [book_names]}\n        self.rented_books: Dict[str, Dict[str, int]] = {} # {'usernames': {'book_names': days_to_return}}\n\n    def get_book(self, author: str, book_name: str, days_to_return: int, user: User) -> str:\n\n        if book_name in self.books_available[author]: # if book name in dict of available books\n            self.books_available[author].remove(book_name) # remove book from available dictionary\n            user.books.append(book_name) # add book to user books list\n\n            if user.username in self.rented_books: # if username in rented books dictionary\n                self.rented_books[user.username][book_name] = days_to_return # add book name to his username dict\n            else: # if user not in rented books dictionary\n                self.rented_books[user.username] = {book_name: days_to_return} # create dictionary for his username\n\n            return f\"{book_name} successfully rented for the next {days_to_return} days!\"\n\n        for username, data in self.rented_books.items():\n            if book_name in data:\n                return f'The book \"{book_name}\" is already rented and will be available in {data[book_name]} days!'\n\n    def return_book(self, author: str, book_name: str, user: User) -> [str, None]:\n        if book_name in self.rented_books[user.username]:\n            del self.rented_books[user.username][book_name]\n            user.books.remove(book_name)\n            self.books_available[author].append(book_name)\n        else:\n            return f\"{user.username} doesn't have this book in his/her records!\"\n","repo_name":"entermix123/Python_OOP","sub_path":"2. Classes and Objects/project_library/library.py","file_name":"library.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"34042637892","text":"import numpy as np\r\nimport pandas as pd\r\nimport warnings\r\nfrom matplotlib.font_manager import FontProperties\r\n\r\n\r\nfont = FontProperties(fname=r\"c:\\windows\\fonts\\msyh.ttc\", size=15)\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\n\r\nof = pd.read_excel('D:\\GPR\\检验报告Pearson统计结果.xlsx')\r\nitem = ['白细胞', '血小板压积', '红细胞数', '血小板数', '中性粒细胞绝对值', '淋巴细胞绝对值', '血红蛋白浓度', '氯', '钾', '钠', '钙', '尿素氮', '肌酐', '谷草转氨酶', '谷丙转氨酶', '直接胆红素', '间接胆红素', '总胆红素', '总蛋白', '白蛋白', '球蛋白', '白球比例', '乳酸脱氢酶', '总胆汁酸', 'γ-谷氨酰转移酶', '碱性磷酸酶', '尿酸', '降钙素原(PCT)', 'C-反应蛋白', 'D-二聚体含量', '肌酸激酶', '肌红蛋白', '谷草谷丙']\r\nfor itemindex in range(0, 33):\r\n    m = item[itemindex]\r\n    df = pd.read_excel('D:\\GPR\\检验报告_' + m + '.xlsx')\r\n    a = list(df.groupby(by='姓名').pearson.unique())\r\n    of.总数.loc[of['项目']==m] = len(a)\r\n    of.强正相关.loc[of['项目']==m] = np.sum(list(map(lambda x: x > 0.9, a)))\r\n    of.强负相关.loc[of['项目']==m] = np.sum(list(map(lambda x: x < -0.9, a)))\r\n    of.正相关比例.loc[of['项目']==m] = np.sum(list(map(lambda x: x > 0, a)))/len(a)\r\n    of.负相关比例.loc[of['项目']==m] = np.sum(list(map(lambda x: x < 0, a)))/len(a)\r\n    of.强正相关比例.loc[of['项目'] == m] = np.sum(list(map(lambda x: x > 0.9, a))) / len(a)\r\n    of.强负相关比例.loc[of['项目'] == m] = np.sum(list(map(lambda x: x < -0.9, a))) / len(a)\r\n\r\n\r\nof.to_excel('D:\\GPR\\检验报告Pearson统计结果.xlsx')","repo_name":"Chenxr93/Stepwise-Diagnosis-and-Recovery-Prediction-AI-system-for-COVID-19","sub_path":"GPRPrediction/calculate_Inspection.py","file_name":"calculate_Inspection.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"20378169656","text":"import os\nimport soundfile as sf\nimport math\n\ndef split_wav_file(input_path, output_path, duration):\n    audio, sample_rate = 
sf.read(input_path)\n\n    total_samples = len(audio)\n\n    num_chunks = total_samples // (duration * sample_rate)\n\n    for i in range(num_chunks):\n        start_sample = i * duration * sample_rate\n        end_sample = (i + 1) * duration * sample_rate\n        chunk = audio[start_sample:end_sample]\n\n        filename = os.path.splitext(os.path.basename(input_path))[0]\n        output_filename = f\"{filename}_chunk{i + 1}.wav\"\n        output_file_path = os.path.join(output_path, output_filename)\n\n        sf.write(output_file_path, chunk, sample_rate)\n\n    print(f\"split {input_path} into {num_chunks} chunks of {duration} seconds each.\")\n\ninput_directory = \"./wav\"\noutput_directory = \"./samples\"\nduration = 5  # chunk length in seconds\nfor filename in os.listdir(input_directory):\n    if filename.endswith(\".wav\"):\n        input_file_path = os.path.join(input_directory, filename)\n        split_wav_file(input_file_path, output_directory, duration)\n","repo_name":"IgorTelles9/football-music-identifier","sub_path":"audio_splitter2.py","file_name":"audio_splitter2.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"29854321424","text":"def char_to_priority(character: str):\n    intercept = 38 if character.isupper() else 96\n    return ord(character) - intercept\n\nrucksacks = [line[:-1] for line in open('input')]\n\nfirst_compartments = [set(rucksack[:len(rucksack)//2]) for rucksack in rucksacks]\nsecond_compartments = [set(rucksack[len(rucksack)//2:]) for rucksack in rucksacks]\n\nintersections = [\n    first_compartment.intersection(second_compartment).pop()\n    for first_compartment, second_compartment in zip(first_compartments, second_compartments)\n]\n\nbadges = [\n    set(rucksacks[rucksack_index]).intersection(set(rucksacks[rucksack_index + 1])).intersection(set(rucksacks[rucksack_index + 2])).pop()\n    for rucksack_index in range(0, 300, 3)\n]\n\nprint(sum(list(map(char_to_priority, intersections))))\nprint(sum(list(map(char_to_priority, badges))))","repo_name":"Tecnarca/advent_of_code_2022","sub_path":"src/day3/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"40956605011","text":"class Backpack:\n    \"\"\" Backpack \"\"\"\n\n    def __init__(self, gift=None):\n        self.content = []\n        if gift is not None:\n            self.content.append(gift)\n\n    def add(self, item):\n        \"\"\" Put an item into the backpack \"\"\"\n        self.content.append(item)\n        print(\"Put into the backpack:\", item)\n\n    def inspect(self):\n        \"\"\" Inspect the contents \"\"\"\n        print(\"The backpack contains:\")\n        for item in self.content:\n            print(' ', item)\n\n\nmy_backpack = Backpack(gift='flash drive')\nmy_backpack.add(item='laptop')\nmy_backpack.add(item='laptop charger')\nmy_backpack.inspect()","repo_name":"kalmykovalexsander/skillbox_kurs_base","sub_path":"lesson_007/probe.py","file_name":"probe.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"4422362025","text":"import json\r\nimport requests\r\nimport urllib3\r\n\r\nimport readop.utility.config as config\r\nfrom readop.models.credentials import Credentials\r\nfrom readop.models.parameters import Parameters\r\nfrom readop.utility import validator\r\n\r\n# suppresses SSL warnings\r\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\r\n\r\n\r\ndef _response_data_to_parameters(response_data: dict) -> Parameters:\r\n    parameters = 
Parameters()\r\n\r\n if 'tagset' not in response_data:\r\n raise ValueError('Expected tagset field in response data')\r\n\r\n tagset = response_data['tagset']\r\n\r\n for tag in tagset:\r\n if 'key' not in tag:\r\n raise ValueError('Expected key in tag')\r\n if 'value' not in tag:\r\n raise ValueError('Expected value in tag')\r\n\r\n key = tag['key']\r\n value = int(tag['value'])\r\n\r\n if key == 'OST_DEFAULT_ACCESS_PATTERN':\r\n parameters.default_access_pattern = value\r\n elif key == 'OST_APD_DATA_SEQ_THRESHOLD':\r\n parameters.apd_data_seq_threshold = value\r\n elif key == 'OST_INIT_IO_REGION_SIZE':\r\n parameters.init_io_region_size = value\r\n elif key == 'OST_MAX_IO_REGIONS':\r\n parameters.max_io_regions = value\r\n elif key == 'OST_APD_ACCESS_SEQ_THRESHOLD':\r\n parameters.apd_access_seq_threshold = value\r\n elif key == 'OST_APD_ACCESS_RANDOM_THRESHOLD':\r\n parameters.apd_access_random_threshold = value\r\n elif key == 'OST_APD_ACCESS_RANDOM_INC_THRESHOLD':\r\n parameters.apd_access_random_inc_threshold = value\r\n else:\r\n raise ValueError('Unexpected tag in response: {0}'.format(key))\r\n\r\n return parameters\r\n\r\n\r\ndef _parameters_to_response_body(parameters):\r\n return {\r\n 'mdtag_object_modify': {\r\n 'namespace': 'file',\r\n 'tagset': [\r\n {\r\n 'key': 'OST_DEFAULT_ACCESS_PATTERN',\r\n 'value': 'int:' + str(parameters.default_access_pattern)\r\n },\r\n {\r\n 'key': 'OST_APD_DATA_SEQ_THRESHOLD',\r\n 'value': 'int:' + str(parameters.apd_data_seq_threshold)\r\n },\r\n {\r\n 'key': 'OST_INIT_IO_REGION_SIZE',\r\n 'value': 'int:' + str(parameters.init_io_region_size)\r\n },\r\n {\r\n 'key': 'OST_MAX_IO_REGIONS',\r\n 'value': 'int:' + str(parameters.max_io_regions)\r\n },\r\n {\r\n 'key': 'OST_APD_ACCESS_SEQ_THRESHOLD',\r\n 'value': 'int:' + str(parameters.apd_access_seq_threshold)\r\n },\r\n {\r\n 'key': 'OST_APD_ACCESS_RANDOM_THRESHOLD',\r\n 'value': 'int:' + str(parameters.apd_access_random_threshold)\r\n },\r\n {\r\n 'key': 'OST_APD_ACCESS_RANDOM_INC_THRESHOLD',\r\n 'value': 'int:' + str(parameters.apd_access_random_inc_threshold)\r\n }\r\n ]\r\n }\r\n }\r\n\r\n\r\nclass AuthenticationException(Exception):\r\n pass\r\n\r\n\r\nclass InvalidAuthTokenException(Exception):\r\n pass\r\n\r\n\r\nclass HTTP:\r\n def __init__(self):\r\n self.auth_token = 'NOT YET AUTHENTICATED'\r\n self.credentials = Credentials(config.get_ddve_username(), config.get_ddve_password())\r\n self.url = 'https://' + config.get_ddve_host() + ':' + config.get_ddve_rest_port() + '/rest/v1.0'\r\n\r\n self.authenticate()\r\n \r\n def authenticate(self):\r\n headers = {\r\n 'Content-type': 'application/json',\r\n 'Accept': 'application/json'\r\n }\r\n \r\n body = {\r\n 'auth_info': {\r\n 'username': self.credentials.username,\r\n 'password': self.credentials.password\r\n }\r\n }\r\n \r\n url = self.url + '/auth'\r\n \r\n response = requests.post(url, headers=headers, json=body, verify=False)\r\n \r\n if not response.headers['X-DD-AUTH-TOKEN']:\r\n raise AuthenticationException('Failed to authenticate')\r\n\r\n self.auth_token = response.headers['X-DD-AUTH-TOKEN']\r\n\r\n def get_parameters(\r\n self,\r\n storage_unit: str,\r\n should_authenticate: bool = False\r\n ) -> Parameters:\r\n if should_authenticate:\r\n self.authenticate()\r\n \r\n headers = {\r\n 'Content-type': 'application/json',\r\n 'Accept': 'application/text',\r\n 'X-DD-AUTH-TOKEN': self.auth_token\r\n }\r\n \r\n url = self.url + '/dd-systems/0/mdtags/%2Fdata%2Fcol1%2F' + storage_unit + '?filter=namespace%3Dfile'\r\n \r\n response = 
requests.get(url, headers=headers, verify=False)\r\n response_data = json.loads(response.text)\r\n \r\n if response.status_code == 302 and response_data['code'] == 5427:\r\n raise InvalidAuthTokenException\r\n \r\n return _response_data_to_parameters(response_data)\r\n \r\n def set_parameters(\r\n self,\r\n storage_unit: str,\r\n parameters: Parameters,\r\n should_authenticate: bool = False\r\n ) -> None:\r\n if should_authenticate:\r\n self.authenticate()\r\n \r\n headers = {\r\n 'Content-type': 'application/json',\r\n 'Accept': 'application/text',\r\n 'X-DD-AUTH-TOKEN': self.auth_token\r\n }\r\n \r\n body = _parameters_to_response_body(parameters)\r\n \r\n url = self.url + '/dd-systems/0/mdtags/%2Fdata%2Fcol1%2F' + storage_unit\r\n \r\n response = requests.put(url, headers=headers, json=body, verify=False)\r\n response_data = json.loads(response.text)\r\n \r\n if response.status_code == 302 and response_data['code'] == 5427:\r\n raise InvalidAuthTokenException\r\n \r\n @property\r\n def auth_token(self):\r\n return self.__auth_token\r\n \r\n @property\r\n def credentials(self):\r\n return self.__credentials\r\n \r\n @property\r\n def url(self):\r\n return self.__url\r\n \r\n @auth_token.setter\r\n def auth_token(self, auth_token):\r\n self.__auth_token = validator.validate_string_not_empty(auth_token)\r\n \r\n @credentials.setter\r\n def credentials(self, credentials):\r\n self.__credentials = validator.validate_credentials(credentials)\r\n \r\n @url.setter\r\n def url(self, url):\r\n self.__url = validator.validate_string_not_empty(url)\r\n","repo_name":"wma8/SeniorDesign","sub_path":"readop/protocols/http.py","file_name":"http.py","file_ext":"py","file_size_in_byte":6607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"20821035113","text":"import SimpleITK\nimport numpy as np\nimport pandas as pd\nfrom pandas import DataFrame, Series, concat, merge\nfrom jsonloader import load_predictions_json\nfrom joblib import Parallel, delayed\nfrom pathlib import Path\nfrom sklearn import metrics\nfrom functools import reduce, partial\n\nfrom evalutils import ClassificationEvaluation\nfrom evalutils.io import SimpleITKLoader\nfrom evalutils.validators import (\n NumberOfCasesValidator, UniquePathIndicesValidator, UniqueImagesValidator\n)\n\ndef dice_norm_metric(ground_truth, predictions):\n \"\"\"\n For a single example returns DSC_norm, fpr, fnr\n \"\"\"\n\n # Reference for normalized DSC\n r = 0.001\n # Cast to float32 type\n gt = ground_truth.astype(\"float32\")\n seg = predictions.astype(\"float32\")\n im_sum = np.sum(seg) + np.sum(gt)\n if im_sum == 0:\n return 1.0\n else:\n if np.sum(gt) == 0:\n k = 1.0\n else:\n k = (1-r) * np.sum(gt) / ( r * ( len(gt.flatten()) - np.sum(gt) ) )\n tp = np.sum(seg[gt==1])\n fp = np.sum(seg[gt==0])\n fn = np.sum(gt[seg==0])\n fp_scaled = k * fp\n dsc_norm = 2 * tp / (fp_scaled + 2 * tp + fn)\n\n return dsc_norm\n\ndef get_nDSC_aac(gts, preds, uncs, n_jobs=8):\n\n def compute_dice_norm(frac_, preds_, gts_, N_):\n pos = int(N_ * frac_)\n curr_preds = preds if pos == N_ else np.concatenate((preds_[:pos], gts_[pos:]))\n return dice_norm_metric(gts_, curr_preds)\n\n ordering = uncs.argsort()\n uncs = uncs[ordering]\n gts = gts[ordering]\n preds = preds[ordering]\n N = len(gts)\n\n # # Significant class imbalance means it is important to use logspacing between values\n # # so that it is more granular for the higher retention fractions\n num_values = 200\n fracs_retained = 
np.log(np.arange(num_values+1)[1:])\n fracs_retained = fracs_retained / np.amax(fracs_retained)\n\n process = partial(compute_dice_norm, preds_=preds, gts_=gts, N_=N)\n dsc_norm_scores = np.asarray(\n Parallel(n_jobs=n_jobs)(delayed(process)(frac) for frac in fracs_retained)\n )\n\n return 1. - metrics.auc(fracs_retained, dsc_norm_scores)\n\nclass Shifts2022seg(ClassificationEvaluation):\n def __init__(self):\n super().__init__(\n file_loader=SimpleITKLoader(),\n validators=(\n NumberOfCasesValidator(num_cases=25),\n #UniquePathIndicesValidator(),\n UniqueImagesValidator(),\n ),\n )\n self._mask_path = Path(\"/opt/evaluation/brain-mask/\")\n self._relative_segmentation_path = \"/output/images/white-matter-multiple-sclerosis-lesion-segmentation/\"\n self._relative_uncertainty_path = \"/output/images/white-matter-multiple-sclerosis-lesion-uncertainty-map/\"\n\n self.mapping_dict = load_predictions_json(Path(\"/input/predictions.json\"))\n\n self._segmentation_cases = DataFrame()\n self._uncertainty_cases = DataFrame()\n\n def score_case(self, *, idx, case):\n gt_path = case[\"path_ground_truth\"]\n segmentation_path = case[\"path_segmentation\"]\n uncertainty_path = case[\"path_uncertainty\"]\n mask_path = case[\"path_mask\"]\n\n # Load the images for this case\n gt = self._file_loader.load_image(gt_path)\n seg = self._file_loader.load_image(segmentation_path)\n unc = self._file_loader.load_image(uncertainty_path)\n mask = self._file_loader.load_image(mask_path)\n\n gt_array = np.transpose(SimpleITK.GetArrayFromImage(gt))\n seg_array = SimpleITK.GetArrayFromImage(seg)\n unc_array = SimpleITK.GetArrayFromImage(unc)\n mask_array = np.transpose(SimpleITK.GetArrayFromImage(mask))\n\n\n # Checks to ensure that the predictions are binary - if not, prediction is punished\n if len(np.unique(seg_array)) > 2:\n seg_array = np.zeros_like(seg_array, dtype=int)\n\n nDSC = dice_norm_metric(gt_array, seg_array)\n nDSC_AAC = get_nDSC_aac(gt_array[mask_array==1].flatten(), seg_array[mask_array==1].flatten(), unc_array[mask_array==1].flatten())\n\n return {\n 'nDSC': nDSC,\n 'nDSC_AAC':nDSC_AAC,\n 'seg_fname': segmentation_path.name,\n 'unc_fname': uncertainty_path.name,\n 'gt_fname': gt_path.name,\n }\n\n\n def _load_shuffled_cases(self, rel_path):\n\n cases = None\n\n job_pks = self.mapping_dict.keys()\n\n for pk in job_pks:\n folder = Path(\"/input/\" + pk + rel_path)\n\n new_cases = self._load_cases(folder=folder)\n new_cases[\"ground_truth_path\"] = [\n self._ground_truth_path / self.mapping_dict[pk]\n for _ in new_cases.path\n ]\n\n if cases is None:\n cases = new_cases\n else:\n cases = pd.concat([cases, new_cases])\n \n return cases\n\n def load(self):\n\n self._ground_truth_cases = self._load_cases(folder=self._ground_truth_path)\n self._mask_cases = self._load_cases(folder=self._mask_path)\n self._segmentation_cases = self._load_shuffled_cases(rel_path=self._relative_segmentation_path)\n self._uncertainty_cases = self._load_shuffled_cases(rel_path=self._relative_uncertainty_path)\n\n self._ground_truth_cases = self._ground_truth_cases.sort_values(\n \"path\"\n ).reset_index(drop=True)\n self._mask_cases = self._mask_cases.sort_values(\n \"path\"\n ).reset_index(drop=True)\n self._segmentation_cases = self._segmentation_cases.sort_values(\n \"ground_truth_path\"\n ).reset_index(drop=True)\n self._uncertainty_cases = self._uncertainty_cases.sort_values(\n \"ground_truth_path\"\n ).reset_index(drop=True)\n\n\n def validate(self):\n \"\"\"Validates each dataframe separately\"\"\"\n 
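        # Note (added): the three frames are validated separately because the\n        # segmentation and uncertainty frames are collected from per-job\n        # prediction folders and are only merged with the ground truth later,\n        # in merge_ground_truth_and_predictions().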
self._validate_data_frame(df=self._ground_truth_cases)\n self._validate_data_frame(df=self._segmentation_cases)\n self._validate_data_frame(df=self._uncertainty_cases)\n\n def merge_ground_truth_and_predictions(self):\n\n def agg_df(dfList):\n temp = reduce(lambda left, right: pd.merge(left, right, left_index=True, right_index=True, how=\"outer\"), dfList)\n return temp\n\n dfs = {0: self._ground_truth_cases, 1: self._segmentation_cases, 2: self._uncertainty_cases, 3: self._mask_cases}\n suffixes = (\"_ground_truth\", \"_segmentation\", \"_uncertainty\", \"_mask\")\n for i in dfs:\n dfs[i] = dfs[i].add_suffix(suffixes[i])\n self._cases = agg_df(dfs.values())\n\n def cross_validate(self):\n pass\n\n\n\nif __name__ == \"__main__\":\n Shifts2022seg().evaluate()\n","repo_name":"VatsalRaina/EvaluationContainer_msseg","sub_path":"shifts2022seg/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":6691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"29312601070","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom os import path\n\nimport tensorflow.compat.v1 as tf\n\n\ndef get_dataset(data_name, split, args):\n return dataset_dict[data_name](split, args)\n\n\ndef shapenet(split, args):\n \"\"\"ShapeNet Dataset.\n\n Args:\n split: string, the split of the dataset, either \"train\" or \"test\".\n args: tf.app.flags.FLAGS, configurations.\n\n Returns:\n dataset: tf.data.Dataset, the shapenet dataset.\n \"\"\"\n total_points = 100000\n data_dir = args.data_dir\n sample_bbx = args.sample_bbx\n if split != \"train\":\n sample_bbx = total_points\n sample_surf = args.sample_surf\n if split != \"train\":\n sample_surf = 0\n image_h = args.image_h\n image_w = args.image_w\n image_d = args.image_d\n n_views = args.n_views\n depth_h = args.depth_h\n depth_w = args.depth_w\n depth_d = args.depth_d\n batch_size = args.batch_size if split == \"train\" else 1\n dims = args.dims\n\n def _parser(example):\n fs = tf.parse_single_example(\n example,\n features={\n \"rgb\":\n tf.FixedLenFeature([n_views * image_h * image_w * image_d],\n tf.float32),\n \"depth\":\n tf.FixedLenFeature([depth_d * depth_h * depth_w], tf.float32),\n \"bbox_samples\":\n tf.FixedLenFeature([total_points * (dims + 1)], tf.float32),\n \"surf_samples\":\n tf.FixedLenFeature([total_points * (dims + 1)], tf.float32),\n \"name\":\n tf.FixedLenFeature([], tf.string),\n })\n fs[\"rgb\"] = tf.reshape(fs[\"rgb\"], [n_views, image_h, image_w, image_d])\n fs[\"depth\"] = tf.reshape(fs[\"depth\"], [depth_d, depth_h, depth_w, 1])\n fs[\"bbox_samples\"] = tf.reshape(fs[\"bbox_samples\"],\n [total_points, dims + 1])\n fs[\"surf_samples\"] = tf.reshape(fs[\"surf_samples\"],\n [total_points, dims + 1])\n return fs\n\n def _sampler(example):\n image = tf.gather(\n example[\"rgb\"],\n tf.random.uniform((),\n minval=0,\n maxval=n_views if split == \"train\" else 1,\n dtype=tf.int32),\n axis=0)\n image = tf.image.resize_bilinear(tf.expand_dims(image, axis=0), [224, 224])\n\n depth = example[\"depth\"] / 1000.\n\n sample_points = []\n sample_labels = []\n\n if sample_bbx > 0:\n if split == \"train\":\n indices_bbx = tf.random.uniform([sample_bbx],\n minval=0,\n maxval=total_points,\n dtype=tf.int32)\n bbx_samples = tf.gather(example[\"bbox_samples\"], indices_bbx, axis=0)\n else:\n bbx_samples = example[\"bbox_samples\"]\n bbx_points, bbx_labels = tf.split(bbx_samples, [3, 1], axis=-1)\n 
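      # Note (added): the fourth sample column appears to hold a signed\n      # occupancy value; after concatenation below, values <= 0 are binarised\n      # to label 1 ('inside') when point_labels is built.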
sample_points.append(bbx_points)\n sample_labels.append(bbx_labels)\n\n if sample_surf > 0:\n indices_surf = tf.random.uniform([sample_surf],\n minval=0,\n maxval=total_points,\n dtype=tf.int32)\n surf_samples = tf.gather(example[\"surf_samples\"], indices_surf, axis=0)\n surf_points, surf_labels = tf.split(surf_samples, [3, 1], axis=-1)\n sample_points.append(surf_points)\n sample_labels.append(surf_labels)\n\n points = tf.concat(sample_points, axis=0)\n point_labels = tf.cast(tf.concat(sample_labels, axis=0) <= 0., tf.float32)\n\n image = tf.reshape(image, [224, 224, image_d])\n depth = tf.reshape(depth, [depth_d, depth_h, depth_w])\n depth = tf.transpose(depth, [1, 2, 0])\n points = tf.reshape(points, [sample_bbx + sample_surf, 3])\n point_labels = tf.reshape(point_labels, [sample_bbx + sample_surf, 1])\n\n return {\n \"image\": image,\n \"depth\": depth,\n \"point\": points,\n \"point_label\": point_labels,\n \"name\": example[\"name\"],\n }\n\n data_pattern = path.join(data_dir, \"{}-{}-*\".format(args.obj_class, split))\n data_files = tf.gfile.Glob(data_pattern)\n if not data_files:\n raise ValueError(\"{} did not match any files\".format(data_pattern))\n file_count = len(data_files)\n filenames = tf.data.Dataset.list_files(data_pattern, shuffle=True)\n data = filenames.interleave(\n lambda x: tf.data.TFRecordDataset([x]),\n cycle_length=file_count,\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n data = data.map(_parser, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n data = data.map(_sampler, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n if split == \"train\":\n data = data.shuffle(batch_size * 5).repeat(-1)\n\n return data.batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)\n\n\ndataset_dict = {\n \"shapenet\": shapenet,\n}\n","repo_name":"tensorflow/graphics","sub_path":"tensorflow_graphics/projects/cvxnet/lib/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":4867,"program_lang":"python","lang":"en","doc_type":"code","stars":2734,"dataset":"github-code","pt":"75"} +{"seq_id":"18537582809","text":"# Databricks notebook source\n#Start a simple Spark session\nfrom pyspark.sql import SparkSession\nspark = SparkSession.builder.appName(\"walmart\").getOrCreate()\n\n# COMMAND ----------\n\n#Load the Walmart Stock CSV File, have Spark infer the data types\n# Este punto, se realiza además con la carga de datos por la interfaz visual de databricks\ndf= sqlContext.sql('Select * from walmart_stock_csv')\n\n# COMMAND ----------\n\n#What are the column names?\ndf.columns\n\n# COMMAND ----------\n\n#What does the Schema look like?\ndf.printSchema()\n\n# COMMAND ----------\n\n#Print out the first 5 columns\nfor row in df.head(5):\n print(row)\n print('\\n')\n\n# COMMAND ----------\n\n#Use describe() to learn about the DataFrame\ndf.describe().show()\n\n# COMMAND ----------\n\n#There are too many decimal places for mean and stddev in the describe() dataframe. Format the numbers to just show up to two decimal places. Pay careful attention to the datatypes that .describe() returns, we didn't cover how to do this exact formatting, but we covered something very similar. 
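#\n# --- Added aside (not part of the original notebook) ---\n# One alternative to the cast()+format_number() approach shown below: round the\n# numeric columns with pyspark.sql.functions.round, which keeps them numeric\n# rather than turning them into strings:\n#\n#     from pyspark.sql.functions import round as sql_round, col\n#     d = df.describe()\n#     d.select('summary', sql_round(col('Open').cast('float'), 2).alias('Open')).show()\n# 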
[Check this link for a hint](http://spark.apache.org/docs/latest/api/python/pyspark.sql.html#pyspark.sql.Column.cast)\n\n#df.describe().printSchema()\nfrom pyspark.sql.functions import format_number\nresultd=df.describe()\nresultd.select(resultd['summary'],format_number(resultd['Open'].cast('float'),2).alias('Open'),\n format_number(resultd['High'].cast('float'),2).alias('High'),\n format_number(resultd['Low'].cast('float'),2).alias('Low'),\n format_number(resultd['Close'].cast('float'),2).alias('Close'),\n resultd['Volume'].cast('int').alias('Volume')).show()\n\n# COMMAND ----------\n\n#create a new dataframe with a column called HV Ratio that is the ratio of the High Price versus volume of stock traded for a day.\ndf2 = df.withColumn(\"HV Ratio\",df[\"High\"]/df[\"Volume\"])\ndf2.select('HV Ratio').show()\n\n# COMMAND ----------\n\n#What day had the Peak High in Price?\ndf.orderBy(df[\"High\"].desc()).head(1)[0][0]\n\n# COMMAND ----------\n\n#What is the mean of the Close column?\nfrom pyspark.sql.functions import mean\ndf.select(mean('Close')).show()\n\n# COMMAND ----------\n\n#What is the max and min of the Volume column?\nfrom pyspark.sql.functions import max, min\ndf.select(max(\"Volume\"),min(\"Volume\")).show()\n\n# COMMAND ----------\n\n#How many days was the Close lower than 60 dollars?\ndf.filter('Close<60').count()\n\n# COMMAND ----------\n\n#What percentage of the time was the High greater than 80 dollars\n(df.filter('High>80').count()*1.0/df.count())*100\n\n# COMMAND ----------\n\n#What is the Pearson correlation between High and Volume?\nfrom pyspark.sql.functions import corr\ndf.select(corr('High','Volume')).show()\n\n# COMMAND ----------\n\n#What is the max High per year?\nfrom pyspark.sql.functions import year\n\nyeardf=df.withColumn('Year',year(df['Date']))\nmax_yf= yeardf.groupBy('Year').max()\nmax_yf.select('Year', 'max(High)').show()\n\n# COMMAND ----------\n\n#What is the average Close for each Calendar Month?\nfrom pyspark.sql.functions import month\nmonthdf = df.withColumn(\"Month\",month(\"Date\"))\nmonthavgs = monthdf.select(\"Month\",\"Close\").groupBy(\"Month\").mean()\nmonthavgs.select(\"Month\",\"avg(Close)\").orderBy('Month').show()\n\n# COMMAND ----------\n\n\n","repo_name":"mcalle28/BigDataMocc","sub_path":"ejercicioSpark/py/Ejercicios_Dataframe.py","file_name":"Ejercicios_Dataframe.py","file_ext":"py","file_size_in_byte":3230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"42749263982","text":"import os\nimport pathlib \nimport urllib\nimport urllib.request\nimport shutil \nimport random\nimport zipfile\nTEMP_FILENAME = \"tmp.zip\"\ndef download_data(out_path, csv_filename, url, force=False):\n \n \"\"\" downloads the data to the specified out_path \"\"\"\n dir_path = pathlib.Path(out_path)\n dir_path.mkdir(exist_ok=True)\n tmp_path = dir_path.joinpath(TEMP_FILENAME)\n csv_path = dir_path.joinpath(csv_filename)\n \n if csv_path.is_file() and not force:\n print(f'csv file {str(csv_path)} exists, skipping download.')\n else:\n if tmp_path.is_file() and not force:\n print(f'zip file {str(tmp_path)} exists, skipping download.') \n else:\n print(f'Downloading {url}...')\n with urllib.request.urlopen(url) as response, open(str(tmp_path), 'wb') as out_file:\n shutil.copyfileobj(response, out_file)\n print(f'Saved to {str(tmp_path)}.')\n print(f'Extracting zip {str(tmp_path)}.')\n with zipfile.ZipFile(str(tmp_path), 'r') as zip_ref: \n zipinfo = zip_ref.infolist()[0]\n zipinfo.filename = 
str(csv_path)\n            zip_ref.extract(zipinfo)\n        tmp_path.unlink()\n\n    return csv_filename","repo_name":"idanbudin93/epitope-predictor","sub_path":"download_data.py","file_name":"download_data.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"26831864323","text":"from random import randint\r\n\r\n# Maximum random value in the matrix\r\nmax_elem_val = 60\r\n\r\n# Matrix size\r\nmatrix_size = 5\r\n\r\n# Create the matrix\r\nmatrix = [[randint(0, max_elem_val) for i in range(matrix_size)] for j in range(matrix_size)]\r\n\r\n# Initialise the tracking variables (each an array of the form [value, row_index, column_index])\r\nmin_val, max_val = [matrix[0][0], 0, 0], [matrix[0][0], 0, 0]\r\n\r\n# Iterate with one loop over the rows and one over the elements of each row;\r\n# enumerate() keeps the indices correct even when a value occurs more than once\r\nfor i, row in enumerate(matrix):  # for every row\r\n    for j, elem in enumerate(row):  # for every element in the row\r\n\r\n        # Compare and overwrite if smaller/larger\r\n        if (elem <= min_val[0]):\r\n            min_val = [elem, i, j]\r\n        if (elem >= max_val[0]):\r\n            max_val = [elem, i, j]\r\n\r\n# Print the matrix\r\nprint('===== Matrix =====')\r\nfor row in matrix:\r\n    for elem in row:\r\n        # Pad each cell to the width of the largest value for tidy output\r\n        print(f\"{elem:>{len(str(max_val[0]))}}\", end=\" \")\r\n    print()\r\n\r\n# Print the minimum and the maximum\r\nprint(f'Minimum value: {min_val[0]}; row index: {min_val[1]}; column index: {min_val[2]}')\r\nprint(f'Maximum value: {max_val[0]}; row index: {max_val[1]}; column index: {max_val[2]}')\r\n","repo_name":"4YDnO1/python_some_tasks","sub_path":"1, 3, 4/task 1.py","file_name":"task 1.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"20684184056","text":"import json\nimport os, sys\nfrom aiohttp import web\n\nimport numpy as np\nimport scipy.fftpack\nimport pandas as pd\nfrom pandas_profiling import ProfileReport\nsys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))\nfrom src.topics.views import make_spectrogram\nfrom src.utils import RouteTableDefDocs, dumps, try_get, get_client, try_get_validate, try_get_all\nroutes = RouteTableDefDocs()\n\n\n@routes.post('/project/dataset', name='testing')\nasync def testing(request: web.Request):\n    r = await request.post()\n    data = r['file']  # the uploaded file part\n\n    headers = request.headers\n    content_length = int(headers['Content-length'])\n    projectName = \"testing\"\n\n    os.makedirs(request.app['settings'].PROJECT_DIR + \"/\" + projectName, exist_ok=True)\n\n    # Persist the uploaded \".csv\" to disk\n    if \".csv\" in data.filename:\n        csvPath = request.app['settings'].PROJECT_DIR + \"/\" + projectName + \"/\" + data.filename\n        with open(csvPath, 'wb') as file:\n            file.write(data.file.read(content_length))  # write the raw upload\n        df = pd.read_csv(request.app['settings'].PROJECT_DIR + \"/\" + projectName + \"/\" + data.filename)\n\n        profile = ProfileReport(df, title='Pandas Profiling Report', html={'style': {'full_width': True}})\n\n        profile.to_file(output_file=\"your_report.html\")\n        with open(\"your_report.html\", \"r\", encoding='utf-8') as f:\n            text = f.read()\n        return web.Response(\n            text=text,\n            content_type='text/html')\n\n    else:\n        return web.HTTPOk()\n\n    #profile.to_file(output_file=\"your_report.json\")\n
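\n# A file-free variant of the report flow above (a sketch; the helper name is ours,\n# and it assumes pandas_profiling's ProfileReport.to_html(), which renders the\n# report to a string and avoids the temp-file round trip):\nasync def profile_report_response(df):\n    profile = ProfileReport(df, title='Pandas Profiling Report')\n    return web.Response(text=profile.to_html(), content_type='text/html')\n\n\n@routes.get('/project/{project}/files/{file}', 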
name='get_datafile')\nasync def get_datafile(request: web.Request):\n filename = request.match_info['file']\n project = request.match_info['project']\n\n if \".csv\" in filename or \".xlsx\" in filename:\n path = request.app['settings'].PROJECT_DIR + \"/\" + project + \"/files/\" + filename\n if \".csv\" in filename:\n data = pd.read_csv(path)\n header = data.iloc[0]\n window, cols = data.shape\n y = [[val for val in data.values]]\n print(\"header\", header)\n print(\"head\", data.head())\n return web.json_response({\n \"values\": data.values,\n \"y\": y,\n \"names\": list(data)\n }, dumps=dumps)\n elif \".xlsx\" in filename:\n data = pd.read_excel(path)\n return web.json_response({\n \"values\": data.values,\n \"names\": list(data)\n }, dumps=dumps)\n\n\n@routes.get('/project/{project}/files/{file}/inspect', name='inspect_data')\nasync def inspect_data(request: web.Request):\n filename = request.match_info['file']\n project = request.match_info['project']\n df = pd.read_csv(request.app['settings'].PROJECT_DIR + \"/\" + project + \"/files/\" + filename)\n\n profile = ProfileReport(df, title='Pandas Profiling Report', html={'style': {'full_width': True}})\n path = request.app['settings'].PROJECT_DIR + \"/\" + project + \"/files/\" + filename.replace(\".csv\", \".html\")\n profile.to_file(output_file=path)\n with open(path, \"r\", encoding='utf-8') as f:\n text = f.read()\n print(\"responding now\")\n return web.Response(\n text=text,\n content_type='text/html')\n\n\n@routes.get('/project/datafile/get/{project}/{file}', name='send_historical_data')\nasync def send_historical_data(request: web.Request):\n file_name = request.match_info['file']\n project_id = request.match_info['project']\n\n df = []\n if \".csv\" in file_name:\n df = pd.read_csv(request.app['settings'].PROJECT_DIR + \"/\" + project_id + \"/files/\" + file_name)\n elif \".xlsx\" in file_name:\n df = pd.read_excel(request.app['settings'].PROJECT_DIR + \"/\" + project_id + \"/files/\" + file_name)\n else:\n web.HTTPBadRequest()\n\n headers = [title for title in df.columns.values]\n x_axis = [int(timestamp) for timestamp in df.iloc[:, 0].values]\n y_values = [[item for item in df.iloc[:, i].values] for i in range(1, df.shape[1])]\n test = [headers, x_axis, y_values]\n\n return web.json_response(test, dumps=dumps)\n\n\n@routes.post('/project/datafile', name='upload_datafile')\nasync def upload_datafile(request: web.Request):\n r = await request.post()\n data = r['file']\n\n headers = request.headers\n content_length = int(headers['Content-length'])\n project_name = headers[\"projectName\"]\n os.makedirs(request.app['settings'].PROJECT_DIR + \"/\" + project_name + \"/files\", exist_ok=True)\n\n # Write \".FMU\" to disc\n if \".csv\" in data.filename or \".xlsx\" in data.filename:\n path = request.app['settings'].PROJECT_DIR + \"/\" + project_name + \"/files/\" + data.filename\n with open(path, 'wb') as file:\n file.write(data.file.read(content_length))\n return web.HTTPAccepted()\n else:\n return web.HTTPBadRequest()\n\n\n@routes.post('/machinelearning/{project_id}/{tile}/model', name='save_machinelearning_model')\nasync def save_machinelearning_model(request: web.Request):\n r = await request.post()\n project_id = request.match_info['project_id']\n tile = request.match_info['tile']\n print(r, project_id, tile)\n data = r['model.json']\n data2 = r['model.weights.bin']\n\n headers = request.headers\n content_length = int(headers['Content-length'])\n os.makedirs(request.app['settings'].PROJECT_DIR + \"/\" + project_id + \"/\" + tile, 
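\n                # NOTE: Content-Length here is the whole multipart body, so reading\n                # content_length bytes for each of the two parts below over-reads;\n                # per-part sizes (or aiohttp's multipart reader and read_chunk())\n                # would be exact.\n                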
exist_ok=True)\n\n try:\n path = request.app['settings'].PROJECT_DIR + \"/\" + project_id + \"/\" + tile + \"/model.json\"\n with open(path, 'wb') as file:\n file.write(data.file.read(content_length))\n path2 = request.app['settings'].PROJECT_DIR + \"/\" + project_id + \"/\" + tile + \"/models.weights.bin\"\n with open(path2, 'wb') as file2:\n file2.write(data2.file.read(content_length))\n return web.HTTPAccepted()\n except:\n return web.HTTPNotFound()\n\n\n@routes.get('/machinelearning/{project_id}/{tile}/model.json', name='load_machinelearning_model')\nasync def load_machinelearning_model(request: web.Request):\n project_id = request.match_info['project_id']\n tile = request.match_info['tile']\n with open(request.app['settings'].PROJECT_DIR + \"/\" + project_id + \"/\" + tile + \"/model.json\", 'r') as json_file:\n js = json.load(json_file)\n return web.json_response(js, dumps=dumps)\n\n\n@routes.get('/machinelearning/{project_id}/{tile}/model.weights.bin', name='load_machinelearning_model_weights')\nasync def load_machinelearning_model_weights(request: web.Request):\n project_id = request.match_info['project_id']\n tile = request.match_info['tile']\n return web.FileResponse(request.app['settings'].PROJECT_DIR + \"/\" + project_id + \"/\" + tile + \"/models.weights.bin\")\n\n\n\n@routes.get('/project/spectrogram/file/{project_id}/{file_path}/{frequency}', name='spectrogram_from_file')\nasync def spectrogram_from_file(request: web.Request):\n project_name = request.match_info['project_id']\n file_path = request.match_info['file_path']\n frequency = float(request.match_info['frequency'])\n path = request.app['settings'].PROJECT_DIR + \"/\" + project_name + '/files/' + file_path\n print(\"path\", path)\n if \".csv\" in file_path:\n data = pd.read_csv(path)\n elif \".xlsx\" in file_path:\n data = pd.read_excel(path)\n else:\n print(\"No file?\")\n\n measurements = [m for m in data.iloc[:, 1].values]\n duration = len(measurements)/frequency\n print(\"duration\", duration)\n file_data = make_spectrogram(data.iloc[:, 1].values, duration)\n\n return web.json_response(file_data, dumps=dumps)\n\n\n@routes.get('/fft/file/{project_id}/{file_path}/{sample_spacing}', name='fetch_fft_from_file')\nasync def fetch_fft_from_file(request: web.Request):\n project_name = request.match_info['project_id']\n file_path = 'files/' + request.match_info['file_path']\n sample_spacing = float(request.match_info['sample_spacing'])\n path = request.app['settings'].PROJECT_DIR + \"/\" + project_name + '/' + file_path\n\n if \".csv\" in file_path:\n data = pd.read_csv(path)\n elif \".xlsx\" in file_path:\n data = pd.read_excel(path)\n else:\n raise web.HTTPNotFound(reason='No file found')\n\n if data.columns.values[0] == 'Timestamp':\n fft_values = scipy.fftpack.fft(data.iloc[:, 1].values)\n name = data.columns.values[1]\n else:\n fft_values = scipy.fftpack.fft(data.iloc[:, 0].values)\n name = data.columns.values[0]\n\n window = data.shape[0] - 1\n\n x = np.fft.rfftfreq(int(window), d=sample_spacing)\n y = 2.0 / window * np.abs(fft_values[:window // 2])\n plots = [name, [val for val in x], [float(val) for val in y]]\n\n return web.json_response(plots, dumps=dumps)\n\n\n\n\n\n\n\n","repo_name":"erikkjernlie/cbms_backend","sub_path":"src/configuration/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"33435749759","text":"from operator import attrgetter\nfrom collections import Counter\nfrom 
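itertools import permutations\n\n# For reference, Counter below drives the duplicate detection:\n#   Counter(['a', 'b', 'a']).most_common(1) == [('a', 2)]\n# so most_common(1)[0][1] == 1 means every performance value is unique.\n\nfrom 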
itertools import permutations\nimport random\nimport logging\n\nfrom data.model import Tournament, Team, Game, Round\nfrom controller.helper import get_games_of_team, get_game_of_teams\nfrom controller.errors import NoMatchError\n\n\n__logger = logging.getLogger('swiss_system')\n\n\ndef check_and_fix_initial_performance_values(tournament):\n # first check whether there is already a correct ranking\n performance_value_count = Counter(\n [t.performance_value for t in tournament.teams])\n if performance_value_count['-1'] == 0 and \\\n performance_value_count.most_common(1)[0][1] == 1:\n return\n\n __logger.info(\"No valid inital order of teams. Fix performance values.\")\n\n teams_without_performance_value = []\n teams_with_performance_value = []\n for t in tournament.teams:\n if t.performance_value == -1:\n teams_without_performance_value.append(t)\n else:\n teams_with_performance_value.append(t)\n\n # any team with performance value should be higher ranked than teams\n # without, so we just add the number of teams without performance value to\n # the performance value of the other teams\n number_of_teams_without_performance_value = len(\n teams_without_performance_value)\n if number_of_teams_without_performance_value > 0:\n for t in teams_with_performance_value:\n t.performance_value += number_of_teams_without_performance_value\n\n # assign random performance value to teams without one, in the range\n # 0 .. len(teams_without_performance_value)\n random_performance_values = random.sample(\n range(0, number_of_teams_without_performance_value),\n number_of_teams_without_performance_value)\n for t in teams_without_performance_value:\n t.performance_value = random_performance_values.pop()\n\n # fix several teams with same value\n # if all values are unique, we are done\n if len(teams_with_performance_value) > 1:\n performance_value_count_2 = Counter(\n [t.performance_value for t in teams_with_performance_value])\n if performance_value_count_2.most_common(1)[0][1] == 1:\n return\n\n # so there are some teams with the same value\n for t in teams_with_performance_value:\n same_performance_value = {t}\n\n for t2 in teams_with_performance_value:\n if t.performance_value == t2.performance_value:\n same_performance_value.add(t2)\n\n # this performance value is unique\n if len(same_performance_value) == 1:\n continue\n\n # here we have some duplicates, so we shift all other teams with a\n # higher performance value\n _shift_performance_values(teams_with_performance_value,\n len(same_performance_value) - 1,\n t.performance_value)\n\n # and randomly order teams with same value\n random_shift = random.sample(range(0, len(same_performance_value)),\n len(same_performance_value))\n for t3 in same_performance_value:\n t3.performance_value = t3.performance_value \\\n + random_shift.pop()\n\n\ndef _shift_performance_values(teams, value, threshold):\n for t in teams:\n if t.performance_value > threshold:\n t.performance_value = t.performance_value + value\n\n\ndef calculate_next_round(tournament):\n calculate_standings(tournament)\n rnd = Round()\n teams = sorted(tournament.teams, key=attrgetter('position'))\n \n if len(tournament.rounds) > 0: # some round in the tournament\n game_id = len(tournament.rounds) * len(tournament.rounds[0].games) + 1\n\n # determine free round\n if len(tournament.teams) % 2 == 1:\n min_free_rounds = min([t.fl for t in teams])\n for t in reversed(teams):\n if (t.fl == min_free_rounds):\n t.fl = t.fl + 1\n game = Game(game_id, t, None, tournament.points_fr_win,\n 
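# bye: the opponent is None and the result is settled\n                                # immediately from the configured free-round points\n                                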
tournament.points_fr_loss)\n game_id += 1\n rnd.games.append(game)\n teams.remove(t)\n break\n\n # case if there are no teams...\n if len(list(teams)) == 0:\n tournament.rounds.append(rnd)\n return\n\n # used for fallback if pool matching fails\n teams_without_free_round = list(teams)\n game_id_fallback = game_id\n\n # go through all pools (teams with same number of wins) and\n # determine games\n max_wins = max([t.wins for t in teams])\n min_wins = min([t.wins for t in teams])\n\n games = []\n\n for wins in range(max_wins, min_wins - 1, -1):\n pool = [x for x in teams if x.wins == wins]\n\n hl_candidates = sorted([x for x in teams if x.wins == wins - 1],\n key=attrgetter('hl', 'position'))\n\n try:\n games_in_pool = match_pool(tournament, pool, hl_candidates,\n game_id)\n game_id += len(games_in_pool)\n games += games_in_pool\n except:\n # not possible to find matches for a pool\n # abandon pool system and try to find any possible way for next\n # round\n __logger.info('Pool system does not produce valid round.' \n 'Try any possibility for matches.')\n try:\n games = match_teams_greedy(tournament, \n teams_without_free_round, game_id_fallback)\n except NoMatchError:\n __logger.error('No valid round could be determined.')\n print('No valid round could be determined. Maybe you want '\n 'to play too many rounds for too few teams? '\n 'Usually no match between two teams is repeated.')\n return\n break\n else:\n # remove already matched teams from list of teams\n #for team_a, team_b in [(tournament.get_team_by_name(g.team_a), tournament.get_team_by_name(g.team_b)) for g in rnd.games]:\n for team in [tournament.get_team_by_name(g.team_a) for g in rnd.games] + [tournament.get_team_by_name(g.team_b) for g in rnd.games]:\n try:\n teams.remove(team)\n except ValueError:\n pass\n\n rnd.games += games\n \n else: # special case first round\n game_id = 1\n # free round for team in the middle\n if len(teams) % 2 == 1:\n t = teams[len(teams) // 2]\n t.fl = t.fl + 1\n game = Game(game_id, t, None, tournament.points_fr_win,\n tournament.points_fr_loss)\n game_id += 1\n rnd.games.append(game)\n teams.remove(t)\n\n # split teams in two and play 1. of upper half against 1. of lower\n # half and so on\n try:\n rnd.games += match_teams_half_split(tournament, teams, game_id)\n except NoMatchError:\n __logger.error('Something went horribly wrong.'\n 'No match possible in first round.') \n raise NoMatchError\n\n tournament.rounds.append(rnd)\n\n\ndef match_pool(tournament, pool, hl_candidates, game_id):\n try: \n # if uneven number, one team plays against a team with less\n # wins. 
Take best team from next pool that has not played in a \n # higher pool before.\n if len(pool) % 2 == 1:\n return _match_pool_uneven(tournament, pool, hl_candidates, game_id)\n else:\n return _match_pool_even(tournament, pool, game_id)\n except NoMatchError:\n raise NoMatchError\n\n\ndef _match_pool_uneven(tournament, pool, hl_candidates, game_id):\n \n games = []\n\n # there is an uneven number of teams in the pool\n # go through all candidates\n while len(hl_candidates) > 0:\n hl_candidate = hl_candidates.pop(0)\n\n # find opponent for that team\n candidates_for_hl = sorted(pool, key=lambda x: (x.games_against_hl,\n x.position * -1))\n\n # go through all possible opponents\n while len(candidates_for_hl) > 0:\n team_a = candidates_for_hl.pop(0)\n if get_game_of_teams(tournament, team_a, hl_candidate) is None:\n game = Game(game_id, team_a, hl_candidate)\n game_id += 1\n games.append(game)\n\n tmp_pool = list(pool)\n tmp_pool.remove(team_a)\n # try to match the rest\n try:\n games += _match_pool_even(tournament, tmp_pool, game_id)\n except NoMatchError:\n # that did not work out, so try the next\n game_id -= 1\n games.pop()\n continue\n else:\n # matching worked\n hl_candidate.hl += 1\n team_a.games_against_hl += 1\n return games\n else:\n continue\n \n # no possible matching with all candidates\n raise NoMatchError\n \n\ndef _match_pool_even(tournament, pool, game_id):\n # first try two split\n try:\n return match_teams_half_split(tournament, pool, game_id)\n except NoMatchError:\n # split did not work, so try greedy\n try:\n return match_teams_greedy(tournament, pool, game_id)\n except NoMatchError:\n # that did not work either...\n raise NoMatchError\n\n\ndef match_teams_half_split(tournament, teams, game_id):\n upper_half = teams[:len(teams)//2]\n lower_half = teams[len(teams)//2:]\n\n # permute lower half and try to match them with upper half\n for l in [l for l in permutations(lower_half)]: \n try:\n games = _match_teams_half_split_iteration(tournament, upper_half,\n l, game_id)\n except NoMatchError:\n # go on with next iteration \n continue\n else:\n # there were all valid matches, so we can end the loop by \n # returning all games\n return games\n\n raise NoMatchError\n\n\ndef _match_teams_half_split_iteration(tournament, upper_half, lower_half,\n game_id):\n games = []\n for team_a, team_b in zip(upper_half, lower_half):\n if get_game_of_teams(tournament, team_a, team_b) is None:\n game = Game(game_id, team_a, team_b)\n game_id += 1\n games.append(game)\n else:\n raise NoMatchError\n\n return games\n\n\ndef match_teams_greedy(tournament, teams, game_id):\n games = []\n\n for per_teams in [perm for perm in permutations(teams)]:\n try:\n games = _match_teams_greedy_iteration(tournament, per_teams, \n game_id)\n except NoMatchError:\n continue\n else:\n return games\n \n raise NoMatchError\n\n\ndef _match_teams_greedy_iteration(tournament, teams, game_id):\n games = []\n teams = list(teams)\n while len(teams) > 0:\n team_a = teams.pop(0)\n for team_b in teams:\n if get_game_of_teams(tournament, team_a, team_b) is None:\n game = Game(game_id, team_a, team_b)\n game_id += 1\n games.append(game) \n teams.remove(team_b)\n break\n else: # no break so there was no match\n raise NoMatchError\n\n return games\n\n\ndef _move_hl_team_to_end(candidates, min_games_against_hl):\n if len(candidates) % 2 == 1:\n min_games_against_hl = min(candidates,\n key=attrgetter('games_against_hl'))\n for index, c in enumerate(reversed(candidates)):\n if c.games_against_hl == min_games_against_hl:\n 
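# NOTE: min(..., key=attrgetter('games_against_hl')) two lines up returns a\n                # Team object, so this equality test compares an int attribute with a\n                # Team; the intended value is presumably that team's .games_against_hl.\n                # Also, index comes from enumerate(reversed(candidates)) but pop(index)\n                # acts on the forward ordering.\n                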
candidates.append(candidates.pop(index))\n break\n\n\ndef calculate_standings(tournament):\n # wins and points\n for team in tournament.teams:\n calculate_wins_and_points_for_team(tournament, team)\n\n for team in tournament.teams:\n calculate_bh_for_team(tournament, team)\n\n for team in tournament.teams:\n calculate_fbh_for_team(tournament, team)\n\n for team in tournament.teams:\n calculate_sb_for_team(tournament, team)\n\n for team in tournament.teams:\n calculate_koya_for_team(tournament, team)\n\n calculate_standings_for_all_teams(tournament)\n\n\ndef calculate_wins_and_points_for_team(tournament, team):\n games = get_games_of_team(tournament, team)\n wins, losses, points, points_against, fr = 0, 0, 0, 0, 0\n for g in [game for game in games if game.is_finished()]:\n if g.get_winner() == team.name:\n wins += 1\n else:\n losses += 1\n if g.is_free_round():\n fr += 1\n points += g.get_points(team)\n points_against += g.get_points_against(team)\n team.wins = wins\n team.losses = losses\n team.points = points\n team.points_against = points_against\n team.fl = fr\n\n\ndef calculate_bh_for_team(tournament, team):\n games = get_games_of_team(tournament, team)\n bh = 0\n for g in [game for game in games if game.is_finished()]:\n opponent = tournament.get_team_by_name(g.get_opponent(team))\n if opponent is not None:\n bh += opponent.wins\n team.bh = bh\n\n\ndef calculate_fbh_for_team(tournament, team):\n games = get_games_of_team(tournament, team)\n fbh = 0\n for g in [game for game in games if game.is_finished()]:\n opponent = tournament.get_team_by_name(g.get_opponent(team))\n if opponent is not None:\n fbh += opponent.bh\n team.fbh = fbh\n\n\ndef calculate_sb_for_team(tournament, team):\n games = get_games_of_team(tournament, team)\n team.sb = 0\n for g in [game for game in games if game.is_finished()]:\n if g.get_winner() == team.name:\n opponent = tournament.get_team_by_name(g.get_looser())\n if opponent is not None:\n team.sb += opponent.wins\n\n\ndef calculate_koya_for_team(tournament, team):\n games = get_games_of_team(tournament, team)\n team.koya = 0\n number_of_games = len(tournament.rounds)\n for g in [game for game in games if game.is_finished()]:\n opponent = tournament.get_team_by_name(g.get_opponent(team)) \n if opponent is not None:\n if opponent.wins >= number_of_games / 2:\n team.koya += opponent.wins\n\n\ndef calculate_standings_for_all_teams(tournament):\n # 1. Wins\n # 2. BH\n # 3. FBH\n # 4. SB\n # 5. Koya\n # 6. Direct comparison\n # 7. point difference\n # 8. points\n # 9. number of free rounds\n # 10. use initial performance value: this leads to correct standings for\n # the first round and makes it possible reconstruct standings for later\n # rounds, because no randomness is introduced\n # During the tournament, it creates better matches because better teams\n # are ranked higher and play against teams, which are good in the\n # tournament.\n # Only for the final standings this becomes unfair, but also very\n # unlikely to play any role.\n\n # 1. - 5. and 7. - 10. can be done with simple sorting\n standing = sorted(tournament.teams, key=_sort_teams_for_standing_key,\n reverse=True)\n\n # 6. 
direct comparison\n # direct comparison only, if there are exactly two teams with equal\n # wins, bh, fbh and sb.\n index = 0\n while index < len(standing) - 1:\n # get teams\n team_a = standing[index]\n team_b = standing[index + 1]\n\n # check if the teams are equal in the standing\n if _check_same_position_for_direct_comparison(team_a, team_b):\n # there must not be more than two teams equal\n more_equal_teams = []\n for team in standing[index + 2:]:\n if _check_same_position_for_direct_comparison(team_a, team):\n more_equal_teams.append(team)\n else:\n break\n\n # only two teams --> try to apply direct comparison\n if len(more_equal_teams) == 0:\n game = get_game_of_teams(tournament, team_a, team_b)\n if game is not None and game.is_finished():\n standing[index] = tournament.get_team_by_name(\n game.get_winner())\n standing[index + 1] = tournament.get_team_by_name(\n game.get_looser())\n \n __logger.info('Direct comparison: {} wins against {}.'\n .format(game.get_winner(), game.get_looser()))\n\n index = index + 2\n continue\n # no direct comparison so skip all teams, which are equal here\n # 2 for current teams + all other equal teams\n else:\n index = index + 2 + len(more_equal_teams)\n continue\n\n # check next teams\n else:\n index = index + 1\n\n # set position in team\n for index, team in enumerate(standing):\n team.position = index + 1\n\n\ndef _sort_teams_for_standing_key(team):\n return (team.wins, team.bh, team.fbh, team.sb, team.koya,\n team.points - team.points_against, team.points, team.fl * -1,\n team.performance_value)\n\n\ndef _check_same_position_for_direct_comparison(team_a, team_b):\n return team_a.wins == team_b.wins and team_a.bh == team_b.bh and \\\n team_a.fbh == team_b.fbh and team_a.sb == team_b.sb and \\\n team_a.koya == team_b.koya\n","repo_name":"fabianWrede/tournament-manager","sub_path":"src/controller/swiss_system.py","file_name":"swiss_system.py","file_ext":"py","file_size_in_byte":18147,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"75"} +{"seq_id":"33793543892","text":"def check_board(answer):\n for frame in answer:\n x, y, a = frame\n if a == 0:\n if y == 0 or [x - 1, y, 1] in answer or [x, y, 1] in answer or [x, y - 1, 0] in answer:\n continue\n return False\n else:\n if [x, y - 1, 0] in answer or [x + 1, y - 1, 0] in answer or (\n [x - 1, y, 1] in answer and [x + 1, y, 1] in answer):\n continue\n return False\n return True\n\n\ndef solution(n, build_frame):\n answer = []\n\n for frame in build_frame:\n x, y, a, b = frame\n\n if b == 1:\n answer.append([x, y, a])\n if not check_board(answer):\n answer.remove([x, y, a])\n else:\n answer.remove([x, y, a])\n if not check_board(answer):\n answer.append([x, y, a])\n answer.sort()\n return answer\n\n\nprint(solution(5, [[1, 0, 0, 1], [1, 1, 1, 1], [2, 1, 0, 1], [2, 2, 1, 1], [5, 0, 0, 1], [5, 1, 0, 1], [4, 2, 1, 1],\n [3, 2, 1, 1]]))\n\nprint(solution(5, [[0, 0, 0, 1], [2, 0, 0, 1], [4, 0, 0, 1], [0, 1, 1, 1], [1, 1, 1, 1], [2, 1, 1, 1], [3, 1, 1, 1],\n [2, 0, 0, 0], [1, 1, 1, 0], [2, 2, 0, 1]]))\n","repo_name":"hunnam5220/coding_test_study","sub_path":"[나동빈]이것이 코딩 테스트다/09. Solutions/02. Second/02. 
구현/12._*_기둥과 보설치.py","file_name":"12._*_기둥과 보설치.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"33883500265","text":"# #####################################################################################################################\n'''\n\nBuilds CellNET models from the hundred thousand covid19 images from https://www.rxrx.ai/rxrx19\n\n'''\n# #####################################################################################################################\n\n# PyTorch\nfrom torchvision import datasets, models\nimport torch\nfrom torch.utils.data import DataLoader, sampler, random_split, ConcatDataset, Dataset\n\n# Data science tools\nimport numpy \nimport pandas\nimport os\nimport random\nimport sys\nimport statistics\nfrom sklearn.metrics import balanced_accuracy_score, accuracy_score\n\n# local libraries\nfrom models import *\nfrom dataset_prep import *\nfrom augmentations import *\n\n# #####################################################################################################################\n# BASIC CONFIGURATION\n# #####################################################################################################################\n\nbatch_size = 8\nepochs = 2\nDATASET_SIZE = 50000\nlearning_rate = 1e-3\nscoring = \"_\" + str(epochs) + \"epochs_lr\" + str(learning_rate).replace(\"0.\", \"_\") \ndevice = \"cuda\"\n\ncalcStats = False # turn this on if you want to calc mean and stddev of training dataset for normalization in transform\n\n# choose a GPU from the command line arguments\ngpu_num = int(sys.argv[1])\nif cuda.is_available():\n torch.cuda.set_device(gpu_num)\n print(\"starting, using GPU \" + str(gpu_num) + \"...\")\nelse:\n device = \"cpu\"\n print(\"starting, using CPU\")\n\n# #####################################################################################################################\n# DATASET PREP\n# #####################################################################################################################\n\nclass RxRxDataset(Dataset):\n \"\"\" custom class for the covid19 dataset \"\"\"\n def __init__(self, df=None, data_path = '/mnt/fs03/shared/datasets/RxRx19/RxRx19a/images', prep='train', size=224):\n assert df is not None, 'No df'\n self.df = df\n self.data_path = data_path\n\n if prep == 'train':\n self.transform = transforms.Compose([\n RGB(),\n transforms.RandomRotation(degrees=90),\n transforms.ColorJitter(),\n transforms.RandomHorizontalFlip(),\n transforms.RandomVerticalFlip(),\n #Grayscale(),\n transforms.Resize(size=[size,size]),\n transforms.ToTensor(),\n #transforms.Normalize([0.0628, 0.0628, 0.0628], [0.0476, 0.0476, 0.0476])\n transforms.Normalize([0.0613], [0.0471])\n ])\n elif prep == 'test':\n self.transform = transforms.Compose([\n RGB(),\n #Grayscale(),\n transforms.Resize(size=[size,size]),\n transforms.ToTensor(),\n #transforms.Normalize([0.0628, 0.0628, 0.0628], [0.0476, 0.0476, 0.0476])\n transforms.Normalize([0.0613], [0.0471])\n ]) \n else:\n self.transform = transforms.Compose([\n RGB(),\n #Grayscale(),\n transforms.Resize(size=[size,size]),\n transforms.ToTensor(),\n ]) \n \n \n def __getitem__(self, idx):\n entry = self.df.iloc[idx]\n file = '{}/{}/Plate{}/{}_s{}_w{}.png'.format(self.data_path, \n entry['experiment'], \n entry['plate'], \n entry['well'], \n entry['site'],\n entry['channel'])\n img = Image.open(file)\n img_tensor = self.transform(img)\n return img_tensor, (entry['channel'] - 1), 
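file\n\n        # NOTE: the GPU guard near the top of this script calls cuda.is_available(),\n        # but only torch is imported, so that is usually spelled torch.cuda.is_available().\n        return img_tensor, (entry['channel'] - 1), 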
file\n\n def __len__(self):\n return len(self.df)\n\ndef checkFile(experiment, plate, well, site, channel):\n \"\"\" see if the file in the dataframe actually exists in the directory using fields from the metadata for this dataset \"\"\"\n data_path = '/mnt/fs03/shared/datasets/RxRx19/RxRx19a/images'\n file = '{}/{}/Plate{}/{}_s{}_w{}.png'.format(data_path, \n experiment,\n plate,\n well, \n site,\n channel)\n if os.path.exists(file):\n return 1\n else:\n print(\"missing \", file)\n return 0\n\n# open the dataframe of covid19 metadata\ndf = pandas.read_csv(\"./metadata.csv\")\n#df = df.sample(frac=0.5)\n\n# convert the dataframe into a dataframe with metadata where each image of five channels becomes five images of a \n# single channel; this will be what we predict (which channel the image comes from). Hopefully useful for transfer \n# learning from B&W datasets\nexperiment = list(df['experiment'])\nplate = list(df['plate'])\nwell = list(df['well'])\nsite = list(df['site'])\nchannels = []\nexperiments = []\nplates = []\nwells = []\nsites = []\nfor i in range(len(df)):\n channels.extend([1, 2, 3, 4, 5])\n experiments.extend([experiment[i]]*5)\n plates.extend([plate[i]]*5)\n wells.extend([well[i]]*5)\n sites.extend([site[i]]*5)\n\n# create a dataframe of images that exist, after checking if they are in the dataset (not all the metadata was valid)\n#df = pandas.DataFrame({'experiment':experiments, 'plate':plates, 'well':wells, 'site':sites, 'channel':channels})\n#df['file_present'] = df[['experiment', 'plate', 'well', 'site', 'channel']].apply(lambda x: checkFile(*x), axis=1)\n#df = df[df['file_present'] == 1]\n#df.to_csv(\"valid_covid19_images.csv\")\ndf = pandas.read_csv(\"valid_covid19_images.csv\")\ndf = df.sample(n = DATASET_SIZE)\nprint(\"length of DataFrame: \", len(df))\n\ndef group(x):\n \"\"\" set the test-train group for each image \"\"\"\n rand = random.randint(1, 10)\n if x == 10:\n return \"holdout\"\n elif x == 9:\n return \"eval\"\n else:\n return \"train\"\ndf['group'] = df['plate'].apply(lambda x: group(x))\n\ndf_eval = df[df['group'] == 'eval']\ndf_holdout = df[df['group'] == 'holdout']\ndf_train = df[df['group'] == 'train']\n\n# create the datasets for training, test, and holdout\nprint(\"creating training dataset...\", len(df_train))\ntrain_dataset = RxRxDataset(df=df_train, prep='train')\nprint(\"creating eval dataset...\", len(df_eval))\neval_dataset = RxRxDataset(df=df_eval, prep='test')\nprint(\"creating holdout dataset...\", len(df_holdout))\nholdout_dataset = RxRxDataset(df=df_holdout, prep='test')\n\n# find the mean and stddev for the training data, and quit, so these can be manually copied into the config file\nif calcStats:\n print('calculating stats')\n loader = DataLoader(RxRxDataset(df=df_train, prep='stats'), batch_size=batch_size, num_workers=0, shuffle=False)\n getMeanStddev(loader)\n\n# Dataloader iterators; each should use certain plates only\ndataloaders = {}\ndataloaders['train'] = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, drop_last=True)\ndataloaders['eval'] = DataLoader(eval_dataset, batch_size=batch_size, shuffle=False)\ndataloaders['holdout'] = DataLoader(holdout_dataset, batch_size=batch_size, shuffle=False)\n\n# train and test the model\nprint(\"training model...\")\n#neurons = CNNGreyModel(n_classes=5, learning_rate=learning_rate)\n#neurons = ResNet18ModelAllLayers(n_classes=4, learning_rate=learning_rate)\n#neurons = Vgg19OneChannelModelAllLayers(n_classes=5, learning_rate=learning_rate, pretrained=False)\nneurons = 
Vgg19ThreeChannelModelAllLayers(n_classes=5, learning_rate=learning_rate, pretrained=True)\n#neurons.model.load_state_dict(torch.load(\"./cellnet_vgg19_rotations_covid_noImageNet.torch\"))\nmodel_options = {'name':'cellnet_Vgg19_3channel_rotations_covid_withImageNet' + str(DATASET_SIZE) + 'samples'}\nmodel_options['file_label'] = scoring\nneurons.train(dataloaders['train'], dataloaders['eval'], epochs, device, model_options)\nneurons.test(dataloaders['eval'], device, model_options, None, \"test\")\n\n# test the model on the global holdout\nprint(\"testing model on holdout...\")\nall_preds, all_targets, confidences, paths = neurons.test(dataloaders['holdout'], device, model_options, None, \"holdout\")\nprint(\"Weighted accuracy holdout: \" + str(weighted_accuracy(all_preds, all_targets)))\n\n'''\n0.995: CNNGrey\n0.96: vgg19Grey_wImagenet\n0.997: vgg19Grey_withoutImagenet\n0.992: vgg19RGB_withImagenet\n0.985: vgg19RGB_withoutImagenet\n'''\n\n\n\n\n\n","repo_name":"IQTLabs/BioNIC","sub_path":"cellnet_transfer.py","file_name":"cellnet_transfer.py","file_ext":"py","file_size_in_byte":8634,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"75"} +{"seq_id":"41177088177","text":"import numpy as np\nfrom multiprocess import Pool\n\nfrom remu import binning, likelihood, plotting\n\npool = Pool(8)\nlikelihood.mapper = pool.map\n\nwith open(\"../01/reco-binning.yml\") as f:\n reco_binning = binning.yaml.full_load(f)\nwith open(\"truth-binning.yml\") as f:\n truth_binning = binning.yaml.full_load(f)\n\nreco_binning.fill_from_csv_file(\"real_data.txt\")\ndata = reco_binning.get_entries_as_ndarray()\ndata_model = likelihood.PoissonData(data)\n\nresponse_matrix = \"response_matrix.npz\"\nmatrix_predictor = likelihood.ResponseMatrixPredictor(response_matrix)\n\ncalc = likelihood.LikelihoodCalculator(data_model, matrix_predictor)\nmaxi = likelihood.BasinHoppingMaximizer()\n\nimport numpy.lib.recfunctions as rfn\n\n\ndef set_signal(data):\n return rfn.append_fields(data, \"event_type\", np.full_like(data[\"true_x\"], 1.0))\n\n\ndef set_bg(data):\n return rfn.append_fields(data, \"event_type\", np.full_like(data[\"true_x\"], 0.0))\n\n\ntruth_binning.fill_from_csv_file(\"../00/modelA_truth.txt\", cut_function=set_signal)\nmodelA = truth_binning.get_values_as_ndarray()\nmodelA /= np.sum(modelA)\n\ntruth_binning.reset()\ntruth_binning.fill_from_csv_file(\"../00/modelB_truth.txt\", cut_function=set_signal)\nmodelB = truth_binning.get_values_as_ndarray()\nmodelB /= np.sum(modelB)\n\ntruth_binning.reset()\ntruth_binning.fill_from_csv_file(\"bg_truth.txt\", cut_function=set_bg)\nbg = truth_binning.get_values_as_ndarray()\nbg /= np.sum(bg)\n\ntruth_binning.reset()\nnoise = truth_binning.get_values_as_ndarray()\nnoise[0] = 1.0\n\nmodelA_only = likelihood.TemplatePredictor([modelA])\ncalcA_only = calc.compose(modelA_only)\n\nretA_only = maxi(calcA_only)\nwith open(\"modelA_only_fit.txt\", \"w\") as f:\n print(retA_only, file=f)\n\ntestA_only = likelihood.HypothesisTester(calcA_only)\nwith open(\"modelA_only_gof.txt\", \"w\") as f:\n print(testA_only.likelihood_p_value(retA_only.x), file=f)\n\nwith open(\"modelA_only_p_value.txt\", \"w\") as f:\n print(testA_only.max_likelihood_p_value(), file=f)\n\nmodelA_bg = likelihood.TemplatePredictor([noise, bg, modelA])\ncalcA_bg = calc.compose(modelA_bg)\n\nretA_bg = maxi(calcA_bg)\nwith open(\"modelA_bg_fit.txt\", \"w\") as f:\n print(retA_bg, file=f)\n\ntestA_bg = likelihood.HypothesisTester(calcA_bg)\nwith open(\"modelA_bg_gof.txt\", 
\"w\") as f:\n print(testA_bg.likelihood_p_value(retA_bg.x), file=f)\n\nwith open(\"modelA_bg_p_value.txt\", \"w\") as f:\n print(testA_bg.max_likelihood_p_value(), file=f)\n\nmodelB_only = likelihood.TemplatePredictor([modelB])\ncalcB_only = calc.compose(modelB_only)\n\nretB_only = maxi(calcB_only)\nwith open(\"modelB_only_fit.txt\", \"w\") as f:\n print(retB_only, file=f)\n\ntestB_only = likelihood.HypothesisTester(calcB_only)\nwith open(\"modelB_only_gof.txt\", \"w\") as f:\n print(testB_only.likelihood_p_value(retB_only.x), file=f)\n\nwith open(\"modelB_only_p_value.txt\", \"w\") as f:\n print(testB_only.max_likelihood_p_value(), file=f)\n\nmodelB_bg = likelihood.TemplatePredictor([noise, bg, modelB])\ncalcB_bg = calc.compose(modelB_bg)\n\nretB_bg = maxi(calcB_bg)\nwith open(\"modelB_bg_fit.txt\", \"w\") as f:\n print(retB_bg, file=f)\n\ntestB_bg = likelihood.HypothesisTester(calcB_bg)\nwith open(\"modelB_bg_gof.txt\", \"w\") as f:\n print(testB_bg.likelihood_p_value(retB_bg.x), file=f)\n\nwith open(\"modelB_bg_p_value.txt\", \"w\") as f:\n print(testB_bg.max_likelihood_p_value(), file=f)\n\npltr = plotting.get_plotter(reco_binning)\nmodelA_reco, modelA_weights = calcA_only.predictor(retA_only.x)\nmodelB_reco, modelB_weights = calcB_only.predictor(retB_only.x)\nmodelA_bg_reco, modelA_bg_weights = calcA_bg.predictor(retA_bg.x)\nmodelB_bg_reco, modelB_bg_weights = calcB_bg.predictor(retB_bg.x)\npltr.plot_array(\n modelA_reco, label=\"model A only\", stack_function=0.68, hatch=r\"//\", edgecolor=\"C1\"\n)\npltr.plot_array(\n modelA_bg_reco,\n label=\"model A + bg\",\n stack_function=0.68,\n hatch=r\"*\",\n edgecolor=\"C1\",\n)\npltr.plot_array(\n modelB_reco, label=\"model B only\", stack_function=0.68, hatch=r\"\\\\\", edgecolor=\"C2\"\n)\npltr.plot_array(\n modelB_bg_reco,\n label=\"model B + bg\",\n stack_function=0.68,\n hatch=r\"O\",\n edgecolor=\"C2\",\n)\npltr.plot_entries(edgecolor=\"C0\", label=\"data\", hatch=None, linewidth=2.0)\npltr.legend()\npltr.savefig(\"reco-comparison.png\")\n\npltr = plotting.get_plotter(truth_binning)\npltr.plot_array(\n modelA_only(retA_only.x)[0],\n label=\"model A only\",\n hatch=r\"//\",\n edgecolor=\"C1\",\n density=False,\n)\npltr.plot_array(\n modelA_bg(retA_bg.x)[0],\n label=\"model A + bg\",\n hatch=r\"*\",\n edgecolor=\"C1\",\n density=False,\n)\npltr.plot_array(\n modelB_only(retB_only.x)[0],\n label=\"model B only\",\n hatch=r\"\\\\\",\n edgecolor=\"C2\",\n density=False,\n)\npltr.plot_array(\n modelB_bg(retB_bg.x)[0],\n label=\"model B + bg\",\n hatch=r\"O\",\n edgecolor=\"C2\",\n density=False,\n)\npltr.legend(loc=\"upper left\")\npltr.savefig(\"truth-comparison.png\")\n\ndel pltr\nlikelihood.mapper = map\ndel pool\n","repo_name":"ast0815/remu","sub_path":"docs/examples/05/fit_models.py","file_name":"fit_models.py","file_ext":"py","file_size_in_byte":4878,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"75"} +{"seq_id":"35000520069","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport pickle\nimport glob\n\n'''\nCode for processing data from correlation experiments (correlation_experiment.py)\n'''\n\ndef compute_correlation(param, stability):\n PSW = sum(stability==True)/len(stability) # proportion stable webs\n num = sum(param[stability==True])-PSW*sum(param)\n denom = len(stability)*np.std(param)*np.std(stability)\n return np.where(denom < 1e-10, 0, num/denom)\n#\nfolder = 'Data//Correlation_15//raw_data//'\nfiles = glob.glob(folder + 'corr_data' + 
'_*')\nframes = []\nfor file in files:\n with open(file, 'rb') as f:\n df = pickle.load(f)\n frames.append(df)\n\ndata = pd.concat(frames, ignore_index = True)\n\n#add columns to dataframe for different versions of strategy parameters\nF = np.stack(data['F'].values)\nF_p = np.zeros(np.shape(F))\nF_p[F>0] = F[F>0]\ndata['F_p'] = F_p.tolist()\nF_n = np.zeros(np.shape(F))\nF_n[F<0] = np.abs(F[F<0])\ndata['F_n'] = F_n.tolist()\ndata['F_abs'] = np.abs(F).tolist()\n\nH = np.stack(data['H'].values)\nH_p = np.zeros(np.shape(H))\nH_p[H>0] = H[H>0]\ndata['H_p'] = H_p.tolist()\nH_n = np.zeros(np.shape(H))\nH_n[H<0] = np.abs(H[H<0])\ndata['H_n'] = H_n.tolist()\ndata['H_abs'] = np.abs(H).tolist()\n\nW = np.stack(data['W'].values)\nW_p = np.zeros(np.shape(W))\nW_p[W>0] = W[W>0]\ndata['W_p'] = W_p.tolist()\nW_n = np.zeros(np.shape(W))\nW_n[W<0] = np.abs(W[W<0])\ndata['W_n'] = W_n.tolist()\ndata['W_abs'] = np.abs(W).tolist()\n\nK = np.stack(data['K_p'].values)\nK_p = np.zeros(np.shape(K))\nK_p[K>0] = K[K>0]\ndata['K_plus'] = K_p.tolist()\nK_n = np.zeros(np.shape(K))\nK_n[K<0] = np.abs(K[K<0])\ndata['K_n'] = K_n.tolist()\ndata['K_abs'] = np.abs(K).tolist()\n\n# dataframes to save the aggregated values of the parameters\nmean_values = pd.DataFrame(columns = data.columns)\nvar_values = pd.DataFrame(columns = data.columns)\n\ncorrelation_df = pd.DataFrame(index = list(data.columns[:35]) + list(data.columns[36:]),\n columns = ['Mean Correlation', 'St Dev Correlation', 'Mean CI', 'St Dev CI'])\n\n# Go through each parameter (the columns of the dataframe) and aggregate the parameter values in each experiment\n# (by averaging or finding the standard deviation across actors) and then compute the correlation of the aggregated\n# parameter values with stability\nfor column in list(data.columns[:35]) + list(data.columns[36:]): #exclude the stability column\n param = np.stack(data[column].values)\n axes = np.arange(len(np.shape(param)))[1:] # shape is # of points x dimensions of the parameter itself\n param_averaged = np.mean(param, axis = tuple(axes))\n mean_values[column] = param_averaged\n # 1d case\n if len(np.shape(np.squeeze(param))) == 2: # usually 1xN\n param_var = np.std(np.squeeze(param), axis=1)\n var_values[column] = param_var\n # 2d case\n elif len(np.shape(np.squeeze(param))) == 3: # Either NxM or NxN\n #param_var = np.std(np.mean(np.squeeze(param),axis=2),axis=1)\n param_var = np.std(np.mean(np.squeeze(param),axis=1),axis=1)\n var_values[column] = param_var\n # 3d case (dg_dF, dp_dH, F, H)\n elif len(np.shape(np.squeeze(param))) == 4: # NxMxN\n #param_var = np.std(np.mean(np.squeeze(param),axis=(1,2)),axis=1)\n param_var = np.std(np.mean(np.squeeze(param),axis=(2,3)),axis=1)\n var_values[column] = param_var\n else:\n # scalar\n param_var = np.zeros(len(param))\n var_values[column] = param_var\n stability = data['stability'].values\n \n if column == ('de_dg' or 'dg_dy' or 'dp_dy' or 'da_dp'): # unlike other parameters, these are MxN so need to recalculate\n param_var = np.std(np.mean(np.squeeze(param),axis=1),axis=1)\n # compute correlation of parameter with stability\n correlation_df['Mean Correlation'][column] = compute_correlation(param_averaged, stability)\n correlation_df['St Dev Correlation'][column] = compute_correlation(param_var, stability)\n\n # bootstrap 95% confidence intervals for correlation of avg\n param = mean_values[column].values\n num_points = len(param)\n num_samples = 100\n sample_indices = np.random.randint(0,num_points,(num_points*num_samples)) # get indices for 100 samples of 1e6 
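each\n    # NOTE: the earlier branch \"if column == ('de_dg' or 'dg_dy' or ...)\" evaluates\n    # the or-chain to just 'de_dg', so only that column ever takes it; membership is\n    # spelled \"column in ('de_dg', 'dg_dy', 'dp_dy', 'da_dp')\".\n    # get indices for 100 samples of 1e6 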
each\n sample_params = param[sample_indices]\n sample_stability = stability[sample_indices]\n sample_params = np.reshape(sample_params, (num_samples,num_points))\n sample_stability = np.reshape(sample_stability, (num_samples,num_points))\n sample_corrs = np.zeros(num_samples)\n for i in range(num_samples):\n sample_corrs[i] = compute_correlation(sample_params[i], sample_stability[i])\n sorted_corrs = np.sort(sample_corrs)\n correlation_df['Mean CI'][column] = np.array([sorted_corrs[int(num_samples*0.025)], sorted_corrs[int(num_samples*0.975)]]) # take 5th and 95th percentile of\n\n #bootstrap 95% confidence intervals for correlation of standard deviation\n param = var_values[column].values\n num_points = len(param)\n num_samples = 100\n sample_indices = np.random.randint(0,num_points,(num_points*num_samples)) # get indices for 100 samples of 1e6 each\n sample_params = param[sample_indices]\n sample_stability = stability[sample_indices]\n sample_params = np.reshape(sample_params, (num_samples,num_points))\n sample_stability = np.reshape(sample_stability, (num_samples,num_points))\n sample_corrs = np.zeros(num_samples)\n for i in range(num_samples):\n sample_corrs[i] = compute_correlation(sample_params[i],sample_stability[i])\n sorted_corrs = np.sort(sample_corrs)\n correlation_df['St Dev CI'][column] = np.array([sorted_corrs[int(num_samples*0.025)],sorted_corrs[int(num_samples*0.975)]]) # take 5th and 95th percentile of\n\nwith open('corr_data_15_2', 'wb') as f:\n pickle.dump(correlation_df, f)\n\nmean_corr = correlation_df['Mean Correlation'].dropna().values\nmean_CI = correlation_df['Mean CI'].dropna().values\nmean_CI = np.stack(mean_CI)\nmean_yerr = np.c_[mean_corr-mean_CI[:,0],mean_CI[:,1]-mean_corr ].T # get confidence intervals in the right form for plotting as error bars\n# get indices of parameters that are significant (conf int don't include 0 and corr is greater than 0.01)\nsignificant_ind = (mean_CI[:,0]*mean_CI[:,1] > 0) & (abs(np.where(mean_corr<0,np.max(mean_CI,axis=1),np.min(mean_CI,axis=1))) > 5e-3)\nsignificant_ind_sorted = np.argsort(mean_corr[significant_ind])\nmean_corr_sorted = mean_corr[significant_ind][significant_ind_sorted]\nmean_CI = mean_CI[significant_ind][significant_ind_sorted]\nmean_yerr_sorted = mean_yerr[:,significant_ind][:,significant_ind_sorted]\n\nplt.figure()\nplt.bar(np.arange(len(mean_corr_sorted)), mean_corr_sorted, yerr = mean_yerr_sorted, align='center', alpha=0.5)\nlabels = [r'$\\phi$', r'$\\psi$', r'$\\alpha$', r'$\\beta$', r'$\\hat{\\beta}$',r'$\\tilde{\\beta}$',r'$\\overline{\\beta}$',r'$\\sigma$',r'$\\eta$',r'$\\lambda$',r'$\\bar{\\eta}$',r'$\\mu$',r'$\\dfrac{\\partial s}{\\partial r}$',\n r'$\\dfrac{\\partial e}{\\partial r}$',r'$\\dfrac{\\partial e}{\\partial g}$',r'$\\dfrac{\\partial g}{\\partial F}$',r'$\\dfrac{\\partial g}{\\partial y}$',\n r'$\\dfrac{\\partial p}{\\partial y}$',r'$\\dfrac{\\partial b}{\\partial e}$',r'$\\dfrac{\\partial a}{\\partial r}$',r'$\\dfrac{\\partial q}{\\partial a}$',\n r'$\\dfrac{\\partial a}{\\partial p}$',r'$\\dfrac{\\partial p}{\\partial H}$',r'$\\dfrac{\\partial c}{\\partial W_p}$',r'$\\dfrac{\\partial c}{\\partial w_n}$',\n r'$\\dfrac{\\partial l}{\\partial x}$',r'$\\dfrac{\\partial u}{\\partial x}$',r'$\\dfrac{\\partial i}{\\partial K_p}$',r'$\\dfrac{\\partial i}{\\partial K_n}$',r'$\\dfrac{\\partial i}{\\partial y_p}$',\n r'$\\dfrac{\\partial i}{\\partial y_n}$','|F|','|H|','|W|','|K|']\n\nmean_label_indices = 
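np.array(labels)[significant_ind][significant_ind_sorted]\n\n# The sort-and-index interval used above matches np.percentile(sample_corrs, [2.5, 97.5])\n# up to interpolation at the cut points.\nmean_label_indices = 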
np.array(labels)[significant_ind][significant_ind_sorted]\nplt.xticks(np.arange(len(mean_label_indices)), mean_label_indices)\nplt.title('Correlation of parameters with stability')\nplt.savefig('Correlation_15.svg')\nplt.show()\n\nstd_corr = correlation_df['St Dev Correlation'].dropna()\nstd_CI = correlation_df['St Dev CI'].dropna().values\nstd_CI = np.stack(std_CI)\nstd_yerr = np.c_[std_corr-std_CI[:,0],std_CI[:,1]-std_corr].T # get confidence intervals in the right form for plotting as error bars\nsignificant_ind = (std_CI[:,0]*std_CI[:,1] > 0) & (abs(np.where(std_corr<0,np.max(std_CI,axis=1),np.min(std_CI,axis=1))) > 5e-3)\nsignificant_ind_sorted = np.argsort(std_corr[significant_ind])\n#std_label_indices = np.concatenate([np.argsort(std_CI[:,0])[:5],np.argsort(std_CI[:,0])[-5:]])\n\nstd_corr_sorted = std_corr.values[significant_ind][significant_ind_sorted]\nstd_CI = std_CI[significant_ind][significant_ind_sorted]\nstd_yerr = std_yerr[:,significant_ind][:,significant_ind_sorted]\nplt.figure()\nplt.bar(np.arange(len(std_corr_sorted)), std_corr_sorted, yerr = std_yerr, align='center', alpha=0.5)\n\nstd_label_indices = np.array(labels)[significant_ind][significant_ind_sorted]\nplt.xticks(np.arange(len(std_label_indices)), std_label_indices)\nplt.title('Correlation of standard deviation in parameters with stability')\nplt.savefig('Correlation_15_std.svg')\nplt.show()","repo_name":"njmolla/Gen-Modeling-Governance","sub_path":"correlation_plotting.py","file_name":"correlation_plotting.py","file_ext":"py","file_size_in_byte":8871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"11960904103","text":"from flask import Flask, render_template, request\r\nfrom archive_methods import write_marketplace, read_archive\r\nfrom logfile_methods import save_log\r\n\r\n\r\ndef run_web(marketplaces: list):\r\n app = Flask(__name__)\r\n app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0\r\n\r\n @app.route('/')\r\n def index():\r\n marketplaces = read_archive()\r\n return render_template('index.html')\r\n\r\n @app.route('/insertmarketplace')\r\n def insert_market():\r\n marketplaces = read_archive()\r\n return render_template('insert_marketplace.html')\r\n\r\n\r\n @app.route('/writearchive')\r\n def write_archive():\r\n marketplace_name = request.args.get('name_marketplace')\r\n write_marketplace(marketplace_name)\r\n return index()\r\n\r\n\r\n @app.route('/listmarketplaces')\r\n def list_marketplaces():\r\n marketplaces = read_archive()\r\n save_log('web: list_marketplaces')\r\n return render_template('list_marketplace.html', marketplaces = marketplaces)\r\n \r\n\r\n @app.route('/listcategories')\r\n def list_categories():\r\n marketplaces = read_archive()\r\n save_log('web: list_categories')\r\n return render_template('list_categories.html', marketplaces = marketplaces)\r\n\r\n\r\n @app.route('/listcategoriesandsubcategories')\r\n def list_categories_subcategories():\r\n marketplaces = read_archive()\r\n save_log('web: list_categories_subcategories')\r\n return render_template('list_categories_subcateries.html', marketplaces = marketplaces)\r\n \r\n @app.route('/listall')\r\n def list_all():\r\n marketplaces = read_archive()\r\n save_log('web: list_all')\r\n return render_template('list_all.html', marketplaces = marketplaces)\r\n\r\n\r\n 
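# Note: the /writearchive route above mutates state from a GET request; the\r\n    # conventional shape is a POST, e.g.:\r\n    #   @app.route('/writearchive', methods=['POST'])\r\n    #   def write_archive():\r\n    #       marketplace_name = request.form.get('name_marketplace')\r\n    #       ...\r\n    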
app.run()","repo_name":"lucastorresolist/ots-apt4","sub_path":"web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"19143179744","text":"import pygame, sys\r\nfrom pygame.locals import * \r\nimport colors\r\nfrom word_search import WordSearch, WordMatch\r\npygame.init()\r\n\r\nclass Letter:\r\n def __init__(self, string, posx, posy):\r\n self.text = font.render(string, True, colors.BLACK, colors.WHITE)\r\n self.position = (posx, posy)\r\n\r\nword_search = WordSearch('words.txt','wordsearch.txt')\r\n\r\nWINDOWWIDTH, WINDOWHEIGHT = 600, 600\r\nWINDOW_DIMENSIONS = (WINDOWWIDTH, WINDOWHEIGHT)\r\nFPS = 30\r\nfpsClock = pygame.time.Clock()\r\nDISPLAYSURF = pygame.display.set_mode(WINDOW_DIMENSIONS, 0, 32)\r\nfont = pygame.font.Font('Anonymous.ttf', 20)\r\nletters = []\r\nXMARGIN = (WINDOWWIDTH / word_search.length_row) / 3\r\nYMARGIN = (WINDOWHEIGHT / word_search.length) / 3\r\n\r\ndef convert_to_screen_coords(x, y):\r\n return XMARGIN + (WINDOWWIDTH / word_search.length_row) * x, YMARGIN + (WINDOWHEIGHT / word_search.length) * y\r\n\r\nfor num, line in enumerate(word_search.word_search):\r\n for position, letter in enumerate(line):\r\n x, y = convert_to_screen_coords(position, num)\r\n l = Letter(letter, int(x), int(y))\r\n letters.append(l)\r\n\r\nDISPLAYSURF.fill(colors.WHITE)\r\nfor letter in letters:\r\n DISPLAYSURF.blit(letter.text, letter.position)\r\nword_search.draw(DISPLAYSURF, WINDOWWIDTH, WINDOWHEIGHT, XMARGIN, YMARGIN)\r\n\r\ndef main():\r\n while True:\r\n for event in pygame.event.get():\r\n if event.type == QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n fpsClock.tick(FPS)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"Keandre/WordSearchSolverPyGame","sub_path":"src/word_search_gui.py","file_name":"word_search_gui.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"21078820423","text":"\nimport os,sys\n\nimport numpy as np\n\nimport torch\n\nimport librosa,scipy\nfrom glob import glob\n\nfrom network import LSTM\nfrom loss import ConLoss\n\n\nroot_path='./ft_local/data/VoxCeleb1'\n\n# make and save fbank for each reference\npaths=glob(root_path+'/vox1_train_wav/*/*/')\nfor psec in paths[:2000]:\n fw=psec.replace('vox1_train_wav','vox1_train_fbank')\n if not os.path.exists(fw.rsplit('/',2)[0]):\n os.makedirs(fw.rsplit('/',2)[0])\n fs=os.listdir(psec)\n print(fw)\n mels=[]\n for f in fs:\n w=librosa.load(psec+f)\n mel=librosa.feature.melspectrogram(w[0],n_mels=32)\n mels.append(mel)\n np.save(fw[:-1]+'.npy',np.concatenate(mels,1))\n\n\n\n# make dataset\npaths=glob(root_path+'/vox1_train_fbank/*/')\nbatch_size=10\nmel_len=64\nsample_size=1000\ndataset=[]\nfor _ in range(sample_size):\n for p in random.sample(paths,batch_size):\n secs=random.choices(os.listdir(p),k=2)\n batch_a=[]\n for sec in secs:\n sec=np.load(p+sec)\n start_t=random.sample(range(sec.shape[1]-mel_len),k=1)[0]\n batch_a.append(sec[:,start_t:start_t+mel_len])\n dataset.append(batch_a)\ntdataset=torch.utils.data.TensorDataset(\n dataset,shuffle=True\n)\n\n\n# train\nopt=torch.optim.SGD(lstm.parameters,lr=2e-1)\n\nmodel=LSTM(32,128,128,2)\nmodel.zero_grad()\nfor d in tdataset:\n 
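# NOTE: the setup above is schematic -- TensorDataset expects tensors (e.g.\n    # torch.tensor(np.stack(dataset)) wrapped in a DataLoader), and opt is built\n    # from an undefined lstm before the model exists; the usual order is\n    # model = LSTM(...); opt = torch.optim.SGD(model.parameters(), lr=2e-1).\n    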
embs, hidden = model(d[0], None)  # forward pass (the module is bound to model, not lstm)\n    c = ConLoss()\n    loss = c(embs.view(1, 2, 128))  # contrastive loss over the two segments of one speaker\n    loss.backward()\n    opt.step()\n    model.zero_grad()\n","repo_name":"kids/acoustic_engineering","sub_path":"contrastive_voice/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"28636415696","text":"#!/usr/bin/env python3\n# coding=utf-8\n\n\"\"\"\n    Solve Quals 2016 p.A\n    \n    Author: killerrex\n\"\"\"\n\nimport sys\n\n\ndef as_number(fd, kind=int, single=False):\n    \"\"\"\n    Read one line from the file and return it\n    as the required kind numbers\n\n    Args:\n        fd: File descriptor\n        kind: A transformation function like int, float...\n        single: Return a single number\n\n    Returns:\n        The list/single values\n    \"\"\"\n\n    txt = fd.readline().strip()\n    values = [kind(s) for s in txt.split()]\n    if single:\n        if values:\n            return values[0]\n        else:\n            return None\n    else:\n        return values\n\n\n# Brute force\ndef counting(n):\n    \"\"\"\n\n    Args:\n        n: first number\n\n    Returns: The number searched or INSOMNIA\n\n    \"\"\"\n\n    # Only non-stop condition\n    # Otherwise no matter the initial number, the MSD will cover all,\n    # sooner or later\n    if n == 0:\n        return 'INSOMNIA'\n\n    s = set()\n    res = 0\n    while len(s) < 10:\n        res += n\n        s |= set(str(res))\n    return res\n\n\ndef solve(fd):\n    \"\"\"\n    Solve for the values from a file\n\n    Args:\n        fd: File unit\n    \"\"\"\n    # First line is the number of cases\n    total = as_number(fd, single=True)\n\n    for k in range(total):\n        n = as_number(fd, single=True)\n        print(\"Case #{}: {}\".format(k+1, counting(n)))\n\n\ndef start():\n    if len(sys.argv) > 1:\n        with open(sys.argv[1], 'r') as fd:\n            solve(fd)\n    else:\n        solve(sys.stdin)\n\nif __name__ == '__main__':\n    start()\n","repo_name":"DaHuO/Supergraph","sub_path":"codes/CodeJamCrawler/16_0_1_neat/16_0_1_killerrex_problem_a.py","file_name":"16_0_1_killerrex_problem_a.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"11095746305","text":"from typing import List\n\n\nclass Transaction:\n    def __init__(self, name, time, amount, city):\n        self.name = name\n        self.time = int(time)\n        self.amount = int(amount)\n        self.city = city\n\n\nclass Solution:\n    def invalidTransactions(self, transactions: List[str]) -> List[str]:\n        \"\"\"\n        ### [1169. Invalid Transactions](https://leetcode.com/problems/invalid-transactions/)\n\n        A transaction is *possibly invalid* if:\n\n        - the amount exceeds $1000, or;\n        - if it occurs within (and including) 60 minutes of another transaction with the same name in a different city.\n\n        Each transaction string `transactions[i]` consists of comma separated values representing the name, time (in minutes), amount, and city of the transaction.\n\n        Given a list of `transactions`, return a list of transactions that are possibly invalid. You may return the answer in any order.\n\n\n\n        **Example 1:**\n\n        ```\n        Input: transactions = [\"alice,20,800,mtv\",\"alice,50,100,beijing\"]\n        Output: [\"alice,20,800,mtv\",\"alice,50,100,beijing\"]\n        Explanation: The first transaction is invalid because the second transaction occurs within a difference of 60 minutes, have the same name and is in a different city. 
Similarly the second one is invalid too.\n ```\n\n **Example 2:**\n\n ```\n Input: transactions = [\"alice,20,800,mtv\",\"alice,50,1200,mtv\"]\n Output: [\"alice,50,1200,mtv\"]\n ```\n\n **Example 3:**\n\n ```\n Input: transactions = [\"alice,20,800,mtv\",\"bob,50,1200,mtv\"]\n Output: [\"bob,50,1200,mtv\"]\n ```\n\n\n\n **Constraints:**\n\n - `transactions.length <= 1000`\n - Each `transactions[i]` takes the form `\"{name},{time},{amount},{city}\"`\n - Each `{name}` and `{city}` consist of lowercase English letters, and have lengths between `1` and `10`.\n - Each `{time}` consist of digits, and represent an integer between `0` and `1000`.\n - Each `{amount}` consist of digits, and represent an integer between `0` and `2000`.\n\n Notes\n -----\n\n References\n ---------\n .. [1] https://leetcode.com/problems/invalid-transactions/discuss/367221/Python-Both-Optimized-O(nlogn)-and-Brute-Force-O(n2)-Solutions-with-Explanations\n\n \"\"\"\n transactions = [Transaction(*t.split(',')) for t in transactions]\n transactions = sorted(transactions, key=lambda x: x.time)\n from collections import defaultdict\n d = defaultdict(list)\n ret = []\n for i, t in enumerate(transactions):\n d[t.name].append(i)\n\n for name, indexes in d.items():\n left = right = 0\n for i in indexes:\n t = transactions[i]\n if t.amount > 1000:\n ret.append(f'{t.name},{t.time},{t.amount},{t.city}')\n continue\n while left <= len(indexes)-2 and transactions[indexes[left]].time < t.time - 60:\n left += 1\n while right <= len(indexes)-2 and transactions[indexes[right+1]].time <= t.time + 60:\n right += 1\n for j in range(left, right+1):\n if transactions[indexes[j]].city != t.city:\n ret.append(f'{t.name},{t.time},{t.amount},{t.city}')\n break\n return ret\n\n","repo_name":"daydaychallenge/leetcode-python","sub_path":"01169/invalid_transactions.py","file_name":"invalid_transactions.py","file_ext":"py","file_size_in_byte":3404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"73137218162","text":"# @Time : 2023/03/11 16:23\n# @Author : fyq\n# @File : db_search_cache.py\n# @Software: PyCharm\n\n__author__ = 'fyq'\n\nfrom abc import ABCMeta, abstractmethod\nfrom threading import get_ident\nfrom typing import Optional, List, Any\n\nimport munch\nfrom loguru import logger\n\nfrom search import dm\n\nimport pandas as pd\n\nfrom search.core.progress import Progress\nfrom search.core.search_context import SearchContext, SearchBuffer\n\nfrom pyext import RuntimeModule\n\nfrom search.util.bytes import format_bytes\n\n\nclass DBSearchCache(metaclass=ABCMeta):\n\n @abstractmethod\n def get_data(self, search_context: SearchContext) -> Optional[pd.DataFrame]:\n pass\n\n\nclass AbstractDBSearchCache(DBSearchCache):\n\n def get_data(self, search_context: SearchContext, top: bool = False) -> Optional[pd.DataFrame]:\n conn_list = dm.get_connections()\n self.count(search_context=search_context, conn_list=conn_list, top=top)\n data_df = None\n try:\n for search_cache_index, search_buffer in enumerate(search_context.search_buffer_list):\n tmp_tablename = search_buffer.tmp_tablename.format(get_ident())\n sql_list: List[str] = []\n tmp_sql_list: List[str] = []\n select_expression = search_buffer.search_sql.select_expression\n if search_cache_index == 0:\n if top:\n select_expression = f\"{select_expression} top {search_context.search.top}\"\n\n where_expression = search_buffer.where_expression\n where_expression = where_expression. 
\\\n format(*[get_ident() for _ in range(0, search_buffer.where_expression.count(\"{}\"))])\n sql_list.append(\"select\")\n sql_list.append(select_expression)\n sql_list.append(\",\".join(search_buffer.field_list))\n sql_list.append(\"from\")\n sql_list.append(search_buffer.search_sql.from_expression)\n if len(where_expression) > 0:\n sql_list.append(\"where\")\n sql_list.append(where_expression)\n sql_list.append(search_buffer.search_sql.other_expression)\n\n tmp_sql_list.append(\"select\")\n tmp_sql_list.append(select_expression)\n tmp_sql_list.append(\",\".join(search_buffer.tmp_fields) + f\" into {tmp_tablename}\")\n tmp_sql_list.append(\"from\")\n tmp_sql_list.append(search_buffer.search_sql.from_expression)\n if len(where_expression) > 0:\n tmp_sql_list.append(\"where\")\n tmp_sql_list.append(where_expression)\n tmp_sql_list.append(search_buffer.search_sql.other_expression)\n\n sql = \" \".join(sql_list)\n tmp_sql = \" \".join(tmp_sql_list)\n\n data = []\n for conn in conn_list:\n res = self.exec(conn=conn,\n search_context=search_context,\n search_buffer=search_buffer,\n sql=sql,\n tmp_sql=tmp_sql)\n if res:\n data.extend(res)\n if search_cache_index == 0 and top:\n break\n\n if len(data) > 0:\n new_df = pd.DataFrame(data=data, columns=search_buffer.select_fields)\n del data\n if data_df is None:\n data_df = new_df\n else:\n data_df = data_df.merge(right=new_df,\n how=search_buffer.search_sql.how,\n on=search_buffer.join_fields)\n del new_df\n logger.info(f\"内存:{format_bytes(data_df.memory_usage(deep=True).sum())}\")\n finally:\n [conn.close() for conn in conn_list]\n\n return self.exec_new_df(search_context=search_context,\n df=data_df)\n\n @abstractmethod\n def count(self, conn_list: List, search_context: SearchContext, top: bool):\n pass\n\n @abstractmethod\n def exec(self, search_context: SearchContext, search_buffer: munch.Munch, conn, sql: str, tmp_sql: str) -> Any:\n pass\n\n @abstractmethod\n def exec_new_df(self, search_context: SearchContext, df: pd.DataFrame) -> pd.DataFrame:\n pass\n\n\nclass DefaultDBCache(AbstractDBSearchCache):\n\n def exec_new_df(self, search_context: SearchContext, df: pd.DataFrame) -> pd.DataFrame:\n search_field_dict = {search_field.name: search_field\n for search_field in search_context.search_field_list}\n new_df = pd.DataFrame(columns=search_context.search_md5.search_original_field_list)\n for field in search_context.search_md5.search_original_field_list:\n if field in search_field_dict:\n search_field = search_field_dict[field]\n try:\n if search_field.rule and search_field.rule.startswith((\"def\", \"import\", \"from\")):\n md = RuntimeModule.from_string('a', search_field.rule)\n find = False\n for v in md.__dict__.values():\n if callable(v):\n new_df[search_field.name] = df.apply(v, axis=1)\n find = True\n break\n if not find:\n new_df[search_field.name] = None\n logger.warning(f\"{search_context.search_md5.search_name}-{search_field.name}-rule未发现可执行函数\")\n else:\n if search_field.rule in df:\n new_df[search_field.name] = df[search_field.rule]\n else:\n new_df[search_field.name] = None\n logger.warning(f\"查询结果中未包含列[{search_field.name}]\")\n except Exception as e:\n new_df[search_field.name] = None\n logger.exception(e)\n else:\n new_df[field] = None\n\n return new_df\n\n execs = [\"exec\", \"exec_new_df\"]\n\n def count(self, conn_list: List, search_context: SearchContext, top: bool):\n c = len(conn_list) * len(search_context.search_buffer_list)\n if top:\n return c - len(conn_list) + 2\n else:\n return c + 1\n\n def exec(self, 
search_context: SearchContext, search_buffer: SearchBuffer, conn, sql: str, tmp_sql: str) -> Any:\n cur = conn.cursor()\n try:\n if len(search_buffer.tmp_fields) > 0:\n logger.info(f\"临时表sql:{tmp_sql} 参数:{search_buffer.args}\")\n cur.execute(tmp_sql, tuple(search_buffer.args))\n logger.info(f\"查询表sql:{sql} 参数:{search_buffer.args}\")\n cur.execute(sql, tuple(search_buffer.args))\n return cur.fetchall()\n except Exception as e:\n if hasattr(e, \"args\") and e.args[0] != 208:\n raise e\n else:\n logger.warning(e)\n finally:\n cur.close()\n\n\n@Progress(prefix=\"export\", suffix=\"db\")\nclass DefaultDBExportCache(DefaultDBCache):\n pass\n\n\n@Progress(prefix=\"search\", suffix=\"db\")\nclass DefaultDBSearchCache(DefaultDBCache):\n pass\n","repo_name":"fangyunqing/search","sub_path":"search/core/cache/db_search_cache.py","file_name":"db_search_cache.py","file_ext":"py","file_size_in_byte":7559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"21205078342","text":"from flask import request\nfrom clay import app, config\nimport json\n\nlog = config.get_logger('clay.docs')\n\n\ndef parse_docstring_param(directive, key, value):\n p = {\n 'name': key,\n 'description': value.split('{', 1)[0],\n 'required': False,\n 'dataType': 'string',\n 'type': 'primitive',\n 'allowMultiple': False,\n }\n if '{' in value and '}' in value:\n p.update(json.loads(value[value.find('{'):value.find('}')]))\n\n if directive == 'json':\n directive = 'body'\n p['type'] = 'complex'\n\n if directive in ('query', 'body', 'path', 'form'):\n p['paramType'] = directive\n elif directive == 'reqheader':\n p['paramType'] = 'header'\n else:\n log.warning('Ignoring unknown docstring param %s', directive)\n return\n return p\n\n\ndef parse_docstring(docstring):\n '''\n Turns autodoc http dialect docstrings into swagger documentation\n '''\n if not docstring:\n return\n \n params = []\n responses = []\n stripped = ''\n rtype = None\n for line in docstring.split('\\n'):\n line = line.lstrip('\\t ')\n if not line.startswith(':'):\n stripped += line + '\\n
'\n continue\n \n directive, value = line.split(':', 2)[1:]\n value = value.strip('\\t ')\n\n directive = directive.split(' ', 1)\n if len(directive) > 1:\n directive, key = directive\n else:\n directive = directive[0]\n key = None\n\n if directive in ('json', 'body', 'query', 'path', 'form', 'reqheader'):\n param = parse_docstring_param(directive, key, value)\n if param:\n params.append(param)\n continue\n\n if directive == 'status':\n responses.append({\n 'code': int(key),\n 'message': value,\n })\n continue\n \n if directive == 'rtype':\n rtype = value\n log.warning('Ignoring unknown docstring param %s', directive)\n\n return (params, responses, stripped, rtype)\n\n\ndef get_model(modelspec):\n module, name = modelspec.rsplit('.', 1)\n module = __import__(module)\n return {\n 'id': modelspec,\n 'properties': getattr(module, name),\n }\n\n\n@app.route('/_docs', methods=['GET'])\ndef clay_docs():\n '''\n Returns a JSON document describing this service's API\n\n Endpoints are inferred from routes registered with Flask and the docstrings\n bound to those methods.\n\n Dialect documentation http://pythonhosted.org/sphinxcontrib-httpdomain/\n Swagger documentation https://github.com/wordnik/swagger-core/wiki\n\n :status 200: Generated swagger documentation\n :status 500: Something went horribly wrong\n '''\n headers = {'Content-type': 'application/json'}\n response = {\n 'apiVersion': '0.2',\n 'swaggerVersion': '1.2',\n 'basePath': config.get('docs.base_path', None) or request.url_root.rstrip('/'),\n 'resourcePath': '/',\n 'apis': [],\n 'models': {},\n }\n\n for rule in app.url_map.iter_rules():\n if rule.endpoint == 'static':\n continue\n\n api = {\n 'path': rule.rule,\n 'operations': [],\n }\n view_func = app.view_functions[rule.endpoint]\n if view_func.__doc__:\n docstring = view_func.__doc__.strip('\\r\\n\\t ')\n params, responses, stripped_docstring, rtype = parse_docstring(docstring)\n\n shortdoc = [x for x in docstring.split('\\n') if x]\n if not shortdoc:\n shortdoc = 'Undocumented'\n else:\n shortdoc = shortdoc[0]\n else:\n shortdoc = 'Undocumented'\n params = []\n responses = []\n stripped_docstring = shortdoc\n rtype = None\n\n\n for http_method in rule.methods:\n if http_method in ('HEAD', 'OPTIONS'):\n continue\n doc = {\n 'method': http_method,\n 'nickname': view_func.__name__,\n 'summary': shortdoc,\n 'notes': stripped_docstring,\n 'parameters': params,\n 'responseMessage': responses,\n }\n if rtype:\n doc['responseClass'] = rtype\n model = get_model(rtype)\n response['models'][rtype] = model\n\n api['operations'].append(doc)\n response['apis'].append(api)\n\n return (json.dumps(response, indent=2), 200, headers)\n","repo_name":"uber/clay","sub_path":"clay/docs.py","file_name":"docs.py","file_ext":"py","file_size_in_byte":4547,"program_lang":"python","lang":"en","doc_type":"code","stars":439,"dataset":"github-code","pt":"75"} +{"seq_id":"20701378454","text":"from art import logo\nimport random\nfrom replit import clear\n\nnumbers = []\nfor n in range(1, 101):\n numbers.append(n)\n \ndef choose_number():\n return random.choice(numbers)\n\nhidden_number = choose_number()\n\ndef difficulty():\n ask_dif = input(\"Type 'easy' or 'hard' as gamemode: \")\n if ask_dif == 'easy':\n return 10\n elif ask_dif == 'hard':\n return 5\n\ndef guess_check(guess):\n if guess == hidden_number:\n print(\"You won!\")\n print(f\"The number was: {hidden_number}\")\n return 0\n elif guess > hidden_number:\n print(\"Too high!\")\n return 1\n elif guess < hidden_number:\n print(\"Too low!\")\n return 
1\n\ndef play_game():\n lives = difficulty()\n\n while lives > 0 :\n \n guess = int(input(\"guess a number between 1 and 100:\\n\"))\n\n if guess > 100 or guess < 0:\n print(\"Can't process numbers higher than 100 or lower than 0.\")\n else:\n result = guess_check(guess)\n if result == 0:\n lives = 0\n elif result == 1:\n lives -= 1\n print(f\"You have {lives} lives left...\")\n\n\ndef start_game():\n print(logo)\n if input(\"Welcome to the Number Guessing Game! Type 'y' to play:\\n\") == 'y':\n clear()\n play_game()\n else:\n clear()\n start_game()\nstart_game()","repo_name":"schizoidman1/number-guessing-game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"70864526001","text":"#import tensorflow as tf\n#from text.symbols import kor_symbols as symbols\n\ndef create_hparams(hparams_string=None, verbose=False):\n \"\"\"Create model hyperparameters. Parse nondefault from given string.\"\"\"\n\n hparams = {\n 'E': 512,\n 'ref_enc_filters': [32, 32, 64, 64, 128, 128],\n 'ref_enc_size': [3, 3],\n 'ref_enc_strides': [2, 2],\n 'ref_enc_pad': [1, 1],\n 'ref_enc_gru_size': 512 // 2,\n\n # Style Token Layer\n 'token_num': 56,\n 'num_heads': 8,\n 'n_mels': 80,\n }\n\n \n return hparams\n","repo_name":"v-nhandt21/MediaEval2020","sub_path":"SourceCode/EfficentModel/hparams.py","file_name":"hparams.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"32170707322","text":"i1 = input()\r\ni2 = input()\r\narr1 = i1.split()\r\narr2 = i2.split()\r\na,b = int(arr1[0]),str(arr1[1])\r\nc,d = int(arr2[0]),str(arr2[1])\r\nif (b =='M' and a >=19) or (d == 'M' and c >=19):\r\n print(1)\r\nelse:\r\n print(0)","repo_name":"Leewuc/codetree-TILs","sub_path":"231115/두 사람/two-person.py","file_name":"two-person.py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"31165306291","text":"import sys\nfrom collections import Counter\nn = int(input())\n\nbooks = []\nfor _ in range(n):\n books.append(sys.stdin.readline().rstrip())\n\nbooks.sort()\ncount_books = Counter(books)\nmax_book = count_books.most_common(n=1)\n\nprint(max_book[0][0])","repo_name":"hee2425/codingTestWithPython","sub_path":"문자열/백준/베스트셀러.py","file_name":"베스트셀러.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"6477546076","text":"# Task 1\na = 1\nb = 10\nprint(\"You entered the following variables - \", a,b)\nchislo1 = int(input(\"Enter the first number \"))\nstroka1 = input(\"Enter the first string \")\nchislo2 = int(input(\"Enter the second number \"))\nstroka2 = input(\"Enter the second string \")\nprint(f'You entered the following values - {chislo1}, {stroka1}, {chislo2}, {stroka2}')\n\n# Task 2\nvremja = int(input(\"Enter the number of seconds \"))\nchasi = vremja//3600\nminuti = (vremja - chasi*3600)//60\nsekundi = vremja - (chasi*3600 + minuti*60)\nprint(f'The time entered: {chasi} h, {minuti} min, {sekundi} s')\n\n# Task 3\nn = int(input(\"Enter a number n - \"))\nsymma = (n + int(str(n) + str(n)) + int(str(n) + str(n) + str(n)))\nprint(symma)\n\n# Task 4: print the largest digit of a positive integer\ni = int(input(\"Enter a positive integer - \"))\nr = -1\nwhile i > 0:\n d = i % 10\n i //= 10\n if d > r:\n r = d\nprint(r)\n\n
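# A divmod sketch of the same split as Task 2 above (equivalent, just shorter):\n# chasi, rest = divmod(vremja, 3600)\n# minuti, sekundi = divmod(rest, 60)\n\n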
int(input(\"Введите значение выручки фирмы - \"))\nizderzki = int(input(\"Введите значение издержек фирмы - \"))\nif viruchka > izderzki:\n print(\"Прибыль, выручка больше издержек\")\nelse:\n print(\"Убыток, издержки больше выручки\")\n\n# Задание 6\nviruchka = int(input(\"Введите значение выручки фирмы, тыс. руб. - \"))\nizderzki = int(input(\"Введите значение издержек фирмы, тыс. руб. - \"))\npribil = viruchka - izderzki\nif viruchka > izderzki:\n print(f\"Прибыль, выручка больше издержек. Рентабельность выручки составила в процентах - {(pribil/viruchka) * 100}\")\n rabochie = int(input(\"Введите количество работников фирмы - \"))\n print(f\"Прибыль в расчете на одного сотрудника составляет, тыс. руб. - {pribil/rabochie}\")\nelif viruchka == izderzki:\n print(\"Делайте планирование, прибыли нет!\")\nelse:\n print(\"Убыток, издержки больше выручки\")\n\n\n# Задание 7\na = int(input(\"Введите сколько километров Вы пробежали - \"))\nb = int(input(\"Введите километраж, пробежать который Вы бы хотели - \"))\ndays = 1\nwhile a < b:\n a = a + 0.1*a\n days = days + 1\nprint(f\"Dы достигнете желаемого на день\", days)","repo_name":"AlexeyShlupakov/pythonProject1-1","sub_path":"lesson01.py","file_name":"lesson01.py","file_ext":"py","file_size_in_byte":2689,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"18830695960","text":"\"\"\"empty message\n\nRevision ID: 5edd85e6c999\nRevises: \nCreate Date: 2020-08-18 00:36:54.735309\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '5edd85e6c999'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('members')\n op.drop_table('packages')\n op.drop_table('packag')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('packag',\n sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),\n sa.Column('name', sa.VARCHAR(length=80), autoincrement=False, nullable=False),\n sa.Column('duration', sa.VARCHAR(), autoincrement=False, nullable=False),\n sa.Column('price', sa.INTEGER(), autoincrement=False, nullable=False),\n sa.PrimaryKeyConstraint('id', name='packag_pkey'),\n sa.UniqueConstraint('name', name='packag_name_key')\n )\n op.create_table('packages',\n sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),\n sa.Column('name', sa.VARCHAR(length=80), autoincrement=False, nullable=False),\n sa.Column('duration', sa.VARCHAR(), autoincrement=False, nullable=False),\n sa.Column('price', sa.INTEGER(), autoincrement=False, nullable=False),\n sa.PrimaryKeyConstraint('id', name='packages_pkey'),\n sa.UniqueConstraint('name', name='packages_name_key')\n )\n op.create_table('members',\n sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),\n sa.Column('name', sa.VARCHAR(), autoincrement=False, nullable=False),\n sa.Column('gender', sa.VARCHAR(), autoincrement=False, nullable=False),\n sa.Column('phone', sa.INTEGER(), autoincrement=False, nullable=False),\n sa.PrimaryKeyConstraint('id', name='members_pkey')\n )\n # ### end Alembic commands ###\n","repo_name":"fatengh/Activelix01","sub_path":"migrations/versions/5edd85e6c999_.py","file_name":"5edd85e6c999_.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"23895855754","text":"from telebot import *\nfrom bs4 import BeautifulSoup\nfrom requests import *\nimport config\n\nbot = TeleBot(config.TOKEN)\n\n\n# scrapes the CBR rates from banki.ru\ndef parse_val():\n url = 'https://www.banki.ru/products/currency/cb/'\n r = get(url, headers=config.HEADERS)\n soup = BeautifulSoup(r.text, 'lxml')\n data = soup.find('tbody')\n data = data.text.replace('\\n\\n\\n\\n\t\t\t\t\t', ';').replace('\\n\t\t\t\t\\n', ' ').replace(\n '\\n\\n\\n',\n ' ').replace(\n '\\n\\n\t\t\t\t\t', ' ').replace(\n ' \t\t\t\t\t\\n\t\t\t\t\t\t\t\t\t\t\t',\n ' ').replace(\n '\\n\t\t\t\t\t\\n\t\t\t\t\t\t\t\t\t\t\t\\n\\n', ' ').replace(\n '\\n\t\t\t\t\t\t\t\t\t\t\t', ' ').replace(\n '\\n\t\t\t\t\t\\n\t\t\t\t\t\t\t\t\t\\n', ' ').split(' ;')\n return data\n\n\n# scrapes crypto prices from investing.com\ndef parse_cval(x):\n url = f'https://ru.investing.com/crypto/{x}'\n r = get(url, headers=config.HEADERS)\n soup = BeautifulSoup(r.text, 'lxml')\n price = soup.find('div', class_='top bold inlineblock').text.replace('\\n', ' ').replace('\\xa0', '').replace(' ',\n ' ').replace(\n ' ', ' ').split()\n return price\n\n\n# builds the main keyboard\n@bot.message_handler(commands=['start'])\ndef start(message):\n general_keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)\n v = types.KeyboardButton('Currency 💰')\n c = types.KeyboardButton('Crypto 💳')\n general_keyboard.add(v, c)\n bot.send_message(message.chat.id, 'Choose', reply_markup=general_keyboard)\n\n\n# builds the fiat or crypto keyboard\n@bot.message_handler(func=lambda x: x.text == 'Currency 💰' or x.text == 'Crypto 💳')\ndef general(message):\n if message.text == 'Currency 💰':\n vl_keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True, row_width=2)\n eur = types.KeyboardButton('🇪🇺 Euro')\n usd = types.KeyboardButton('🇺🇸 US Dollar')\n uah = types.KeyboardButton('🇺🇦 Hryvnia')\n try_ = types.KeyboardButton('🇹🇷 Lira')\n back = types.KeyboardButton('Back')\n vl_keyboard.add(eur, usd, uah, try_, back)\n bot.send_message(message.chat.id, 'Choose a currency', reply_markup=vl_keyboard)\n elif message.text == 'Crypto 💳':\n cvl_keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True, row_width=2)\n btc = types.KeyboardButton('Bitcoin')\n eth = types.KeyboardButton('Ethereum')\n doge = types.KeyboardButton('Dogecoin')\n usdt = types.KeyboardButton('Tether')\n back = types.KeyboardButton('Back')\n cvl_keyboard.add(btc, eth, doge, usdt, back)\n bot.send_message(message.chat.id, 'Choose a cryptocurrency', reply_markup=cvl_keyboard)\n\n
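# A possible refactor sketch (same behaviour, less duplication; 'make_keyboard'\n# is a name introduced here, not part of the original bot):\n# def make_keyboard(labels):\n# kb = types.ReplyKeyboardMarkup(resize_keyboard=True, row_width=2)\n# kb.add(*[types.KeyboardButton(t) for t in labels], types.KeyboardButton('Back'))\n# return kb\n\n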
# replies with the crypto rate\n@bot.message_handler(func=lambda\n x: x.text == 'Bitcoin' or x.text == 'Ethereum' or x.text == 'Dogecoin' or x.text == 'Tether' or x.text == 'Back')\ndef cval(message):\n if message.text == 'Back':\n start(message)\n elif message.text == 'Bitcoin':\n data = parse_cval('bitcoin')\n bot.send_message(message.chat.id, f'1 Bitcoin = {data[1]} $\\n{data[3]}', parse_mode='HTML')\n elif message.text == 'Ethereum':\n data = parse_cval('ethereum')\n bot.send_message(message.chat.id, f'1 Ethereum = {data[1]} $\\n{data[3]}', parse_mode='HTML')\n elif message.text == 'Dogecoin':\n data = parse_cval('dogecoin')\n bot.send_message(message.chat.id, f'1 Dogecoin = {data[1]} $\\n{data[3]}', parse_mode='HTML')\n elif message.text == 'Tether':\n data = parse_cval('tether')\n bot.send_message(message.chat.id, f'1 Tether = {data[1]} $\\n{data[3]}', parse_mode='HTML')\n\n\n# replies with the fiat exchange rate\n@bot.message_handler(func=lambda\n x: x.text == '🇪🇺 Euro' or x.text == '🇺🇸 US Dollar' or x.text == '🇺🇦 Hryvnia' or x.text == '🇹🇷 Lira' or x.text == 'Back')\ndef val(message):\n if message.text == 'Back':\n start(message)\n elif message.text == '🇪🇺 Euro':\n data = parse_val()\n bot.send_message(message.chat.id, f'1 Euro = {data[1][-15:-10]} ₽\\n{data[1][-7:]} (1 day)',\n parse_mode=\"HTML\")\n elif message.text == '🇺🇸 US Dollar':\n data = parse_val()\n bot.send_message(message.chat.id, f'1 US Dollar = {data[0][-15:-10]} ₽\\n{data[0][-7:]} (1 day)',\n parse_mode=\"HTML\")\n elif message.text == '🇺🇦 Hryvnia':\n data = parse_val()\n bot.send_message(message.chat.id, f'1 Hryvnia = {data[27][25:-10]} ₽\\n{data[27][-7:]} (1 day)',\n parse_mode=\"HTML\")\n elif message.text == '🇹🇷 Lira':\n data = parse_val()\n bot.send_message(message.chat.id, f'1 Lira = {data[25][21:-10]} ₽\\n{data[25][-7:]} (1 day)',\n parse_mode=\"HTML\")\n\n\nbot.polling(none_stop=True)\n","repo_name":"chebubruh/Currency_bot","sub_path":"currency-bot.py","file_name":"currency-bot.py","file_ext":"py","file_size_in_byte":5288,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"29892328358","text":"import csv\nimport codecs\n\nclass Preprocess:\n def __init__(self, datafile, linefile, conversationfile, lineFields, conversationFields):\n self.datafile = datafile\n self.linefile = linefile\n self.conversationfile = conversationfile\n self.lineFields = lineFields\n self.conversationFields = conversationFields\n\n def loadLines(self):\n \"\"\"Splits each line of the file into a dictionary of fields\n \"\"\"\n print(\"\\nProcessing corpus...\")\n self.lines = {}\n with open(self.linefile, 'r', encoding='iso-8859-1') as f:\n for line in f:\n values = line.split(\" +++$+++ \")\n # Extract fields\n lineObj = {}\n for i, field in enumerate(self.lineFields):\n lineObj[field] = values[i]\n self.lines[lineObj['lineID']] = lineObj\n\n def loadConversations(self):\n \"\"\"Groups fields of lines from `loadLines` into conversations based on 
*movie_conversations.txt*\n \"\"\"\n print(\"\\nLoading conversations...\")\n self.conversations = []\n with open(self.conversationfile, 'r', encoding='iso-8859-1') as f:\n for line in f:\n values = line.split(\" +++$+++ \")\n # Extract fields\n convObj = {}\n for i, field in enumerate(self.conversationFields):\n convObj[field] = values[i]\n # Convert string to list (convObj[\"utteranceIDs\"] == \"['L598485', 'L598486', ...]\")\n lineIds = eval(convObj[\"utteranceIDs\"])\n # Reassemble lines\n convObj[\"lines\"] = []\n for lineId in lineIds:\n convObj[\"lines\"].append(self.lines[lineId])\n self.conversations.append(convObj)\n\n def extractSentencePairs(self):\n \"\"\"Extracts pairs of sentences from conversations\n \"\"\"\n qa_pairs = []\n for conversation in self.conversations:\n # Iterate over all the lines of the conversation\n # We ignore the last line (no answer for it)\n for i in range(len(conversation[\"lines\"]) - 1):\n inputLine = conversation[\"lines\"][i][\"text\"].strip()\n targetLine = conversation[\"lines\"][i+1][\"text\"].strip()\n # Filter wrong samples (if one of the lists is empty)\n if inputLine and targetLine:\n qa_pairs.append([inputLine, targetLine])\n return qa_pairs\n\n def writeCSV(self):\n \"\"\"Write new csv file\n \"\"\"\n print(\"\\nWriting newly formatted file...\")\n delimiter = '\\t'\n delimiter = str(codecs.decode(delimiter, \"unicode_escape\"))\n with open(self.datafile, 'w', encoding='utf-8', newline='') as outputfile:\n writer = csv.writer(outputfile, delimiter=delimiter)\n for pair in self.extractSentencePairs():\n writer.writerow(pair)\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"AaronGrainer/pytorch-chatbot","sub_path":"process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":2997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"5776268534","text":"from pymongo.mongo_client import MongoClient\nfrom langchain.text_splitter import RecursiveCharacterTextSplitter\nfrom langchain.docstore.document import Document\nfrom langchain.embeddings.openai import OpenAIEmbeddings\nfrom langchain.vectorstores.mongodb_atlas import MongoDBAtlasVectorSearch\nimport os\n\nMONGO_URI = os.getenv(\"MONGO_URI\")\nclient = MongoClient(MONGO_URI)\n\ncollection = client.get_default_database().get_collection(\"note\")\n\nchange_stream = collection.watch(full_document=\"updateLookup\")\nfor change in change_stream:\n \n operation_type = change[\"operationType\"]\n if operation_type == \"replace\":\n \n full_doc = change['fullDocument']\n raw_content = full_doc['rawContent']\n original_document = full_doc['_id']\n user_id = full_doc['userId']\n \n collection = client.get_default_database().get_collection(\"embeddings\")\n collection.delete_many({\"originalDocument\": original_document})\n \n doc = Document(page_content=raw_content, metadata={\"userId\": user_id, \"originalDocument\": original_document})\n \n text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)\n documents = text_splitter.split_documents([doc])\n \n \n embedding = OpenAIEmbeddings()\n col = client.get_default_database().get_collection(\"embeddings\")\n vec = MongoDBAtlasVectorSearch.from_documents(documents, embedding, collection=collection)\n \n if operation_type == \"delete\":\n doc_id = change['documentKey']['_id']\n collection = client.get_default_database().get_collection(\"embeddings\")\n collection.delete_many({\"originalDocument\": 
doc_id})","repo_name":"patricktrp/simplenote","sub_path":"backend/embedding_worker/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"44449906264","text":"import os\nimport subprocess\nimport sys\nimport argparse\nimport logging\nimport numpy as np\nimport boto3\nimport datetime\nfrom datetime import datetime \n\ndef install(package):\n subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", package])\n\n \ndef parser_args(train_notebook=False):\n parser = argparse.ArgumentParser()\n \n parser.add_argument(\"--model_id\", default='lawcompany/KLAID_LJP_base')\n parser.add_argument(\"--tokenizer_id\", default='lawcompany/KLAID_LJP_base')\n parser.add_argument(\"--dataset_name\", type=str, default='lawcompany/KLAID')\n parser.add_argument(\"--small_subset_for_debug\", type=bool, default=True)\n parser.add_argument(\"--train_dir\", type=str, default='/opt/ml/processing/train')\n parser.add_argument(\"--validation_dir\", type=str, default='/opt/ml/processing/validation') \n parser.add_argument(\"--test_dir\", type=str, default='/opt/ml/processing/test')\n parser.add_argument(\"--transformers_version\", type=str, default='4.17.0')\n parser.add_argument(\"--pytorch_version\", type=str, default='1.10.2')\n \n if train_notebook:\n args = parser.parse_args([])\n else:\n args = parser.parse_args()\n return args\n \n \nif __name__ == \"__main__\":\n args = parser_args()\n \n install(f\"torch=={args.pytorch_version}\")\n transformers_version = \"4.17.0\" \n install(f\"transformers=={transformers_version}\")\n install(\"datasets==1.18.4\")\n \n ## Data Crawling \n\n from datasets import load_dataset\n from transformers import AutoTokenizer, AutoModelForSequenceClassification\n\n # download tokenizer\n tokenizer = AutoTokenizer.from_pretrained(args.model_id)\n\n # tokenizer helper function\n def tokenize(batch):\n return tokenizer(batch['fact'], padding='max_length', max_length=512, truncation=True)\n\n # load dataset\n train_dataset, test_dataset = load_dataset(args.dataset_name, split=['train[:80%]', 'train[80%:]'])\n\n # train dataset to dataframe \n import pandas as pd\n train_df = pd.DataFrame(train_dataset)\n\n # local file Path\n local_file_path = \"/opt/ml/processing/collected_data.csv\" \n\n # Download collected Data from S3 bucket\n s3 = boto3.client(\"s3\")\n s3_bucket = \"sagemaker-us-east-1-353411055907\"\n current_date_str = f\"data_{datetime.now().strftime('%Y-%m-%d %H')}\"\n file_name = f'GP-LJP-mlops/data/collected_data/{current_date_str}.csv'\n s3.download_file(s3_bucket, file_name, local_file_path)\n\n # Concatenate the original data + collected data \n added_df = pd.read_csv(local_file_path, encoding='utf-8')\n merged_df = pd.concat([train_df, added_df], axis=0)\n\n # Convert the merged DataFrame back to the Hugging Face Dataset class format\n from datasets import Dataset\n train_dataset = Dataset.from_pandas(merged_df)\n\n if args.small_subset_for_debug:\n train_dataset = train_dataset.shuffle().select(range(1000))\n test_dataset = test_dataset.shuffle().select(range(1000))\n\n # tokenize dataset\n train_dataset = train_dataset.map(tokenize, batched=True)\n test_dataset = test_dataset.map(tokenize, batched=True)\n\n # set format for pytorch\n train_dataset = train_dataset.rename_column(\"laws_service_id\", \"labels\")\n train_dataset.set_format('torch', columns=['input_ids', 'attention_mask', 'labels'])\n test_dataset = 
test_dataset.rename_column(\"laws_service_id\", \"labels\")\n test_dataset.set_format('torch', columns=['input_ids', 'attention_mask', 'labels'])\n \n train_dataset.save_to_disk(args.train_dir)\n test_dataset.save_to_disk(args.test_dir)\n","repo_name":"dellaanima/LJP_MLOps","sub_path":"src/processing_sklearn.py","file_name":"processing_sklearn.py","file_ext":"py","file_size_in_byte":3575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"27227840241","text":"#모델 학습 및 평가 시 사용되는 상수 및 데이터 딕셔너리를 정의\r\n#이 파일을 import 하면 아래의 변수(BASE_DIR, IS_UBUNTU, EPS, DIC_CLS_BGR, DIC_CLS_RGB)들을 사용할 수 있게 됨.\r\nimport os\r\nimport numpy as np\r\n\r\nBASE_DIR = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))\r\n\r\nIS_UBUNTU = False\r\n\r\nEPS = np.finfo(float).eps\r\n\r\nDIC_CLS_BGR = {\r\n 'Sedan':[0,0,255],\r\n 'Bus or Truck':[0,50,255],\r\n 'Motorcycle':[0,255,0],\r\n 'Bicycle':[0,200,255],\r\n 'Pedestrian':[255,0,0],\r\n 'Pedestrian Group':[255,0,100],\r\n 'Bicycle Group':[255,100,0],\r\n}\r\n\r\nDIC_CLS_RGB = {\r\n 'Sedan': [1, 0, 0],\r\n 'Bus or Truck': [1, 0.2, 0],\r\n 'Motorcycle': [0, 1, 0],\r\n 'Bicycle': [1, 0.8, 0],\r\n 'Pedestrian': [0, 0, 1],\r\n 'Pedestrian Group': [0.4, 0, 1],\r\n 'Bicycle Group': [0, 0.8, 1],\r\n}\r\n\r\nif __name__ == '__main__':\r\n print(BASE_DIR)\r\n print(EPS)\r\n","repo_name":"Kichan01/4D-RADAR","sub_path":"configs/config_general.py","file_name":"config_general.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"22718908042","text":"class BWT():\n\tdef __init__(self, t):\n\t\tself.bwt = self.build_bwt(t)\n\t\tself.Occ = self.build_occ(t)\n\t\tself.Count = self.build_count(t)\n\t\tself.largest = sorted(list(set(t)))\n\n\t#----------------------------------------------------------\n\t# return the left rotation of t\n\tdef rotate(self, t):\n\t\tfirst = t[0]\n\t\teverything_but_first = t[1:]\n\t\treturn everything_but_first + first\n\n\t#----------------------------------------------------------\n\t# construct the Burrows-Wheeler transform of T\n\tdef build_bwt(self, T):\n\t\trotations = [T]\n\t\t# Get all rotations of T and store them to rotations.\n\t\tfor i in range(1,len(T)):\n\t\t\ts = self.rotate(rotations[-1])\n\t\t\trotations.append( s )\n\t\trotations.sort()\n\t\t# construct a list containing last characters of sorted rotations.\n\t\tt = [ x[-1] for x in rotations ]\n\t\treturn ''.join(t)\n\n\t#----------------------------------------------------------\n\tdef build_occ(self, t):\n\t\tocc = { c : [0]*len(t) for c in set(t) }\n\t\tfor i in range(len(self.bwt)):\n\t\t\tc = self.bwt[i]\n\t\t\tfor char in occ:\n\t\t\t\tocc[char][i] = occ[char][i-1]\n\t\t\tocc[c][i] += 1\n\t\treturn occ\n\n\t#----------------------------------------------------------\n\tdef build_count(self, t):\n\t\tcharacters = list(set(t))\n\t\tcharacters.sort()\n\t\tcount = {'$' : 0}\n\t\tfor i in range(1, len(characters)):\n\t\t\tcount[characters[i]] = count[characters[i-1]] + t.count(characters[i-1])\n\t\treturn count\n\n\t#----------------------------------------------------------\n\tdef reconstruct(self):\n\t\tc, i = '$', 0\n\t\tt = [c]\n\t\tfor j in range(len(self.bwt)-1):\n\t\t\tc = self.bwt[i]\n\t\t\ti = self.Count[c] + self.Occ[c][i] - 1\n\t\t\tt = [c] + t\n\t\treturn ''.join(t)\n\n#----------------------------------------------------------\n# return the number of occurances\n\n\tdef 
\n\tdef query(self, pattern):\n\t\tlo, hi = 0, len(self.bwt) - 1\n\t\tfor c in reversed(pattern):\n\t\t\tif c not in self.Count:\n\t\t\t\treturn 0\n\t\t\tlo = self.Count[c] + (self.Occ[c][lo - 1] if lo > 0 else 0)\n\t\t\thi = self.Count[c] + self.Occ[c][hi] - 1\n\t\t\tif lo > hi:\n\t\t\t\treturn 0\n\t\treturn hi - lo + 1\n\n\n#--------------------------------------------------------\nimport random\n\ndef random_string(n):\n\treturn ''.join([ random.choice('actg') for i in range(n)])\n# text = 'abaaba$'\ntext = random_string(50) + '$'\nb = BWT(text)\nprint(text)\nprint(b.bwt)\nprint(b.reconstruct())\n","repo_name":"milu-buet/Bio-Algo","sub_path":"Class/wb_p9f6z7.py","file_name":"wb_p9f6z7.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"32014544755","text":"class Solution:\n def squareIsWhite(self, coordinates: str) -> bool:\n alpha=['a','b','c','d','e','f','g','h']\n for i in range(len(alpha)):\n for j in range(1,9):\n if alpha[i]+str(j)==coordinates:\n if (i+j)%2==0:\n return True\n else:\n return False","repo_name":"Shodiev-Shokhrukh/Leetcode","sub_path":"Math/1812. Determine Color of a Chessboard Square.py","file_name":"1812. Determine Color of a Chessboard Square.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"73142866482","text":"\r\n\r\n# These are my notes taken during the Python MOOC (completed Jan. 2018).\r\n# https://www.fun-mooc.fr/courses/course-v1:UCA+107001+session01/about\r\n# https://github.com/parmentelat/moocpython/blob/master/pdf_du_cours/from_latex/Python.pdf\r\n# https://github.com/parmentelat/moocpython/files/1639944/Python3.-.1.a.9.avec.index.pdf\r\n\r\n## ----------------------------------------------------------\r\n## ----------------------------------------------------------\r\n## Introduction\r\n\r\n## Guido Van Rossum, creator, BDFL until 2018\r\n## Highly readable language, easy to pick up\r\n## Pragmatic, focus on ease of use\r\n## Uniform syntax across the different types of objects\r\n## Allows rapid development\r\n## A lot of libraries, especially for data science, ...\r\n## v1 in 1994, v2 in 2000, v3 in 2008 (breaking changes)\r\n## https://docs.python.org/3/faq/programming.html\r\n\r\nprint(\"Hello, World!\")\r\n\r\n\r\n## The zen of python\r\nimport this\r\n\r\n# several flavours: python, IPython\r\n# idle, notebooks\r\n\r\n## modules\r\nimport math # import module\r\nfrom numpy import cos # import a function\r\nimport numpy as np # import with alias\r\nprint(math)\r\ndir(math) # list included functions\r\nhelp(math.pi) # math.pi? 
in jupyter notebooks\r\n## standard library (modules included by default)\r\n\r\n \r\n## a variable is a name that references an object\r\n'spam'.upper() # upper is a method\r\nn = 3\r\ntype(n)\r\nn = \"spam\" # dynamic (and strongly typed) language\r\ntype(n)\r\nisinstance(n, str)\r\n\r\ndel n # garbage collector will free the memory\r\n\r\n## keywords that can't be used as variable name\r\nimport keyword\r\nkeyword.kwlist\r\n\r\n\r\n## ----------------------------------------------------------\r\n## ----------------------------------------------------------\r\n## Numeric types\r\n\r\n# int, float, complex\r\ni = 1 # integer\r\ni += 1 # a = a + 1\r\nf = 0.1 # float\r\nc = 1 + 1j # complex\r\n1j ** 2 # square\r\n7 // 3 # integer division\r\n7.//3 # // with float returns float\r\n7 % 3 # modulo\r\nabs(-1) # absolute value\r\n\r\n## floating point precision\r\nz = 1.1 - 1\r\nz == 0.1\r\nprint(z)\r\nprint(\"%0.1f\" % z)\r\nformat(0.1, '.17f')\r\n## check decimal or fractions modules\r\nfrom decimal import Decimal\r\nDecimal('1.1') - Decimal('1') == Decimal('0.1')\r\n\r\nTrue + False\r\nint(\"1\") # int, float, complex, str\r\n\r\n## bitwise operators\r\nbin(3) \r\nbin(6) \r\nbin(12) \r\n3 << 1\r\n3 << 2\r\ni >>= 2\r\n0xf0\r\n# import sys; print(sys.float_info)\r\n\r\n\r\n## ----------------------------------------------------------\r\n## ----------------------------------------------------------\r\n## Basic notions\r\n\r\n## strings\r\n\r\n# reminder: bits (01) are decoded to an integer value\r\n# for a given encoding system (e.g. ASCII), a value corresponds to a character\r\n# problem: limited number of possible characters\r\n# 7 bits => 128 characters\r\n# 8 bits => 256 characters extended\r\n# several systems exist but incompatible\r\n# UNICODE project: UTF-8/16/32, UTF-8 compatible with ascii\r\n# unicode > 120k coded characters\r\n# USE UTF-8\r\n\r\n\"\\u529b\" ## python supports unicode\r\ns = \"力\"\r\nen = s.encode(\"utf-8\")\r\nen.decode(\"utf-8\")\r\n\r\ndir(str) # str? 
IPython only\r\nhelp(str)\r\n\r\nc = \"spam\" ## strings are immutable (see pythontutor)\r\nc.title()\r\nc.replace(\"spam\", \"ham\") \r\n\r\n# f-strings\r\na = 1\r\nb = 2\r\nf\"{a} and {b}\" \r\n\"{} and {}\".format(a, b) \r\n\r\n## some examples of string methods\r\n'abc:def:ghi:jkl'.split(':')\r\n\":\".join(['abc', 'def', 'ghi', 'jkl'])\r\n\"abcdeabcdeabcde\".replace(\"abc\", \"zoo\")\r\n\"abcdeabcdeabcde\".replace(\"abc\", \"zoo\", 2)\r\n\"[x] versus [y]\".replace(\"[x]\", \"spam\").replace(\"[y]\", \"ham\")\r\n\" abc:de f:g hi \".replace(\" \", \"\")\r\n\" \\trm_special\\n\".strip()\r\n'abc;def;ghi;jk;'.strip(';').split(';')\r\n\"abcdefcdefghefg\".find(\"def\")\r\n\"abcdefcdefghefg\".find(\"zoo\")\r\n\"abcdefcdefghefg\".rfind(\"fgh\")\r\n\"abcdefcdefghefg\".index(\"zef\") # like find but throws exception\r\n\"cde\" in \"abcdek\"\r\n\"abcdefcdefghefg\".count(\"ef\")\r\n\"abcdefcdefghefg\".startswith(\"abcd\")\r\n\"abcdefcdefghefg\".endswith(\"hefg\")\r\n\"abcdefcdefghefg\".upper()\r\n\"abcdefcdefghefg\".swapcase()\r\n\"abcdefcdefghefg\".capitalize()\r\n## https://docs.python.org/3/library/stdtypes.html#string-methods\r\n\r\n## string formatting\r\nprint(1, 'a', 12 + 4j)\r\nfirst, last, age = 'John', 'Doe', 35\r\nf\"{first} {last} is {age}\"\r\nf\"In 5 years {first} will be {age + 5}\"\r\n\"%s %s is %s\" % (first, last, age) # old version\r\n\r\n## format numbers\r\nfrom math import pi\r\nf\"pi rounded: {pi:.2f}\"\r\nx = 1\r\nf\"{x:03d}\"\r\n\r\n## fixed length\r\nx = [(1, 1, 1), (111, 111, 111), (11111, 11111, 11111)]\r\nfor col1, col2, col3 in x:\r\n print(f\"{col1:<7} -- {col2:^7} -- {col3:>7}\")\r\n\r\n# number = input(Pick a number: \")\r\n# print(f\"number={number}\")\r\n\r\n## https://docs.python.org/3/library/string.html#formatstrings \r\n\r\n## Regular expressions\r\nimport re\r\nx = \"Lorem ipsum dolor sit amet\"\r\nprint(re.findall(r\"[a-m]\", x))\r\nx = \"dsf-6 ,333 1._nb9;\"\r\nre.findall(\"[-\\d]+\", x)\r\n# ...\r\n\r\nimport string\r\nchar = string.ascii_lowercase\r\nprint(char)\r\n\r\n###########################\r\n## Sequences\r\n\r\n## sequences = list/tuple/str/bytes/...\r\n## elements with finite and ordered \r\ns = 'spam'\r\nlen(s)\r\ns[0]\r\n\"sp\" not in s \r\ns = s + \" and ham\"\r\ns.index('a')\r\ns.count('m')\r\nmax(s)\r\n\"x\" * 30\r\n## slicing\r\ns[0:3]\r\ns[2:]\r\ns[0:10:2]\r\ns[::2]\r\ns[-5:-1]\r\ns[::-1]\r\ns[:] # shallow copy\r\n\r\n###########################\r\n## Lists\r\n\r\n## Lists are sequences of heterogeneous objects\r\n## List don't store the obj, store the reference (so memory size independant)\r\n## Lists are mutable (doesn't need a copy => memory efficient)\r\n## Sequences operations can be applied on lists\r\n## Lists are highly flexible\r\n\r\na = []\r\ntype(a)\r\na = [1,2,3,4]\r\na[1:3] = [1,2,3] ## ! delete, then add\r\na[1:3] = [] ## ! delete\r\ndel a[1]\r\ndir(list)\r\nhelp(list.append) # list.append? 
IPython\r\na.append(9)\r\na.extend(a)\r\na.sort(reverse = True) # modified by reference\r\nb = sorted(a) # sort and copy\r\ns = \"spam egg beans\"\r\ns = s.split()\r\ns[0] = s[0].upper()\r\n\" \".join(s)\r\na = list(range(10))\r\na + a\r\na.insert(0, \"a\")\r\na.remove(\"a\") # first match only\r\nb = a.pop(2) # extract and remove (last one by default)\r\na.reverse()\r\na * 3 # duplicates n times\r\n\r\n\r\n###########################\r\n## If\r\n\r\n## instruction block : + spaces\r\n## <79 characters on one line better\r\n\r\nif 'g' in 'egg':\r\n print('yes')\r\nelse:\r\n print('no')\r\n\r\n## nested if: indent again\r\n\r\nchar = 'spam'\r\n\r\nif 'a' in char:\r\n if 'b' in char:\r\n cas11 = True\r\n print('a and b')\r\n else:\r\n cas12 = True\r\n print('a but not b')\r\nelse:\r\n if 'b' in char:\r\n cas21 = True\r\n print('b but not a')\r\n else:\r\n cas22 = True\r\n print('neither a nor b') \r\n\r\n# if elif elif ... else\r\n\r\n## More about if\r\n# test can be any expression => bool evaluated (or len, 0 => false)\r\n# built-in: False 0 [] {} () ''\r\n# comparaison / membership ==, !=, is, is not in <=, <, >, >=\r\n# operators: and or not\t\r\n\r\n\r\n###########################\r\n## For loops and functions\r\n\r\nfor i in range(10):\r\n print(i**2)\r\n\r\nfor i in [1, 2, True]:\r\n print(i)\r\n\r\na = []\r\nfor n in [1, 2, '3', 4, 'END']:\r\n a.append(str(n))\r\n\r\nprint(\",\".join(a))\r\n\r\nfor i, e in enumerate([1, 3, 5]):\r\n print(i, e)\r\n\r\ndef square(x): # note: x passed by reference\r\n L = []\r\n for i in x:\r\n L.append(i**2)\r\n return L\r\n\r\ndef foo():\r\n return\r\n\r\ntype(foo()) # None\r\n\r\ndef foo2(): ## function that does nothing\r\n pass\r\n\r\nfor integer in range(1000):\r\n # ignore numbers non multiple of 10\r\n if integer % 10 != 0:\r\n continue\r\n print(f\"processing {integer}\")\r\n # stop at 50\r\n if integer >= 50:\r\n break\r\n\r\n\r\nx = True\r\ny = 1 if x else 0\r\n\r\na = list(range(1,10))\r\nwhile a:\r\n a.pop()\r\n if len(a) == 5:\r\n continue # break # continue goes to the top of the while\r\n print(a)\r\n\r\nwhile True:\r\n s = input(\"what is your question?\\n\")\r\n if 'none' in s:\r\n break\r\n\r\n# while can take an else\r\n\r\n\r\n\r\n###########################\r\n## Lists (and list comprehension)\r\n\r\na = [1,4,18,29,13]\r\nimport math\r\nb = [math.log(i) for i in a]\r\na = [-1,4,18,29,13]\r\nb = [math.log(i) for i in a if i > 0]\r\na = [\"Eve\",\"bob\",\"TOM\"]\r\na = [p.lower() for p in a]\r\nentry = [1, 2, 3, 4, 5, 6, 7, 8, 9]\r\nsquares = [x**2 for x in entry] \r\n[(num1, num2) for num1 in range(1, 3) for num2 in range(3, 5)]\r\n['positive' if x > 0 else 'negative' for x in [-2,2,3]]\r\n\r\n\r\n\r\n## ----------------------------------------------------------\r\n## ----------------------------------------------------------\r\n## More about basic notions, shared references\r\n\r\n###########################\r\n## Files\r\n\r\nf = open(r\"c:\\spam.txt\", \"w\", encoding = \"utf8\") \r\n# r = rawstring\r\n# r = read, w = write, a = append\r\n\r\nfor i in range(100):\r\n f.write(f\"line {i + 1}\\n\")\r\n\r\nf.close()\r\n\r\n# !type or !cat c:\\spam.txt # ipython only\r\n\r\nf = open(r\"c:\\spam.txt\", \"r\", encoding = \"utf8\") \r\nf2 = open(r\"c:\\spam2.txt\", \"w\", encoding = \"utf8\") \r\n\r\nfor line in f:\r\n line = line.split()\r\n line[0] = line[0].upper()\r\n f2.write(\",\".join(line) + \"\\n\")\r\nf.close()\r\nf2.close()\r\n\r\n## context manager: modern way to manipulate files\r\n## context manager protocol, avoids to use 
.close()\r\n\r\nwith open(r\"c:\\spam.txt\", \"r\", encoding = \"utf8\") as f:\r\n for line in f:\r\n print(line) # file close automatically\r\n\r\n## binary file (b before w indicates binary, e.g. pickled file)\r\nwith open(r\"c:\\spam.bin\", \"bw\") as f:\r\n for i in range(100):\r\n f.write(b'\\x3d') # bytes\r\n\r\nimport pickle\r\n# Open pickle file and load data: d\r\nwith open('data.pkl', 'rb') as file:\r\n d = pickle.load(file)\r\n\r\n# other:\r\n# https://docs.python.org/3/library/pathlib.html\r\n# https://docs.python.org/3/library/json.html\r\n\r\n###########################\r\n## Tuples (immutable lists)\r\n\r\n## a tuple is a sequence\r\n## tuples are immutable lists (useful for dictionnaries)\r\n\r\nt = ()\r\nt = (4,) # (4) is simply an integer, use the comma\r\nt = (True, 1, \"a\")\r\nt = True, 1, \"a\" # parentheses are not mandatory\r\n1 in t \r\nt[0]\r\na = list(t) # convert tuple to list\r\nt = tuple(a) # convert list to tuple\r\n(a, b) = [3, 4] # tuple unpacking\r\na, b = [3, 4] # tuple unpacking\r\na = list(range(10)) # extended tuple unpacking\r\nx, *y = a \r\n*x, y = a\r\n1,3 + 3, 4, + 2,1 + 1,2 # weird...\r\nup = [\"A\", \"B\", \"C\"]\r\nlow = [\"a\", \"b\", \"c\"]\r\nlist(zip(up, low)) # zip function, more than two possible\r\n\r\n\r\n###########################\r\n## Dictionnaries \r\n\r\n# hash tables: set and dictionnaries\r\n# time of operations is independant of the nb of elements\r\n# e.g. access, insert, test in, delete, ...\r\n# all immutable objects are 'hashable'\r\n# no order in dictionnaries\r\n\r\nage = {}\r\nage = {'ana': 35, 'eve': 30, 'bob': 38}\r\nage['ana']\r\n\r\nage = dict(ana = 35, eve = 35, bob = 35)\r\na = [('ana', 35), ('eve', 35), ('bob', 35)]\r\nage = dict(a)\r\nage['bob']\r\nage['bob'] = 45 # see also update\r\n\r\ndel age['bob']\r\nlen(age)\r\n'ana' in age\r\nage.get(\"bob\", 0)\r\nk = age.keys() # k will be modified if age modified\r\nage.values()\r\nage.items() # returns a vue (a vue is an iterable object)\r\n\r\nfor k, v in age.items():\r\n print(f\"{k} {v}\")\r\n\r\n# see also collections module\r\n\r\n###########################\r\n## Sets\r\n\r\n## related to dictionaries, but stores only keys, no values\r\n## useful for unique values, or membership test\r\n## better to convert a sequence to a set for in test\r\n\r\ns = set()\r\ntype(s)\r\ns = {1,2,3, 'a', True}\r\ns = set([1,1,2,3,20,18,4]) # s is ordered, keys are unique\r\nlen(s)\r\n1 in s\r\ns.add(\"bob\")\r\ns.update([1,2,3,4,5,6,7])\r\ns\r\ns1 = {1,2,3}\r\ns2 = {2,3,4}\r\ns1 - s2 # difference\r\ns1 | s2 # union\r\ns1 & s2 # intersection\r\ns1 ^ s2 # symetrical difference\r\na = [0]\r\n#s{0}\r\n\r\n# frozenset: set that can not be modified\r\nfs = frozenset(s) # fs.add(2) will fail\r\n\r\n###########################\r\n## Exceptions\r\n\r\n## exceptions can be captured and bring info about the error\r\n## frequently used mechanism in python\r\n\r\n# 1 / 0 => ZeroDivisionError\r\n\r\ndef div (a,b):\r\n try:\r\n print(a / b)\r\n except ZeroDivisionError:\r\n print(\"Can not divide by zero\")\r\n except TypeError:\r\n print(\"Numbers required\")\r\n print(\"next...\")\r\n\r\n# try: captures exception, except: properly handles the error\r\n# div(3, 1); div(3, 0); div(3,\"a\")\r\n\r\n## bubbling => exceptions go up\r\n\r\ndef function_with_finally(number):\r\n try:\r\n return 1 / number\r\n except ZeroDivisionError as e:\r\n print(f\"D'oh! 
{type(e)}, {e}\")\r\n return(\"zero-divide\")\r\n finally:\r\n print(\"Final action even if return called above\")\r\n\r\nfunction_with_finally(0)\r\n\r\ndef function_with_else(number):\r\n try:\r\n 1 / number\r\n except ZeroDivisionError as e:\r\n print(f\"D'oh! {type(e)}, {e}\")\r\n else:\r\n print(\"Do this only if non-zero number\")\r\n return 'Something else'\r\n\r\nfunction_with_else(0)\r\n\r\n\r\n###########################\r\n## Shared references\r\n\r\n## side effects\r\na = [1,2]\r\nb = a\r\nb[0] = 0\r\na\r\nb = a[:] # use a shallow copy to avoid side effects \r\n# but if the list references a mutable object, shallow is not enough\r\n# for example:\r\na = [1, [2]]\r\nb = a[:]\r\na[1][0] = 9\r\nb\r\n\r\nimport copy\r\nb = copy.deepcopy(a) # deep copy\r\n\r\na is b # check shared references\r\nid(a) # 'memory adress'\r\n\r\n# https://docs.python.org/3/reference/datamodel.html#objects-values-and-types\r\n# circular references...\r\n\r\nel = [0]\r\nmyList = [el, el, el]\r\nmyList[0][0] = 1\r\n\r\nmyList = 3 * [ [0] ]\r\nmyList\r\nmyList[0][0] = 1\r\nmyList\r\n\r\n\r\n###########################\r\n## Classes\r\n\r\nclass C:\r\n pass\r\n\r\nc1 = C()\r\n\r\n# self corresponds to the class instance\r\n\r\nclass Phrase:\r\n def __init__(self, phrase):\r\n self.words = phrase.split()\r\n \r\n def upper(self):\r\n self.words = [m.upper() for m in self.words]\r\n \r\n def __str__(self): # str allows to print the content \r\n return \"\\n\".join(self.words)\r\n\r\np = Phrase(\"hello world\")\r\np.words # words = attribute\r\np.upper() # upper = method\r\nprint(p)\r\n\r\n\r\n## ----------------------------------------------------------\r\n## ----------------------------------------------------------\r\n## Functions (scope), if, while, ...\r\n\r\n###########################\r\n## Functions\r\n\r\n# functions are object, below ff is the variable that references the function\r\n\r\ndef ff(a,b,c):\r\n print(a,b,c)\r\n\r\ng = ff\r\ng(1,2,3)\r\n\r\n# in Python, arguments are passed by reference (vs by value)\r\n# side effect *IN PLACE*\r\n# \"MUST BE USED CAREFULLY\", good for performance\r\n# need to document properly using docstring (help(list.sort))\r\n\r\ndef add1(a):\r\n \"\"\"\r\n Help of add1 function (docstring).\r\n \"\"\"\r\n a.append(1)\r\n\r\nL = []\r\nadd1(L)\r\n\r\n# but using shallow copy may be more safe\r\ndef add2(a):\r\n a = a[:]\r\n a.append(1)\r\n return a\r\n\r\nL = add2(L) # is more explicit\r\n\r\n# polymorphism => execute on all the types compatible with the function\r\n# for example\r\ndef my_add(a, b):\r\n print(f\"{a} and {b}\")\r\n return a + b\r\n\r\n# my_add works on integers/floats/characters\r\n# my_add(1, 2); my_add(1.0, 2.0); my_add(\"a\", \"b\"); \r\n\r\n# type hints inform about expected types\r\n# just to document, no check during execution\t\r\nnb_items : int = 0\r\ndef fact(n : int) -> int:\r\n return 1 if n <= 1 else n * fact(n-1)\r\n\r\nfrom typing import List\r\ndef fun_with_hints(x: List[int]) -> List[str]:\r\n pass\r\n\r\n# https://docs.python.org/3/library/typing.html#user-defined-generic-types\r\n\r\ndef fi(x):\r\n # use isinstance to check type of arguments\r\n if isinstance(x, int):\r\n return x + 1\r\n else:\r\n raise TypeError(x) # raise ValueError('Error message')\r\n\r\nfi(\"a\")\r\n\r\n\r\n###########################\r\n## Parameters and arguments\r\n\r\n## parameters: in the definition of the function\r\n## arguments: variables passed for execution\r\n\r\ndef fff(a, b = 3): # default values can be used\r\n print(a, b)\r\n\r\nfff(b = 3, a = 6) # 
named\r\nfff(5) # ordered\r\n\r\ndef hh(*t): # * arguments are collected into a tuple, arbitrary number of them\r\n print(t)\r\n\r\nhh(); hh(1,2,3) # tuple\r\n\r\ndef jj(**d):\r\n print(d)\r\n\r\njj(a = 1, b = 2) # dictionary\r\n\r\n# * and ** can also be used on arguments even if not in the parameters\r\nL = [1,2,3]\r\nhh(*L) # tuple unpacking\r\nd = {\"sep\" : \" and \", \"end\" :\"\\n\\n\"}\r\nprint(1,2, **d) # ** can be used to pass arguments as a dict\r\n\r\n# https://docs.python.org/3/reference/expressions.html#calls\r\n# be careful with mutable arguments!\r\n# https://docs.python.org/3/faq/programming.html#why-are-default-values-shared-between-objects\r\n\r\n###########################\r\n## Scoping rules\r\n\r\n## to access variables, Python follows the LEGB rule\r\n## LEGB: Local, Enclosing, Global, Built-in\r\n## Local (in the block of a function)\r\n## Global variables (defined outside functions)\r\n\r\na, b, c = 1, 1, 1\r\n\r\ndef gg():\r\n b = 2\r\n b = b + 10\r\n # print(locals()) # b\r\n def h():\r\n c = 5\r\n print(a, b, c)\r\n h()\r\n\r\ngg()\r\n\r\n# a global, b enclosing, c local\r\nprint(a, b, c) # all global (print is built-in)\r\nlocals(); globals()\r\n\r\nimport builtins\r\ndir(builtins) # accessible built-in functions \r\nbuiltins.print\r\n\r\n# UnboundLocalError\r\n# when trying to modify locally a referenced (used) global variable\r\n# need to use the global instruction for that\r\n# but not recommended: use parameters and assignment\r\n## similarly, the nonlocal instruction is used to modify enclosing variables\r\n\r\n# Namespaces\r\n# global variables for each module\r\n# how to communicate across namespaces?\r\n\r\n\r\n## ----------------------------------------------------------\r\n## ----------------------------------------------------------\r\n## Iteration, import, and namespaces\r\n\r\n###########################\r\n## Iterators\r\n\r\ns = {1,2,3,'a'}\r\n[x for x in s if type(x) is int] # list comprehension\r\n\r\nit = iter(s) # creates an iterator on s\r\nit\r\nnext(it)\r\nnext(it)\r\nnext(it)\r\nnext(it)\r\nnext(it) # StopIteration error\r\n\r\n## iterable objects vs iterators\r\n# __iter__() # iterable objects have this method ## 'contains the data'\r\n# iter(s) ~ s.__iter__()\r\n# this method returns an iterator\r\n## iterators have two methods: .__iter__() and .__next__() (goes through the data)\r\n## an iterator can be run over only once \r\n## simple, compact, very low cost to create\r\n\r\nenum = enumerate([1, 3, 5], start = 1) # returns index and value\r\nnext(enum); next(enum); next(enum)\r\na = [1,2]\r\nb = [3,4]\r\nz = zip(a, b) # [tuple(a0,b0), tuple(a1,b1)]\r\nz is iter(z)\r\n[i for i in z]\r\n[i for i in z] # used once, now empty\r\n\r\nimport itertools ## offers more functionalities for iterators (combinations, ...)\r\n# https://docs.python.org/3/library/itertools.html\r\n\r\n\r\n###########################\r\n## Functional programming\r\n\r\n## functional programming: use functions as objects and pass them as arguments to other functions\r\n\r\n# lambda functions are anonymous, they are an expression\r\n# sometimes easier to use (avoids declaring a function)\r\n\r\n# map: applies a function to each element\r\nm = map(lambda x: x**2 - 1, range(10))\r\nm\r\nlist(m)\r\n## filter function\r\n## filters an iterable object using a test\r\nn = filter(lambda x: x%2 == 0, range(10))\r\nn\r\nlist(n)\r\n\r\n# map and filter produce iterables (usable once)\r\n# map and filter are like list comprehension\r\n# list comprehension is more 
\r\n\r\n## ----------------------------------------------------------\r\n## ----------------------------------------------------------\r\n## Iteration, import, and namespaces\r\n\r\n###########################\r\n## Iterators\r\n\r\ns = {1,2,3,'a'}\r\n[x for x in s if type(x) is int] # list comprehension\r\n\r\nit = iter(s) # creates an iterator on s\r\nit\r\nnext(it)\r\nnext(it)\r\nnext(it)\r\nnext(it)\r\nnext(it) # StopIteration error\r\n\r\n## iterable object vs iterators\r\n# __iter__() # iterable objects have this method ## 'contains the data'\r\n# iter(s) ~ s.__iter__()\r\n# this method returns an iterator\r\n## iterators have two methods: .__iter__() and .__next__() (goes through the data)\r\n## an iterator can be run over only once \r\n## simple, compact, very low cost to create\r\n\r\nenum = enumerate([1, 3, 5], start = 1) # returns index and value\r\nnext(enum); next(enum); next(enum)\r\na = [1,2]\r\nb = [3,4]\r\nz = zip(a, b) # [tuple(a0,b0), tuple(a1,b1)]\r\nz is iter(z)\r\n[i for i in z]\r\n[i for i in z] # used once, now empty\r\n\r\nimport itertools ## offers more functionalities for iterators (combinations, ...)\r\n# https://docs.python.org/3/library/itertools.html\r\n\r\n\r\n###########################\r\n## Functional programming\r\n\r\n## functional programming: treat functions as objects and pass them as arguments to other functions\r\n\r\n# lambda functions are anonymous, they are an expression\r\n# sometimes easier to use (avoids declaring a named function)\r\n\r\n# map: applies a function to each element\r\nm = map(lambda x: x**2 - 1, range(10))\r\nm\r\nlist(m)\r\n## filter function\r\n## filter an iterable object using a test\r\nn = filter(lambda x: x%2 == 0, range(10))\r\nn\r\nlist(n)\r\n\r\n# map and filter produce iterables (usable once)\r\n# map and filter are like list comprehensions\r\n# list comprehension is more pythonic\r\n\r\n\r\n###########################\r\n## Comprehension: lists, sets, and dictionaries (all iterable objects)\r\n\r\nnames = [\"ana\",\"EVE\",\"Alice\", \"bob\"]\r\n[n.lower() for n in names if n.lower().startswith('a')] \r\nnames.extend(names) # duplicates\r\n[n.lower() for n in names if n.lower().startswith('a')] \r\n{n.lower() for n in names if n.lower().startswith('a')}\r\n# curly brackets => unique values directly!\r\n\r\n# the same syntax also builds dictionaries (dict comprehension)\r\nages = [('Ana',20), ('Eve', 30), ('Bob', 40)]\r\nages = dict(ages)\r\n{p.lower():a for p, a in ages.items()}\r\n{p.lower():a for p, a in ages.items() if a < 40}\r\n\r\n##\r\n[n + p for n in [2, 4] for p in [10, 20, 30]]\r\n[n + p for n in [2, 4] for p in [10, 20, 30] if n*p >= 40]\r\n\r\n\r\n###########################\r\n## Generator expressions\r\n\r\n## Avoids generating temporary objects\r\n## Very memory-efficient\r\n## A list comprehension returns a list \r\n## A generator expression returns an iterator\r\n\r\nsqr = [x**2 for x in range(1000)]\r\nlen(sqr)\r\nsum(sqr)\r\nsqr = (x**2 for x in range(1000)) # parentheses instead of square brackets\r\nsqr\r\nsum(sqr)\r\nsum(sqr) ## iterator is over\r\n\r\n# can be chained!\r\ngen_sqr = (x**2 for x in range(1000))\r\npalindrome = (x for x in gen_sqr if str(x) == str(x)[::-1])\r\nlist(palindrome)\r\n\r\n# http://python-history.blogspot.com/2010/06/from-list-comprehensions-to-generator.html\r\n\r\n## generator function\r\n\r\ndef gen():\r\n yield 10\r\n\r\ngen()\r\n
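Calling a generator function does not run its body; it returns an iterator that executes up to each yield on demand:

g = gen()
next(g)        # 10 -- the body runs up to the yield
# next(g)      # a second call raises StopIteration

def countdown(n):
    while n > 0:
        yield n
        n -= 1

list(countdown(3))   # [3, 2, 1]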
\r\n\r\n###########################\r\n## Modules and namespaces\r\n\r\n# object.attribute\r\n# access to an attribute in the object's namespace\r\n# namespace: group of variables belonging to an object\r\n\r\nimport os\r\nprint(os) ## os is a variable that references the module object\r\nos.environ['PYTHONPATH']\r\nimport sys\r\nsys.path # paths to search for modules\r\nsys.modules\r\nsys.builtin_module_names\r\n\r\n## when a module is imported\r\n## precompilation: byte code in __pycache__\r\n## then the byte code is read to create the module object\r\n\r\n## to force the reloading of a module\r\nimport importlib; importlib.reload(mod) # 'mod' stands for a previously imported module object\r\n# %load_ext autoreload # for notebooks\r\n\r\n# to build a custom module, use setuptools\r\n# modules are mutable\r\n# modules have their own namespace\r\n# see also packages (collections of modules)\r\n\r\n\r\n###########################\r\n## OOP\r\n\r\n# a class lets you define a custom type\r\n# calling a class creates an instance\r\n# namespace instance first, then namespace class: inheritance tree\r\n# method = function defined in a class\r\n\r\nclass Phrase0:\r\n my_phrase = \"Python is fun.\"\r\n\r\nPhrase0\r\np = Phrase0()\r\nvars(Phrase0) # Phrase0.__dict__ \r\np.my_phrase # not defined in the instance, found in the class\r\nPhrase0.words = Phrase0.my_phrase.split()\r\np.words\r\n\r\n\r\nclass Phrase1:\r\n \"Docstring for the Phrase class\"\r\n def __init__(self, my_phrase):\r\n ## the __init__ method is the constructor\r\n self.my_phrase = my_phrase\r\n\r\n# self = reference to the instance\r\np=Phrase1(\"Python is fun.\")\r\nvars(p)\r\nhelp(Phrase1)\r\n\r\n\r\n## special methods __something__\r\n## let you define methods that behave like built-in types\r\n## len, print, in, +, ...\r\n## a lot of special methods available (around eighty?)\r\n## https://docs.python.org/3/reference/datamodel.html#specialnames\r\n\r\nclass Phrase2:\r\n def __init__(self, my_phrase):\r\n self.my_phrase = my_phrase\r\n self.words = my_phrase.split()\r\n \r\n def nb_letters(self):\r\n return len(self.my_phrase)\r\n \r\n def __len__(self):\r\n return len(self.words)\r\n \r\n def __contains__(self, word):\r\n return word in self.words\r\n \r\n def __str__(self):\r\n return \" \".join(self.words)\r\n\r\n\r\np = Phrase2(\"Python is fun.\")\r\np.words\r\np.nb_letters()\r\nlen(p)\r\n\"is\" in p\r\nprint(p)\r\n\r\n\r\n## inheritance\r\n## an instance inherits from the class\r\n## a class can inherit from other classes\r\n\r\ns = \"Python is fun.\"\r\n\r\nclass PhraseNoCase(Phrase2):\r\n def __init__(self, my_phrase):\r\n Phrase2.__init__(self, my_phrase)\r\n self.words_lower = {m.lower() for m in self.words}\r\n\r\npnoc = PhraseNoCase(s)\r\nisinstance(pnoc, Phrase2)\r\nisinstance(pnoc, PhraseNoCase)\r\npnoc.words_lower\r\n\r\n## multiple inheritance\r\n## MRO: method resolution order\r\n## attribute resolution\r\n## object = super class of all the classes\r\n\r\nclass CC:\r\n pass\r\n\r\nCC.__bases__\r\nCC.mro() ## path followed in attribute resolution\r\n## the MRO depends on the order in which the super classes are listed\r\n\r\n# see also class property\r\n# https://docs.python.org/3.6/library/functions.html#property\r\n\r\n\r\n## define a class that is an iterator\r\nclass Phrase3:\r\n def __init__(self, my_phrase):\r\n self.my_phrase = my_phrase\r\n self.words = my_phrase.split()\r\n \r\n def __iter__(self):\r\n return self\r\n \r\n def __next__(self):\r\n if not self.words:\r\n raise StopIteration\r\n return self.words.pop(0)\r\n\r\npp = Phrase3(\"Python is fun\") ## Phrase3 defined as an iterator\r\n[m for m in pp]\r\n# next(pp) => StopIteration\r\niter(pp)\r\n\r\n## define a class that is iterable\r\nclass Phrase4:\r\n def __init__(self, my_phrase):\r\n self.my_phrase = my_phrase\r\n self.words = my_phrase.split()\r\n \r\n def __iter__(self):\r\n for m in self.words:\r\n yield m\r\n\r\nppp = Phrase4(\"Python is fun\") ## Phrase4 defined as an iterable object\r\n[m for m in ppp]\r\niter(ppp)\r\n\r\n\r\n## use custom exceptions in classes\r\n\r\nclass Phrase5:\r\n def __init__(self, my_phrase):\r\n self.my_phrase = my_phrase\r\n if not my_phrase:\r\n raise EmptyPhraseError('The phrase is empty.')\r\n self.words = my_phrase.split()\r\n\r\nclass EmptyPhraseError(Exception):\r\n pass\r\n\r\nPhrase5('ff')\r\nPhrase5('')\r\n\r\ntry:\r\n Phrase5('')\r\nexcept EmptyPhraseError as e:\r\n print(e.args) # arguments as a tuple\r\n\r\n\r\n## context manager\r\n\r\n## with \r\n
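The heading above is left empty in the notes; a minimal sketch of what the with statement relies on, the context manager protocol (__enter__ runs on entry, __exit__ on exit, even after an exception):

import time

class Timer:
    def __enter__(self):
        self.t0 = time.time()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        print(f"elapsed: {time.time() - self.t0:.3f}s")
        return False     # False => do not swallow exceptions

with Timer():
    sum(x**2 for x in range(100000))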
\r\n\r\n\r\n##############################\r\n## variables and attributes\r\n\r\nx = 10 # variable => lexical scoping (?), static\r\nan_object.x = 10 # attribute => MRO, dynamic\r\n## two different mechanisms\r\n# modules, packages, functions, classes, instances can have attributes\r\n\r\n\r\n##############################\r\n## date and time\r\n\r\nfrom datetime import datetime\r\nnow = datetime.now()\r\nprint(now)\r\nprint('%s/%s/%s' % (now.month, now.day, now.year))\r\nprint('%s:%s:%s' % (now.hour, now.minute, now.second))\r\n\r\n\r\n##############################\r\n## miscellaneous\r\n\r\n# python -m http.server 8080\r\n","repo_name":"Atrebas/python-code","sub_path":"mooc-python.py","file_name":"mooc-python.py","file_ext":"py","file_size_in_byte":25363,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"}
{"seq_id":"28318156496","text":"# run 'mypy' on this source tree, e.g. from the root repo folder, run\n# ```\n# $ mypy\n# Success: no issues found in 4 source files\n# ```\n# see mypy.ini for the mypy configuration.\n#\n\nfrom __future__ import annotations\n\nfrom typing import (\n    Any as TAny,\n    Callable as TLam,\n    List as TList,\n    Dict as TDict,\n    Union as TUnion,\n    Type as TType,\n    TypeVar as TTypeVar,\n    Generic as TGeneric,\n    Tuple as TTuple,\n    cast as _cast,\n    Iterator as TIter,\n)\nimport sys\nfrom dataclasses import dataclass\nimport dataclasses as _dc\nimport typeguard\nimport functools\nfrom functools import reduce as fold\n\nfoldl: TLam[[TAny, TAny, TAny], TAny] = lambda func, acc, xs: functools.reduce(\n    func, xs, acc\n)\n\n_dc_attrs = {\"frozen\": True, \"repr\": False}\n\n\nclass ADT:\n    class _MatchFail(Exception):\n        pass\n\n    _T = TTypeVar(\"_T\")\n    _U = TTypeVar(\"_U\")\n\n    def __iter__(self) -> TIter[TAny]:\n        yield from [getattr(self, field.name) for field in _dc.fields(self)]\n\n    def __repr__(self) -> str:\n        string = f\"{self.__class__.__name__}(\"\n        keys = [field.name for field in _dc.fields(self)]\n        for i, k in enumerate(keys):\n            value = getattr(self, k)\n            if isinstance(value, str):\n                string += f\"'{value}'\"\n            else:\n                string += f\"{value}\"\n            if i < len(keys) - 1:\n                string += \",\"\n        string += \")\"\n        return string\n\n    def __enter__(self: _T) -> _T:\n        return self\n\n    def __exit__(self, type, value, traceback):  # type: ignore\n        pass\n\n    def __rshift__(self: _T, cls: TType[_U]) -> _U:\n        if not isinstance(self, cls):\n            raise ADT._MatchFail\n        return self\n\n\nimport contextlib as ctxlib\n\n_pm = ctxlib.suppress(ADT._MatchFail)\n\n\ncheck_types = True\ncheck_types = False\ncheck_argument_types: TLam[[], bool]\nif check_types:\n    check_argument_types = typeguard.check_argument_types\nelse:\n    check_argument_types = lambda: True\n\n# --WAR-beg-- mypy issue: https://github.com/python/mypy/issues/5485\n_BoxT = TTypeVar(\"_BoxT\")\n\n\n@dataclass\nclass Box(TGeneric[_BoxT]):\n    inner: _BoxT  # pytype: disable=not-supported-yet\n\n    @property\n    def __call__(self) -> _BoxT:\n        return self.inner\n\n\n# --WAR-end--\n\n\nabstract = dataclass(frozen=True)\n\n\n# mypy WAR for current lack of python forward declaration, use strings :)\nTermI = TUnion[\n    \"Ann\",\n    \"Star\",\n    \"Pi\",\n    \"Bound\",\n    \"Free\",\n    \"App\",\n    \"Nat\",\n    \"NatElim\",\n    \"Zero\",\n    \"Succ\",\n    \"Vec\",\n    \"Nil\",\n    \"Cons\",\n    \"VecElim\",\n]\n\n\n@dataclass(**_dc_attrs)\nclass Ann(ADT):\n    e1: TermC\n    e2: TermC\n\n    def __repr__(self) -> str:\n        return f\"(Ann {self.e1}:{self.e2})\"\n\n\n@dataclass(**_dc_attrs)\nclass Star(ADT):\n    def __repr__(self) -> str:\n        return f\"*\"\n\n\n@dataclass(**_dc_attrs)\nclass Pi(ADT):\n    e1: TermC\n    e2: TermC\n\n\n@dataclass(**_dc_attrs)\nclass Bound(ADT):\n    i: int\n\n    def __repr__(self) -> str:\n        return f\"(Bound {self.i})\"\n\n\n@dataclass(**_dc_attrs)\nclass Free(ADT):\n    x: Name\n\n    def __repr__(self) -> str:\n        return f\"(Free {self.x})\"\n\n\n@dataclass(**_dc_attrs)\nclass App(ADT):\n    e1: TermI\n    e2: TermC\n\n\n@dataclass(**_dc_attrs)\nclass Nat(ADT):\n    def __repr__(self) -> str:\n        return \"Nat\"\n\n\n@dataclass(**_dc_attrs)\nclass NatElim(ADT):\n    e1: TermC\n    e2: TermC\n    e3: TermC\n    e4: TermC\n\n\n@dataclass(**_dc_attrs)\nclass Zero(ADT):\n    def __repr__(self) -> str:\n        return \"Zero\"\n\n\n@dataclass(**_dc_attrs)\nclass Succ(ADT):\n    k: TermC\n\n    def __repr__(self) -> str:\n        return f\"(Succ {self.k})\"\n\n\n@dataclass(**_dc_attrs)\nclass Vec(ADT):\n    a: TermC\n    n: TermC\n\n    def __repr__(self) -> str:\n        return f\"(Vec {self.a} 
{self.n})\"\n\n\n@dataclass(**_dc_attrs)\nclass Nil(ADT):\n a: TermC\n\n def __repr__(self) -> str:\n return f\"(Nil {self.a})\"\n\n\n@dataclass(**_dc_attrs)\nclass Cons(ADT):\n a: TermC\n n: TermC\n x: TermC\n xs: TermC\n\n def __repr__(self) -> str:\n return f\"(Cons {self.a} {self.n} {self.x} {self.xs})\"\n\n\n@dataclass(**_dc_attrs)\nclass VecElim(ADT):\n a: TermC\n m: TermC\n mn: TermC\n mc: TermC\n n: TermC\n xs: TermC\n\n\nTermC = TUnion[\"Inf\", \"Lam\"]\n\n\n@dataclass(**_dc_attrs)\nclass Inf(ADT):\n e: TermI\n\n def __repr__(self) -> str:\n return f\"Inf({self.e})\"\n\n\n@dataclass(**_dc_attrs)\nclass Lam(ADT):\n e: TermC\n\n\nName = TUnion[\"Global\", \"Local\", \"Quote\"]\n\n\n@dataclass(**_dc_attrs)\nclass Global(ADT):\n str_: str\n\n def __repr__(self) -> str:\n return f\"Global('{self.str_}')\"\n\n@dataclass(**_dc_attrs)\nclass Local(ADT):\n i: int\n\n\n@dataclass(**_dc_attrs)\nclass Quote(ADT):\n i: int\n\n\n_VFunT0 = TLam[[\"Value\"], \"Value\"]\n_VFunT = TUnion[Box[_VFunT0], _VFunT0]\n\n\n@dataclass(**_dc_attrs)\nclass VLam(ADT):\n f: _VFunT\n\n def __repr__(self) -> str:\n return f\"{quote0(self)}\"\n\n\n@dataclass(**_dc_attrs)\nclass VNeutral(ADT):\n n: Neutral\n\n\n@dataclass(**_dc_attrs)\nclass VStar(ADT):\n def __repr__(self) -> str:\n return f\"*\"\n\n\n@dataclass(**_dc_attrs)\nclass VPi(ADT):\n v: Value\n f: _VFunT\n\n def __repr__(self) -> str:\n return f\"{quote0(self)}\"\n\n\n@dataclass(**_dc_attrs)\nclass VNat(ADT):\n pass\n\n\n@dataclass(**_dc_attrs)\nclass VZero(ADT):\n pass\n\n\n@dataclass(**_dc_attrs)\nclass VSucc(ADT):\n k: Value\n\n def __repr__(self) -> str:\n return f\"{quote0(self)}\"\n\n\n@dataclass(**_dc_attrs)\nclass VNil(ADT):\n a: Value\n\n def __repr__(self) -> str:\n return f\"{quote0(self)}\"\n\n\n@dataclass(**_dc_attrs)\nclass VCons(ADT):\n a: Value\n n: Value\n x: Value\n xs: Value\n\n def __repr__(self) -> str:\n return f\"{quote0(self)}\"\n\n\n@dataclass(**_dc_attrs)\nclass VVec(ADT):\n a: Value\n n: Value\n\n def __repr__(self) -> str:\n return f\"{quote0(self)}\"\n\n\nValue = TUnion[VLam, VNeutral, VStar, VPi, VNat, VZero, VSucc, VNil, VCons, VVec]\n\n\nNeutral = TUnion[\"NFree\", \"NApp\", \"NNatElim\", \"NVecElim\"]\n\n\n@dataclass(**_dc_attrs)\nclass NFree(ADT):\n x: Name\n\n\n@dataclass(**_dc_attrs)\nclass NApp(ADT):\n n: Neutral\n v: Value\n\n\n@dataclass(**_dc_attrs)\nclass NNatElim(ADT):\n a: Value\n n: Value\n x: Value\n xs: Neutral\n\n\n@dataclass(**_dc_attrs)\nclass NVecElim(ADT):\n v1: Value\n v2: Value\n v3: Value\n v4: Value\n v5: Value\n n: Neutral\n\n\nType = Value\nvfree: TLam[[Name], Value] = lambda n: VNeutral(NFree(n))\nEnv = TList[Value]\nContext = TDict[Name, Type]\n\n\ndef evalI(term: TermI, env: Env) -> Value:\n with _pm, term >> Ann as (e, _):\n return evalC(e, env)\n with _pm, term >> Free as (x,):\n return vfree(x)\n with _pm, term >> Bound as (i,):\n return env[i]\n with _pm, term >> App as (e, e1):\n return vapp(evalI(e, env), evalC(e1, env))\n with _pm, term >> Star:\n return VStar()\n with _pm, term >> Pi as (t, t1):\n return VPi(evalC(t, env), lambda x: evalC(t1, [x] + env))\n with _pm, term >> Nat:\n return VNat()\n with _pm, term >> Zero:\n return VZero()\n with _pm, term >> Succ as (k,):\n return VSucc(evalC(k, env))\n with _pm, term >> NatElim as (m, mz, ms, k):\n mzVal = evalC(mz, env)\n msVal = evalC(ms, env)\n\n def rec1(kVal: Value) -> Value:\n with _pm, kVal >> VZero:\n return mzVal\n with _pm, kVal >> VSucc as (k,):\n return vapp(vapp(msVal, k), rec1(k))\n with _pm, kVal >> VNeutral as (n,):\n 
return VNeutral(NNatElim(evalC(m, env), mzVal, msVal, n))\n raise TypeError(f\"Unknown instance '{type(kVal)}'\")\n\n return rec1(evalC(k, env))\n with _pm, term >> Vec as (a, n):\n return VVec(evalC(a, env), evalC(n, env))\n with _pm, term >> Nil as (a,):\n return VNil(evalC(a, env))\n with _pm, term >> Cons as (a, n, x, xs):\n return VCons(evalC(a, env), evalC(n, env), evalC(x, env), evalC(xs, env))\n with _pm, term >> VecElim as (a, m, mn, mc, n, xs):\n mnVal = evalC(mn, env)\n mcVal = evalC(mc, env)\n\n def rec2(nVal: Value, xsVal: Value) -> Value:\n with _pm, xsVal >> VNil:\n return mnVal\n with _pm, xsVal >> VCons as (_, l, x, xs):\n return fold(vapp, [l, x, xs, rec2(l, xs)], mcVal)\n with _pm, xsVal >> VNeutral as (n,):\n return VNeutral(\n NVecElim(evalC(a, env), evalC(m, env), mnVal, mcVal, nVal, n)\n )\n raise TypeError(f\"Unknown instance '{type(xsVal)}'\")\n\n return rec2(evalC(n, env), evalC(xs, env))\n raise TypeError(f\"Unknown instance '{type(term)}'\")\n\n\ndef vapp(value: Value, v: Value) -> Value:\n with _pm, value >> VLam as (f,):\n return f(v)\n with _pm, value >> VNeutral as (n,):\n return VNeutral(NApp(n, v))\n raise TypeError(f\"Unknown instance '{type(v)}'\")\n\n\ndef evalC(term: TermC, env: Env) -> Value:\n with _pm, term >> Inf as (e,):\n return evalI(e, env)\n with _pm, term >> Lam as (lam_expr,):\n return VLam(lambda x: evalC(lam_expr, [x] + env))\n raise TypeError(f\"Unknown instance '{type(term)}'\")\n\n\ndef typeI0(c: Context, term: TermI) -> Type:\n check_argument_types()\n return typeI(0, c, term)\n\n\ndef typeI(i: int, c: Context, term: TermI) -> Type:\n # with _pm, Ann|term as p:\n # reveal_type(p)\n with _pm, term >> Ann as (e1, e2):\n typeC(i, c, e2, VStar())\n t = evalC(e2, [])\n typeC(i, c, e1, t)\n return t\n with _pm, term >> Free as (x,):\n return c[x]\n with _pm, term >> App as (e1, e2):\n s = typeI(i, c, e1)\n with _pm, s >> VPi as (v, f):\n typeC(i, c, e2, v)\n return f(evalC(e2, []))\n raise TypeError(f\"Illegal application: {e1}({e2})\")\n with _pm, term >> Star:\n return VStar()\n with _pm, term >> Pi as (p, p1):\n typeC(i, c, p, VStar())\n t = evalC(p, [])\n typeC(i + 1, {Local(i): t, **c}, substC(0, Free(Local(i)), p1), VStar())\n return VStar()\n with _pm, term >> Nat:\n return VStar()\n with _pm, term >> Zero:\n return VNat()\n with _pm, term >> Succ:\n return VNat()\n with _pm, term >> NatElim as (m, mz, ms, k):\n typeC(i, c, m, VPi(VNat(), lambda _: VStar()))\n mVal = evalC(m, [])\n typeC(i, c, mz, vapp(mVal, VZero()))\n typeC(\n i,\n c,\n ms,\n VPi(VNat(), lambda l: VPi(vapp(mVal, l), lambda _: vapp(mVal, VSucc(l)))),\n )\n typeC(i, c, k, VNat())\n kVal = evalC(k, [])\n return vapp(mVal, kVal)\n with _pm, term >> Vec as (a, n):\n typeC(i, c, a, VStar())\n typeC(i, c, n, VNat())\n return VStar()\n with _pm, term >> Nil as (a,):\n typeC(i, c, a, VStar())\n aVal = evalC(a, [])\n return VVec(aVal, VZero())\n with _pm, term >> Cons as (a, k, x, xs):\n typeC(i, c, a, VStar())\n aVal = evalC(a, [])\n typeC(i, c, k, VNat())\n kVal = evalC(k, [])\n typeC(i, c, x, aVal)\n typeC(i, c, xs, VVec(aVal, kVal))\n return VVec(aVal, VSucc(kVal))\n with _pm, term >> VecElim as (a, m, mn, mc, k, vs):\n typeC(i, c, a, VStar())\n aVal = evalC(a, [])\n typeC(i, c, m, VPi(VNat(), lambda k: VPi(VVec(aVal, k), lambda _: VStar())))\n mVal = evalC(m, [])\n typeC(i, c, mn, foldl(vapp, mVal, [VZero(), VNil(aVal)]))\n typeC(\n i,\n c,\n mc,\n VPi(\n VNat(),\n lambda l: VPi(\n aVal,\n lambda y: VPi(\n VVec(aVal, l),\n lambda ys: VPi(\n foldl(vapp, mVal, [l, ys]),\n 
lambda _: foldl(\n vapp, mVal, [VSucc(l), VCons(aVal, l, y, ys)]\n ),\n ),\n ),\n ),\n ),\n )\n typeC(i, c, k, VNat())\n kVal = evalC(k, [])\n typeC(i, c, vs, VVec(aVal, kVal))\n vsVal = evalC(vs, [])\n return fold(vapp, [kVal, vsVal], mVal)\n raise TypeError(f\"Unknown instance '{type(term)}'\")\n\n\ndef typeC(i: int, c: Context, term: TermC, type_: Type) -> None:\n with _pm, term >> Inf as (e,):\n v = type_\n vp = typeI(i, c, e)\n if quote0(v) != quote0(vp):\n raise TypeError(f\"type mismatch: {quote0(v)} != {quote0(vp)}\")\n return\n with _pm, term >> Lam as (e,), type_ >> VPi as (t, tp):\n typeC(\n i + 1,\n {Local(i): t, **c},\n substC(0, Free(Local(i)), e),\n tp(vfree(Local(i))),\n )\n return\n raise TypeError(f\"Type mismatch: term={type(term)}', type={type(type_)}\")\n\n\ndef substI(i: int, r: TermI, t: TermI) -> TermI:\n with _pm, t >> Ann as (e1, e2):\n e1, e2 = t\n return Ann(substC(i, r, e1), e2)\n with _pm, t >> Bound as (j,):\n return r if i == j else Bound(j)\n with _pm, t >> Free:\n return t\n with _pm, t >> App as (e1, e2):\n return App(substI(i, r, e1), substC(i, r, e2))\n with _pm, t >> Star:\n return Star()\n with _pm, t >> Pi as (f, v):\n return Pi(substC(i, r, f), substC(i + 1, r, v))\n with _pm, t >> Nat:\n return Nat()\n with _pm, t >> Zero:\n return Zero()\n with _pm, t >> Succ as (k,):\n return Succ(substC(i, r, k))\n with _pm, t >> NatElim as (m, mz, ms, k):\n return NatElim(\n substC(i, r, m), substC(i, r, mz), substC(i, r, ms), substC(i, r, k)\n )\n with _pm, t >> Vec as (a, n):\n return Vec(substC(i, r, a), substC(i, r, n))\n with _pm, t >> Nil as (a,):\n return Nil(substC(i, r, a))\n with _pm, t >> Cons as (a, n, x, xs):\n return Cons(substC(i, r, a), substC(i, r, n), substC(i, r, x), substC(i, r, xs))\n with _pm, t >> VecElim as (a, m, mn, mc, n, xs):\n return VecElim(\n substC(i, r, a),\n substC(i, r, m),\n substC(i, r, mn),\n substC(i, r, mc),\n substC(i, r, n),\n substC(i, r, xs),\n )\n raise TypeError(f\"Unknown instance '{type(t)}'\")\n\n\ndef substC(i: int, r: TermI, t: TermC) -> TermC:\n with _pm, t >> Inf as (e,):\n return Inf(substI(i, r, e))\n with _pm, t >> Lam as (e,):\n return Lam(substC(i + 1, r, e))\n raise TypeError(f\"Unknown instance '{type(t)}'\")\n\n\ndef quote0(v: Value) -> TermC:\n check_argument_types()\n return quote(0, v)\n\n\ndef quote(i: int, value: Value) -> TermC:\n with _pm, value >> VLam as (f,):\n return Lam(quote(i + 1, f(vfree(Quote(i)))))\n with _pm, value >> VNeutral as (n,):\n return Inf(neutralQuote(i, n))\n with _pm, value >> VStar:\n return Inf(Star())\n with _pm, value >> VPi as (v, f):\n return Inf(Pi(quote(i, v), quote(i + 1, f(vfree(Quote(i))))))\n with _pm, value >> VNat:\n return Inf(Nat())\n with _pm, value >> VZero:\n return Inf(Zero())\n with _pm, value >> VSucc as (k,):\n return Inf(Succ(quote(i, k)))\n with _pm, value >> VNil as (a,):\n return Inf(Nil(quote(i, a)))\n with _pm, value >> VVec as (a, n):\n return Inf(Vec(quote(i, a), quote(i, n)))\n with _pm, value >> VCons as (a, n, x, xs):\n return Inf(Cons(quote(i, a), quote(i, n), quote(i, x), quote(i, xs)))\n raise TypeError(f\"Unknown instance '{type(value)}'\")\n\n\ndef neutralQuote(i: int, neutral: Neutral) -> TermI:\n with _pm, neutral >> NFree as (x,):\n return boundfree(i, x)\n with _pm, neutral >> NApp as (n, v):\n return App(neutralQuote(i, n), quote(i, v))\n with _pm, neutral >> NNatElim as (a, n, x, xs):\n return NatElim(quote(i, a), quote(i, n), quote(i, x), Inf(neutralQuote(i, xs)))\n with _pm, neutral >> NVecElim as (a, m, mn, mc, n, xs):\n 
return VecElim(\n quote(i, a),\n quote(i, m),\n quote(i, mn),\n quote(i, mc),\n quote(i, n),\n Inf(neutralQuote(i, xs)),\n )\n raise TypeError(f\"Unknown instance '{type(neutral)}'\")\n\n\ndef boundfree(i: int, x: Name) -> TermI:\n check_argument_types()\n with _pm, x >> Quote as (k,):\n return Bound(i - k - 1)\n return Free(x)\n\n\n###############################################################################\n# Examples\n###############################################################################\n\ndef vapply(f: Value, args: TList[Value]) -> Value:\n for arg in args:\n f = vapp(f, arg)\n return f\n\n\ne0 = quote0(VLam(lambda x: VLam(lambda y: x)))\nprint(\"e0=\", e0)\n\nid_ = Lam(Inf(Bound(0)))\nconst_ = Lam(Lam(Inf(Bound(1))))\nfree: TLam[[str], TermC] = lambda x: Inf(Free(Global(x)))\npi: TLam[[TermC, TermC], TermC] = lambda x, y: Inf(Pi(x, y))\nterm1 = App(Ann(id_, (pi(free(\"a\"), free(\"a\")))), free(\"y\"))\nterm2 = App(\n App(\n Ann(\n const_,\n pi(pi(free(\"b\"), free(\"b\")), pi(free(\"a\"), pi(free(\"b\"), free(\"b\")))),\n ),\n id_,\n ),\n free(\"y\"),\n)\nenv1: Context = {Global(\"y\"): VNeutral(NFree(Global(\"a\"))), Global(\"a\"): VStar()}\nenv2 = env1.copy()\nenv2.update({Global(\"b\"): VStar()})\n\nprint(\"eval(term1)=\", evalI(term1, []))\nprint(\"qeval(term1)=\", quote0(evalI(term1, [])))\nprint(\"qqeval(term2)=\", quote0(evalI(term2, [])))\nprint(\"type(term1)=\", typeI0(env1, term1))\nprint(\"type(term2)=\", typeI0(env2, term2))\n\n# example for the following concrete syntax\n# > let id = (\\a x -> x) :: Pi (a :: *).a -> a\n# id :: Pi (x::*) (y::x).x\ne35 = Ann(\n Lam(Lam(Inf(Bound(0)))),\n pi(\n (Inf(Star())),\n pi(\n Inf(Bound(0)),\n Inf(Bound(1)),\n ),\n ),\n)\nprint(f\"e35= {e35}\")\n\nenv35: Context\nenv35 = {Global(\"Bool\"): VStar(), Global(\"False\"): VNeutral(NFree(Global(\"Bool\")))}\nprint(f\"type(e35)= {typeI0(env35, e35)}\")\n\napply35a = App(e35, free(\"Bool\"))\nprint(f\"apply35a= {apply35a}\")\nprint(f\"type(apply35a)= {typeI0(env35, apply35a)}\")\n\napply35b = App(apply35a, free(\"False\"))\nprint(f\"apply35b= {apply35b}\")\nprint(f\"type(apply35b)= {typeI0(env35, apply35b)}\")\n\n## > let plus = natElim (\\_ -> Nat -> Nat)\n## (\\n -> n)\n## (\\k rec n -> Succ (rec n))\n## plus :: Pi (x :: Nat) (y :: Nat) . 
Nat\n\nplusl: TLam[[TermC], TermI] = lambda x: NatElim(\n Lam(pi(Inf(Nat()), Inf(Nat()))),\n Lam(Inf(Bound(0))),\n Lam(Lam(Lam(Inf(Succ(Inf(App(Bound(1), Inf(Bound(0))))))))),\n x,\n)\n\nnatElimL = Lam(\n Lam(\n Lam(\n Lam(\n Inf(NatElim(Inf(Bound(3)), Inf(Bound(2)), Inf(Bound(1)), Inf(Bound(0))))\n )\n )\n )\n)\nnatElimTy = VPi(\n VPi(VNat(), lambda _: VStar()),\n lambda m: VPi(\n vapp(m, VZero()),\n lambda _: VPi(\n VPi(VNat(), lambda k: VPi(vapp(m, k), lambda _: vapp(m, VSucc(k)))),\n lambda _: VPi(VNat(), lambda n: vapp(m, n)),\n ),\n ),\n)\n\nnatElim = Ann(natElimL, quote0(natElimTy))\nprint(\"natElim=\", natElim)\nprint(\"type(natElim)=\", typeI0({}, natElim))\nPlus = App(\n App(App(natElim, Lam(pi(Inf(Nat()), Inf(Nat())))), Lam(Inf(Bound(0)))),\n Lam(Lam(Lam(Inf(Succ(Inf(App(Bound(1), Inf(Bound(0))))))))),\n)\nVnatElim = evalI(natElim, [])\nvplus = vapply(VnatElim, [\\\n VLam(lambda _: VPi(VNat(), lambda _ : VNat())),\n VLam(lambda n : n),\n VLam(lambda p: VLam(lambda rec: VLam(lambda n : VSucc(vapp(rec, n)))))])\nprint(\"vplus=\", vplus)\n#Plus2 : TermI\n#plus2env : Context\n#plus2env = {Global(\"VnatElim\"): natElimTy}\n#Plus2, _ = (quote0(vplus) >> Inf).e >> App\n#print(\"plus2env=\", plus2env)\n#print(\"Plus2=\",Plus2)\n#print(\"type(Plus2)=\", typeI0(plus2env, Plus2))\n\nPlus1 = Ann(\n Lam(\n Inf(\n NatElim(\n Lam(pi(Inf(Nat()), Inf(Nat()))),\n Lam(Inf(Bound(0))),\n Lam(Lam(Lam(Inf(Succ(Inf(App(Bound(1), Inf(Bound(0))))))))),\n Inf(Bound(0)),\n )\n )\n ),\n pi(Inf(Nat()), pi(Inf(Nat()), Inf(Nat()))),\n)\nprint(\"type(Plus)=\", typeI0({}, Plus))\n\n\ndef int2nat(n: int) -> TermC:\n if n == 0:\n return Inf(Zero())\n else:\n return Inf(Succ(int2nat(n - 1)))\n\n\ndef nval2int(v: Value) -> int:\n with _pm, v >> VZero:\n return 0\n with _pm, v >> VSucc as (k,):\n return 1 + nval2int(k)\n raise TypeError(f\"Unknown instance '{type(v)}'\")\n\n\n\n#e1 = evalI(App(App(Plus2, int2nat(2)), int2nat(2)), [])\n#print(\"e1= \", e1)\n#print(\"2+2 ->\", nval2int(evalI(App(App(Plus, int2nat(2)), int2nat(2)), [])))\n# sys.exit(0)\n\n## > plus 40 2\n## 42 :: Nat\nn40 = int2nat(40)\nprint(\"n40=\", n40)\nn2 = int2nat(2)\nprint(\"n2=\", n2)\nprint(\"type(n40)=\", typeI0({}, _cast(Inf, n40).e))\nprint(\"type(plusl(n40))=\", typeI0({}, plusl(n40)))\nn42term = App(plusl(n40), n2)\nprint(\"type(n42term)=\", typeI0({}, n42term))\nn42v = evalI(n42term, [])\nprint(\"n42v=\", n42v)\nn42 = nval2int(n42v)\nprint(\"n42=\", n42)\n## > n42\n## 42\n\nfrom functools import partial\n\n\"\"\"\nclass Infix1(object):\n T = TTypeVar(\"T\")\n U = TTypeVar(\"U\")\n R = TTypeVar(\"R\")\n def __init__(self, func : TUnion[TLam[[U],R], TLam[[T,U],R]]):\n self.func = func\n def __or__(self, other : U) -> R:\n return _cast(TLam[[Infix.U],Infix.R],self.func)(other)\n def __ror__(self, other : T) -> Infix:\n return Infix(partial(self.func, other)) #type: ignore\n\n@Infix\ndef app1(x : TermI, y : TermC) -> TermI:\n return App(x,y)\n\"\"\"\n\n\nclass Infix(object):\n def __init__(self, func: TUnion[TLam[[TAny], TAny], TLam[[TAny, TAny], TAny]]):\n self.func = func\n\n def __or__(self, other: TAny) -> TAny:\n return _cast(TLam[[TAny], TAny], self.func)(other)\n\n def __ror__(self, other: TAny) -> Infix:\n return Infix(partial(self.func, other))\n\n\n@Infix\ndef app(x: TermI, y: TermC) -> TermI:\n return App(x, y)\n\n\nn1 = int2nat(1)\nn2a = plusl(n1) | app | n1\n\nprint(\"n2a=\", n2a)\nprint(\"type(n2a)=\", typeI0({}, n2a))\nn2e = evalI(n2a, [])\nprint(\"n2e=\", n2e)\nprint(\"n2e=\", nval2int(n2e))\nn4 = App(plusl(Inf(n2a)), 
Inf(n2a))\nprint(\"n4=\", n4, type(n4).__class__)\nprint(\"type(n4)=\", typeI0({}, n4))\nprint(\"eval(n4)=\", nval2int(evalI(n4, [])))\n\n## example from 4.2\n## ##################\n## > let append =\n## > (\\a -> vecElim a\n## (\\m _ -> Pi (n :: Nat) . Vec a n -> Vec a (plus m n))\n## (\\_ v -> v)\n## (\\m v vs rec n w -> Cons a (plus m n) v (rec n w)))\n## :: Pi (a :: *) (m :: Nat) (v :: Vec a m) (n :: Nat) (w :: Vec a n) .\n## Vec a (plus m n)\n\n## > assume (a :: *) (x :: a) (y :: a)\n## > append a 2 (Cons a 1 x (Cons a 0 x (Nil a)))\n## 1 (Cons a 0 y (Nil a))\n## Cons a 2 x (Cons a 1 x (Cons a 0 y (Nil a))) :: Vec a 3\n\n\ndef plus(x: TermC, y: TermC) -> TermC:\n return Inf(App(App(Plus, x), y))\n\n\ndef bound(i: int) -> TermC:\n return Inf(Bound(i))\n\n\ndef vec(a: TermC, n: TermC) -> TermC:\n return Inf(Vec(a, n))\n\n\nvecElimL = Lam(\n Lam(\n Lam(\n Lam(\n Lam(\n Lam(\n Inf(\n VecElim(\n Inf(Bound(5)),\n Inf(Bound(4)),\n Inf(Bound(3)),\n Inf(Bound(2)),\n Inf(Bound(1)),\n Inf(Bound(0)),\n )\n )\n )\n )\n )\n )\n )\n)\n\n\n\nvecElimTy = VPi(\n VStar(),\n lambda a: VPi(\n VPi(VNat(), lambda n: VPi(VVec(a, n), lambda _: VStar())),\n lambda m: VPi(\n vapp(vapp(m, VZero()), VNil(a)),\n lambda _: VPi(\n VPi(\n VNat(),\n lambda n: VPi(\n a,\n lambda x: VPi(\n VVec(a, n),\n lambda xs: VPi(\n vapp(vapp(m, n), xs),\n lambda _: vapp(vapp(m, VSucc(n)), VCons(a, n, x, xs)),\n ),\n ),\n ),\n ),\n lambda _: VPi(\n VNat(), lambda n: VPi(VVec(a, n), lambda xs: vapp(vapp(m, n), xs))\n ),\n ),\n ),\n ),\n)\nvecElim = Ann(vecElimL, quote0(vecElimTy))\nprint(\"vecElim=\", vecElim)\nprint(\"type(vecElim)\", typeI0({}, vecElim))\nAppendE = Lam(\n Inf(\n App(\n App(\n App(\n App(vecElim, bound(0)),\n Lam(\n Lam(\n pi(\n Inf(Nat()),\n pi(\n vec(bound(3), bound(0)),\n vec(bound(4), plus(bound(3), bound(1))),\n ),\n )\n )\n ),\n ),\n Lam(Lam(bound(0))),\n ),\n Lam(\n Lam(\n Lam(\n Lam(\n Lam(\n Lam(\n Inf(\n Cons(\n bound(6),\n plus(bound(5), bound(1)),\n bound(4),\n Inf(\n App(\n App(Bound(2), bound(1)),\n bound(0),\n )\n ),\n )\n )\n )\n )\n )\n )\n )\n ),\n )\n )\n)\n\n\nvplus = evalI(Plus, [])\nvvecelim = evalI(vecElim, [])\nAppendTy = VPi(\n VStar(),\n lambda a: VPi(\n VNat(),\n lambda m: VPi(\n VVec(a, m),\n lambda v: VPi(\n VNat(),\n lambda n: VPi(VVec(a, n), lambda w: VVec(a, vapp(vapp(vplus, m), n))),\n ),\n ),\n ),\n)\nprint(\"AppenTy=\", quote0(AppendTy))\nVVecElim = evalI(vecElim, [])\nappend = Ann(AppendE, quote0(AppendTy))\nprint(\"type(append)= \", typeI0({}, append))\nVAppend = VLam(\n lambda a: vapply(\n VVecElim,\n [\n a,\n VLam(\n lambda m: VLam(\n lambda _: VPi(\n VNat(),\n lambda n: VPi(\n VVec(a, n), lambda _: VVec(a, vapply(vplus, [m, n]))\n ),\n )\n )\n ),\n VLam(lambda _: VLam(lambda v: v)),\n VLam(\n lambda m: VLam(\n lambda v: VLam(\n lambda vs: VLam(\n lambda rec: VLam(\n lambda n: VLam(\n lambda w: VCons(\n a, vapply(vplus, [m, n]), v, vapply(rec, [n, w])\n )\n )\n )\n )\n )\n )\n ),\n ],\n )\n)\nAppendC = quote0(VAppend)\nprint(\"AppendC=\", AppendC)\n# sys.exit(0)\n# print(\"append=\", append)\nAppend = Ann(AppendC, quote0(AppendTy))\n_Append = Ann(\n Lam(\n Lam(\n Lam(\n Inf(\n VecElim(\n bound(2),\n Lam(\n Lam(\n pi(\n Inf(Nat()),\n pi(\n vec(bound(5), bound(0)),\n vec(bound(6), plus(bound(3), bound(1))),\n ),\n )\n )\n ),\n Lam(Lam(bound(0))),\n Lam(\n Lam(\n Lam(\n Lam(\n Lam(\n Lam(\n Inf(\n Cons(\n bound(8),\n plus(bound(5), bound(1)),\n bound(4),\n Inf(\n App(\n App(Bound(2), bound(1)),\n bound(0),\n )\n ),\n )\n )\n )\n )\n )\n )\n )\n ),\n bound(1),\n bound(0),\n )\n 
)\n        )\n    )\n    ),\n    pi(\n        Inf(Star()),\n        pi(\n            Inf(Nat()),\n            pi(\n                Inf(Vec(bound(1), bound(0))),\n                pi(\n                    Inf(Nat()),\n                    pi(\n                        Inf(Vec(bound(3), bound(0))),\n                        Inf(Vec(bound(4), plus(bound(3), bound(1)))),\n                    ),\n                ),\n            ),\n        ),\n    ),\n)\n\nprint(\"Append=\", Append)\nprint(\"type(Append)=\", typeI0({}, Append))\n\n\nenv42: Context\nenv42 = {\n    Global(\"a\"): VStar(),\n    Global(\"x\"): VNeutral(NFree(Global(\"a\"))),\n    Global(\"y\"): VNeutral(NFree(Global(\"a\"))),\n}\ne42_v2 = Inf(\n    Cons(\n        free(\"a\"),\n        int2nat(1),\n        free(\"x\"),\n        Inf(Cons(free(\"a\"), int2nat(0), free(\"x\"), Inf(Nil(free(\"a\"))))),\n    )\n)\ne42_v1 = Inf(Cons(free(\"a\"), int2nat(0), free(\"y\"), Inf(Nil(free(\"a\")))))\ne42_v3 = App(\n    App(App(App(App(Append, free(\"a\")), int2nat(2)), e42_v2), int2nat(1)), e42_v1\n)\n\nprint(\"e42_v3=\", e42_v3)\nprint(\"type(e42_v3)=\", typeI0(env42, e42_v3))\nprint(\"eval(e42_v3)=\", evalI(e42_v3, []))\n\nimport time\n\nt1 = time.time()\nfor i in range(1000):\n    evalI(e42_v3, [])\nt2 = time.time()\nprint(t2 - t1)\n","repo_name":"3gx/lambdapi_tutorial_paper","sub_path":"dtlc_Vec.py","file_name":"dtlc_Vec.py","file_ext":"py","file_size_in_byte":30921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"27130588741","text":"import numpy as np\r\nimport pandas as pd\r\nimport time\r\nfrom sklearn import linear_model\r\n#RW gets 1.01, 1.01\r\ndef mean_user(x):\r\n    user_count=np.bincount(x[0])\r\n    zeros_train = np.array(np.where(user_count[1:len(user_count)] == 0))\r\n    non_zero_train = np.array([np.where(user_count[1:len(user_count)] != 0)])\r\n    times_user_train_correct = np.delete(user_count[1:len(user_count)], zeros_train)\r\n    mean_user = np.array(x.groupby([0])[2].mean())\r\n    full = np.repeat(mean_user,times_user_train_correct)\r\n    return np.array(full)\r\n\r\ndef mean_movie(x):\r\n    movie_count = np.bincount(x[1])\r\n    zeros_train = np.array(np.where(movie_count[1:len(movie_count)] == 0))\r\n    non_zero_train = np.array([np.where(movie_count[1:len(movie_count)] != 0)])\r\n    times_movie_train_correct = np.delete(movie_count[1:len(movie_count)], zeros_train)\r\n    mean_movie = np.array(x.groupby([1])[2].mean())\r\n    full = np.repeat(mean_movie, times_movie_train_correct)\r\n    return np.array(full)\r\n\r\ndef combo(fn):\r\n    df = pd.DataFrame(fn)\r\n    ratings_user=pd.DataFrame(fn)\r\n    ratings_user=ratings_user.append(ratings_user)\r\n    user_average = df.groupby(by=0, as_index=False)[2].mean()\r\n    user_average = user_average.append(user_average)\r\n    ratings_movie=pd.DataFrame(fn)\r\n    ratings_movie=ratings_movie.append(ratings_movie)\r\n    movie_average = df.groupby(by=1, as_index=False)[2].mean()\r\n    movie_average = movie_average.append(movie_average)\r\n    global_average = np.mean(fn[:,2])\r\n\r\n    nfolds = 5\r\n\r\n    err_train=np.zeros(nfolds)\r\n    err_test=np.zeros(nfolds)\r\n    mae_train=np.zeros(nfolds)\r\n    mae_test=np.zeros(nfolds)\r\n    alpha=np.zeros(nfolds)\r\n    beta=np.zeros(nfolds)\r\n    gamma=np.zeros(nfolds)\r\n\r\n    np.random.seed(1)\r\n\r\n    seqs=[x%nfolds for x in range(len(fn))]\r\n    np.random.shuffle(seqs)\r\n\r\n    start_time = time.time()\r\n    print ('Recommendations from a combination of user and movie averages:')\r\n    for fold in range(nfolds):\r\n\r\n        train_set=np.array([x!=fold for x in seqs])\r\n        test_set=np.array([x==fold for x in seqs])\r\n        train = pd.DataFrame(ratings_movie.iloc[train_set], columns=[0, 1, 2], dtype=int)\r\n        test = pd.DataFrame(ratings_movie.iloc[test_set], columns=[0, 1, 2], dtype=int)\r\n        X = 
np.vstack([np.array(mean_user(train)), np.array(mean_movie(train))]).T\r\n reg = linear_model.LinearRegression()\r\n\r\n reg.fit(X[:,:],np.array(train[2]))\r\n\r\n alpha[fold] = reg.coef_[0] # coeff of alpha\r\n beta[fold] = reg.coef_[1] # coeff of beta\r\n gamma[fold] = reg.intercept_ # coeff of the intercept (gamma)\r\n #print alpha[fold], beta, gamma\r\n # applying the values above to the formula in the book\r\n\r\n pred_train = alpha[fold] * mean_user((train)) + beta[fold] * mean_movie((train)) + gamma[fold]\r\n pred_test= alpha[fold] * mean_user((test)) + beta[fold] * mean_movie((test)) + gamma[fold]\r\n pred_train[pred_train > 5] = 5\r\n pred_train[pred_train < 1] = 1\r\n pred_test[pred_test>5]=5\r\n pred_test[pred_test<1]=1\r\n\r\n err_train[fold] = np.sqrt(np.mean((np.array(train[2]) - pred_train)**2))\r\n mae_train[fold] = np.mean(np.abs(np.array(train[2]) - pred_train))\r\n err_test[fold] = np.sqrt(np.mean((np.array(test[2]) - pred_test)**2))\r\n mae_test[fold] = np.mean(np.abs(test[2] - pred_test))\r\n print(\"Fold \" + str(fold+1) + \": RMSE_train = \" + str(err_train[fold]) + \"; RMSE_test = \" + str(err_test[fold]))\r\n\r\n\r\n\r\n print(\"\\n\")\r\n print('Mean error on TRAIN: '+ str(np.mean(err_train)))\r\n print('Mean error on TEST: ' + str(np.mean(err_test)))\r\n print ('MAE on TRAIN: ' + str(np.mean(mae_train)))\r\n print ('MAE on TEST: ' + str(np.mean(mae_test)))\r\n print (\"alpha =\", np.mean(alpha), \"; beta =\",np.mean(beta) , \"; gamma =\", np.mean(gamma))\r\n\r\n print(\"Linear regression runtime: %s seconds ---\" % (time.time() - start_time))\r\n","repo_name":"rywjhzd/Advances-In-Data-Mining","sub_path":"Assignment1/combo.py","file_name":"combo.py","file_ext":"py","file_size_in_byte":3921,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"42769912493","text":"import os\nimport warnings\nimport json\nimport random\n\n\ndef get_entities(seq, suffix=False):\n \"\"\"Gets entities from sequence.\n\n Args:\n seq (list): sequence of labels.\n\n Returns:\n list: list of (chunk_type, chunk_start, chunk_end).\n\n Example:\n >>> from seqeval.metrics.sequence_labeling import get_entities\n >>> seq = ['B-PER', 'I-PER', 'O', 'B-LOC']\n >>> get_entities(seq)\n [('PER', 0, 1), ('LOC', 3, 3)]\n \"\"\"\n\n def _validate_chunk(chunk, suffix):\n if chunk in ['O', 'B', 'I', 'E', 'S']:\n return\n\n if suffix:\n if not chunk.endswith(('-B', '-I', '-E', '-S')):\n warnings.warn('{} seems not to be NE tag.'.format(chunk))\n\n else:\n if not chunk.startswith(('B-', 'I-', 'E-', 'S-')):\n warnings.warn('{} seems not to be NE tag.'.format(chunk))\n\n # for nested list\n if any(isinstance(s, list) for s in seq):\n seq = [item for sublist in seq for item in sublist + ['O']]\n\n prev_tag = 'O'\n prev_type = ''\n begin_offset = 0\n chunks = []\n for i, chunk in enumerate(seq + ['O']):\n _validate_chunk(chunk, suffix)\n\n if suffix:\n tag = chunk[-1]\n type_ = chunk[:-1].rsplit('-', maxsplit=1)[0] or '_'\n else:\n tag = chunk[0]\n type_ = chunk[1:].split('-', maxsplit=1)[-1] or '_'\n\n if end_of_chunk(prev_tag, tag, prev_type, type_):\n chunks.append((prev_type, begin_offset, i - 1))\n if start_of_chunk(prev_tag, tag, prev_type, type_):\n begin_offset = i\n prev_tag = tag\n prev_type = type_\n\n return chunks\n\n\ndef end_of_chunk(prev_tag, tag, prev_type, type_):\n \"\"\"Checks if a chunk ended between the previous and current word.\n\n Args:\n prev_tag: previous chunk tag.\n tag: current chunk tag.\n prev_type: previous type.\n type_: 
current type.\n\n    Returns:\n        chunk_end: boolean.\n    \"\"\"\n    chunk_end = False\n\n    if prev_tag == 'E':\n        chunk_end = True\n    if prev_tag == 'S':\n        chunk_end = True\n\n    if prev_tag == 'B' and tag == 'B':\n        chunk_end = True\n    if prev_tag == 'B' and tag == 'S':\n        chunk_end = True\n    if prev_tag == 'B' and tag == 'O':\n        chunk_end = True\n    if prev_tag == 'I' and tag == 'B':\n        chunk_end = True\n    if prev_tag == 'I' and tag == 'S':\n        chunk_end = True\n    if prev_tag == 'I' and tag == 'O':\n        chunk_end = True\n\n    if prev_tag != 'O' and prev_tag != '.' and prev_type != type_:\n        chunk_end = True\n\n    return chunk_end\n\n\ndef start_of_chunk(prev_tag, tag, prev_type, type_):\n    \"\"\"Checks if a chunk started between the previous and current word.\n\n    Args:\n        prev_tag: previous chunk tag.\n        tag: current chunk tag.\n        prev_type: previous type.\n        type_: current type.\n\n    Returns:\n        chunk_start: boolean.\n    \"\"\"\n    chunk_start = False\n\n    if tag == 'B':\n        chunk_start = True\n    if tag == 'S':\n        chunk_start = True\n\n    if prev_tag == 'E' and tag == 'E':\n        chunk_start = True\n    if prev_tag == 'E' and tag == 'I':\n        chunk_start = True\n    if prev_tag == 'S' and tag == 'E':\n        chunk_start = True\n    if prev_tag == 'S' and tag == 'I':\n        chunk_start = True\n    if prev_tag == 'O' and tag == 'E':\n        chunk_start = True\n    if prev_tag == 'O' and tag == 'I':\n        chunk_start = True\n\n    if tag != 'O' and tag != '.' and prev_type != type_:\n        chunk_start = True\n\n    return chunk_start\n
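The chunk rules above also cover the BIOES scheme that this script's label map (built at the bottom of the file) produces; a quick sanity check of get_entities on BIOES tags, with the expected result in the comment:

seq = ['B-PER', 'E-PER', 'O', 'S-LOC', 'B-ORG', 'I-ORG', 'E-ORG']
get_entities(seq)   # [('PER', 0, 1), ('LOC', 3, 3), ('ORG', 4, 6)]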
\n\ndef preprocess(input_path, save_path, mode, split=None, ratio=None):\n    if not os.path.exists(save_path):\n        os.makedirs(save_path)\n    result = []\n    tmp = {}\n    tmp['id'] = 0\n    tmp['text'] = ''\n    tmp['labels'] = []\n    # ======= first collect each sentence and all the entities/types in it =======\n    with open(input_path, 'r', encoding='utf-8') as fp:\n        lines = fp.readlines()\n        texts = []\n        words = []\n        entities = []\n        char_label_tmp = []\n        for line in lines:\n            line = line.strip().split(\" \")\n            if len(line) == 2:\n                word = line[0]\n                label = line[1]\n                words.append(word)\n                char_label_tmp.append(label)\n            else:\n                texts.append(\"\".join(words))\n                entities.append(get_entities(char_label_tmp))\n                words = []\n                char_label_tmp = []\n\n    # ==========================================\n    # ======= locate each entity's position in the sentence =======\n    # each element of entities is [entity type, entity start position, entity end position]\n    i = 0\n    labels = set()\n    for text, entity in zip(texts, entities):\n        if entity:\n            tmp['id'] = i\n            tmp['text'] = text\n            for j, ent in enumerate(entity):\n                labels.add(ent[0])\n                tmp['labels'].append([\"T{}\".format(str(j)), ent[0], ent[1], ent[2] + 1,\n                                      text[int(ent[1]):int(ent[2] + 1)]])\n        else:\n            tmp['id'] = i\n            tmp['text'] = text\n            tmp['labels'] = []\n        result.append(tmp)\n        # print(i, text, entity, tmp)\n        tmp = {}\n        tmp['id'] = 0\n        tmp['text'] = ''\n        tmp['labels'] = []\n        i += 1\n\n    if mode == \"train\":\n        label_path = os.path.join(save_path, \"labels.json\")\n        with open(label_path, 'w', encoding='utf-8') as fp:\n            fp.write(json.dumps(list(labels), ensure_ascii=False))\n\n\n    if split:\n        train_data_path = os.path.join(save_path, mode + \".json\")\n        dev_data_path = os.path.join(save_path, \"dev\" + \".json\")\n        random.shuffle(result)\n        train_result = result[:int(len(result) * (1 - ratio))]\n        dev_result = result[int(len(result) * (1 - ratio)):]\n        with open(train_data_path, 'w', encoding='utf-8') as fp:\n            fp.write(json.dumps(train_result, ensure_ascii=False))\n        with open(dev_data_path, 'w', encoding='utf-8') as fp:\n            fp.write(json.dumps(dev_result, ensure_ascii=False))\n    else:\n        data_path = os.path.join(save_path, mode + \".json\")\n        with open(data_path, 'w', encoding='utf-8') as fp:\n            fp.write(json.dumps(result, ensure_ascii=False))\n\n\npath = '../mid_data/'\npreprocess(\"train.txt\", path, \"train\", split=True, ratio=0.2)\n# preprocess(\"train.txt\", path, \"train\", split=None, ratio=None)\n# preprocess(\"dev.txt\", path, \"dev\", split=None, ratio=None)\n\nlabels_path = os.path.join(path, 'labels.json')\nwith open(labels_path, 'r') as fp:\n    labels = json.load(fp)\n\ntmp_labels = []\ntmp_labels.append('O')\nfor label in labels:\n    tmp_labels.append('B-' + label)\n    tmp_labels.append('I-' + label)\n    tmp_labels.append('E-' + label)\n    tmp_labels.append('S-' + label)\n\nlabel2id = {}\nfor k, v in enumerate(tmp_labels):\n    label2id[v] = k\n\nif not os.path.exists(path):\n    os.makedirs(path)\nwith open(os.path.join(path, \"nor_ent2id.json\"), 'w') as fp:\n    fp.write(json.dumps(label2id, ensure_ascii=False))\n","repo_name":"taishan1994/pytorch_bert_bilstm_crf_ner","sub_path":"data/attr/raw_data/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":7053,"program_lang":"python","lang":"en","doc_type":"code","stars":387,"dataset":"github-code","pt":"75"}
{"seq_id":"43279503406","text":"from init import *\r\nfrom engine import *\r\nfrom time import time\r\nfrom random import randint\r\nfrom math import cos,sin,radians, degrees, atan2\r\n\r\npg.mixer.init()\r\n\r\n# returns the angle between the points\r\ndef getAngle(a, b, c):\r\n    ang = degrees(atan2(c[1]-b[1], c[0]-b[0]) - atan2(a[1]-b[1], a[0]-b[0]))\r\n    return ang + 360 if ang < 0 else ang\r\n\r\n# centers the element horizontally\r\ndef setElementToCenterX(element):\r\n    try:\r\n        element.setPos(x=sc.get_width()/2 - element.model.get_width()/2)\r\n    except:\r\n        element.setPos(x=sc.get_width()/2 - element.w/2)\r\n\r\n# scales an image to the maximum screen size without changing its aspect ratio\r\ndef setMaxScreenSizeToElement(image):\r\n    w, h = sc.get_size()\r\n\r\n    if w/image.w <= h/image.h:\r\n        multiplier = w/image.w\r\n        image.x = 0; image.y = (h - image.h * multiplier) / 2\r\n    else:\r\n        multiplier = h/image.h\r\n        image.x = (w - image.w * multiplier) / 2; image.y = 0\r\n    image.resize(image.w*multiplier, image.h*multiplier)\r\n    return multiplier\r\n
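setMaxScreenSizeToElement implements the standard letterbox fit: scale by the smaller of the width and height ratios, then center along the axis with slack. The same arithmetic as a dependency-free sketch (plain numbers instead of the engine's Image):

def letterbox(screen_w, screen_h, img_w, img_h):
    scale = min(screen_w / img_w, screen_h / img_h)   # keep the aspect ratio
    w, h = img_w * scale, img_h * scale
    x, y = (screen_w - w) / 2, (screen_h - h) / 2     # center along the slack axis
    return x, y, w, h

letterbox(1920, 1080, 800, 600)   # (240.0, 0.0, 1440.0, 1080.0)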
\r\n# dialog window\r\nclass DialogManager:\r\n    bg = Image('images/dialogBg.png', 0, 0, 4000, 300, True)\r\n    talkImageSize = (70, 70)\r\n    work = False\r\n    def start(self, gameMap, text, letterWriteTime=.1, firstWaitTime=1, talkImage=None, talkImagePos=(.1, .1)):\r\n        try:\r\n            for i in self.labels:\r\n                self.map.delElement(i)\r\n        except:\r\n            pass\r\n        \r\n        self.bg.resize(h=300)\r\n        self.map = gameMap\r\n        self.text = text\r\n        self.labels = [Label(\"\", x=self.talkImageSize[0]+10, color=(240, 240, 240), anchor=True)]\r\n        self.time = time() + firstWaitTime\r\n        self.letterWriteTime = letterWriteTime\r\n        self.map.addElement([self.labels[-1]])\r\n        self.image = None\r\n        if talkImage:\r\n            self.image = Image(f'images/{talkImage}', 10, self.bg.y + self.bg.h - self.talkImageSize[1],*self.talkImageSize, True)\r\n            self.map.addElement(self.image)\r\n        self.map.eventHandler.onLoopUpdate.addElement(self.showDialog)\r\n        self.work = True\r\n        \r\n    def stop(self):\r\n        self.map.eventHandler.onLoopUpdate.delElement(self.showDialog)\r\n        for i in self.labels:\r\n            self.map.delElement(i)\r\n        for i in self.labels[1:]:\r\n            self.labels.remove(i)\r\n        if self.image:\r\n            self.map.delElement(self.image)\r\n        self.map.delElement(self.bg)\r\n        self.work = False\r\n\r\n    def showDialog(self):\r\n        self.work = True\r\n        self.map.delElement(self.bg)\r\n        for i in self.labels:\r\n            self.map.delElement(i)\r\n        for i in self.labels[1:]:\r\n            self.labels.remove(i)\r\n        self.map.addElement(self.bg)\r\n        if self.image:\r\n            self.map.delElement(self.image)\r\n            self.map.addElement(self.image)\r\n        k = 0\r\n        text = ''\r\n        w,h = sc.get_size()\r\n        for i in self.text:\r\n            if k > (time() - self.time) // self.letterWriteTime:\r\n                break\r\n            if self.labels[-1].font.size(text)[0] >= w - self.talkImageSize[0] - 30:\r\n                self.labels[-1].updateText(\" \".join(text.split()[:-1]))\r\n                self.labels.append(Label(\"\", x=self.talkImageSize[0]+10, y=self.labels[-1].y + 30, color=(240, 240, 240), anchor=True))\r\n                text = text.split()[-1:][0]\r\n\r\n            text += i\r\n            self.labels[-1].updateText(text)\r\n            k += 1\r\n        for i in self.labels:\r\n            self.map.addElement(i)\r\n        self.bg.resize(sc.get_size()[0], 120)\r\n        if self.image:\r\n            self.image.setPos(10, self.bg.h - self.image.h)\r\n\r\n    def onResize(self):\r\n        pass\r\n
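showDialog's typewriter effect derives the number of visible letters from wall-clock time rather than counting frames, and self.time is set firstWaitTime seconds into the future, which delays the reveal. The core idea in isolation (a sketch, independent of the engine's Label objects):

from time import time

start = time() + 1.0                 # firstWaitTime: the reveal begins after the pause

def visible_prefix(full_text, letter_time=0.1):
    shown = int((time() - start) // letter_time)  # letters earned so far
    return full_text[:max(0, shown)]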
\r\n# main code\r\nclass GameStory:\r\n    # each inner class is one map\r\n    class Story0:\r\n        # resets the map data\r\n        def reset(self):\r\n            try: self.myMap.clear()\r\n            except: pass\r\n        \r\n        # entry point that starts all the mechanics\r\n        def start(self):\r\n            self.nextDialogTime = [time(), 0]\r\n            game.setMap('дом')\r\n            self.myMap = game.getMap('дом')\r\n            self.reset()\r\n            self.myMap.resize(*homeFirstSize)\r\n\r\n            self.oldManPos = (.72, .6)\r\n            self.heroPos = (.2, .76)\r\n            mapW, mapH = self.myMap.w, self.myMap.h\r\n            self.oldMan = Image(['images/old-man-sitting.png', 'images/old-man-sitting1.png'], mapW*self.oldManPos[0], mapH*self.oldManPos[1], 100, 100)\r\n            self.hero = Image(['images/hero-laying.png', 'images/hero-sitting.png', 'images/hero1.png'], mapW*self.heroPos[0], mapH*self.heroPos[1], 90, 90)\r\n\r\n            self.currentDialog = -1\r\n            # per entry: (how long the dialog runs, function called when it starts), text, seconds per letter, pause before it starts, portrait image\r\n            self.dialogs = [\r\n                [(3, None), 'Мхмхм', .08, 1, 'hero-cut1.png'],\r\n                [(5, self.heroNextImage), '(Неужели я переиграл в игры и отрубился на полу)', .03, .6, 'hero-cut0.png'],\r\n                [(8.4, None), 'Мдам и как ты заснул на полу, ну да ладно. Пока ты спал я полазил на складе и нашел акваланг, не хочешь посмотреть ?', .05, .7, 'old-man-cut0.png'],\r\n                [(5, None), 'Конечно хочу, а где он ?', .06, .6, 'hero-cut0.png'],\r\n                [(4.6, self.heroNextImage), 'Вот он, держи примерь', .06, .6, 'old-man-cut1.png'],\r\n                [(4.6, self.heroNextImage), 'Воу в нем так приколько', .06, .6, 'hero-cut2.png'],\r\n                [(10, None), 'Рад что тебе понравился, у меня как раз есть одна просба, не мог бы ты помочь достать сундук с воды который я случайно обронил ?', .06, .8, 'old-man-cut0.png'],\r\n                [(4.6, None), 'Конечно, отправляемся прямо сейчас', .06, .6, 'hero-cut2.png'],\r\n            ]\r\n            self.heroImages = [1, 1, 2]\r\n            \r\n            # register the elements with the event handlers\r\n            self.myMap.addElement([self.oldMan, self.hero])\r\n            self.myMap.eventHandler.onLoopUpdate.addElement(self.onResize)\r\n            self.myMap.eventHandler.onLoopUpdate.addElement(self.dialogAutoUpdate)\r\n            self.myMap.eventHandler.onClick.addElement(self.myMap, self.nextDialog)\r\n\r\n        def heroNextImage(self):\r\n            self.hero.changeImage(self.heroImages[0])\r\n            self.heroImages.pop(0)\r\n            if len(self.heroImages) == 1:\r\n                self.oldMan.changeImage(1)\r\n            elif len(self.heroImages) == 0:\r\n                self.oldMan.changeImage(0)\r\n\r\n        # advances the dialog text using the entries of self.dialogs\r\n        def nextDialog(self, *args):\r\n            self.currentDialog += 1\r\n            if len(self.dialogs) > self.currentDialog:\r\n                self.nextDialogTime = [time(), self.dialogs[self.currentDialog][0][0]]\r\n                if self.dialogs[self.currentDialog][0][1]:\r\n                    self.dialogs[self.currentDialog][0][1]()\r\n                dialogManager.start(self.myMap, *self.dialogs[self.currentDialog][1:])\r\n            else:\r\n                dialogManager.stop()\r\n                self.myMap.eventHandler.onClick.delElement(self.myMap)\r\n                self.myMap.eventHandler.onLoopUpdate.delElement(self.dialogAutoUpdate)\r\n                darkScreenAnimation.newStory(gameStory.Story1())\r\n                setLvl(1)\r\n\r\n        def dialogAutoUpdate(self):\r\n            self.nextDialogTime[1] -= time() - self.nextDialogTime[0]\r\n            self.nextDialogTime[0] = time()\r\n            if self.nextDialogTime[1] <= 0:\r\n                self.nextDialog()\r\n        \r\n        def onResize(self, *args):\r\n            w, h = sc.get_size()\r\n            if (self.myMap.w != w) & (self.myMap.h != h):\r\n                multiplier = setMaxScreenSizeToElement(self.myMap)\r\n                mapW, mapH = self.myMap.w, self.myMap.h\r\n\r\n                self.oldMan.resize(self.oldMan.w*multiplier, self.oldMan.h*multiplier)\r\n                self.oldMan.setPos(mapW*self.oldManPos[0], mapH*self.oldManPos[1])\r\n\r\n                self.hero.resize(self.hero.w*multiplier, self.hero.h*multiplier)\r\n                self.hero.setPos(mapW*self.heroPos[0], mapH*self.heroPos[1])\r\n\r\n    class Story1:\r\n        def reset(self):\r\n            try:\r\n                self.myMap.x = 0\r\n                self.myMap.y = 0\r\n                self.myMap.clear()\r\n            except:\r\n                pass\r\n\r\n        def start(self):\r\n            self.respawn = None\r\n            self.nextDialogTime = [time(), 0]\r\n            game.setMap('вода 0')\r\n            self.myMap = game.getMap('вода 0')\r\n            self.reset()\r\n            self.myMap.addElement(waterRect)\r\n\r\n            self.borders = [\r\n                selfRect(-100, 500)\r\n            ]\r\n\r\n            self.chest = Image(['images/chest.png', 'images/chest-open.png'], 50, self.myMap.h - 60, 60, 60)\r\n            self.chestBg = Image('images/chest-floor.png', 0, 0, 4000, 2000)\r\n            self.isChestOpen = False; self.isFoundChest = False\r\n            self.book = Image('images/book0.png', 200, 160, 400, 400)\r\n            self.sand = Image('images/sand.png', -1000, self.myMap.h, self.myMap.w + 2000, 800)\r\n            self.chestHoverText = Button('Нажмите что бы открыть', 30, self.chest.y, fontSize=16)\r\n            self.oldMan = Image('images/old-man.png', -40, 380, 100, 100)\r\n            self.hero = Image(['images/hero.png', 
'images/hero1.png', *[f'images/hero-jump{i}.png' for i in range(11)], 'images/hero2.png', 'images/hero4.png'], -14, 386, 90, 90)\r\n self.boat = Image('images/boat.png', -50, 370, 160, 160)\r\n self.jumpTime = .06\r\n self.startJumpTime = 0\r\n\r\n self.lines = []\r\n for i in range(4):\r\n self.lines.append([])\r\n for j in range((self.myMap.w) // 200):\r\n self.lines[-1].append(selfRect(50 + j*200 + randint(-50, 50), 300 + 230*i + randint(-50, 50), 10, 2, (240, 240, 240)))\r\n self.myMap.addElement(self.lines[-1][-1])\r\n\r\n self.dialogs = [\r\n [(3.6, self.dialogSkip), 'Ну что, ты готов ?', .06, .6, 'old-man-cut0.png'],\r\n [(3.8, None), 'Щас только акваланг одену', .06, .6, 'hero-cut0.png'],\r\n [(3.6, self.setHeroImage), 'Воо теперь готов', .06, .6, 'hero-cut2.png'],\r\n [(5, None), 'Уоп нежданчик, управление на (W,A,S,D)', .03, -.03, 'robot.png'],\r\n [(5, None), 'Поплылии!!', .06, .6, 'hero-cut2.png'],\r\n ]\r\n self.actions = [\r\n self.mooveBoat, self.nextDialog, self.nextDialog, self.jumpToWater, self.mooveHero, \r\n self.nextDialog, self.mooveHero, self.nextDialog, self.nextDialog\r\n ]\r\n self.heroMooveY = 1\r\n self.myMap.addElement([self.oldMan, self.hero, self.boat, self.sand])\r\n self.myMap.eventHandler.onLoopUpdate.addElement(self.actionsPlay)\r\n self.spawnDecor()\r\n\r\n def spawnDecor(self):\r\n try:\r\n for i in self.fish:\r\n if i[1][0] > i[0].x:\r\n i[0].moove(1)\r\n if i[2]: i[0].flip(); i[2] = False\r\n elif i[1][0] == i[0].x:\r\n pass\r\n else:\r\n i[0].moove(-1)\r\n if not i[2]: i[0].flip(); i[2] = True\r\n if i[1][1] > i[0].y:\r\n i[0].moove(y=1)\r\n elif i[1][1] == i[0].y:\r\n pass\r\n else:\r\n i[0].moove(y=-1) \r\n if (i[0].x == i[1][0]) & (i[0].y == i[1][1]):\r\n i[1] = [int(randint(50, self.myMap.w - 100)), int(randint(500, self.myMap.h - 100))]\r\n except:\r\n self.decor = []\r\n for i in range(5):\r\n size = randint(300, 800)\r\n self.decor.append(Image(f'images/stone{randint(0, 1)}.png', randint(100, self.myMap.w - 70), self.myMap.h-size/2, size, size/2))\r\n while self.decor[-1].collideobjects(self.decor[:-1]):\r\n self.decor[-1].setPos(randint(100, self.myMap.w - 70))\r\n self.myMap.addElement(self.decor[-1])\r\n self.myMap.addElement(self.chest)\r\n self.fish = []\r\n for i in range(20):\r\n size = randint(30, 50)\r\n self.decor.append(Image(f'images/leaves{randint(0, 3)}.png', randint(100, self.myMap.w - 70), self.myMap.h-size, size, size))\r\n self.myMap.addElement(self.decor[-1])\r\n for i in range(14):\r\n pos = [int(randint(50, self.myMap.w - 100)), int(randint(500, self.myMap.h - 100))]\r\n pos2 = [int(randint(50, self.myMap.w - 100)), int(randint(500, self.myMap.h - 100))]\r\n self.fish.append([Image(f'images/fish{randint(0, 7)}.png', *pos, 30, 30), pos2, False])\r\n self.myMap.addElement(self.fish[-1][0])\r\n self.myMap.eventHandler.onLoopUpdate.addElement(self.spawnDecor)\r\n\r\n def borderRespawn(self):\r\n if self.respawn == None:\r\n self.respawn = time() + .6\r\n try:\r\n for i in self.mermaid[:4]:\r\n i.moove(4)\r\n for i in self.mermaid[4:]:\r\n i.moove(-4)\r\n\r\n except:\r\n self.mermaid = []\r\n x = -600\r\n for i in range(4):\r\n self.mermaid.append(Image('images/mermaid.png', x, 300 + 220 * i, 130, 130))\r\n x = self.myMap.w + 600\r\n for i in range(4):\r\n self.mermaid.append(Image('images/mermaid.png', x, 300 + 220 * i, 130, 130))\r\n self.myMap.addElement(self.mermaid)\r\n\r\n def showChestText(self, *args):\r\n self.myMap.addElement(self.chestHoverText)\r\n \r\n def hideChestText(self, *args):\r\n 
self.myMap.delElement(self.chestHoverText)\r\n\r\n def openChest(self, *args):\r\n self.chest.changeImage(1)\r\n self.myMap.anchorCameraAtElement()\r\n self.myMap.addElement([self.chestBg, self.book])\r\n self.myMap.x = 0\r\n self.myMap.y = 0\r\n self.isChestOpen = True; self.isFoundChest = True\r\n self.nextDialogTime[1] = 0\r\n self.dialogs = [\r\n [(6, self.closeChest), '... Здесь только книга, отнесука я её дедушке', .06, 1, 'hero-cut2.png'],\r\n ]\r\n self.actions.pop(0)\r\n self.myMap.eventHandler.onClick.delElement(self.chest)\r\n self.myMap.eventHandler.mouseHover.delElement(self.chest)\r\n\r\n def closeChest(self, *args):\r\n self.myMap.anchorCameraAtElement(self.hero)\r\n self.myMap.delElement(self.chestBg)\r\n self.myMap.delElement(self.book)\r\n self.isChestOpen = False\r\n self.myMap.delElement(self.chestHoverText)\r\n\r\n def dialogSkip(self, *args):\r\n self.myMap.eventHandler.onClick.addElement(self.myMap, self.resetDialogTime)\r\n\r\n def resetDialogTime(self, *args):\r\n self.nextDialogTime[1] = .001\r\n\r\n def goHomeStart(self):\r\n darkScreenAnimation.newStory(gameStory.Story2())\r\n self.actions = [self.goHome]\r\n\r\n def goHome(self):\r\n self.actions = [self.goHome]\r\n self.boat.moove(-2)\r\n self.hero.moove(-2)\r\n self.oldMan.moove(-2)\r\n \r\n def mooveHero(self):\r\n if (self.hero.x < 0) or (self.hero.x > self.myMap.w):\r\n self.borderRespawn()\r\n if self.respawn != None:\r\n if type(self.respawn) == str:\r\n pass\r\n else:\r\n if time() > self.respawn:\r\n if difficultSetting.actionAfterDie == 'respawn':\r\n darkScreenAnimation.newStory(gameStory.Story1())\r\n else:\r\n darkScreenAnimation.newStory(gameStory.Story0())\r\n self.respawn = \"a\"\r\n if dialogManager.work: dialogManager.stop()\r\n if not self.isChestOpen:\r\n keys.updateKeys()\r\n speed = 3\r\n x, y = [(-keys.a + keys.d) * speed, (-keys.w + keys.s) * speed]\r\n if y == 0:\r\n y = 1\r\n if x >= 1: self.hero.changeImage(1)\r\n if x <= -1: self.hero.changeImage(14)\r\n self.hero.moove(x, y)\r\n if self.hero.y > self.myMap.h - self.hero.h: self.hero.setPos(y=self.myMap.h - self.hero.h)\r\n if self.hero.y < 500: self.hero.setPos(y=500)\r\n if self.isFoundChest:\r\n if self.oldMan.x + 200 >= self.hero.x >= self.oldMan.x - 100:\r\n if self.hero.colliderect(self.boat):\r\n self.dialogs = [\r\n [(5, None), 'Там в сундуке была только книга', .06, .6, 'hero-cut0.png'],\r\n [(7, None), 'Мне эта книга что то напоминает, поплыли домой а я потом тебе расскажу о чём там', .06, .6, 'old-man-cut0.png'],\r\n [(5, self.goHomeStart), ' ', .06, .6],\r\n [(5, None), ' ', .06, .6],\r\n ]\r\n self.actions.pop(0)\r\n self.hero.changeImage(13)\r\n self.hero.setPos(self.oldMan.x + 60, self.oldMan.y + 16)\r\n setLvl(2)\r\n \r\n def setHeroImage(self):\r\n self.hero.changeImage(1)\r\n\r\n def mooveBoat(self):\r\n if self.boat.x < 600:\r\n self.boat.moove(2)\r\n self.hero.moove(2)\r\n self.oldMan.moove(2)\r\n else:\r\n self.actions.pop(0)\r\n\r\n def jumpToWater(self):\r\n if (self.hero.imgIndex < 12):\r\n if self.jumpTime <= time() - self.startJumpTime:\r\n self.startJumpTime = time()\r\n self.hero.changeImage(self.hero.imgIndex+1)\r\n\r\n if self.hero.x < 750:\r\n self.hero.moove(3, -1)\r\n elif self.hero.y >= self.myMap.h - self.hero.h:\r\n self.myMap.eventHandler.onClick.addElement(self.chest, self.openChest)\r\n self.myMap.eventHandler.mouseHover.addElement(self.chest, self.showChestText, self.hideChestText)\r\n self.actions.pop(0)\r\n dialogManager.stop()\r\n self.hero.changeImage(1)\r\n else:\r\n self.heroMooveY += 
.06\r\n if self.heroMooveY > 5:\r\n self.heroMooveY = 5\r\n self.hero.moove(1, self.heroMooveY)\r\n w, h = sc.get_size()\r\n if self.hero.y > h/2 - self.hero.h/2:\r\n self.myMap.anchorCameraAtElement(self.hero)\r\n \r\n def nextDialog(self, *args):\r\n if not self.isChestOpen: \r\n if len(self.dialogs) > 1:\r\n if self.nextDialogTime[1] <= 0:\r\n self.nextDialogTime = [time(), self.dialogs[0][0][0]]\r\n dialogManager.start(self.myMap, *self.dialogs[0][1:])\r\n if self.dialogs[0][0][1]:\r\n self.dialogs[0][0][1]()\r\n else:\r\n self.nextDialogTime[1] -= time() - self.nextDialogTime[0]\r\n self.nextDialogTime[0] = time()\r\n if self.nextDialogTime[1] <= 0:\r\n if len(self.dialogs) > 1:\r\n self.dialogs.pop(0)\r\n else:\r\n dialogManager.start(self.myMap, *self.dialogs[0][1:])\r\n self.startJumpTime = time()\r\n self.actions.pop(0)\r\n else:\r\n if self.nextDialogTime[1] <= 0:\r\n self.nextDialogTime = [time(), self.dialogs[0][0][0]]\r\n dialogManager.start(self.myMap, *self.dialogs[0][1:])\r\n print(\"mmm\")\r\n else:\r\n self.nextDialogTime[1] -= time() - self.nextDialogTime[0]\r\n self.nextDialogTime[0] = time()\r\n if self.nextDialogTime[1] <= 0:\r\n print('how')\r\n self.dialogs[0][0][1]()\r\n self.dialogs.pop(0)\r\n self.actions.pop(0)\r\n\r\n def actionsPlay(self):\r\n self.actions[0]()\r\n\r\n class Story2:\r\n def reset(self):\r\n try:\r\n self.myMap.clear()\r\n except:\r\n pass\r\n\r\n def start(self):\r\n self.nextDialogTime = [time(), 0]\r\n game.setMap('дом 2')\r\n self.myMap = game.getMap('дом 2')\r\n self.reset()\r\n self.myMap.resize(*homeFirstSize)\r\n\r\n self.oldManPos = (.72, .6)\r\n self.heroPos = (.63, .63)\r\n mapW, mapH = self.myMap.w, self.myMap.h\r\n self.oldMan = Image('images/old-man-sitting.png', mapW*self.oldManPos[0], mapH*self.oldManPos[1], 100, 100)\r\n self.hero = Image('images/hero.png', mapW*self.heroPos[0], mapH*self.heroPos[1], 90, 90)\r\n\r\n self.currentDialog = -1\r\n self.dialogs = [\r\n [(14.6, None), 'Я прочитал эту книгу и там было написано про какойто затерянный город с богатствами, нам обязательно нужно его найти. 
Только отправимся завтра, ато спать охота', .08, 1, 'old-man-cut0.png'],\r\n ]\r\n self.heroImages = [1, 1, 2]\r\n \r\n self.myMap.addElement([self.oldMan, self.hero])\r\n self.myMap.eventHandler.onLoopUpdate.addElement(self.onResize)\r\n self.myMap.eventHandler.onLoopUpdate.addElement(self.dialogAutoUpdate)\r\n self.myMap.eventHandler.onClick.addElement(self.myMap, self.nextDialog)\r\n\r\n def nextDialog(self, *args):\r\n self.currentDialog += 1\r\n if len(self.dialogs) > self.currentDialog:\r\n self.nextDialogTime = [time(), self.dialogs[self.currentDialog][0][0]]\r\n if self.dialogs[self.currentDialog][0][1]:\r\n self.dialogs[self.currentDialog][0][1]()\r\n dialogManager.start(self.myMap, *self.dialogs[self.currentDialog][1:])\r\n else:\r\n dialogManager.stop()\r\n self.myMap.eventHandler.onClick.delElement(self.myMap)\r\n self.myMap.eventHandler.onLoopUpdate.delElement(self.dialogAutoUpdate)\r\n darkScreenAnimation.newStory(gameStory.Story3())\r\n setLvl(3)\r\n\r\n def dialogAutoUpdate(self):\r\n self.nextDialogTime[1] -= time() - self.nextDialogTime[0]\r\n self.nextDialogTime[0] = time()\r\n if self.nextDialogTime[1] <= 0:\r\n self.nextDialog()\r\n\r\n def onResize(self, *args):\r\n w, h = sc.get_size()\r\n if (self.myMap.w != w) & (self.myMap.h != h):\r\n multiplier = setMaxScreenSizeToElement(self.myMap)\r\n mapW, mapH = self.myMap.w, self.myMap.h\r\n\r\n self.oldMan.resize(self.oldMan.w*multiplier, self.oldMan.h*multiplier)\r\n self.oldMan.setPos(mapW*self.oldManPos[0], mapH*self.oldManPos[1])\r\n\r\n self.hero.resize(self.hero.w*multiplier, self.hero.h*multiplier)\r\n self.hero.setPos(mapW*self.heroPos[0], mapH*self.heroPos[1])\r\n\r\n class Story3:\r\n def reset(self):\r\n try:\r\n self.myMap.clear()\r\n except:\r\n pass\r\n\r\n def start(self):\r\n self.nextDialogTime = [time(), 0]\r\n game.setMap('улица')\r\n self.myMap = game.getMap('улица')\r\n self.reset()\r\n self.myMap.resize(*streetFirstSize)\r\n\r\n self.oldManPos = (.51, .6)\r\n self.heroPos = [.66, .65]\r\n mapW, mapH = self.myMap.w, self.myMap.h\r\n self.oldMan = Image('images/old-man.png', mapW*self.oldManPos[0], mapH*self.oldManPos[1], 70, 70)\r\n self.hero = Image(['images/hero2.png', 'images/hero.png'], mapW*self.heroPos[0], mapH*self.heroPos[1], 63, 63)\r\n\r\n self.currentDialog = -1\r\n self.dialogs = [\r\n [(3.6, None), 'Какой прекрасный день', .08, 1, 'old-man-cut0.png'],\r\n [(9, None), 'Чего встал ? Думал я с тобой поплыву ? Неет я для этого уже стар, оправляйся ка ты один', .08, 1, 'old-man-cut0.png'],\r\n [(3, None), 'Ааа.. ээээ.. ну ладно', .08, 1, 'hero-cut0.png'],\r\n [(3, lambda: self.myMap.eventHandler.onLoopUpdate.addElement(self.mooveRight)), 'Ааа.. ээээ.. ну ладно', .01, -10, 'hero-cut0.png'],\r\n [(0, self.stopMooveRight), 'Ааа.. ээээ.. 
ну ладно', .01, -10, 'hero-cut0.png'],\r\n ]\r\n self.heroImages = [1, 1, 2]\r\n \r\n self.myMap.addElement([self.oldMan, self.hero])\r\n self.myMap.eventHandler.onLoopUpdate.addElement(self.onResize)\r\n self.myMap.eventHandler.onLoopUpdate.addElement(self.dialogAutoUpdate)\r\n self.myMap.eventHandler.onClick.addElement(self.myMap, self.nextDialog)\r\n\r\n def mooveRight(self):\r\n self.hero.changeImage(1)\r\n mapW, mapH = self.myMap.w, self.myMap.h\r\n self.heroPos[0] += .002\r\n self.hero.setPos(mapW*self.heroPos[0], mapH*self.heroPos[1])\r\n\r\n def stopMooveRight(self):\r\n self.hero.changeImage(1)\r\n self.myMap.eventHandler.onLoopUpdate.delElement(self.mooveRight)\r\n\r\n def nextDialog(self, *args):\r\n self.currentDialog += 1\r\n if len(self.dialogs) > self.currentDialog:\r\n self.nextDialogTime = [time(), self.dialogs[self.currentDialog][0][0]]\r\n if self.dialogs[self.currentDialog][0][1]:\r\n self.dialogs[self.currentDialog][0][1]()\r\n dialogManager.start(self.myMap, *self.dialogs[self.currentDialog][1:])\r\n else:\r\n dialogManager.stop()\r\n self.myMap.eventHandler.onClick.delElement(self.myMap)\r\n self.myMap.eventHandler.onLoopUpdate.delElement(self.dialogAutoUpdate)\r\n darkScreenAnimation.newStory(gameStory.Story4())\r\n setLvl(4)\r\n\r\n def dialogAutoUpdate(self):\r\n self.nextDialogTime[1] -= time() - self.nextDialogTime[0]\r\n self.nextDialogTime[0] = time()\r\n if self.nextDialogTime[1] <= 0:\r\n self.nextDialog()\r\n\r\n def onResize(self, *args):\r\n w, h = sc.get_size()\r\n if (self.myMap.w != w) & (self.myMap.h != h):\r\n multiplier = setMaxScreenSizeToElement(self.myMap)\r\n mapW, mapH = self.myMap.w, self.myMap.h\r\n\r\n self.oldMan.resize(self.oldMan.w*multiplier, self.oldMan.h*multiplier)\r\n self.oldMan.setPos(mapW*self.oldManPos[0], mapH*self.oldManPos[1])\r\n\r\n self.hero.resize(self.hero.w*multiplier, self.hero.h*multiplier)\r\n self.hero.setPos(mapW*self.heroPos[0], mapH*self.heroPos[1])\r\n\r\n class Story4:\r\n def reset(self):\r\n try:\r\n self.myMap.clear()\r\n except:\r\n pass\r\n\r\n def start(self):\r\n self.nextDialogTime = [time(), 0]\r\n game.setMap('вода 1')\r\n self.myMap = game.getMap('вода 1')\r\n self.reset()\r\n self.myMap.addElement(waterRect1)\r\n self.heroPos = (.66, .63)\r\n self.temple = Image('images/temple.png', 2900, self.myMap.h - 180 ,180, 180)\r\n self.hero = Image(['images/hero.png', 'images/hero1.png', *[f'images/hero-jump{i}.png' for i in range(11)], 'images/hero4.png'], 300, 186, 90, 90)\r\n self.boat = Image(['images/boat.png'], 260, 160, 160, 160)\r\n self.sand = Image('images/sand.png', -1000, self.myMap.h, self.myMap.w + 2000, 800)\r\n self.templeHoverText = Button(\"Войти ? 
:}\", self.temple.x, self.temple.y + 40, fontSize=26)\r\n self.jumpTime = .06\r\n self.heroMooveY = 1\r\n self.respawn = None\r\n self.lines = []\r\n for i in range(4):\r\n self.lines.append([])\r\n for j in range((self.myMap.w) // 200):\r\n self.lines[-1].append(selfRect(50 + j*200 + randint(-50, 50), 300 + 230*i + randint(-50, 50), 10, 2, (240, 240, 240)))\r\n self.myMap.addElement(self.lines[-1][-1])\r\n\r\n self.currentDialog = -1\r\n self.dialogs = [\r\n [(5, lambda: self.myMap.eventHandler.onLoopUpdate.addElement(self.mooveRight)), '(Мдаа и что нашло на этого страрика)', .08, 1, 'hero-cut0.png'],\r\n [(4, None), 'Похоже приплыл', .08, 1, 'hero-cut0.png'],\r\n [(5, None), 'Так, мне надо найти храм', .08, 1, 'hero-cut0.png'],\r\n ]\r\n\r\n self.actions = [self.nextDialog, self.nextDialog, self.nextDialog, self.stopDialog, self.startJumpToWater, self.jumpToWater, self.moove]\r\n self.myMap.addElement([self.hero, self.boat, self.sand])\r\n self.myMap.eventHandler.onLoopUpdate.addElement(self.actionsPlay)\r\n self.myMap.eventHandler.mouseHover.addElement(self.temple, self.showTempleText, self.hideTempleText)\r\n self.myMap.eventHandler.onClick.addElement(self.temple, self.openTemple)\r\n self.myMap.eventHandler.onClick.addElement(self.myMap, self.resetDialogTime)\r\n self.spawnDecor()\r\n\r\n def spawnDecor(self):\r\n try:\r\n for i in self.fish:\r\n if i[1][0] > i[0].x:\r\n i[0].moove(1)\r\n if i[2]: i[0].flip(); i[2] = False\r\n elif i[1][0] == i[0].x:\r\n pass\r\n else:\r\n i[0].moove(-1)\r\n if not i[2]: i[0].flip(); i[2] = True\r\n if i[1][1] > i[0].y:\r\n i[0].moove(y=1)\r\n elif i[1][1] == i[0].y:\r\n pass\r\n else:\r\n i[0].moove(y=-1) \r\n if (i[0].x == i[1][0]) & (i[0].y == i[1][1]):\r\n i[1] = [int(randint(50, self.myMap.w - 100)), int(randint(500, self.myMap.h - 100))]\r\n except:\r\n self.decor = []\r\n for i in range(5):\r\n size = randint(300, 800)\r\n self.decor.append(Image(f'images/stone{randint(0, 1)}.png', randint(100, self.myMap.w - 70), self.myMap.h-size/2, size, size/2))\r\n while self.decor[-1].collideobjects(self.decor[:-1]):\r\n self.decor[-1].setPos(randint(100, self.myMap.w - 70))\r\n self.myMap.addElement(self.decor[-1])\r\n self.fish = []\r\n self.myMap.addElement(self.temple)\r\n for i in range(20):\r\n size = randint(30, 50)\r\n self.decor.append(Image(f'images/leaves{randint(0, 3)}.png', randint(100, self.myMap.w - 70), self.myMap.h-size, size, size))\r\n self.myMap.addElement(self.decor[-1])\r\n for i in range(14):\r\n pos = [int(randint(50, self.myMap.w - 100)), int(randint(500, self.myMap.h - 100))]\r\n pos2 = [int(randint(50, self.myMap.w - 100)), int(randint(500, self.myMap.h - 100))]\r\n self.fish.append([Image(f'images/fish{randint(0, 7)}.png', *pos, 30, 30), pos2, False])\r\n self.myMap.addElement(self.fish[-1][0])\r\n self.myMap.eventHandler.onLoopUpdate.addElement(self.spawnDecor)\r\n\r\n def borderRespawn(self):\r\n if self.respawn == None:\r\n self.respawn = time() + .6\r\n try:\r\n for i in self.mermaid[:4]:\r\n i.moove(4)\r\n for i in self.mermaid[4:]:\r\n i.moove(-4)\r\n\r\n except:\r\n self.mermaid = []\r\n x = -600\r\n for i in range(4):\r\n self.mermaid.append(Image('images/mermaid.png', x, 300 + 220 * i, 130, 130))\r\n x = self.myMap.w + 600\r\n for i in range(4):\r\n self.mermaid.append(Image('images/mermaid.png', x, 300 + 220 * i, 130, 130))\r\n self.myMap.addElement(self.mermaid)\r\n\r\n def openTemple(self, *args):\r\n darkScreenAnimation.newStory(gameStory.Story5())\r\n setLvl(5)\r\n\r\n def showTempleText(self, *args):\r\n 
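# NOTE: same tooltip pattern as the chest in Story1; the Button created in start()\r\n            # is shown or hidden by the mouseHover callbacks registered there via\r\n            # mouseHover.addElement(self.temple, self.showTempleText, self.hideTempleText).\r\n            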
self.myMap.addElement(self.templeHoverText)\r\n        \r\n        def hideTempleText(self, *args):\r\n            self.myMap.delElement(self.templeHoverText)\r\n\r\n        def resetDialogTime(self, *args):\r\n            self.nextDialogTime[1] = .001\r\n\r\n        def startJumpToWater(self):\r\n            self.startJumpTime = time()\r\n            self.actions.pop(0)\r\n\r\n        def jumpToWater(self):\r\n            if (self.hero.imgIndex < 12):\r\n                if self.jumpTime <= time() - self.startJumpTime:\r\n                    self.startJumpTime = time()\r\n                    self.hero.changeImage(self.hero.imgIndex+1)\r\n\r\n            if self.hero.x < 650:\r\n                self.hero.moove(3, -1)\r\n            elif self.hero.y >= self.myMap.h - self.hero.h:\r\n                self.actions.pop(0)\r\n                dialogManager.stop()\r\n                self.hero.changeImage(1)\r\n            else:\r\n                self.heroMooveY += .06\r\n                if self.heroMooveY > 5:\r\n                    self.heroMooveY = 5\r\n                self.hero.moove(1, self.heroMooveY)\r\n                w, h = sc.get_size()\r\n                if self.hero.y > h/2 - self.hero.h/2:\r\n                    self.myMap.anchorCameraAtElement(self.hero)\r\n\r\n        def mooveRight(self):\r\n            if self.hero.x < 550:\r\n                self.hero.moove(1)\r\n                self.boat.moove(1)\r\n            else:\r\n                self.hero.changeImage(1)\r\n                self.myMap.eventHandler.onLoopUpdate.delElement(self.mooveRight)\r\n\r\n        def nextDialog(self, *args): \r\n            if len(self.dialogs) > 0:\r\n                if self.nextDialogTime[1] <= 0:\r\n                    self.nextDialogTime = [time(), self.dialogs[0][0][0]]\r\n                    dialogManager.start(self.myMap, *self.dialogs[0][1:])\r\n                    if self.dialogs[0][0][1]:\r\n                        self.dialogs[0][0][1]()\r\n                else:\r\n                    self.nextDialogTime[1] -= time() - self.nextDialogTime[0]\r\n                    self.nextDialogTime[0] = time()\r\n                    if self.nextDialogTime[1] <= 0:\r\n                        if len(self.dialogs) > 0:\r\n                            self.dialogs.pop(0)\r\n                        self.actions.pop(0)\r\n\r\n        def stopDialog(self):\r\n            dialogManager.stop()\r\n            self.actions.pop(0)\r\n\r\n        def moove(self):\r\n            if (self.hero.x < 0) or (self.hero.x > self.myMap.w):\r\n                self.borderRespawn()\r\n            if self.respawn != None:\r\n                if type(self.respawn) == str:\r\n                    pass\r\n                else:\r\n                    if time() > self.respawn:\r\n                        if difficultSetting.actionAfterDie == 'respawn':\r\n                            darkScreenAnimation.newStory(gameStory.Story4())\r\n                        else:\r\n                            darkScreenAnimation.newStory(gameStory.Story0())\r\n                        self.respawn = \"a\"\r\n            if dialogManager.work: dialogManager.stop()\r\n            keys.updateKeys()\r\n            speed = 3\r\n            x, y = [(-keys.a + keys.d) * speed, (-keys.w + keys.s) * speed]\r\n            if y == 0:\r\n                y = 1\r\n            if x >= 1: self.hero.changeImage(1)\r\n            # fixed: was 'x <= 1', which also fired while standing still; mirror Story1's check\r\n            if x <= -1: self.hero.changeImage(13)\r\n            self.hero.moove(x, y)\r\n            if self.hero.y > self.myMap.h - self.hero.h: self.hero.setPos(y=self.myMap.h - self.hero.h)\r\n            if self.hero.y < 500: self.hero.setPos(y=500)\r\n\r\n        def actionsPlay(self):\r\n            self.actions[0]()\r\n\r\n    class Story5:\r\n        def reset(self):\r\n            try:\r\n                self.myMap.clear()\r\n            except:\r\n                pass\r\n\r\n        def start(self):\r\n            self.nextDialogTime = [time(), 0]\r\n            game.setMap('храм вход')\r\n            self.myMap = game.getMap('храм вход')\r\n            self.reset()\r\n            \r\n            self.gate = Image(['images/gate2.png', 'images/gate1.png', 'images/gate0.png'], 100, 100, 200, 280)\r\n            self.neptune = Image('images/neptune.png', 100, 100, 68*3, 68*3)\r\n            self.myMap.addElement(self.gate)\r\n\r\n            self.currentDialog = -1\r\n            self.dialogs = [\r\n                [(7, None), 'Чтоо, что бы войти внутрь надо собрать мозайку ?', .06, 2, 'hero-cut2.png'],\r\n                [(4, None), 'Да да именно так, нужно кусочки от мозайки ставить на голубые квадратики', .05, 1, 'robot.png'],\r\n                [(4, None), 'Уф, наконец собрал', .05, 1, 'hero-cut2.png'],\r\n                [(2, self.nextGateImage), 'Уф, наконец собрал', .05, 1, 'hero-cut2.png'],\r\n                [(1, self.nextGateImage), 'Уф, наконец собрал', .001, -10, 
'hero-cut2.png'],\r\n [(.6, None), 'Уф, наконец собрал', .001, -10, 'hero-cut2.png'],\r\n ]\r\n\r\n self.mazeRects = []\r\n self.mazeImages = []\r\n self.mazePos = (0, 0)\r\n for i in range(3):\r\n self.mazeRects.append([])\r\n self.mazeImages.append([])\r\n for j in range(3):\r\n self.mazeImages[-1].append(Image(f\"images/neptune{3*i +(j+1)}.png\", self.mazePos[0] + 600 + randint(0, 160), self.mazePos[1] + 200 + randint(0, 160), 62, 62))\r\n self.mazeRects[-1].append(selfRect(self.mazePos[0]-2 + 66*j, self.mazePos[1]-2 + 66*i, 66, 66, (30, 30, 150)))\r\n self.myMap.addElement(self.mazeRects[-1])\r\n for i in range(3):\r\n self.myMap.addElement(self.mazeImages[i])\r\n self.catchedImage = None\r\n\r\n self.actions = [self.nextDialog, self.nextDialog, self.mooveMaze, self.nextDialog, self.nextDialog, self.nextDialog, self.nextDialog, self.nextStory]\r\n self.myMap.eventHandler.onClick.addElement(self.myMap, self.resetDialogTime)\r\n self.myMap.eventHandler.onLoopUpdate.addElement(self.onResize)\r\n self.myMap.eventHandler.onLoopUpdate.addElement(self.actionsPlay)\r\n\r\n self.lines = []\r\n for i in range(4):\r\n self.lines.append([])\r\n for j in range((self.myMap.w) // 200):\r\n self.lines[-1].append(selfRect(50 + j*200 + randint(-50, 50), 300 + 230*i + randint(-50, 50), 10, 2, (240, 240, 240)))\r\n self.myMap.addElement(self.lines[-1][-1])\r\n self.myMap.addElement(Image('images/waterRect.png', 0, 0, 3000, 3000))\r\n\r\n def nextGateImage(self):\r\n self.gate.changeImage(self.gate.imgIndex + 1)\r\n\r\n def nextStory(self):\r\n darkScreenAnimation.newStory(gameStory.Story6())\r\n self.actions.pop(0)\r\n setLvl(6)\r\n\r\n def mooveMaze(self):\r\n pressed = pg.mouse.get_pressed()[0]\r\n x, y = getMousePos()\r\n plasedMaze = 0\r\n sixAndNineRects = [self.mazeRects[1][2], self.mazeRects[2][2]]\r\n if not(self.catchedImage and pressed):\r\n self.catchedImage = None\r\n for i in range(2, -1, -1):\r\n for j in range(2, -1, -1):\r\n if isMouseRectCollide(self.mazeImages[i][j]):\r\n if not self.catchedImage:\r\n if pressed:\r\n self.catchedImage = self.mazeImages[i][j]\r\n\r\n if self.catchedImage:\r\n self.catchedImage.setPos(x-10, y-10)\r\n else:\r\n for i in range(3):\r\n for j in range(3):\r\n if (j == 2) and ((i == 1) or (i == 2)):\r\n for k in sixAndNineRects:\r\n if k.x + 15 > self.mazeImages[i][j].x > k.x - 15:\r\n if k.y + 15 > self.mazeImages[i][j].y > k.y - 15:\r\n plasedMaze += 1\r\n sixAndNineRects.remove(k)\r\n else:\r\n if self.mazeRects[i][j].x + 15 > self.mazeImages[i][j].x > self.mazeRects[i][j].x - 15:\r\n if self.mazeRects[i][j].y + 15 > self.mazeImages[i][j].y > self.mazeRects[i][j].y - 15:\r\n plasedMaze += 1\r\n if plasedMaze == 9:\r\n self.complitePuzzle()\r\n\r\n def complitePuzzle(self):\r\n for i in range(3):\r\n for j in range(3):\r\n self.myMap.delElement(self.mazeImages[i][j])\r\n self.myMap.delElement(self.mazeRects[i][j])\r\n self.myMap.addElement(self.neptune)\r\n self.nextDialogTime = [time(), 0]\r\n self.actions.pop(0)\r\n\r\n def resetDialogTime(self, *args):\r\n self.nextDialogTime[1] = .001\r\n \r\n def nextDialog(self, *args):\r\n if len(self.dialogs) > 0:\r\n if self.nextDialogTime[1] <= 0:\r\n self.nextDialogTime = [time(), self.dialogs[0][0][0]]\r\n dialogManager.start(self.myMap, *self.dialogs[0][1:])\r\n if self.dialogs[0][0][1]:\r\n self.dialogs[0][0][1]()\r\n else:\r\n self.nextDialogTime[1] -= time() - self.nextDialogTime[0]\r\n self.nextDialogTime[0] = time()\r\n if self.nextDialogTime[1] <= 0:\r\n if len(self.dialogs) > 0:\r\n self.dialogs.pop(0)\r\n 
self.actions.pop(0)\r\n\r\n def stopDialog(self):\r\n dialogManager.stop()\r\n self.actions.pop(0)\r\n\r\n def actionsPlay(self):\r\n if len(self.actions) > 0:\r\n self.actions[0]()\r\n\r\n def onResize(self, *args):\r\n w, h = sc.get_size()\r\n if (self.myMap.w != w) | (self.myMap.h != h):\r\n self.myMap.resize(w, h)\r\n \r\n self.mazePos = [300, h/2 - 160]\r\n self.gate.setPos(self.mazePos[0] - 200, h/2 - 160)\r\n self.neptune.setPos(*self.mazePos)\r\n for i in range(3):\r\n for j in range(3):\r\n self.mazeRects[i][j].setPos(self.mazePos[0]-2 + 68*j, self.mazePos[1]-2 + 68*i)\r\n\r\n class Story6:\r\n def reset(self):\r\n try:\r\n self.myMap.clear()\r\n except:\r\n pass\r\n\r\n def start(self):\r\n w, h = sc.get_size()\r\n self.gameH = 600\r\n self.nextDialogTime = [time(), 0]\r\n game.setMap('дорога со стрелами')\r\n self.myMap = game.getMap('дорога со стрелами')\r\n self.reset()\r\n self.newMarlinTimeOut = randint(1, 3)\r\n self.newMarlinStartTime = time()\r\n self.marlins = []\r\n self.funText = [Button(\"Смотри как пляшет\", 1800, 120, fontSize=20), Button(\"каждый раз смеюсь с этого\", 1760, 150, fontSize=20)]\r\n self.drawFunText = False\r\n self.stoneWall = Image('images/stone-wall3.png', -120, self.gameH, 2160, 600)\r\n\r\n\r\n self.pathImgs = [Image(\"images/stone-wall2.png\", 0 + 960*i, 0, 960, self.gameH) for i in range(2)]\r\n self.hero = Image('images/hero3.png', 200, 200, 25, 50)\r\n self.heroY = 200\r\n self.towers = [Image(\"images/tower2.png\", 1820, 100 + 300*i, 120, 100) for i in range(2)]\r\n self.gates = [Image('images/gate3.png', 1920, 0, 120, self.gameH), Image('images/gate4.png', -120, 0, 120, self.gameH)]\r\n\r\n self.dialogs = [\r\n [(6, None), 'Что то не хорошее у меня предчуствие', .03, 2, 'hero-cut2.png'],\r\n [(6, None), '(хах) Явился не запылился, спустить марлинов!!!', .03, .3, 'mermaid.png'],\r\n [(1, None), 'Чтоо ?', .02, .01, 'hero-cut2.png'],\r\n [(2.5, None), 'Наконеец', .02, .01, 'hero-cut2.png'],\r\n ]\r\n\r\n self.actions = [self.nextDialog, self.nextDialog, self.nextDialog, self.stopDialog, self.moove, self.newStory]\r\n self.myMap.addElement([*self.pathImgs, self.stoneWall, *self.towers, *self.gates, self.hero])\r\n self.myMap.eventHandler.onClick.addElement(self.myMap, self.resetDialogTime)\r\n self.myMap.eventHandler.onLoopUpdate.addElement(self.actionsPlay)\r\n\r\n def newStory(self):\r\n darkScreenAnimation.newStory(gameStory.Story7())\r\n self.actions.pop(0)\r\n setLvl(7)\r\n\r\n def spawnMarlins(self):\r\n for i in self.marlins:\r\n if self.hero.colliderect(i):\r\n self.heroY = 200\r\n self.hero.setPos(200, 200)\r\n i.moove(-5)\r\n if not(self.hero.y - 30 < i.y < self.hero.y + self.hero.h - 20):\r\n i.moove(y=2 if i.y < self.hero.y + self.hero.h - 20 else -2)\r\n if i.x < -120:\r\n self.myMap.delElement(i)\r\n self.marlins.remove(i)\r\n self.newMarlinTimeOut -= time() - self.newMarlinStartTime\r\n self.newMarlinStartTime = time()\r\n if self.newMarlinTimeOut <= 0:\r\n self.newMarlinTimeOut = 1.8\r\n self.marlins.append(Image('images/marlin.png', 2040, 40 + randint(0, 10)*48, 100, 50))\r\n self.myMap.addElement(self.marlins[-1])\r\n if self.hero.collideobjects(self.marlins):\r\n playSound('marlin-hit')\r\n if difficultSetting.actionAfterDie == 'respawn':\r\n darkScreenAnimation.newStory(gameStory.Story6())\r\n else:\r\n darkScreenAnimation.newStory(gameStory.Story0())\r\n\r\n def moove(self):\r\n w, h = sc.get_size()\r\n if dialogManager.work: dialogManager.stop()\r\n keys.updateKeys(); speed = 3\r\n x, y = [(-keys.a + keys.d) * 
difficultSetting.selfSpeedInArrowMap, (-keys.w + keys.s) * 3.7]\r\n self.heroY += y\r\n self.hero.moove(x)\r\n if self.hero.collideobjects(self.towers): self.hero.moove(-x)\r\n self.hero.setPos(y=self.heroY)\r\n if self.hero.collideobjects(self.towers): self.heroY -= y; self.hero.setPos(y=self.heroY)\r\n if self.hero.x < 0: self.hero.setPos(0)\r\n if self.hero.y < 0: self.hero.setPos(y=0); self.heroY=0\r\n elif self.hero.y > self.gameH - self.hero.h: self.heroY=self.gameH - self.hero.h; self.hero.setPos(y=self.heroY)\r\n mapX = self.hero.x - 200\r\n if mapX > 1920 - w + 120: mapX = 1920 - w + 120\r\n elif mapX < -120: mapX = -120\r\n\r\n if not self.drawFunText:\r\n if self.hero.x > 2160 - w:\r\n self.drawFunText = True\r\n self.myMap.addElement(self.funText)\r\n\r\n self.myMap.x = -mapX\r\n if self.hero.colliderect(self.gates[0]):\r\n self.actions.pop(0)\r\n\r\n def resetDialogTime(self, *args):\r\n self.nextDialogTime[1] = .001\r\n \r\n def nextDialog(self, *args):\r\n if len(self.dialogs) > 0:\r\n if self.nextDialogTime[1] <= 0:\r\n self.nextDialogTime = [time(), self.dialogs[0][0][0]]\r\n dialogManager.start(self.myMap, *self.dialogs[0][1:])\r\n if self.dialogs[0][0][1]:\r\n self.dialogs[0][0][1]()\r\n else:\r\n self.nextDialogTime[1] -= time() - self.nextDialogTime[0]\r\n self.nextDialogTime[0] = time()\r\n if self.nextDialogTime[1] <= 0:\r\n if len(self.dialogs) > 0:\r\n self.dialogs.pop(0)\r\n self.actions.pop(0)\r\n\r\n def stopDialog(self):\r\n dialogManager.stop()\r\n self.actions.pop(0)\r\n self.myMap.eventHandler.onLoopUpdate.addElement(self.spawnMarlins)\r\n\r\n def actionsPlay(self):\r\n if len(self.actions) > 0:\r\n self.actions[0]()\r\n\r\n def onResize(self, *args):\r\n w, h = sc.get_size()\r\n if (self.myMap.w != w) | (self.myMap.h != h):\r\n self.myMap.resize(w, h)\r\n \r\n self.mazePos = [300, h/2 - 160]\r\n self.gate.setPos(self.mazePos[0] - 200, h/2 - 160)\r\n self.neptune.setPos(*self.mazePos)\r\n for i in range(3):\r\n for j in range(3):\r\n self.mazeRects[i][j].setPos(self.mazePos[0]-2 + 68*j, self.mazePos[1]-2 + 68*i)\r\n\r\n class Story7:\r\n def reset(self):\r\n try:\r\n self.myMap.clear()\r\n except:\r\n pass\r\n\r\n def start(self):\r\n w, h = sc.get_size()\r\n self.gameH = 600\r\n self.nextDialogTime = [time(), 0]\r\n game.setMap('лабиринт')\r\n self.myMap = game.getMap('лабиринт')\r\n self.reset()\r\n \r\n self.floor = Image(\"images/stone-wall2.png\", 0, 0, 400 + 300*6 + 20, 300*6 + 20)\r\n self.carpet = Image('images/carpet.png', 400 + 300*6 - 200, 300*6 - 160, 140, 140)\r\n mazePos = [400, 0]\r\n length = 300\r\n mazeData = [\r\n [-2, 1, 2, 20],\r\n [0, 1, 20, 5],\r\n [0, 6, 5, 20],\r\n [0, 0, 6, 20],\r\n [6, 0, 20, 6],\r\n [5, 0, 20, 2],\r\n [4, 1, 20, 2],\r\n [4, 3, 2, 20],\r\n [0, 2, 2, 20],\r\n [1, 2, 20, 1],\r\n [1, 1, 2, 20],\r\n [3, 1, 20, 4],\r\n [2, 3, 1, 20],\r\n [1, 5, 2, 20],\r\n [0, 4, 5, 20],\r\n [4, 5, 1, 20],\r\n [4, 5, 20, 1],\r\n ]\r\n self.maze = []\r\n for i in mazeData:\r\n data = [i[0] * length + mazePos[0], i[1] * length + mazePos[1], i[2], i[3]]\r\n if data[2] == 20:\r\n data[3] *= length\r\n else:\r\n data[2] *= length\r\n self.maze.append(selfRect(*data, (20, 20, 20)))\r\n self.hero = Image('images/hero3.png', 100, 100, 25, 50)\r\n\r\n self.dialogs = [\r\n [(4, None), 'Уф, еле прошёл', .03, 2, 'hero-cut2.png'],\r\n [(3.6, None), 'Так а это похоже лабиринт', .03, .3, 'hero-cut2.png'],\r\n ]\r\n\r\n self.actions = [self.nextDialog, self.nextDialog, self.stopDialog, self.moove, self.newStory]\r\n self.myMap.addElement([self.floor, 
*self.maze, self.carpet, self.hero])\r\n self.myMap.eventHandler.onClick.addElement(self.myMap, self.resetDialogTime)\r\n self.myMap.eventHandler.onLoopUpdate.addElement(self.actionsPlay)\r\n\r\n def newStory(self):\r\n darkScreenAnimation.newStory(gameStory.Story8())\r\n self.actions.pop(0)\r\n setLvl(8)\r\n\r\n def moove(self):\r\n w, h = sc.get_size()\r\n if dialogManager.work: dialogManager.stop()\r\n keys.updateKeys(); speed = 4\r\n x, y = [(-keys.a + keys.d) * speed, (-keys.w + keys.s) * speed]\r\n self.hero.moove(x)\r\n if self.hero.collideobjects(self.maze):\r\n self.hero.moove(-x)\r\n self.hero.moove(y=y)\r\n if self.hero.collideobjects(self.maze):\r\n self.hero.moove(y=-y)\r\n\r\n mapPos = [self.hero.x - w/2, self.hero.y - h/2]\r\n if mapPos[0] < 0: mapPos[0] = 0\r\n elif self.hero.x > self.floor.w - w/2: mapPos[0] = self.floor.w - w\r\n if mapPos[1] < 0: mapPos[1] = 0\r\n elif self.hero.y > self.floor.h - h/2: mapPos[1] = self.floor.h - h\r\n if self.hero.colliderect(self.carpet):\r\n self.actions.pop(0)\r\n self.myMap.x = -mapPos[0]; self.myMap.y = -mapPos[1]\r\n if self.hero.x < 0:\r\n self.hero.setPos(0)\r\n elif self.hero.x > self.myMap.w - self.hero.w:\r\n self.hero.setPos(self.myMap.w - self.hero.w)\r\n if self.hero.y < 0:\r\n self.hero.setPos(y=0)\r\n elif self.hero.y > self.myMap.h - self.hero.h*5:\r\n self.hero.setPos(y=self.myMap.h - self.hero.h*5)\r\n \r\n def resetDialogTime(self, *args):\r\n self.nextDialogTime[1] = .001\r\n \r\n def nextDialog(self, *args):\r\n if len(self.dialogs) > 0:\r\n if self.nextDialogTime[1] <= 0:\r\n self.nextDialogTime = [time(), self.dialogs[0][0][0]]\r\n dialogManager.start(self.myMap, *self.dialogs[0][1:])\r\n if self.dialogs[0][0][1]:\r\n self.dialogs[0][0][1]()\r\n else:\r\n self.nextDialogTime[1] -= time() - self.nextDialogTime[0]\r\n self.nextDialogTime[0] = time()\r\n if self.nextDialogTime[1] <= 0:\r\n if len(self.dialogs) > 0:\r\n self.dialogs.pop(0)\r\n self.actions.pop(0)\r\n\r\n def stopDialog(self):\r\n dialogManager.stop()\r\n self.actions.pop(0)\r\n\r\n def actionsPlay(self):\r\n if len(self.actions) > 0:\r\n self.actions[0]()\r\n\r\n class Story8:\r\n def reset(self):\r\n try:\r\n self.myMap.clear()\r\n except:\r\n pass\r\n\r\n def start(self):\r\n w, h = sc.get_size()\r\n self.gameH = 600\r\n self.nextDialogTime = [time(), 0]\r\n game.setMap('босс')\r\n self.myMap = game.getMap('босс')\r\n self.reset()\r\n self.spawnTimeStart = 1\r\n self.tornadoTimeImage = .5\r\n \r\n self.hero = Image('images/hero3.png', 100, 300, 25, 50)\r\n self.neptune = Image('images/neptune-fight.png', w/2, h/2, 100, 60)\r\n self.marline = []\r\n self.marline2 = []\r\n self.marlineSpawnTime = time() + 2\r\n self.marlineSpawnTime2 = time() + 2\r\n self.tornado = []\r\n self.mirrirSpawn = time() + 2\r\n self.mirrorFish = []\r\n self.respawn = False\r\n self.dialogs = [\r\n [(7.6, None), 'А вот и ты, раньше чем я ожидал. Хвалю, Удивишь ли ты меня так же в сражении ?', .05, 2, 'neptune-cut.png'],\r\n [(8.2, None), '(Ёмае, куда я забрел.. 
я же просто хотел найти сокровища, а тут еще эта рыба драки хочет)', .05, .3, 'hero-cut2.png'],\r\n                [(3.6, self.setFirstSpawnTime), 'А я обязательно должен сражаться ?', .03, .3, 'hero-cut2.png'],\r\n            ]\r\n\r\n            self.actions = [self.nextDialog, self.nextDialog, self.nextDialog, self.stopDialog, self.moove]\r\n            self.myMap.addElement([self.neptune, self.hero])\r\n            self.myMap.eventHandler.onClick.addElement(self.myMap, self.resetDialogTime)\r\n            self.myMap.eventHandler.onLoopUpdate.addElement(self.actionsPlay)\r\n            self.myMap.eventHandler.onLoopUpdate.addElement(self.onResize)\r\n\r\n        # устанавливает время когда станут появляется первые предметы\r\n        def setFirstSpawnTime(self):\r\n            self.marlineSpawnTime = time() + 1\r\n            self.marlineSpawnTime2 = time() + difficultSetting.marlinWaitSpawnBos\r\n            self.mirrirSpawn = time() + difficultSetting.mirrorFishWaitSpawn\r\n\r\n\r\n        # спавнит предметы, двигает их, проверяет столкновения\r\n        def mobsEngine(self):\r\n            w, h = sc.get_size()\r\n            if time() - self.spawnTimeStart > difficultSetting.tornadoSpawnSpeed:\r\n                playSound('storm')\r\n                self.spawnTimeStart = time()\r\n                angle = getAngle((w/2 + 10, h/2), (w/2, h/2), (self.hero.x, self.hero.y))\r\n                self.tornado.append([Image(['images/tornado0.png', 'images/tornado1.png'], w/2, h/2, 60, 60), angle, [w/2, h/2], False])\r\n                self.myMap.addElement(self.tornado[-1][0])\r\n            if time() - self.marlineSpawnTime > difficultSetting.marlinSpeedSpawnBos:\r\n                self.marlineSpawnTime = time()\r\n                self.marline.append(Image('images/marlin.png', w, self.hero.y + 10, 100, 50))\r\n                self.myMap.addElement(self.marline[-1])\r\n            if time() - self.marlineSpawnTime2 > 1 + difficultSetting.marlinSpeedSpawnBos:\r\n                self.marlineSpawnTime2 = time()\r\n                self.marline2.append(Image('images/marlin2.png', -100, self.hero.y + 10, 100, 50))\r\n                self.myMap.addElement(self.marline2[-1])\r\n            if time() - self.tornadoTimeImage > .1:\r\n                self.tornadoTimeImage = time()\r\n                for i in self.tornado:\r\n                    i[0].changeImage(0 if i[0].imgIndex else 1)\r\n            if time() - self.mirrirSpawn > difficultSetting.mirrorFishRespawnBos:\r\n                self.mirrirSpawn = time()\r\n                self.mirrorFish.append(Image('images/mirror-fish.png', w*.2, - 100, 40, 80))\r\n                self.myMap.addElement(self.mirrorFish[-1])\r\n            for i in self.tornado:\r\n                for j in self.mirrorFish:\r\n                    if i[0].colliderect(j):\r\n                        angle = getAngle((j.x + 10, j.y), (j.x, j.y), (w/2, h/2 + 10))\r\n                        i[1] = angle\r\n                        i[3] = True\r\n\r\n                i[2][0] += cos(radians(i[1]))*4\r\n                i[2][1] += sin(radians(i[1]))*4\r\n                i[0].setPos(*i[2])\r\n                if i[3] == True:\r\n                    if i[0].colliderect(self.neptune):\r\n                        darkScreenAnimation.newStory(gameStory.Story9())\r\n                        setLvl(9)\r\n                        self.actions.pop(0)\r\n                if (i[0].x < -70) or (i[0].x > w) or (i[0].y < -70) or (i[0].y > h):\r\n                    self.myMap.delElement(i[0])\r\n                    self.tornado.remove(i)\r\n\r\n            for i in self.marline:\r\n                i.moove(-5)\r\n                # fixed: was 'if i.x > w', which is never true for a marlin that spawns\r\n                # at x=w and moves left; mirror the off-screen check used in Story6\r\n                if i.x < -100:\r\n                    self.myMap.delElement(i)\r\n                    self.marline.remove(i)\r\n\r\n            for i in self.marline2:\r\n                i.moove(5)\r\n                if i.x > w:\r\n                    self.myMap.delElement(i)\r\n                    self.marline2.remove(i)\r\n            for i in self.mirrorFish:\r\n                i.moove(y=3)\r\n                if i.y > h:\r\n                    self.myMap.delElement(i)\r\n                    self.mirrorFish.remove(i)\r\n\r\n            for i in self.tornado:\r\n                if i[0].colliderect(self.hero):\r\n                    if not self.respawn:\r\n                        if difficultSetting.actionAfterDie == 'respawn':\r\n                            darkScreenAnimation.newStory(gameStory.Story8())\r\n                        else:\r\n                            darkScreenAnimation.newStory(gameStory.Story0())\r\n                        self.respawn = True\r\n\r\n            if self.hero.collideobjects(self.marline) or self.hero.collideobjects(self.marline2):\r\n                
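# NOTE: Rect.collideobjects returns the first colliding object (or None), so\r\n                # this branch runs while any marlin overlaps the hero; the respawn flag set\r\n                # below keeps newStory from being triggered again on every frame.\r\n                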
playSound('marlin-hit')\r\n                if not self.respawn:\r\n                    if difficultSetting.actionAfterDie == 'respawn':\r\n                        darkScreenAnimation.newStory(gameStory.Story8())\r\n                    else:\r\n                        darkScreenAnimation.newStory(gameStory.Story0())\r\n                    # fixed: was 'self.reset = True', which clobbered the reset() method\r\n                    # instead of setting the respawn guard used by the tornado branch above\r\n                    self.respawn = True\r\n\r\n        # движение героя\r\n        def moove(self):\r\n            w, h = sc.get_size()\r\n            self.mobsEngine()\r\n            if dialogManager.work: dialogManager.stop()\r\n            keys.updateKeys(); speed = 4\r\n            x, y = [(-keys.a + keys.d) * speed, (-keys.w + keys.s) * speed]\r\n            self.hero.moove(x)\r\n            self.hero.moove(y=y)\r\n            if self.hero.x < 0:\r\n                self.hero.setPos(0)\r\n            elif self.hero.x > w - self.hero.w:\r\n                self.hero.setPos(w - self.hero.w)\r\n            if self.hero.y < 0:\r\n                self.hero.setPos(y=0)\r\n            elif self.hero.y > h - self.hero.h:\r\n                self.hero.setPos(y=h - self.hero.h)\r\n        \r\n        def resetDialogTime(self, *args):\r\n            self.nextDialogTime[1] = .001\r\n        \r\n        def nextDialog(self, *args):\r\n            if len(self.dialogs) > 0:\r\n                if self.nextDialogTime[1] <= 0:\r\n                    self.nextDialogTime = [time(), self.dialogs[0][0][0]]\r\n                    dialogManager.start(self.myMap, *self.dialogs[0][1:])\r\n                    if self.dialogs[0][0][1]:\r\n                        self.dialogs[0][0][1]()\r\n                else:\r\n                    self.nextDialogTime[1] -= time() - self.nextDialogTime[0]\r\n                    self.nextDialogTime[0] = time()\r\n                    if self.nextDialogTime[1] <= 0:\r\n                        if len(self.dialogs) > 0:\r\n                            self.dialogs.pop(0)\r\n                        self.actions.pop(0)\r\n\r\n        def stopDialog(self):\r\n            dialogManager.stop()\r\n            self.actions.pop(0)\r\n\r\n        def actionsPlay(self):\r\n            if len(self.actions) > 0:\r\n                self.actions[0]()\r\n\r\n        def onResize(self, *args):\r\n            w, h = sc.get_size()\r\n            if (self.myMap.w != w) | (self.myMap.h != h):\r\n                self.myMap.resize(w, h)\r\n                \r\n                self.neptune.setPos(w/2, h/2)\r\n\r\n    class Story9:\r\n        def reset(self):\r\n            try:\r\n                self.myMap.clear()\r\n            except:\r\n                pass\r\n\r\n        def start(self):\r\n            setLvl(10)\r\n            w, h = sc.get_size()\r\n            self.gameH = 600\r\n            self.nextDialogTime = [time(), 0]\r\n            game.setMap('босс конец')\r\n            self.myMap = game.getMap('босс конец')\r\n            self.reset()\r\n            self.myMap.resize(w, h)\r\n            \r\n            self.hero = Image('images/hero3.png', 400, 400, 50, 100)\r\n            self.neptune = Image(['images/neptune-fight.png', 'images/old-man3.png', 'images/old-man2.png'], 560, 400, 130, 130)\r\n            self.merlins = []\r\n            for i in range(2):\r\n                for j in range(2):\r\n                    self.merlins.append(Image(\"images/marlin.png\", w + 140*i, 160 + 100*j, 150, 75))\r\n            self.money = Image('images/moneys.png', w + 10, 170, 200, 200)\r\n            self.book = Image('images/book0.png', w + 10, 190, 30, 30)\r\n\r\n            self.dialogs = [\r\n                [(4, None), 'Ты сражался отлично', .05, 2, 'neptune-cut.png'],\r\n                [(6, lambda: self.myMap.eventHandler.onLoopUpdate.addElement(self.getTreasures)), 'Вот держи сокровиша', .05, .3, 'neptune-cut.png'],\r\n                [(3, lambda: self.neptune.changeImage(1)), '....', .4, .3, 'old-man-cut-hidden.png'],\r\n                [(2, None), '', .05, .3, 'old-man-cut2.png'],\r\n                [(6, lambda: self.neptune.changeImage(2)), 'Что, почему эта рыба стало дедушкой, как так, почему?', .03, .3, 'hero-cut3.png'],\r\n                [(4, None), 'Мм, что это за книга?', .03, .3, 'hero-cut3.png'],\r\n                [(4, None), 'Что же в ней написано', .03, .3, 'hero-cut0.png'],\r\n                [(29, lambda: self.myMap.eventHandler.onLoopUpdate.addElement(self.resizeDialog)), 'Я сегодня плавал. И вдруг начал думать, что живу скучно. Мне захотелось как то разнообразить свою жизнь. И я решил украсть мальчика с деревни. Я стер ему воспоминая, добавил немного о себе и положил спать на полу в доме. Когда он очнулся, я подумал, что будет интересно, если он попытается попасть в мой дворец. 
И вот я дал ему акваланг и попросил, что бы он помог достать мой сундук. А в сундук я положил свою книгу. Несколько раз он нарушал мои правила. И приходилось заного стерать воспоминания. Но вот он, наконец, попал в мой дворец. и я подумал. Теперь то я повеселюсь. Только сначала надо подготовить подарок', .03, .3, 'hero-cut0.png'],\r\n ]\r\n\r\n self.actions = [self.nextDialog, self.nextDialog, self.nextDialog, self.nextDialog, self.nextDialog, self.nextDialog, self.nextDialog, self.nextDialog, lambda:game.setMap(\"главное меню\")]\r\n self.myMap.addElement([self.neptune, self.hero, *self.merlins, self.money, self.book])\r\n self.myMap.eventHandler.onClick.addElement(self.myMap, self.resetDialogTime)\r\n self.myMap.eventHandler.onLoopUpdate.addElement(self.actionsPlay)\r\n self.myMap.eventHandler.onLoopUpdate.addElement(self.onResize)\r\n\r\n def resizeDialog(self):\r\n dialogManager.bg.resize(h=600)\r\n\r\n def getTreasures(self):\r\n for i in self.merlins:\r\n i.moove(-3)\r\n self.money.moove(-3)\r\n self.book.moove(-3)\r\n if self.money.x <= sc.get_size()[0]/2 - 80:\r\n self.money.moove(3)\r\n self.book.moove(3)\r\n for i in self.merlins:\r\n i.moove(-5)\r\n if self.merlins[0].x < -500:\r\n self.myMap.eventHandler.onLoopUpdate.delElement(self.getTreasures)\r\n\r\n def setFirstSpawnTime(self):\r\n self.marlineSpawnTime = time() + 1\r\n self.marlineSpawnTime2 = time() + 6\r\n self.mirrirSpawn = time() + 1\r\n\r\n def moove(self):\r\n w, h = sc.get_size()\r\n if dialogManager.work: dialogManager.stop()\r\n self.spawnTornado()\r\n keys.updateKeys(); speed = 4\r\n x, y = [(-keys.a + keys.d) * speed, (-keys.w + keys.s) * speed]\r\n self.hero.moove(x)\r\n self.hero.moove(y=y)\r\n\r\n \r\n def resetDialogTime(self, *args):\r\n self.nextDialogTime[1] = .001\r\n \r\n def nextDialog(self, *args):\r\n if len(self.dialogs) > 0:\r\n if self.nextDialogTime[1] <= 0:\r\n self.nextDialogTime = [time(), self.dialogs[0][0][0]]\r\n dialogManager.start(self.myMap, *self.dialogs[0][1:])\r\n if self.dialogs[0][0][1]:\r\n self.dialogs[0][0][1]()\r\n else:\r\n self.nextDialogTime[1] -= time() - self.nextDialogTime[0]\r\n self.nextDialogTime[0] = time()\r\n if self.nextDialogTime[1] <= 0:\r\n if len(self.dialogs) > 0:\r\n self.dialogs.pop(0)\r\n self.actions.pop(0)\r\n\r\n def stopDialog(self):\r\n dialogManager.stop()\r\n self.actions.pop(0)\r\n\r\n def actionsPlay(self):\r\n if len(self.actions) > 0:\r\n self.actions[0]()\r\n\r\n def onResize(self, *args):\r\n w, h = sc.get_size()\r\n if (self.myMap.w != w) | (self.myMap.h != h):\r\n self.myMap.resize(w, h)\r\n self.hero.setPos(w/2 - 60, h/2) \r\n self.neptune.setPos(w/2, h/2)\r\n\r\n\r\n# cтавит по центру кнопку играть\r\ndef setToCenterWelkomMenu():\r\n w, h = sc.get_size()\r\n if w != welkomMenu.w or h != welkomMenu.h:\r\n setElementToCenterX(playBtn)\r\n welkomMenu.resize(w,h)\r\n\r\n# анимация затемнения экрана\r\nclass DarkScreenAnimation:\r\n actionWithDarkImage = ''\r\n darkImages = []\r\n blackImage = Image(\"images/blackColor.png\", 0, 0, 20, 20)\r\n story = None\r\n def __init__(self):\r\n topMap.addElement(self.blackImage)\r\n self.blackImage.setAlpha(0)\r\n def newStory(self, story):\r\n self.story = story\r\n story.reset()\r\n self.blackImage.setAlpha(0)\r\n self.actionWithDarkImage = 'add'\r\n self.blackImage.setAlpha(0)\r\n topMap.addElement(self.blackImage)\r\n topMap.eventHandler.onLoopUpdate.addElement(self.onLoopUpdate)\r\n \r\n def onLoopUpdate(self):\r\n if self.actionWithDarkImage == 'add':\r\n if self.blackImage.alpha < 255:\r\n alpha = 
self.blackImage.alpha\r\n alpha += 10\r\n if alpha > 255: alpha = 255\r\n self.blackImage.setAlpha(alpha)\r\n else:\r\n self.actionWithDarkImage = 'del'\r\n self.story.start()\r\n elif self.actionWithDarkImage == 'del':\r\n if self.blackImage.alpha > 0:\r\n alpha = self.blackImage.alpha; alpha -= 4\r\n if alpha < 0: alpha = 0\r\n self.blackImage.setAlpha(alpha)\r\n else:\r\n self.actionWithDarkImage = ''\r\n topMap.delElement(self.blackImage)\r\n topMap.eventHandler.onLoopUpdate.delElement(self.onLoopUpdate)\r\n self.blackImage.resize(*sc.get_size())\r\n\r\n# детектор основных клавиш\r\nclass Keys:\r\n w = a = s = d = False\r\n def updateKeys(self):\r\n pgKeys = pg.key.get_pressed()\r\n self.w = pgKeys[pg.K_w]\r\n self.a = pgKeys[pg.K_a]\r\n self.s = pgKeys[pg.K_s]\r\n self.d = pgKeys[pg.K_d]\r\n\r\n# ставит уровень открытых локаций, сохраняет текушую доступнуя локацию\r\ndef setLvl(lvl):\r\n global passedLevels\r\n if passedLevels < lvl:\r\n passedLevels = lvl\r\n with open('lvls.txt', 'w') as f:\r\n f.write(str(passedLevels))\r\n for i in range(len(storyBtnsEnter)):\r\n welkomMenu.eventHandler.onClick.delElement(storyBtnsEnter[i])\r\n welkomMenu.eventHandler.mouseHover.delElement(storyBtnsEnter[i])\r\n storyBtnsEnter[i].bgColor = [10, 10, 10]\r\n storyBtnsEnter[i].drawableBgColor = [10, 10, 10]\r\n if passedLevels >= i + 1:\r\n storyBtnsEnter[i].bgColor = [120, 120, 120]\r\n storyBtnsEnter[i].drawableBgColor = [120, 120, 120]\r\n addButtonHoverEffect(welkomMenu, storyBtnsEnter[i]); storyBtnsEnter[i].id = i+1\r\n welkomMenu.eventHandler.onClick.addElement(storyBtnsEnter[i], setStory)\r\n\r\n# запускает локацию по нажатию кнопок в главном меню\r\ndef setStory(el, *args):\r\n darkScreenAnimation.newStory(storys[el.id-1]())\r\n\r\n# регулеровщик кнопки громкости музыки\r\ndef mooveSoundBtn(*args):\r\n pressed = pg.mouse.get_pressed()[0]\r\n if pressed:\r\n if isMouseRectCollide(soundRectBtn):\r\n soundRectBtn.pressed = True\r\n else:\r\n soundRectBtn.pressed = False\r\n if soundRectBtn.pressed:\r\n x, y = getMousePos()\r\n soundRectBtn.setPos(x - 10)\r\n if soundRectBtn.x < soundRectBg.x + 4:\r\n soundRectBtn.setPos(soundRectBg.x + 4)\r\n elif soundRectBtn.x > soundRectBg.x + soundRectBg.w - 24:\r\n soundRectBtn.setPos(soundRectBg.x + soundRectBg.w - 24)\r\n global sound_volume\r\n sound_volume = (soundRectBtn.x - soundRectBg.x - 4)/(soundRectBg.w - 28)\r\n soundLabel.updateText(str(sound_volume))\r\n pg.mixer.music.set_volume(sound_volume)\r\n\r\ndef mooveEffectBtn(*args):\r\n pressed = pg.mouse.get_pressed()[0]\r\n if pressed:\r\n if isMouseRectCollide(effectRectBtn):\r\n effectRectBtn.pressed = True\r\n else:\r\n effectRectBtn.pressed = False\r\n if effectRectBtn.pressed:\r\n x, y = getMousePos()\r\n effectRectBtn.setPos(x - 10)\r\n if effectRectBtn.x < effectRectBg.x + 4:\r\n effectRectBtn.setPos(effectRectBg.x + 4)\r\n elif effectRectBtn.x > effectRectBg.x + effectRectBg.w - 24:\r\n effectRectBtn.setPos(effectRectBg.x + effectRectBg.w - 24)\r\n global effects_volume\r\n effects_volume = (effectRectBtn.x - effectRectBg.x - 4)/(effectRectBg.w - 28)\r\n effectLabel.updateText(str(effects_volume))\r\n\r\n\r\n# запускает звук\r\ndef playSound(sound):\r\n pg.mixer.Channel(1).play(pg.mixer.Sound(f'sounds\\{sound}.wav'))\r\n pg.mixer.Channel(1).set_volume(effects_volume)\r\n\r\n# устанавливет сложность\r\ndef setEasy(*args):\r\n for i in difficultBtns:\r\n i.bgColor = (200, 200, 200)\r\n i.drawableBgColor = (200, 200, 200)\r\n difficultBtn0.bgColor = (150, 150, 150)\r\n 
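# NOTE: every difficulty setter follows the same recipe: repaint all buttons to\r\n    # the idle colour, darken the selected one, then overwrite the shared\r\n    # DifficultSetting fields (spawn timings, player speed, action after death)\r\n    # that the Story classes read at run time.\r\n    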
difficultBtn0.drawableBgColor = (100, 100, 100)\r\n difficultSetting.selfSpeedInArrowMap = 5\r\n difficultSetting.tornadoSpawnSpeed = 3\r\n difficultSetting.marlinSpeedSpawnBos = 4\r\n difficultSetting.marlinWaitSpawnBos = 20\r\n difficultSetting.mirrorFishWaitSpawn = 6\r\n difficultSetting.mirrorFishRespawnBos = 1.5 \r\n difficultSetting.actionAfterDie = 'respawn'\r\n\r\n# устанавливет сложность\r\ndef setNormal(*args):\r\n for i in difficultBtns:\r\n i.bgColor = (200, 200, 200)\r\n i.drawableBgColor = (200, 200, 200)\r\n difficultBtn1.bgColor = (150, 150, 150)\r\n difficultBtn1.drawableBgColor = (100, 100, 100)\r\n difficultSetting.selfSpeedInArrowMap = 4\r\n difficultSetting.tornadoSpawnSpeed = 1.8\r\n difficultSetting.marlinSpeedSpawnBos = 2.5\r\n difficultSetting.marlinWaitSpawnBos = 7\r\n difficultSetting.mirrorFishWaitSpawn = 15\r\n difficultSetting.mirrorFishRespawnBos = 3\r\n difficultSetting.actionAfterDie = 'respawn'\r\n\r\n# устанавливет сложность\r\ndef setHard(*args):\r\n for i in difficultBtns:\r\n i.bgColor = (200, 200, 200)\r\n i.drawableBgColor = (200, 200, 200)\r\n difficultBtn2.bgColor = (150, 150, 150)\r\n difficultBtn2.drawableBgColor = (100, 100, 100)\r\n difficultSetting.selfSpeedInArrowMap = 3\r\n difficultSetting.tornadoSpawnSpeed = .8\r\n difficultSetting.marlinSpeedSpawnBos = 1.6\r\n difficultSetting.marlinWaitSpawnBos = 6\r\n difficultSetting.mirrorFishWaitSpawn = 24\r\n difficultSetting.mirrorFishRespawnBos = 5 \r\n difficultSetting.actionAfterDie = 'die :D'\r\n\r\n# устанавливет сложность\r\ndef setHardcore(*args):\r\n for i in difficultBtns:\r\n i.bgColor = (200, 200, 200)\r\n i.drawableBgColor = (200, 200, 200)\r\n difficultBtn3.bgColor = (150, 150, 150)\r\n difficultBtn3.drawableBgColor = (100, 100, 100)\r\n\r\n difficultSetting.selfSpeedInArrowMap = 2\r\n difficultSetting.tornadoSpawnSpeed = .5\r\n difficultSetting.marlinSpeedSpawnBos = 1\r\n difficultSetting.marlinWaitSpawnBos = 3\r\n difficultSetting.mirrorFishWaitSpawn = 30\r\n difficultSetting.mirrorFishRespawnBos = 6 \r\n difficultSetting.actionAfterDie = 'die :D'\r\n\r\n# данные используемые для настройки сложности\r\nclass DifficultSetting:\r\n selfSpeedInArrowMap = 3\r\n tornadoSpawnSpeed = .4\r\n marlinSpeedSpawnBos = 1\r\n marlinWaitSpawnBos = 10\r\n mirrorFishWaitSpawn = 10\r\n mirrorFishRespawnBos = 2 \r\n actionAfterDie = 'respawn'\r\n\r\ntopMap = Map(0, 0, 4000, 4000)\r\ndifficultSetting = DifficultSetting()\r\nkeys = Keys()\r\ndarkScreenAnimation = DarkScreenAnimation()\r\ndialogManager = DialogManager()\r\ngameStory = GameStory()\r\nstorys = [gameStory.Story0, gameStory.Story1, gameStory.Story2, gameStory.Story3, gameStory.Story4, gameStory.Story5, gameStory.Story6, gameStory.Story7, gameStory.Story8, gameStory.Story9] \r\n\r\n# инициализация карт, елементов карт\r\ngame = Game()\r\nwelkomMenu = Map(0, 0, 4000, 4000, (60, 170, 120))\r\nsettingsMenu = Map(0, 0, 3000, 3000, (60, 170, 120))\r\n\r\nhomeFirstSize = (640, 360)\r\nstreetFirstSize = (544, 326)\r\nhome = Map(0, 0, 640, 360, bgImage=\"images/house-room.png\")\r\nhome2 = Map(0, 0, 640, 360, bgImage=\"images/house-room.png\")\r\n\r\ntombEnterMap = Map(0, 0, 4000, 2000, bgImage=\"images/stone-wall.png\")\r\ntombArrowsMap = Map(0, 0, 4000, 2000, (60, 120, 216))\r\nlabirinthMap = Map(0, 0, 4000, 2000, (60, 120, 216))\r\nbossMap = Map(0, 0, 1000, 1000, bgImage=\"images/stone-wall2.png\")\r\nbossMapEnd = Map(0, 0, 1000, 1000, bgImage=\"images/stone-wall2.png\")\r\nstreetMap = Map(0, 0, 544, 326, bgImage=\"images/street0.png\")\r\nwaterMap0 
= Map(0, 0, 6000, 1200, (207, 226, 243))\r\nwaterMap1 = Map(0, 0, 6000, 1200, (207, 226, 243))\r\nwaterRect = selfRect(-1000, 200, 8000, 1000, (60, 120, 216))\r\nwaterRect1 = selfRect(-1000, 200, 8000, 1000, (60, 120, 216))\r\n\r\nbackBtn = Button('Назад', 0, 0, fontSize=21)\r\naddButtonHoverEffect(settingsMenu, backBtn)\r\nsettingsMenu.eventHandler.onClick.addElement(backBtn, lambda e: game.setMap('главное меню'))\r\n\r\ndifficultBtn0 = Button('Легко', 50, 400, fontSize=18, size=(80, 30))\r\ndifficultBtn1 = Button('Средне', 150, 400, fontSize=18, size=(80, 30))\r\ndifficultBtn2 = Button('Тяжело', 250, 400, fontSize=18, size=(80, 30))\r\ndifficultBtn3 = Button('Хардкор', 350, 400, fontSize=18, size=(80, 30))\r\ndifficultBtns = [difficultBtn0, difficultBtn1, difficultBtn2, difficultBtn3]\r\naddButtonHoverEffect(settingsMenu, difficultBtn0)\r\naddButtonHoverEffect(settingsMenu, difficultBtn1)\r\naddButtonHoverEffect(settingsMenu, difficultBtn2)\r\naddButtonHoverEffect(settingsMenu, difficultBtn3)\r\nsettingsMenu.eventHandler.onClick.addElement(difficultBtn0, setEasy)\r\nsettingsMenu.eventHandler.onClick.addElement(difficultBtn1, setNormal)\r\nsettingsMenu.eventHandler.onClick.addElement(difficultBtn2, setHard)\r\nsettingsMenu.eventHandler.onClick.addElement(difficultBtn3, setHardcore)\r\n\r\n\r\nopenSettingBtn = Button('Настройки', 0, 0, fontSize=21)\r\naddButtonHoverEffect(welkomMenu, openSettingBtn)\r\nwelkomMenu.eventHandler.onClick.addElement(openSettingBtn, lambda e: game.setMap('настройки'))\r\n\r\nsoundLabel0 = Label('Громкость музыки', 18, 50, 140)\r\nsoundLabel = Label('0.03', 18, 50, 170)\r\nsoundRectBg = selfRect(50, 200, 228, 28, (180, 180, 180))\r\nsoundRectBtn = Button(\"\", 54, 204, (120, 120, 120), size=[20, 20])\r\nsoundRectBtn.pressed = False\r\naddButtonHoverEffect(settingsMenu, soundRectBtn)\r\nsettingsMenu.eventHandler.onLoopUpdate.addElement(mooveSoundBtn)\r\nsound_elements = [soundRectBg, soundRectBtn, soundLabel0, soundLabel] \r\n\r\neffectLabel0 = Label('Громкость эффектов', 18, 350, 140)\r\neffectLabel = Label('0.03', 18, 350, 170)\r\neffectRectBg = selfRect(350, 200, 228, 28, (180, 180, 180))\r\neffectRectBtn = Button(\"\", 354, 204, (120, 120, 120), size=[20, 20])\r\neffectRectBtn.pressed = False\r\naddButtonHoverEffect(settingsMenu, effectRectBtn)\r\nsettingsMenu.eventHandler.onLoopUpdate.addElement(mooveEffectBtn)\r\neffect_elements = [effectRectBg, effectRectBtn, effectLabel0, effectLabel] \r\n\r\n\r\nstoryBtns = []\r\nstoryBtnsEnter = []\r\nfor i in range(10):\r\n storyBtns.append(Button(f'Квест {i+1}', 18 + 150*(i%5), 298 + 80*(i//5), fontSize=20, size=[104, 64]))\r\n storyBtnsEnter.append(Button(f'Войти', 20 + 150*(i%5), 330 + 80*(i//5), (130, 130, 130), fontSize=20, size=[100, 30]))\r\n\r\nplayBtn = Button(\"Играть\", 0, 200)\r\n\r\nwelkomMenu.addElement([openSettingBtn, playBtn, *storyBtns, *storyBtnsEnter])\r\nsettingsMenu.addElement([backBtn, *sound_elements, *effect_elements,*difficultBtns])\r\naddButtonHoverEffect(welkomMenu, playBtn)\r\nsetToCenterWelkomMenu()\r\n\r\nwelkomMenu.eventHandler.onClick.addElement(playBtn, lambda e: darkScreenAnimation.newStory(gameStory.Story0()))\r\nwelkomMenu.eventHandler.onLoopUpdate.addElement(setToCenterWelkomMenu)\r\n\r\n# добавление карт в менеджера игры\r\ngame.addElement(welkomMenu, 'главное меню')\r\ngame.addElement(home, 'дом')\r\ngame.addElement(home2, 'дом 2')\r\ngame.addElement(waterMap0, 'вода 0')\r\ngame.addElement(waterMap1, 'вода 1')\r\ngame.addElement(streetMap, 'улица')\r\ngame.addElement(tombEnterMap, 
'храм вход')\r\ngame.addElement(tombArrowsMap, 'дорога со стрелами')\r\ngame.addElement(labirinthMap, 'лабиринт')\r\ngame.addElement(bossMap, 'босс')\r\ngame.addElement(bossMapEnd, 'босс конец')\r\ngame.addElement(settingsMenu, 'настройки')\r\ngame.addElement(topMap, None, True)\r\n\r\nsound_volume = 0.03\r\neffects_volume = 0.03\r\n\r\nmusic_list = ['musics/music1.mp3', 'musics/music2.mp3']\r\n\r\nwith open('lvls.txt', 'r') as f:\r\n passedLevels = int(f.readlines()[0])\r\n\r\nsetLvl(passedLevels)\r\nsetHard()\r\noldScreenSize = sc.get_size()\r\nwhile running:\r\n if pg.key.get_pressed()[pg.K_ESCAPE]:\r\n game.setMap('главное меню')\r\n if not pg.mixer.music.get_busy():\r\n music_list = music_list[::-1]\r\n pg.mixer.music.load(music_list[0])\r\n pg.mixer.music.set_volume(sound_volume)\r\n pg.mixer.music.play()\r\n\r\n events = pg.event.get()\r\n game.updateEventHandlerData(events)\r\n for e in events:\r\n if e.type == pg.QUIT:\r\n running = False\r\n\r\n sc.fill(bgColor)\r\n game.draw()\r\n pg.display.flip()\r\n clock.tick(fps)\r\n\r\npg.quit()","repo_name":"Denj0k/underwater-game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":83860,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"72308696881","text":"# base64编码\n\n\"\"\"\n引题:读写图片、视频我们读出二进制。\n计算机之间交流一般用通用格式信息,比如字符串'hello world'。 比较老式的服务器由于硬件和软件的限制只支持ascii编码,不支持特殊字符,直接传输字节信息可能出错。\n\n图片(ipg编码) → 二进制 (base64编码) → 处理后的字符串\n\n处理好的字符串适合在网络中传播\n\nbase64编码:一种简单的加密方法。主要作用是轻度加密和兼容老服务器,会把原信息转换成由大小写字母和常见字符组成的新字符串\n场景:\n1. 网址。 网址含有中文,比如http://www.baidu.com/新闻/国内,网址复制粘贴出来后形如http://8E%E8%AE%A4%E8%AF%81/, 这就是经过base64转码后的结果,服务器识别网址是就不会出错了。\n2. 传图片。不传字符而传通用的字符串。\n3. 简单加密。比如V2ex论坛上自曝微信号的时候发出来是经过base64编码后的内容。\n\n参数 (了解)百度百科 https://baike.baidu.com/item/base64/8545775?fr=aladdin\n\n\"\"\"\nimport base64\n# content = base64.b64encode('我的电话是12356768888'.encode()) #b64编码简单信息\n# b'5oiR55qE55S16K+d5pivMTIzNTY3Njg4ODg='\n# b64编码图片\nwith open('6 img.jpg', 'rb') as f:\n content_bytes = f.read()\n content_b64 = base64.b64encode(content_bytes)\n print(content_bytes)\n\n\n\n\n\"\"\"\nb64encode(待转换的字符)\n\"\"\"\n\n\n\n\n\n","repo_name":"gitshangxy/tutorial","sub_path":"L7本地文件读写/9 base64编码.py","file_name":"9 base64编码.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"9047835853","text":"# -*- coding:utf-8 -*- \n \nimport scrapy\nfrom scrapy.spiders import CrawlSpider, Rule \nfrom scrapy.linkextractors.sgml import SgmlLinkExtractor \nfrom scrapy.selector import Selector \nfrom CSDNblog.items import CsdnblogItem\n\n \n \nclass CSDNBlogCrawlSpider(CrawlSpider): \n \n \"\"\"继承自CrawlSpider,实现自动爬取的爬虫。\"\"\" \n \n name = \"CSDNBlogCrawlSpider\" \n #设置下载延时 \n download_delay = 0.1 \n allowed_domains = ['blog.csdn.net'] \n #第一篇文章地址 \n start_urls = ['http://blog.csdn.net/u012150179/article/details/11749017'] \n \n #rules编写法一,官方文档方式 \n #rules = [ \n # #提取“下一篇”的链接并**跟进**,若不使用restrict_xpaths参数限制,会将页面中所有 \n # #符合allow链接全部抓取 \n # Rule(SgmlLinkExtractor(allow=('/u012150179/article/details'), \n # restrict_xpaths=('//li[@class=\"next_article\"]')), \n # follow=True) \n # \n # #提取“下一篇”链接并执行**处理** \n # #Rule(SgmlLinkExtractor(allow=('/u012150179/article/details')), \n # # callback='parse_item', \n # # follow=False), \n #] \n \n #rules编写法二,更推荐的方式(自己测验,使用法一时经常出现爬到中间就finish情况,并且无错误码) \n rules = [ \n Rule(scrapy.linkextractors.LinkExtractor(allow=('/u012150179/article/details'), \n 
restrict_xpaths=('//li[@class=\"next_article\"]')), \n callback='parse_item', \n follow=True) \n ] \n \n def parse_item(self, response): \n \n #print \"parse_item>>>>>>\" \n item = CsdnblogItem() \n sel = Selector(response) \n blog_url = str(response.url) \n blog_name = sel.xpath('//div[@id=\"article_details\"]/div/h1/span/a/text()').extract() \n \n item['article_name'] = [n.encode('utf-8') for n in blog_name] \n item['article_url'] = blog_url.encode('utf-8') \n \n yield item \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n'''\n#!/usr/bin/python \n# -*- coding:utf-8 -*- \n\nimport scrapy\nfrom CSDNblog.items import CsdnblogItem\nfrom scrapy.selector import Selector\n\nclass CsdnblogSpider(scrapy.Spider):\n\n\tname = \"Csdnblog\"\n\n\tdownload_delay = 1\n\n\tallowed_domains = [\"blog.csdn.net\"]\n\tstart_urls = (\n\t\t\"http://blog.csdn.net/u012150179/article/details/11749017\",\n\t)\n\n\n\tdef parse(self,response):\n#\t\tlis = response.xpath('/html/body/div[4]/div[3]/div[1]/div/div[3]/div[1]/h1/span')\n\n\t\tsel = Selector(response)\t\t\n\n\t\titem = CsdnblogItem()\n\n\t\tarticle_name = sel.xpath('//div[@id=\"article_details\"]/div/h1/span/a/text()').extract() \n\t\tarticle_url = str(response.url)\n\t\titem['article_name'] = [n.encode('utf-8') for n in article_name]\n\t\titem['article_url'] = article_url.encode('utf-8')\n\n\t\tyield item\n\n\t\turls = sel.xpath('//li[@id=\"next_article\"]/a/@href').extract()\n\t\tfor url in urls:\n\t\t\tprint url\n\t\t\turl = \"http://blog.csdn.net\" + url\n\t\t\tprint url\n\t\t\tyield Request(url, callback=self.parse) \n\n\n\n'''\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"michaelcjl/learn_scrapy_demo","sub_path":"CSDNblog/CSDNblog/spiders/CSDNBlog.py","file_name":"CSDNBlog.py","file_ext":"py","file_size_in_byte":3095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"22105173707","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sklearn.svm\n\n\nCAMERA_URL = \"http://localhost:8080/zm/cgi-bin/zms?monitor=1\"\n\n\nUPPER_LEFT = (900, 40)\nLOWER_RIGHT = (1200, 380)\nSLICE_X = slice(UPPER_LEFT[0], LOWER_RIGHT[0])\nSLICE_Y = slice(UPPER_LEFT[1], LOWER_RIGHT[1])\n\n\nclass SVM:\n CLOSED_IMGS = [\n \"data/closed_001.jpg\",\n \"data/closed_002.jpg\",\n \"data/closed_003.jpg\"\n ]\n OPEN_IMGS = [\n \"data/open_001.jpg\"\n \"data/open_002.jpg\"\n ]\n UPPER_LEFT = (900, 40)\n LOWER_RIGHT = (1200, 380)\n slice_x = SLICE_X\n slice_y = SLICE_Y\n\n def __init__(self):\n self.x_open = self.load_data(self.OPEN_IMGS)\n self.x_closed = self.load_data(self.CLOSED_IMGS)\n self.svm = sklearn.svm.SVC()\n self._fit()\n\n def _fit(self):\n X = np.concatenate((self.x_open, self.x_closed))\n y = np.zeros((X.shape[0]),)\n y[:self.x_open.shape[0]] = 1\n\n self.svm.fit(X, y)\n\n def predict(self, img):\n img = img[self.slice_y, self.slice_x]\n img = img.flatten()\n img = np.expand_dims(img, axis=0)\n return self.svm.predict(img)[0]\n\n @staticmethod\n def load_data(imgs):\n img_data = []\n for img_fn in imgs:\n img = cv2.imread(img_fn, cv2.IMREAD_GRAYSCALE)\n img = img[SLICE_Y, SLICE_X]\n img = img.flatten()\n img_data.append(img)\n\n return np.array(img_data)\n\nclass Detector:\n CLOSED_IMG = \"closed3.jpg\"\n OPEN_IMG = \"open.jpg\"\n UPPER_LEFT = (900, 40)\n LOWER_RIGHT = (1200, 380)\n\n THRESH = .2\n\n def __init__(self, ref_image_fn=CLOSED_IMG):\n\n self.slice_x = slice(self.UPPER_LEFT[0], self.LOWER_RIGHT[0])\n self.slice_y = 
slice(self.UPPER_LEFT[1], self.LOWER_RIGHT[1])\n\n self.ref_image = None\n self._create_ref_image(ref_image_fn)\n\n def _create_ref_image(self, ref_image_fn):\n img = cv2.imread(ref_image_fn, 0)\n # self.ref_image = img\n self.ref_image = img[self.slice_y, self.slice_x]\n # self.ref_image = cv2.normalize(self.ref_image)\n\n def absdiff(self, img):\n\n img = img[self.slice_y, self.slice_x]\n return cv2.absdiff(img, self.ref_image)\n\n def is_closed(self, img):\n img = img[self.slice_y, self.slice_x]\n norm = cv2.norm(self.ref_image,\n img,\n cv2.NORM_L2SQR | cv2.NORM_RELATIVE)\n\n return norm < self.THRESH\n\n def matcher(self, img):\n\n img = img[self.slice_y, self.slice_x]\n # orb = cv2.SIFT()\n orb = cv2.ORB_create()\n bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n ref_kps, ref_desc = orb.detectAndCompute(self.ref_image, None)\n kps, desc = orb.detectAndCompute(img, None)\n\n matches = bf.match(ref_desc, desc)\n out = np.array([])\n matches = sorted(matches, key=lambda x: x.distance)\n print([m.distance for m in matches[:5]])\n img_comp = cv2.drawMatches(self.ref_image, ref_kps,\n img, kps, matches[:5], out)\n import matplotlib.pyplot as plt\n plt.imshow(img_comp)\n plt.show()\n\n return matches\n\n\ndef noise_sample(img_fn):\n img = cv2.imread(img_fn, 0)\n detector = Detector()\n diff_img = detector.is_closed(img)\n print('{}'.format(np.sum(diff_img)))\n cv2.imshow(\"\", diff_img)\n cv2.waitKey(0)\n\n\ndef test_matcher():\n open_img = cv2.imread(Detector.OPEN_IMG)\n closed_img = cv2.imread(\"closed3.jpg\")\n\n detector = Detector()\n orb = cv2.ORB_create()\n bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n ref_kps, ref_desc = orb.detectAndCompute(self.ref_image, None)\n img = img[self.slice_y, self.slice_x]\n kps, desc = orb.detectAndCompute(img, None)\n matches = bf.match(ref_desc, desc)\n\n matches1 = detector.matcher(open_img)\n matches2 = detector.matcher(closed_img)\n img_match = cv2.drawMatches(\n open_img,\n\n )\n return (\n\n open_img,\n closed_img\n )\n\n\nclass Capture:\n\n def __init__(self, url=CAMERA_URL):\n self.url = CAMERA_URL\n self.cap = cv2.VideoCapture()\n assert self.cap.open(self.url) is True\n\n def get_image(self):\n rc, img_color = self.cap.read()\n img = cv2.cvtColor(img_color, cv2.COLOR_RGB2GRAY)\n return img\n\n\nif __name__ == '__main__':\n cap = Capture()\n detector = Detector()\n\n img = cap.get_image()\n svm = SVM()\n\n print(svm.predict(img))\n # assert svm.predict(img) == 0, \"Garage is open!\"\n # img = cap.get_image()\n #\n # plt.imshow(img)\n # plt.show()\n\n\n # is_closed = detector.is_closed(img)\n # print(is_closed)\n # detector.matcher(img)\n # matcher1, matcher2, img1, img2 = test_matcher()\n # if not is_closed:\n # raise Exception(\"Garage is open\")\n","repo_name":"ebarcikowski/garage","sub_path":"garage/svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":4841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"40297786276","text":"import copy\r\nclass Solution:\r\n def maxDistance(self, grid):\r\n n = len(grid)\r\n visited = [[0 for i in range(n)] for j in range(n)]\r\n level = []\r\n for i in range(n):\r\n for j in range(n):\r\n if grid[i][j]==1:\r\n level.append([i, j])\r\n visited[i][j] = 1\r\n if len(level) == 0 or len(level) == n*n: return -1\r\n next_level = []\r\n d = 0\r\n while level or next_level:\r\n dirs = [[-1, 0], [1, 0], [0, -1], [0, 1]]\r\n while level:\r\n i, j = level.pop(0)\r\n for ii, jj in dirs:\r\n if i+ii>=0 and i+ii=0 and j+jj 
Stocks[username_][\"value\"]:\n        print(\"that is more than your balance\")\n    else:\n        print(\"go ahead\")\n\ndef sell(Stocks,username_):\n    want_to_sell = input(\"How many stocks do you want to sell, remember that you have: \" + str(Stocks[username_][\"stocks\"]) + \" \")\n    selling = int(want_to_sell)\n    if selling > Stocks[username_][\"stocks\"]:\n        print(\"that is more than you own\")\n    else:\n        print(\"go ahead\")\n        total = selling*Stocks[username_][\"market_value\"]\n        Stocks[username_][\"stocks\"] = Stocks[username_][\"stocks\"] - selling\n        Stocks[username_][\"value\"] = Stocks[username_][\"value\"] + total\n        print(\"Now you have \"+ str(Stocks[username_][\"value\"]))\n\ndef add(Stocks,username_):\n    adding = input(\"Please introduce how much you want to add to your account: \")\n    Stocks[username_][\"value\"] = Stocks[username_][\"value\"] + int(adding)\n    print(\"Your new balance is: \" + str(Stocks[username_][\"value\"]))\n    return Stocks[username_][\"value\"]\n\ndef openfile(Name, Stocks):\n    print(\"Opening file\")\n    with open('stocks.txt','w') as file:\n        file.write(\"Name: \" + str(Name[\"user\"]) + \"\\nCECID: \" + str(Name[\"cec\"]) +\"\\nCompany: \" + str(Name[\"company\"]) + \"\\nStock's information: \"+ \"Stock's Number: \"+str(Stocks[Name[\"cec\"]][\"stocks\"]) + \" Stock's value: \"+str(Stocks[Name[\"cec\"]][\"value\"]))\n\n    with open('stocks.txt') as file1:\n        x = file1.read()\n        print(x)\n\ndef menuJob(option, username_):\n    option = int(option)\n    print(\"Option is: \" + str(option))\n    if option == 1:\n        print(\"Change PIN\")\n        pin1 = input(\"Please type your PIN: \")\n        if pin1 != str(PINs[str(username_)]):\n            print(\"wrong password\")\n        else:\n            pin2 = input(\"Please insert the new pin: \")\n            PINs[str(username_)] = pin2\n            print(\"New Pin for \" + username_ + \" is \"+pin2)\n            printMenu()\n    elif option == 2:\n        print(\"Review Stocks\")\n        print(username_ + \"::\" + str(Stocks[str(username_)][\"stocks\"]) +\" || \"+ str(Stocks[str(username_)][\"value\"]) + \" || \"+ str(Stocks[str(username_)][\"market_value\"]))\n    elif option == 3:\n        print(\"Buy Stocks\")\n        buy(Stocks, username_)\n    elif option == 4:\n        print(\"Sell Stocks\")\n        sell(Stocks, username_)\n    elif option == 5:\n        print(\"Deposit Funds\")\n        add(Stocks, username_)\n    elif option == 6:\n        print(\"Generate Account Statement\")\n        openfile(Name, Stocks)\n    elif option == 7:\n        print(\"End Session\")\n    else:\n        print(\"Exit\")\n        exit()\n    printMenu()\n\n\ndef auth():\n    print(\"Hello Cisconian\")\n    username_ = input(\"Enter your cec: \\n\")\n    print(\"Hello \"+ username_)\n    password_ = input(\"\\n Now your password: \\n\")\n    # print(PINs[username_])\n    while str(password_) != str(PINs[username_]):\n        print(\"wrong password\")\n        password_ = input(\"Re-enter password: \")\n    print(\"\\n Printing menu \\n\")\n    printMenu()\n    return username_\n\n#Main #\nif __name__=='__main__':\n    username_ = auth()\n    while True:\n        option = input(\"Please select an option: \")\n        menuJob(option, username_)","repo_name":"OrnelasE/CXCoders","sub_path":"ejercicio1.py","file_name":"ejercicio1.py","file_ext":"py","file_size_in_byte":3941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"40032068078","text":"# Definition for a binary tree node.\nclass TreeNode(object):\n    def __init__(self, x):\n        self.val = x\n        self.left = None\n        self.right = None\nfrom collections import deque\n\n\nclass Codec:\n\n    def serialize(self, root):\n        \"\"\"Encodes a tree to a single string.\n\n        :type root: TreeNode\n        :rtype: str\n        \"\"\"\n        if root is None:\n            return \"\"\n        res = []\n        dq = deque()\n        dq.append(root)\n        while dq:\n            node = dq.popleft()\n            if node is None:\n                res.append(\"\")\n            else:\n                res.append(str(node.val))\n                dq.append(node.left)\n                dq.append(node.right)\n        # drop the trailing null markers so the string stays compact\n        while res and res[-1] == \"\":\n            res.pop()\n        return \"+\".join(res) + \"+\"\n\n    def deserialize(self, data):\n        \"\"\"Decodes your encoded data to tree.\n\n        :type data: str\n        :rtype: TreeNode\n        \"\"\"\n        if data == \"\":\n            return None\n        data_list = data.split(\"+\")[:-1]  # the trailing \"+\" leaves one empty token\n        root = TreeNode(int(data_list[0]))\n        dq = deque()\n        dq.append(root)\n        i = 1\n        while dq and i < len(data_list):\n            node = dq.popleft()\n            if data_list[i] != \"\":\n                node.left = TreeNode(int(data_list[i]))\n                dq.append(node.left)\n            i += 1\n            if i < len(data_list) and data_list[i] != \"\":\n                node.right = TreeNode(int(data_list[i]))\n                dq.append(node.right)\n            i += 1\n        return root\n\n\n\n# Your Codec object will be instantiated and called as such:\n# ser = Codec()\n# deser = Codec()\n# ans = deser.deserialize(ser.serialize(root))\n\n# def main():\n#     root =\n#     res = Codec().deserialize(Codec().serialize(root))\n#     print(res)\n#\n#\n# if __name__ == \"__main__\":\n#     main()","repo_name":"Jintaimeng/Leetcode","sub_path":"中级算法/设计问题/297、二叉树的序列化与反序列化.py","file_name":"297、二叉树的序列化与反序列化.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"21455113766","text":"class Solution(object):\n    def myAtoi(self, str):\n        \"\"\"\n        :type str: str\n        :rtype: int\n        \"\"\"\n        statemap={\n            'start':{'bspace','sign','number'},\n            'bspace':{'bspace','sign','number'},\n            'sign':{'number'},\n            'number':{'number','end'},\n        }\n        charmap={' ':'bspace','+':'sign','-':'sign'}\n        charmap.update((ic,'number') for ic in '0123456789')\n        status='start'\n        v=0\n        bsig=1\n        for c in str:\n            nxt_stat=charmap.get(c)\n            if nxt_stat is None: break\n            if nxt_stat not in statemap[status]:\n                return 0\n            if nxt_stat=='sign' and c=='-': bsig=-1\n            if nxt_stat=='number':\n                if ' ' in charmap: charmap.pop(' ')\n                ad=ord(c)-ord('0')\n                if v<214748364 or (v==214748364 and ((bsig==1 and ad<7) or (bsig==-1 and ad<8))):\n                    v=v*10+ad\n                elif bsig==1: \n                    return 2147483647\n                else:\n                    return -2147483648\n            status=nxt_stat\n        if 'end' not in statemap[status]: return 0\n        return bsig*v\n","repo_name":"neoall/lc-python-java","sub_path":"string-to-integer-atoi/zxj.py","file_name":"zxj.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"5616485303","text":"import os\nimport csv\n\n# Assign .csv data file name\nfile_to_load = os.path.join(\"resources\",\"election_results.csv\")\n\n# Assign .txt file to save analysis\nfile_to_save = os.path.join(\"analysis\",\"election_analysis.txt\")\n\n# Initialize total vote counter, candidate options, and candidate votes\ntotal_votes = 0\ncandidate_options = []\ncandidate_votes = {}\n\n# Initialize a county list and county votes dictionary\ncounty_list = []\ncounty_votes = {}\n\n# Declare winning candidate variables\nwinning_candidate = \"\"\nwinning_count = 0\nwinning_percentage = 0\n\n# Track the largest county and county voter turnout\nlargest_county = \"\"\nlargest_county_votes = 0\n\n# Open the .csv data file\nwith open(file_to_load) as election_data:\n\n    # Read csv file\n    file_reader = csv.reader(election_data)\n\n    # Read the headers\n    headers = next(election_data)\n\n    # Loop over the rows of election data\n    for row in file_reader:\n\n        # Increment the total 
votes count\n total_votes += 1\n\n # Get the candidate name from each row\n candidate_name = row[2]\n\n # Get the county name from each row\n county_name = row[1]\n\n # Add the candidate name to the candidate options if not already there\n if candidate_name not in candidate_options:\n\n # Add candidate name to candidate_options\n candidate_options.append(candidate_name)\n\n # Add candidate name to candidate_votes\n candidate_votes[candidate_name] = 0\n \n # Add vote to candidate count\n candidate_votes[candidate_name] += 1\n\n # Check if county is in county list\n if county_name not in county_list:\n \n # Add county name to county list\n county_list.append(county_name)\n\n # Add county name ot county votes\n county_votes[county_name] = 0\n \n # Add vote to county votes\n county_votes[county_name] += 1\n \n # Save the results to txt file\nwith open(file_to_save,\"w\") as txt_file:\n\n election_results = (\n f\"Election Results\\n\"\n f\"-------------------------\\n\"\n f\"Total Votes: {total_votes:,}\\n\"\n f\"-------------------------\\n\"\n f\"County Votes:\\n\"\n )\n\n # Print results and save to txt file\n print(election_results,end=\"\")\n txt_file.write(election_results)\n\n # Loop through al counties and calculate and print their percentage of the vote\n for county in county_votes:\n\n votes = county_votes[county]\n\n vote_percentage = float(votes) / float(total_votes) * 100\n\n county_results = (f\"{county}: {vote_percentage:.1f}% ({votes:,})\\n\")\n\n # Print county results and save to file\n print(county_results)\n txt_file.write(county_results)\n\n # Determine the largest county turnout\n if votes > largest_county_votes:\n\n largest_county = county\n\n largest_county_votes = votes\n \n # Establish largest county and formatting\n county_summary = (\n f\"-------------------------\\n\"\n f\"Largest County Turnout: {largest_county}\\n\"\n f\"-------------------------\\n\"\n\n )\n\n # Print and save largest county with \n print(county_summary)\n txt_file.write(county_summary)\n\n # Loop through all candidates and calculate and print their percentage of the vote\n for candidate_name in candidate_options:\n\n votes = candidate_votes[candidate_name]\n\n vote_percentage = float(votes) / float(total_votes) * 100\n\n candidate_results = (f\"{candidate_name}: {vote_percentage:.1f}% ({votes:,})\\n\")\n\n # Print candidate results and save to file\n print(candidate_results)\n txt_file.write(candidate_results)\n\n # Determine the winning candidate\n if (votes > winning_count) and (vote_percentage > winning_percentage):\n\n winning_count = votes\n\n winning_percentage = vote_percentage\n\n winning_candidate = candidate_name\n\n winning_candidate_summary = (\n f\"-------------------------\\n\"\n f\"Winner: {winning_candidate}\\n\"\n f\"Winning Vote Count: {winning_count:,}\\n\"\n f\"Winning Percentage: {winning_percentage:.1f}%\\n\"\n f\"-------------------------\\n\"\n )\n\n # Print winning candidate summary and save to txt file\n print(winning_candidate_summary)\n txt_file.write(winning_candidate_summary)","repo_name":"btweedster/election_analysis","sub_path":"PyPoll_Challenge.py","file_name":"PyPoll_Challenge.py","file_ext":"py","file_size_in_byte":4360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"70406636724","text":"#!/usr/bin/python3\n\n#this is a simple program that will code change a written massage to different letterss with a given key.\n#The key shift the letters by the given number of positions.\n#also will be writtten 
as a function, that can be imported in another function or used in the code.\n#the usagee of this function can be seen in the function file \n\ndef cipher(message, key):\n for i in message:\n if ord(i) >= 65 and ord(i) <= 90: #checks for uppercase letter, to maintain uppercase\n i = chr(ord(i) + key) #inputs key to cipher and move the letters in key number position\n if ord(i) > 90: #if key ciphers above ascii valu of Z, result wont rotate to continue from A \n i = chr(ord(i) - 26) # Rotates to start from begining A, instead of continue to take next ASCII value\n print(i, end='')\n \n elif ord(i) >= 97 and ord(i) <= 122:#checks if lowercase letter to maintain\n i = chr(ord(i) + key)\n if ord(i) > 122:\n i = chr(ord(i) - 26)\n print(i, end='')\n\n else:\n print(i, end='')\n\nif __name__ == \"__main__\":\n \"\"\"this is mostly used in files to tell the code under it to only execute when the file is run as a script\n on the comand line. Name = main means that the command line sees the name of the file as the main function(which\n is usually the first function to run). This means that the command line wants to run the codes in the file.\n If this is not put, the codes in the file will be run when the file is imported on the command line or in another file.\n \"\"\"\n cipher(input(\"Please input message to cipher : \"), int(input(\"Please input key: \")))\n\n\n\n\n ","repo_name":"chesahkalu/My_Python","sub_path":"Beginer_basics/14-cipher.py","file_name":"14-cipher.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"2393792522","text":"'''\nCreated on 06-Apr-2020\n\n@author: choudaiahp\n'''\nfrom Test_Cases.BaseTestClass import *\nfrom Test_Cases.Settings import *\nfrom Test_Cases.HomepageXpath import HomepageXpath\n\n\n\nclass HomepageUIverification:\n \n def homepageVerification(self):\n home=HomepageXpath()\n try:\n wait=WebDriverWait(driver,60)\n print(\"verifying welcome admin is displayed\")\n admin=wait.until(EC.visibility_of_element_located((By.XPATH,home.welcomeAdmin())))\n print(admin.text)\n if admin.text==\"Welcome Admin\":\n print(\"welcome admin is displayed\")\n else:\n raise Exception\n except Exception:\n print (\"welcome admin not displayed\")\n traceback.print_exc()\n raise Exception\n \n print(\"verifying vendortab is displayed\")\n \n \n \n \n \n \n '''\n print(\"verifying vendortab is displayed\")\n home.vendorsTab()\n \n print(\"verifying Title is displayed\")\n home.vendorVettingTitle()\n \n print(\"verifying Plus icon(add vendor)in vendor page\")\n home.plusIconVendor()\n \n print(\"verifying global search field\")\n home.Globalsearch()\n \n print(\"Verifying the pagination is displayed\")\n home.pagination()\n \n print(\"Verifying pagedropdown\")\n home.pageDropdown()\n ''' \n \n \n \n \n \n def mainHomepage(self):\n \n Hm=HomepageUIverification()\n Hm.homepageVerification()\n \n\n \n driver.close()\n\n\n\nif __name__ == '__main__':\n vvl=BaseTestClass()\n vvl.userLogin()\n \n Hm=HomepageUIverification()\n Hm.homepageVerification()\n ","repo_name":"chethan22/AutomationPython","sub_path":"Test_Cases/HomepageUIverification.py","file_name":"HomepageUIverification.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"2951650179","text":"import json\n\nfrom flask import Flask, Response, request\nfrom flask_cors import CORS\n\nfrom 
models.pipelines.evaluation.bow_evaluation_pipeline import BoWEvaluationPipeline\nfrom models.pipelines.evaluation.self_attention_evaluation_pipeline import AttentionEvaluationPipeline\nfrom models.properties.attention_model_properties import AttentionModelProperties\nfrom models.properties.bow_properties import BoWModelProperties\n\napp = Flask(__name__)\nCORS(app)\nbow_evaluation: BoWEvaluationPipeline = BoWEvaluationPipeline(BoWModelProperties.tiny_own_embedding())\nattention_evaluation: AttentionEvaluationPipeline = AttentionEvaluationPipeline(\n AttentionModelProperties.small_bertolt())\n\n\n@app.route('/predict_bow', methods=['POST'])\ndef predict_stance_bow():\n request_body = json.loads(request.get_data().decode(\"utf-8\"))\n return Response(json.dumps(bow_evaluation.evaluate(request_body['question'], request_body['comment'])),\n mimetype=\"application/json\")\n\n\n@app.route('/predict_attention', methods=['POST'])\ndef predict_stance_attention():\n request_body = json.loads(request.get_data().decode(\"utf-8\"))\n return Response(json.dumps(attention_evaluation.evaluate(request_body['question'], request_body['comment'])),\n mimetype=\"application/json\")\n\n\n@app.route('/health', methods=['GET'])\ndef health():\n res = {\"health\": \"ok\"}\n return Response(json.dumps(res), mimetype=\"application/json\")\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port='5000')\n","repo_name":"aditen/x-stance-models","sub_path":"web_app.py","file_name":"web_app.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"40416162498","text":"\"\"\"\nThis file is used to rename image files from sjtu and classs them into keras orgnization structure\n\"\"\"\n\nimport os\nimport os.path \nimport shutil \nimport random\nfrom config import config\n\nfrom exception import C8Exception as c8e\n\nfrom .dataset_base import dataset_base\n\n\n\nclass dataset_extract_random(dataset_base):\n def __init__(self, inpath, outpath):\n self.__inpath = inpath\n self.__outpath = outpath\n\n self.__extract_fd = open(config.configured_extract_output_file, 'w+')\n \n dataset_base.__init__(self)\n \n \n def checkenv(self):\n \"\"\"\n Check whether input directory exist\n Create directory and sub-directory for output path\n \"\"\"\n if not os.path.exists(self.__inpath):\n raise c8e('Input directory not exist {}'.format(self.__inpath))\n \n if os.path.exists(self.__outpath):\n shutil.rmtree(self.__outpath)\n \n os.mkdir(self.__outpath)\n \n for phase in config.configured_dirs:\n phase_path = os.path.join(self.__outpath, phase)\n os.mkdir(phase_path)\n \n for label in config.configured_classes:\n subpath = os.path.join(phase_path, label)\n os.mkdir(subpath)\n \n #Need check again?\n \n def is_camera0_image(self, image):\n \n slices = image.split('-')\n if '0' == slices[1]: #2nd field in image name\n return True\n else:\n return False\n \n \n def find_images_camera0(self, classpath): \n \n images = []\n for it in os.listdir(classpath):\n if self.is_camera0_image(it):\n images.append(it)\n \n return images\n \n def extract_random_process(self, images_camera0, classes, phase, count):\n \n for i in range(count):\n if 0 >= len(images_camera0):\n return\n \n image = random.choice(images_camera0)\n \n group = self.find_related_file(os.path.join(self.__inpath, classes), \n image)\n \n for it in group:\n head, tail = os.path.split(it) \n \n abs_dst_file = os.path.join(self.__outpath,\n phase,\n classes,\n tail)\n shutil.copy(it, 
abs_dst_file)\n print('copy from', it, 'to', abs_dst_file) \n \n self.__extract_fd.write('{} -> {}'.format(it, abs_dst_file))\n \n images_camera0.remove(image)\n\n\n def extract_random(self, images_camera0, classes):\n \n total = len(images_camera0) \n \n valid_count = int(total * config.configured_phase_percent[1]) #index of valid\n predict_count = int(total * config.configured_phase_percent[2]) #index of predict\n train_count = total - valid_count - predict_count\n \n self.extract_random_process(images_camera0, classes, config.configured_valid_dir, valid_count)\n self.extract_random_process(images_camera0, classes, config.configured_predict_dir, predict_count)\n self.extract_random_process(images_camera0, classes, config.configured_train_dir, train_count)\n \n \n def extract(self):\n \"\"\"Extract random group images into specified folder\n \"\"\"\n \n self.checkenv()\n \n for l1 in os.listdir(self.__inpath):\n \n if l1 not in config.configured_classes:\n raise c8e('wrong folder in input directory: {}, its {}'.format(self.__inpath, l1))\n \n l1path = os.path.join(self.__inpath, l1)\n images_camera0 = self.find_images_camera0(l1path)\n if 3 >= len(images_camera0):\n raise c8e('Wrong images count in {}'.format(l1path)) \n \n self.extract_random(images_camera0, l1) #l1 is the classes name\n \n \n ","repo_name":"TangLisan/AIObstacleAvoidance","sub_path":"dataset/dataset_extract_random.py","file_name":"dataset_extract_random.py","file_ext":"py","file_size_in_byte":4135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"19200724293","text":"\"\"\"\nhttps://leetcode.com/problems/reorder-list/\n\nYou are given the head of a singly linked-list. The list can be represented as:\n\nL0 → L1 → … → Ln - 1 → Ln\nReorder the list to be on the following form:\n\nL0 → Ln → L1 → Ln - 1 → L2 → Ln - 2 → …\nYou may not modify the values in the list's nodes. 
Only nodes themselves may be changed.\n\nExample 1:\nInput: head = [1,2,3,4]\nOutput: [1,4,2,3]\n\nExample 2:\nInput: head = [1,2,3,4,5]\nOutput: [1,5,2,4,3]\n\nConstraints:\nThe number of nodes in the list is in the range [1, 5 * 104].\n1 <= Node.val <= 1000\n\"\"\"\n\nfrom typing import Optional\n\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\ndef reorder_list(head: Optional[ListNode]) -> None:\n # Find middle\n slow, fast = head, head.next\n while fast and fast.next:\n slow = slow.next\n fast = fast.next.next\n \n # Reverse second half\n second = slow.next\n prev = None\n slow.next = None\n while second:\n temp = second.next\n second.next = prev\n prev = second\n second = temp\n \n # Merge two halves\n first = head\n second = prev\n while second:\n temp1 = first.next\n temp2 = second.next\n first.next = second\n second.next = temp1\n first = temp1\n second = temp2\n \n return head","repo_name":"nwthomas/code-challenges","sub_path":"src/leetcode/medium/reorder-list/reorder_list.py","file_name":"reorder_list.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"71347074161","text":"from preprocessing import PPGraph\nfrom sorted_heap import SortedHeap\nfrom genetic_algorithm import GeneticAlgorithm\nfrom analytical_model import AnalyticalModel\nimport statsmodels.api as sm\nimport multiprocessing as mp\nimport pandas as pd\nimport numpy as np\nimport scipy.stats\nimport itertools\nimport copy\nimport time\nimport os\nimport random\n\n\nuse_parallel = True\n\n\nclass AttributeTransformation:\n default_transformation = {\n 'nodes': {\n 1: {'function': 'log', 'args': {'c': 'x1'}},\n 2: {'function': 'log10', 'args': {'c': 'x1'}},\n 3: {'function': 'square', 'args': {'c': 'x1'}},\n 6: {'function': 'reciprocal', 'args': {'c': 'x1'}},\n 7: {'function': 'polynomial', 'args': {'c': ['x1'], 'r': 'Response'}},\n 8: {'function': 'power', 'args': {'c': 'x1', 'p': 3}},\n 9: {'function': 'squareroot', 'args': {'c': 'x1'}},\n },\n 'edges': []\n }\n\n def __init__(self, data, response_data):\n self._data = copy.copy(data)\n if type(self._data) is not pd.DataFrame:\n self._data = self._data.to_frame()\n self._data[\"Response\"] = response_data.values\n self.transformed_data = {}\n for n, v in self.default_transformation[\"nodes\"].items():\n v['args']['c'] = data.name\n if 'r' in v['args'].keys():\n v['args']['r'] = response_data.columns.values.tolist()[0]\n self.data = PPGraph(self._data, self.default_transformation).data\n self.columns = self.data.columns\n d1 = response_data.to_numpy().flatten()\n self.data = self.data.drop(\"Response\", axis=1)\n for f in self.data.columns:\n if not np.any(np.isnan(self.data[[f]])):\n d2 = self.data[[f]].to_numpy().flatten()\n if d2.min() == d2.max(): # d2 is constant\n pearsons = [np.nan, np.nan]\n else:\n pearsons = scipy.stats.pearsonr(d1, d2)\n self.transformed_data[f] = pearsons[0]\n\n def evaluate(self, transformation_threshold):\n best_pearson = 0\n best_transform = None\n for t, v in self.transformed_data.items():\n if abs(v) > best_pearson:\n best_pearson = abs(v)\n best_transform = t\n if best_transform != self.columns[0]:\n if best_pearson / abs(self.transformed_data[self.columns[0]]) > (1. 
+ transformation_threshold):\n return [\n best_transform,\n self.transformed_data[best_transform],\n self.data[best_transform]\n ]\n return [\n self.columns[0],\n self.transformed_data[self.columns[0]],\n self.data[self.columns[0]]\n ]\n\n\nclass AttributeInteraction:\n\n interaction_polynomial = {\n 'nodes': {\n 1: {'function': 'product', 'args': {'c': ['x1', 'x2'], 'r': 'Response'}},\n },\n 'edges': []\n }\n\n def __init__(self, data, response_data):\n self._data = copy.copy(data)\n self._data[\"Response\"] = copy.copy(response_data.values)\n self._columns = self._data.columns\n self.transformed_data = {}\n\n self.interaction_polynomial[\"nodes\"][1][\"args\"][\"c\"] = list(self._columns)\n self.data = PPGraph(self._data, self.interaction_polynomial).data\n self.columns = self.data.columns\n self.interaction_column = [c for c in list(self.columns) if \"IP\" in c][0]\n d1 = response_data.to_numpy().flatten()\n d2 = self.data[self.interaction_column].to_numpy().flatten()\n pearsons = scipy.stats.pearsonr(d1, d2)\n self.transformed_data[self.interaction_column] = round(pearsons[0], 5)\n\n def evaluate(self):\n return [\n self.interaction_column,\n self.transformed_data[self.interaction_column],\n self.data[self.interaction_column]\n ]\n\n\nparallel_results = []\n\n\nclass AutomatedModelBuilder:\n pearsons_threshold = 0.1\n pearsons_change_threshold = 0.0\n vif_threshold = 5.0\n training_split = 0.8\n data_attribute_ratio = 10\n parallel_threshold = 10000\n\n def __init__(\n self,\n data,\n response,\n genetic=False,\n evaluation=\"rmse\",\n skip_transformations=False,\n skip_interactions=False,\n one_out=False,\n model_config=None,\n ga_config=None\n ):\n print(\"Starting automated multiple linear regression\")\n self.response = response\n self.data = data\n self.evaluation = evaluation\n self.response_data = copy.copy(self.data[[response]])\n self.attribute_data = copy.copy(self.data).drop(response, axis=1)\n self.interaction_data = pd.DataFrame()\n self.one_out = one_out\n self.attribute_transformations = None\n self.valid_transformations = None\n self.valid_attributes = None\n self.model_config = model_config\n self.ga_config = ga_config\n if not skip_interactions:\n self.process_interactions()\n self.attribute_data = pd.concat([self.attribute_data, self.interaction_data], axis=1)\n self.process_attributes(bypass=skip_transformations) # step 1: Generate all transformation, validate transformations\n self.all_combinations = []\n self.best_models = SortedHeap(n=10, target=0.0)\n # step 2: Generate all combinations from valid/best transformations\n print(\"{} total attributes.\".format(len(self.valid_attributes)))\n # self.attribute_data.to_csv(os.path.join(\"data\", \"attribute_data.csv\"))\n # self.data.to_csv(os.path.join(\"data\", \"transformed_data.csv\"))\n # if self.valid_attributes.shape[1] > 15 or genetic: # 5 base variables\n if genetic:\n # use genetic algorithm\n print(\"Genetic algorithm flag active.\")\n self.run_genetic_algorithm()\n else:\n for i in range(1, int(self.data.shape[0]/self.data_attribute_ratio)):\n self.all_combinations += list(itertools.combinations(list(self.valid_attributes.columns), i))\n self.valid_combinations = []\n print(\"Validating all {} potential attribute combinations...\".format(len(self.all_combinations)))\n global use_parallel\n if len(self.all_combinations) >= self.parallel_threshold:\n use_parallel = True\n print(\"Potential attribute combinations above {}, turning parallel functions on.\".format(self.parallel_threshold))\n else:\n use_parallel 
= False\n if use_parallel:\n self.validate_combinations_parallel() # step 3: Validate all combinations in parallel\n else:\n self.validate_combinations() # step 3: Validate all combinations\n print(\"Found {} valid combinations\".format(len(self.valid_combinations)))\n self.results = []\n print(\"Building models...\")\n self.build_models() # step 4: Build models and validate results\n\n def process_attributes(self, bypass=False):\n if bypass:\n self.valid_attributes = self.attribute_data\n self.valid_transformations = []\n for ad in list(self.attribute_data.columns):\n self.valid_transformations.append([ad, np.nan, self.data[ad]])\n return\n # step 1.a Perform all possible transformations on attribute data\n print(\"Calculating attribute transformations...\")\n self.attribute_transformations = [\n AttributeTransformation(self.attribute_data[a], self.response_data) for a in self.attribute_data.columns\n ]\n # step 1.b Evalute all attribute transformations for best selection\n print(\"Evaluating all attribute transformations...\")\n self.valid_transformations = [\n t.evaluate(self.pearsons_change_threshold) for t in self.attribute_transformations\n ]\n self.valid_attributes = pd.DataFrame()\n for vt in self.valid_transformations:\n if vt is not None:\n if vt[0] not in self.data.columns:\n self.data[vt[0]] = vt[2]\n self.valid_attributes[vt[0]] = vt[2]\n\n def process_interactions(self):\n print(\"Calculating attribute interactions...\")\n attribute_combinations = list(itertools.combinations(self.attribute_data.columns, 2))\n print(\"Found {} attribute interactions\".format(len(attribute_combinations)))\n attribute_interactions = [\n AttributeInteraction(self.attribute_data[list(a)], self.response_data) for a in attribute_combinations\n ]\n # step 1.b Evalute all attribute transformations for best selection\n print(\"Evaluating all attribute interactions...\")\n valid_interactions = [\n t.evaluate() for t in attribute_interactions\n ]\n valid_interactions = list(filter(None, valid_interactions))\n for vt in valid_interactions:\n if vt is not None:\n if vt[0] not in self.interaction_data.columns:\n self.interaction_data[vt[0]] = vt[2]\n if vt[0] not in self.data.columns:\n self.data[vt[0]] = vt[2]\n del attribute_combinations\n del attribute_interactions\n del valid_interactions\n\n def validate_combinations_p(self, c):\n subset = self.data[list(c)]\n valid = True\n if len(c) > 1:\n for i in range(0, len(c)):\n subset_data = subset.drop(c[i], axis=1)\n mod = sm.OLS(subset[c[i]], sm.add_constant(subset_data))\n res = mod.fit()\n vif2 = 1. / (1. - res.rsquared)\n if vif2 > self.vif_threshold:\n valid = False\n break\n if valid:\n return c\n\n def validate_combinations_parallel(self):\n all_combinations = copy.copy(self.all_combinations)\n pool = mp.Pool(mp.cpu_count())\n parallel_results = pool.map(self.validate_combinations_p, [c for c in all_combinations])\n pool.close()\n pool.join()\n parallel_results = list(filter(None, parallel_results))\n self.valid_combinations = parallel_results\n\n def validate_combinations(self):\n for c in self.all_combinations:\n valid = True\n subset = self.data[list(c)]\n if len(c) > 1:\n for i in range(0, len(c)):\n subset_data = subset.drop(c[i], axis=1)\n mod = sm.OLS(subset[c[i]], sm.add_constant(subset_data))\n res = mod.fit()\n vif2 = 1./(1. 
- res.rsquared)\n if vif2 > self.vif_threshold:\n valid = False\n break\n if valid:\n self.valid_combinations.append(list(c))\n\n def build_models_parallel(self, c, model_config):\n m_data = copy.copy(self.data[list(c)])\n m_data[self.response] = self.response_data.values\n m = AnalyticalModel(m_data, self.response, one_out=self.one_out, model_config=model_config)\n return m\n\n def build_models(self):\n if use_parallel:\n all_combinations = copy.copy(self.valid_combinations)\n pool = mp.Pool(mp.cpu_count())\n pool_results = [pool.apply_async(self.build_models_parallel, (c, self.model_config)) for c in all_combinations]\n parallel_results = []\n for p in pool_results:\n parallel_results.append(p.get())\n pool.terminate()\n pool.join()\n [self.best_models.add(m, m.evaluate(use=self.evaluation)) for m in parallel_results]\n self.results = self.best_models\n else:\n for c in self.valid_combinations:\n m_data = copy.copy(self.data[list(c)])\n m_data[self.response] = self.response_data.values\n m = AnalyticalModel(m_data, self.response, training_split=0.80, one_out=self.one_out, model_config=self.model_config)\n self.best_models.add(m, m.evaluate(use=self.evaluation))\n self.results = self.best_models\n if self.results.s_heap[0][1] is not None:\n self.results.s_heap[0][1].plot_results()\n self.results.s_heap[0][1].print_summary()\n self.results.print(self.evaluation)\n else:\n print(\"No valid models found or all model outputs failed evaluation.\")\n\n def set_ga_config(self):\n if self.ga_config is None and use_parallel:\n self.ga_config = {\n \"config\":\n {\n \"mutate_percent\": 0.2,\n \"no_change_threshold\": 10,\n \"population_size\": 50\n },\n \"mutate_config\":\n {\n \"add_percent\": 0.2,\n \"delete_percent\": 0.2\n }\n }\n else:\n self.ga_config = {\n \"config\":\n {\n \"mutate_percent\": 0.2,\n \"no_change_threshold\": 20,\n \"population_size\": 100\n },\n \"mutate_config\":\n {\n \"add_percent\": 0.2,\n \"delete_percent\": 0.2\n }\n }\n\n def run_genetic_algorithm_parallel(self, i):\n validated_data = copy.copy(self.data[list(self.valid_attributes.columns)])\n validated_data[self.response] = self.response_data.values\n\n ga = GeneticAlgorithm(\n data=validated_data,\n response=self.response,\n attributes=list(self.valid_attributes.columns),\n evaluation=self.evaluation,\n seed=i,\n one_out=self.one_out,\n model_config=self.model_config,\n **self.ga_config[\"config\"]\n )\n ga.execute(**self.ga_config[\"mutate_config\"])\n return ga.best_models.s_heap\n\n def run_genetic_algorithm(self):\n use_parallel = False if len(self.valid_transformations) <= 21 else True\n # use_parallel = False\n self.set_ga_config()\n if use_parallel:\n concurrent_ga = mp.cpu_count()\n # concurrent_ga = 4\n pool = mp.Pool(concurrent_ga)\n pool_results = [pool.apply_async(self.run_genetic_algorithm_parallel, (i,)) for i in range(0, concurrent_ga)]\n parallel_results = []\n for r in pool_results:\n parallel_results.append(r.get())\n pool.terminate()\n pool.join()\n for m in parallel_results:\n for mi in m:\n metric = mi[1].evaluate(use=self.evaluation)\n self.best_models.add(mi[1], metric)\n self.results = self.best_models\n else:\n validated_data = copy.copy(self.data[list(self.valid_attributes.columns)])\n validated_data[self.response] = self.response_data.values\n ga = GeneticAlgorithm(\n data=validated_data,\n response=self.response,\n attributes=list(self.valid_attributes.columns),\n evaluation=self.evaluation,\n model_config=self.model_config,\n one_out=self.one_out,\n **self.ga_config[\"config\"]\n )\n 
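# run this seeded GA instance; the parent process merges the best models it returns
        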
ga.execute(**self.ga_config[\"mutate_config\"])\n self.results = ga.best_models\n if self.results.s_heap[0][1] is not None:\n self.results.s_heap[0][1].plot_results()\n self.results.s_heap[0][1].print_summary()\n self.results.print(self.evaluation)\n else:\n print(\"No valid models found or all model outputs failed evaluation.\")\n\n\nif __name__ == \"__main__\":\n t0 = time.time()\n\n # ------------ Data input and setup configuration ---------- #\n _raw_data = pd.read_excel(os.path.join(\"data\", \"VB_Data_1a.xlsx\")) # Data source\n _target = 'Response' # Column in data source to use as target attribute\n _raw_data = _raw_data.drop(\"ID\", axis=1)\n # _raw_data = _raw_data.drop([\"x5\",\"x6\",\"x7\",\"x8\",\"x9\"], axis=1)\n # _raw_data = _raw_data.drop([\"x6\",\"x7\",\"x8\",\"x9\"], axis=1)\n #_raw_data = _raw_data.drop([\"x1\",\"x2\",\"x3\"], axis=1)\n\n model_config = {\"type\": \"RFR\"}\n ga_config = {\n \"config\":\n {\n \"mutate_percent\": 0.2,\n \"no_change_threshold\": 10,\n \"population_size\": 50\n },\n \"mutate_config\":\n {\n \"add_percent\": 0.2,\n \"delete_percent\": 0.2\n }\n }\n amlr = AutomatedModelBuilder(\n _raw_data,\n _target,\n genetic=True,\n skip_transformations=False,\n skip_interactions=False,\n evaluation=\"rmse\",\n one_out=False,\n model_config=model_config,\n ga_config=None\n )\n\n t1 = time.time() - t0\n print(\"Time elapsed: {} sec\".format(round(t1, 3)))\n","repo_name":"dbsmith88/vb_experimental","sub_path":"automated_models.py","file_name":"automated_models.py","file_ext":"py","file_size_in_byte":17335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"17250060109","text":"import socket\nimport struct\n\n__author__ = 'hukkinj1'\n\n\ndef ip_string_to_int(ip):\n \"\"\"\n Convert an IPv4 string address to int. For example convert '0.0.0.1' to 1\n \"\"\"\n packed_ip = socket.inet_aton(ip)\n return struct.unpack(\"!L\", packed_ip)[0]\n\n\ndef int_to_ip_string(int_ip):\n packed_ip = struct.pack(\"!L\", int_ip)\n return socket.inet_ntoa(packed_ip)\n\n\ndef connect(ip, port):\n \"\"\"\n :param ip: Ip address. 
Either IP or DNS name as string\n :param port: Integer\n :return: Connected socket, or None if unsuccessful\n \"\"\"\n sock = None\n\n try:\n addrinfo = socket.getaddrinfo(ip, port, socket.AF_UNSPEC, socket.SOCK_STREAM)\n except socket.error:\n return None\n\n for result in addrinfo:\n family, socktype, proto, _, addr = result\n try:\n sock = socket.socket(family, socktype, proto)\n except socket.error:\n sock = None\n continue\n try:\n sock.connect(addr)\n except socket.error as e:\n log_message = \"Failed to connect {}:{} due to {}\".format(addr[0], addr[1], e)\n print(log_message)\n sock.close()\n sock = None\n continue\n break\n return sock\n\n\ndef create_listen_socket(ip, port):\n \"\"\"\n :param ip:\n :param port:\n :return: Return the created blocking listen socket or None if unsuccessful\n \"\"\"\n sock = None\n\n try:\n addrinfo = socket.getaddrinfo(ip, port, socket.AF_UNSPEC, socket.SOCK_STREAM)\n except socket.error:\n return None\n\n for result in addrinfo:\n family, socktype, proto, _, addr = result\n try:\n sock = socket.socket(family, socktype, proto)\n except socket.error:\n sock = None\n continue\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n try:\n sock.bind(addr)\n sock.listen(128) # parameter value = maximum number of queued connections\n except socket.error as e:\n log_message = \"Failed to bind listen socket to address {}:{} due to {}\".format(addr[0], addr[1], e)\n print(log_message)\n sock.close()\n sock = None\n continue\n break\n return sock\n\n\ndef recvall(sock, n):\n \"\"\"\n :param sock:\n :param n:\n :return: If successful, return read n amount of bytes. Otherwise return None.\n \"\"\"\n data = b\"\"\n while len(data) < n:\n new_bytes = sock.recv(n - len(data))\n if not new_bytes:\n return None\n data += new_bytes\n return data\n","repo_name":"hukkin/aalto-p2p","sub_path":"networking/networking.py","file_name":"networking.py","file_ext":"py","file_size_in_byte":2578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"7210772826","text":"class Solution:\n def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:\n ret = []\n\n def rec(nums, remain, sofar):\n if remain < 0:\n return\n if remain == 0:\n ret.append(sofar)\n return\n for i in range(len(nums)):\n rec(nums[i:], remain - nums[i], sofar + [nums[i]])\n rec(candidates, target, [])\n return ret\n","repo_name":"zakimal/cpp-practice","sub_path":"leetcode_old/combination-sum.py","file_name":"combination-sum.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"43183973357","text":"from django.utils.translation import ugettext_lazy as _\nfrom common.constants import Constant\nfrom shoutit.models import Permission, UserPermission\n\n\nclass ConstantPermission(Constant):\n counter = 0\n values = {}\n reversed_instances = {}\n\n def __init__(self, text, message):\n self.permission, created = Permission.objects.get_or_create(name=text)\n self.message = message\n self.value = self.permission.id\n self.__class__.reversed_instances[self.permission] = self\n Constant.__init__(self, text, self.value)\n\n def __hash__(self):\n return self.value.int\n\n def __int__(self):\n return self.value.int\n\n def __eq__(self, other):\n return self.value.int == other.value.int\n\n\nPERMISSION_USE_SHOUTIT = ConstantPermission(\"USE_SHOUTIT\", _(\"You're not allowed to use Shoutit\"))\nPERMISSION_SHOUT_MORE = 
ConstantPermission(\"SHOUT_MORE\", _(\"Activate your account to create more shouts (check your \"\n \"email for activation link)\"))\nPERMISSION_SHOUT_REQUEST = ConstantPermission(\"SHOUT_REQUEST\", _(\"You're not allowed to create requests\"))\nPERMISSION_SHOUT_OFFER = ConstantPermission(\"SHOUT_OFFER\", _(\"You're not allowed to create offers\"))\nPERMISSION_LISTEN_TO_TAG = ConstantPermission(\"LISTEN_TO_TAG\", _(\"You're not allowed to listen to interests\"))\nPERMISSION_LISTEN_TO_PROFILE = ConstantPermission(\"LISTEN_TO_PROFILE\", _(\"You're not allowed to listen to profiles\"))\nPERMISSION_SEND_MESSAGE = ConstantPermission(\"SEND_MESSAGE\", _(\"You are not allowed to send messages\"))\nPERMISSION_REPORT = ConstantPermission(\"SEND_REPORT\", _(\"You're not allowed to send reports\"))\n\nINITIAL_USER_PERMISSIONS = [\n PERMISSION_USE_SHOUTIT,\n PERMISSION_SHOUT_MORE,\n PERMISSION_SHOUT_REQUEST,\n PERMISSION_SHOUT_OFFER,\n PERMISSION_LISTEN_TO_TAG\n]\n\nACTIVATED_USER_PERMISSIONS = [\n PERMISSION_LISTEN_TO_PROFILE,\n PERMISSION_SEND_MESSAGE,\n PERMISSION_REPORT,\n]\n\nFULL_USER_PERMISSIONS = INITIAL_USER_PERMISSIONS + ACTIVATED_USER_PERMISSIONS\n\nANONYMOUS_USER_PERMISSIONS = [\n PERMISSION_USE_SHOUTIT,\n]\n\n\ndef give_user_permissions(user, permissions):\n for permission in permissions:\n if isinstance(permission, ConstantPermission):\n permission = permission.permission\n UserPermission.objects.get_or_create(user=user, permission=permission)\n\n\ndef take_permissions_from_user(user, permissions):\n for permission in permissions:\n if isinstance(permission, ConstantPermission):\n permission = permission.permission\n UserPermission.objects.filter(user=user, permission=permission).delete()\n","repo_name":"shoutit/shoutit-api","sub_path":"src/shoutit/permissions.py","file_name":"permissions.py","file_ext":"py","file_size_in_byte":2655,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"43522886613","text":"import os\nimport argparse\nimport json\nimport csv\nimport random\n\ndef clean_text(text):\n return text.replace(\"\\t\", \" \").replace(\"\\n\", \" \").replace('\"', \"'\")\n\n\ndef write_pool(file_path, pool):\n with open(file_path, \"w\", encoding=\"utf-8\") as w:\n header = \"INPUT:url\\tINPUT:title\\tINPUT:text\\tGOLDEN:res\";\n w.write(header + \"\\n\")\n writer = csv.writer(w, delimiter='\\t', quoting=csv.QUOTE_MINIMAL)\n for r in pool:\n row = (r[\"url\"], r[\"title\"], r[\"text\"], r[\"res\"])\n writer.writerow(row)\n print(\"{} created!\".format(file_path))\n\ndef read_honey(honey_path):\n honey = []\n with open(honey_path, \"r\") as r:\n header = tuple(next(r).strip().split(\"\\t\"))\n assert header == (\"INPUT:url\", \"INPUT:title\", \"INPUT:text\", \"GOLDEN:res\")\n for line in r:\n url, title, text, res = line.strip().split(\"\\t\")\n honey.append({\"url\": url, \"title\": title, \"text\": text, \"res\": res})\n return honey\n\ndef main(original_json_path, output_dir, honey_path, honey_size, pool_size, skip):\n records = []\n with open(original_json_path, \"r\") as r:\n records = json.load(r)\n random.shuffle(records)\n\n\n honey = read_honey(honey_path)\n\n current_pool = []\n i = 0\n for record in records[skip:]:\n if i % pool_size == 0 and i != 0:\n clean_pool = []\n for r in current_pool:\n title = clean_text(r[\"title\"])\n text = clean_text(r[\"text\"])\n url = r[\"url\"]\n pool_record = {\"url\": url, \"title\": title, \"text\": text, \"res\": \"\"}\n clean_pool.append(pool_record)\n random.shuffle(honey)\n 
current_pool = clean_pool + honey[:honey_size]\n random.shuffle(current_pool)\n\n pool_num = (i - 1) // pool_size\n pool_file_name = \"pool_{}.tsv\".format(pool_num)\n pool_file_name = os.path.join(output_dir, pool_file_name)\n write_pool(pool_file_name, current_pool)\n current_pool = []\n if not clean_text(record[\"text\"]) or not clean_text(record[\"title\"]):\n print(\"Skipping: {}\".format(record[\"url\"]))\n continue\n current_pool.append(record)\n i += 1\n\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--original-json-path\", type=str, required=True)\n parser.add_argument(\"--output-dir\", type=str, required=True)\n parser.add_argument(\"--honey-path\", type=str, required=True)\n parser.add_argument(\"--pool-size\", type=int, default=80)\n parser.add_argument(\"--honey-size\", type=int, default=20)\n parser.add_argument(\"--skip\", type=int, default=0)\n args = parser.parse_args()\n main(**vars(args))\n","repo_name":"IlyaGusev/tgcontest","sub_path":"scripts/json2toloka.py","file_name":"json2toloka.py","file_ext":"py","file_size_in_byte":2749,"program_lang":"python","lang":"en","doc_type":"code","stars":93,"dataset":"github-code","pt":"75"} +{"seq_id":"23449651999","text":"import numpy as np\nimport pandas as pd\nimport os\n\ndef npm_to_csv(num,date):\n \n loc = os.path.join(os.getcwd(),\"datasets\\Carrada\")\n # loc = r\"C:\\Users\\Sidharth\\Documents\\code\\HAPD\\datasets\\Carrada\"\n cam = r\"camera_images\"\n ra = r\"range_angle_numpy\"\n rd = r\"range_doppler_numpy\"\n ad = \"angle_doppler_processed\"\n loc = os.path.join(loc,date)\n frame = \"\".join([\"0\" for _ in range(6-len(str(num)))])+str(num)\n\n # doppler = np.rot90(np.rot90(np.array(np.load(os.path.join(loc,os.path.join(rd,frame+\".npy\"))))))\n radar = np.load(os.path.join(loc,os.path.join(ra,frame+\".npy\")))\n # Convert the array to a Pandas dataframe\n df = pd.DataFrame(radar)\n # dff = pd.DataFrame(np.fft.fft(doppler).real)\n\n\n # Write the dataframe to a CSV file\n df.to_csv(f\"csv/{num}.csv\", index=False)\n # dff.to_csv('dataoutpy.csv',index=False)\n\n\ndate = \"2020-02-28-12-12-16\"\nfor x in range(30,90):\n npm_to_csv(x,date)","repo_name":"sidharth980/HAPD","sub_path":"npm_to_csv.py","file_name":"npm_to_csv.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"6026651120","text":"# Models\nfrom sklearn.dummy import DummyClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.naive_bayes import ComplementNB\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\n\nmodels = [\n {\n 'clf_name': 'Dummy',\n 'clf': DummyClassifier(random_state=1),\n 'has_scaling': True,\n 'search_params': {},\n 'best_params': {},\n 'best_score': 0 # weighted F1-Score\n },\n {\n 'clf_name': 'Logistic Regression',\n 'clf': LogisticRegression(class_weight='balanced', multi_class='multinomial', n_jobs=-1, random_state=1),\n 'has_scaling': True,\n 'search_params': {\n 'clf__penalty': ['l1', 'l2', 'elasticnet'],\n 'clf__tol': [1, 0.1, 0.01, 0.001, 0.0001, 1e-05],\n 'clf__C': [0.001, 0.01, 0.1, 1, 10],\n 'clf__solver': ['sag', 'saga', 'newton-cg']\n },\n 'best_params': {\n 'clf__tol': 1e-05,\n 'clf__solver': 'saga',\n 'clf__penalty': 'l2',\n 'clf__C': 0.001\n },\n 'best_score': 0.8391989484388706\n },\n {\n 'clf_name': 'Random Forest',\n 'clf': 
RandomForestClassifier(class_weight='balanced', n_jobs=-1, random_state=1),\n 'has_scaling': False,\n 'search_params': {\n 'clf__bootstrap': [True, False],\n 'clf__max_depth': [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, None],\n 'clf__max_features': ['auto', 'sqrt'],\n 'clf__min_samples_leaf': [1, 2, 4],\n 'clf__min_samples_split': [2, 5, 10],\n 'clf__n_estimators': [100, 200, 300, 400, 500]\n },\n 'best_params': {\n 'clf__n_estimators': 500,\n 'clf__min_samples_split': 5,\n 'clf__min_samples_leaf': 1,\n 'clf__max_features': 'auto',\n 'clf__max_depth': 30,\n 'clf__bootstrap': False\n },\n 'best_score': 0.9939005405682252\n },\n {\n 'clf_name': 'Decision Tree',\n 'clf': DecisionTreeClassifier(class_weight='balanced', random_state=1),\n 'has_scaling': False,\n 'search_params': {\n 'clf__criterion': ['gini', 'entropy', 'log_loss'],\n 'clf__splitter': ['best', 'random'],\n 'clf__max_features': ['auto', 'sqrt', 'log2'],\n 'clf__max_depth': list(range(3, 18, 3)),\n 'clf__min_samples_leaf': [3, 5, 10, 15, 20],\n 'clf__min_samples_split': list(range(8, 22, 2)),\n },\n 'best_params': {\n 'clf__splitter': 'best',\n 'clf__min_samples_split': 14,\n 'clf__min_samples_leaf': 3,\n 'clf__max_features': 'auto',\n 'clf__max_depth': 15,\n 'clf__criterion': 'log_loss'\n },\n 'best_score': 0.9763104573565381,\n },\n {\n 'clf_name': 'Naive Bayes',\n 'clf': ComplementNB(),\n 'has_scaling': False,\n 'search_params': {\n 'clf__alpha': [0.01, 0.1, 1, 10],\n },\n 'best_params': {\n 'clf__alpha': 0.01\n },\n 'best_score': 0.6254139068432872,\n },\n {\n 'clf_name': 'KNN',\n 'clf': KNeighborsClassifier(n_jobs=-1),\n 'has_scaling': True,\n 'search_params': {\n 'clf__n_neighbors': list(range(5, 60, 10)),\n 'clf__weights': ['uniform', 'distance'],\n 'clf__algorithm': ['auto', 'ball_tree', 'kd_tree'],\n 'clf__leaf_size': list(range(10, 60, 10)),\n 'clf__p': [1, 2]\n },\n 'best_params': {\n 'clf__weights': 'distance',\n 'clf__p': 2,\n 'clf__n_neighbors': 5,\n 'clf__leaf_size': 50,\n 'clf__algorithm': 'ball_tree'\n },\n 'best_score': 0.9604647579792767,\n },\n {\n 'clf_name': 'SVM',\n 'clf': SVC(class_weight='balanced', random_state=1),\n 'has_scaling': True,\n 'search_params': {\n 'clf__C': [0.01, 0.1, 1, 10],\n 'clf__kernel': ['linear', 'rbf'],\n 'clf__gamma': [0.001, 0.01, 0.1, 10]\n },\n 'best_params': {\n 'clf__kernel': 'rbf',\n 'clf__gamma': 10,\n 'clf__C': 10\n },\n 'best_score': 0.9549206834280316\n }\n]\n","repo_name":"manelfideles/net-wars","sub_path":"src/model_configs.py","file_name":"model_configs.py","file_ext":"py","file_size_in_byte":4365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"21929775028","text":"if __name__ == '__main__':\n for _ in range(int(input())):\n name = input()\n score = float(input())\n if (_ == 0):\n students = list()\n students.append([name, score])\n\n def myfunc(e):\n return (e[1])\n students.sort(key=myfunc)\n minx = students[0][1]\n temp = list()\n for i in students:\n if (minx != i[1]):\n temp.append(i)\n minx = temp[0][1]\n result = list()\n for i in students:\n if (minx == i[1]):\n result.append(i[0])\n result.sort()\n for i in result:\n print (i)\n","repo_name":"ducthach1401/Hackerrank","sub_path":"Python/10.py","file_name":"10.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"8690584943","text":"import re\r\nimport requests\r\n\r\n# match()方法\r\ncontent = 'Hello 123 4567 World_This is a Regex Demo'\r\nresult = 
re.match('^Hello\\s\\d\\d\\d\\s\\d{4}\\s\\w{10}', content)\r\nprint(result)\r\nprint(result.group())\r\nprint(result.span())\r\n\r\n# Matching a target group\r\nresult = re.match('^Hello\\s(\\d{3})\\s(\\d{4})\\sWorld', content)\r\nprint(result)\r\nprint(result.group(1), result.group(2))\r\n\r\n# Generic matching\r\nresult = re.match('^Hello.*Demo$', content)\r\nprint(result)\r\nprint(result.group())\r\n\r\n# Greedy vs. non-greedy\r\nresult = re.match('^He.*(\\d+).*Demo$', content)  # a bare .* matches greedily\r\nprint('greedy match:', result.group(1))\r\nresult = re.match('^He.*?(\\d+).*Demo$', content)  # whereas .*? matches non-greedily\r\nprint('non-greedy match:', result.group(1))\r\n\r\n# Modifiers\r\ncontent = '''Hello 1234567 World_This\r\nis a Regex Demo'''  # unlike the earlier content, this one has a newline in the middle\r\nresult = re.match('^He.*?(\\d+).*?Demo$', content, re.S)  # the re.S modifier makes . match every character, including newlines\r\nprint('modifier:', result.group(1))\r\n\r\n# Escaped matching\r\ncontent = '(百度)www.baidu.com'\r\nresult = re.match('^\\(百度\\)www\\.baidu\\.com$', content)\r\n\r\n# search()\r\ncontent = 'Extra strings Hello 1234567 World_This is a Regex Demo Extra strings'\r\nresult = re.search('Hello.*?(\\d+).*?Demo', content)\r\nprint('search() match:', result.group(1))\r\n\r\n# Use search() to pick movie titles from the iQIYI page\r\nresponse = requests.get('https://www.iqiyi.com/dianying/?vfrm=pcw_home&vfrmblk=C&vfrmrst=712211_channel_dianying')\r\nhtml = response.text\r\n# print(html)\r\nresult = re.search('data-indexfocus-currenttitleelem=(.*)\\n', html)\r\nprint(result.group(1))\r\n\r\n# Use findall() to pick movie titles from the iQIYI page\r\nresults = re.findall('data-indexfocus-currenttitleelem=(.*)\\n', html)\r\nprint(results)\r\nfor result in results:\r\n    print(result)\r\n\r\n# Using sub()\r\ncontent = '421rb4bre23eib'\r\ncontent = re.sub('\\d+', '', content)  # replace every digit with the empty string, i.e. delete all digits\r\nprint(content)\r\n\r\n# Using compile()\r\ncontent1 = '2016-12-15 12:00'\r\ncontent2 = '2016-12-17 12:55'\r\ncontent3 = '2016-12-22 13:21'\r\npattern = re.compile('\\d{2}:\\d{2}')  # compile the regex string into a reusable pattern object\r\nresult1 = re.sub(pattern, '', content1)\r\nresult2 = re.sub(pattern, '', content2)\r\nresult3 = re.sub(pattern, '', content3)\r\nprint(result1, result2, result3)","repo_name":"wang-10086/pycrawler_learning","sub_path":"Chapter3 基本库的使用/3.3 正则表达式/regular expression.py","file_name":"regular expression.py","file_ext":"py","file_size_in_byte":2440,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"}
+{"seq_id":"74228881203","text":"import argparse\nimport json\nimport os\nimport requests\nimport traceback\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime, date\nfrom proxy import Proxy\n\nclass HTML:\n\n    def __init__(self, nber_id):\n        self.nber_id = nber_id\n    \n    def string_id(self):\n        '''Returns the string format for the NBER ID. 
For NBER ID above 1000 it will return itself.\n '''\n if self.nber_id < 10:\n return f'000{self.nber_id}'\n elif 10 <= self.nber_id < 100:\n return f'00{self.nber_id}'\n elif 100 <= self.nber_id < 1000:\n return f'0{self.nber_id}'\n else:\n return str(self.nber_id)\n \n def url(self):\n '''Returns the URL string for the corresponding NBER paper.\n '''\n return f'https://www.nber.org/papers/w{self.string_id()}'\n \n def request(self):\n '''Make a web request for the corresponding NBER paper.\n '''\n status_code = None\n while status_code != 200:\n proxy = {'https': Proxy().get_proxy()}\n response = requests.get(self.url(), proxies=proxy, timeout=5)\n status_code = response.status_code\n if status_code in [403, 404]:\n break\n\n return response\n \n def content(self):\n '''Parse the HTML for the corresponding NBER paper.\n '''\n return BeautifulSoup(self.request().content, features='html.parser')\n\nclass Paper:\n '''This class processes the HTML text into JSON format.\n '''\n\n def __init__(self, content, nber_id):\n self.content = content\n self.nber_id = nber_id\n \n def to_date(self, x):\n '''Returns a date which will be used for several columns.\n '''\n return date(x.year, x.month, x.day)\n \n def citation_title(self):\n '''Returns the paper title if any, otherwise returns nothing.\n '''\n try:\n return self.content.find('meta', {'name': 'citation_title'}).attrs['content']\n except AttributeError:\n return None\n \n def citation_author(self):\n '''Returns a list of author(s) for the corresponding NBER paper. Can be more than one hence it is stored as a list.\n '''\n try:\n return [x.attrs['content'] for x in self.content.find_all('meta', {'name': 'citation_author'})]\n except AttributeError:\n return None\n\n def citation_publication_date(self):\n '''Returns the publication date if any, otherwise returns nothing.\n '''\n try:\n citation_publication_date = self.content.find('meta', {'name': 'citation_publication_date'})\n citation_publication_date = self.to_date(datetime.strptime(citation_publication_date.attrs['content'], '%Y/%m/%d'))\n return citation_publication_date\n except AttributeError:\n return None\n \n def paper_datetime(self):\n '''Returns a timestamp which will consist either issuance date, revision date, or both. Returns nothing if empty.\n '''\n try:\n return [x.attrs['datetime'][:10] for x in self.content.find_all('time')]\n except AttributeError:\n return None\n \n def issue_date(self):\n '''Returns the issuance date if any, otherwise returns nothing.\n '''\n try:\n return self.to_date(datetime.strptime(self.paper_datetime()[0], '%Y-%m-%d'))\n except IndexError:\n return None\n \n def revision_date(self):\n '''Returns the revision date if any, otherwise returns nothing.\n '''\n try:\n revision_date = self.paper_datetime()[1]\n revision_date = self.to_date(datetime.strptime(revision_date, '%Y-%m-%d'))\n return revision_date\n except IndexError:\n return None\n\n def related(self):\n '''Returns a list of items that relates to the corresponding paper such as topics, programs, and working groups.\n Returns nothing if empty.\n '''\n try:\n return self.content.find_all('div', {'class': 'info-grid__item'})\n except AttributeError:\n return None\n \n def related_title(self):\n '''Returns the text that comes from the related method.\n '''\n try:\n return [x.find('h3').text for x in self.related()]\n except AttributeError:\n return None\n\n def get_related(self, title):\n '''Returns a list of either topics, programs, or working groups for the corresponding paper. 
Returns nothing if empty.\n '''\n try:\n index = self.related_title().index(title)\n item = [x.text for x in self.related()[index].find('div').contents if x.text != '']\n return item\n except ValueError:\n return None\n \n def abstract(self):\n '''Returns the abstract if any, otherwise returns nothing.\n '''\n try:\n return self.content.find('div', {'class': 'page-header__intro-inner'}).text\n except AttributeError:\n return None\n \n def acknowledgement(self):\n '''Returns the acknowledgement if any, otherwise returns nothing.\n '''\n try:\n return self.content.find('div', {'class': 'accordion__body', 'id': 'accordion-body-guid1'}).text\n except AttributeError:\n return None\n \n def save(self):\n '''Save the paper using JSON format.\n '''\n data = {\n 'id': self.nber_id,\n 'citation_title': self.citation_title(),\n 'citation_author': self.citation_author(),\n 'citation_publication_date': str(self.citation_publication_date()),\n 'issue_date': str(self.issue_date()),\n 'revision_date': str(self.revision_date()),\n 'topics': self.get_related('Topics'),\n 'program': self.get_related('Programs'),\n 'projects': self.get_related('Projects'),\n 'working_groups': self.get_related('Working Groups'),\n 'abstract': self.abstract(),\n 'acknowledgement': self.acknowledgement()\n }\n \n with open(f'data/nber/{self.nber_id}.json', 'w') as file:\n json.dump(data, file, indent=4)\n\ndef main(start, end):\n while start < end:\n raw = HTML(start)\n file_check = os.path.exists(f'data/nber/{start}.json')\n if not file_check:\n try:\n print(f'[DOWNLOAD \\U0001F4BE]: {raw.url()}')\n content = raw.content()\n paper = Paper(content, raw.nber_id)\n paper.save()\n print(f'[SUCCEED \\U00002705]: {raw.url()}')\n except Exception as err:\n print(traceback.print_exc())\n print(f'{err}: {start}')\n pass\n else:\n print(f'[IGNORE \\U0001F4C1]: {raw.url()}')\n \n start += 1\n \nif __name__ == '__main__':\n PARSER = argparse.ArgumentParser()\n PARSER.add_argument('-s', '--start', type=int, default=0, help='Starting NBER ID', metavar='')\n PARSER.add_argument('-e', '--end', type=int, default=5, help='Ending NBER ID', metavar='')\n ARGS = PARSER.parse_args()\n START = ARGS.start\n END = ARGS.end\n main(start=START, end=END)\n","repo_name":"ledwindra/nber","sub_path":"src/nber.py","file_name":"nber.py","file_ext":"py","file_size_in_byte":7283,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"75"} +{"seq_id":"19861382917","text":"from datetime import datetime\nfrom functools import partial\nimport yaml\nimport yamlordereddictloader\n\nimport pandas as pd\nimport pytest\n\nfrom runScenarios import add_config_parameter_column\nimport runScenarios as rs\n\nyaml_load = partial(yaml.load, Loader=yamlordereddictloader.Loader)\n\n\n@pytest.fixture\ndef original_df():\n return pd.DataFrame({'sample_number': [1, 2, 3, 4, 5]})\n\n\ndef test_add_config_parameter_column__int(original_df):\n new_df = add_config_parameter_column(original_df, \"new_column\", 10)\n\n correct_df = pd.DataFrame({\n 'sample_number': [1, 2, 3, 4, 5],\n 'new_column': [10]*5})\n pd.testing.assert_frame_equal(new_df, correct_df)\n\n\ndef test_add_config_parameter_column__matrix(original_df):\n f = {'matrix': [[9, 8], [7, 6]]}\n new_df = add_config_parameter_column(original_df, \"new_column\", f)\n assert new_df.shape == (5, 5)\n correct_df = pd.DataFrame({\n 'sample_number': [1, 2, 3, 4, 5],\n 'new_column1_1': [9]*5,\n 'new_column1_2': [8]*5,\n 'new_column2_1': [7]*5,\n 'new_column2_2': [6]*5,\n })\n 
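# the 2x2 matrix expands into one column per cell, named new_column<row>_<col> (1-indexed)
    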
pd.testing.assert_frame_equal(new_df, correct_df)\n\n\ndef test_add_config_parameter_column__random_uniform(original_df):\n f = {'np.random': 'uniform', 'function_kwargs': {'low': 5, 'high': 6}}\n new_df = add_config_parameter_column(original_df, \"new_column\", f)\n assert new_df.shape == (5, 2)\n assert \"new_column\" in new_df.columns\n assert all((new_df[\"new_column\"] >= 5) & (new_df[\"new_column\"] <= 6))\n\n\ndef test_add_config_parameter_column__datetotimestep():\n df = pd.DataFrame({'sample_number': [1, 2, 3, 4, 5],\n 'startdate': [datetime(2020, 2, 20)]*5})\n f = {'custom_function': 'DateToTimestep',\n 'function_kwargs': {'dates': datetime(2020, 3, 1), 'startdate_col': 'startdate'}}\n new_df = add_config_parameter_column(df, \"new_column\", f)\n correct_df = pd.DataFrame({\n 'sample_number': [1, 2, 3, 4, 5],\n 'startdate': [datetime(2020, 2, 20)]*5,\n 'new_column': [10]*5})\n pd.testing.assert_frame_equal(new_df, correct_df)\n\n\ndef test_add_config_parameter_column__subtract():\n df = pd.DataFrame({'sample_number': [1, 2, 3, 4, 5],\n 'col1': [2, 4, 6, 8, 10],\n 'col2': [1, 3, 5, 7, 9]})\n f = {'custom_function': 'subtract',\n 'function_kwargs': {'x1': 'col1', 'x2': 'col2'}}\n new_df = add_config_parameter_column(df, \"new_column\", f)\n correct_df = pd.DataFrame({\n 'sample_number': [1, 2, 3, 4, 5],\n 'col1': [2, 4, 6, 8, 10],\n 'col2': [1, 3, 5, 7, 9],\n 'new_column': [1]*5})\n pd.testing.assert_frame_equal(new_df, correct_df)\n\n\ndef test_add_config_parameter_column__error():\n f = {'weird_function': {}}\n with pytest.raises(ValueError, match=\"Unknown type of parameter\"):\n add_config_parameter_column(pd.DataFrame, \"new_column\", f)\n\n\n@pytest.mark.parametrize(\"region, expected\", [(\"EMS_11\", 1), (\"EMS_10\", 2)])\ndef test_add_sampled_parameters_regions(region, expected):\n # Test that we correctly add sampled parameters by region, including\n # a default for regions not otherwise specified.\n config = \"\"\"\n sampled_parameters:\n myparam:\n EMS_11:\n np.random: choice\n function_kwargs: {'a': [1]} \n np.random: choice\n function_kwargs: {'a': [2]}\n \"\"\"\n df_in = pd.DataFrame({'sample_num': [1, 2, 3]})\n df_exp = df_in.assign(myparam=len(df_in) * [expected])\n\n df_out = rs.add_parameters(df_in, \"sampled_parameters\", yaml_load(config), region, None)\n\n pd.testing.assert_frame_equal(df_out, df_exp)\n\n\ndef test_add_sampled_parameters_expand_age():\n config = \"\"\"\n sampled_parameters:\n myparam:\n expand_by_age: True\n np.random: choice\n function_kwargs:\n - {'a': [1]}\n - {'a': [2]}\n \"\"\"\n df_in = pd.DataFrame({'sample_num': [1, 2]})\n df_exp = df_in.assign(myparam_42=[1, 1], myparam_113=[2, 2])\n\n df_out = rs.add_parameters(df_in, \"sampled_parameters\", yaml_load(config), None, ['42', '113'])\n\n pd.testing.assert_frame_equal(df_out, df_exp)\n\n\ndef test_add_sampled_parameters_expand_age_same_value():\n # \"Expand\" age parameters even if everything has the same value\n config = \"\"\"\n sampled_parameters:\n myparam:\n expand_by_age: True\n np.random: choice\n function_kwargs: {'a': [1]}\n \"\"\"\n df_in = pd.DataFrame({'sample_num': [1, 2]})\n df_exp = df_in.assign(myparam_42=[1, 1], myparam_113=[1, 1])\n\n df_out = rs.add_parameters(df_in, \"sampled_parameters\", yaml_load(config), None, ['42', '113'])\n\n pd.testing.assert_frame_equal(df_out, df_exp)\n\n\ndef test_add_sampled_parameters_expand_age_with_defaults():\n # Verify that you can provide a \"default\" for all ages, and set a specific\n # parameter later.\n config = \"\"\"\n 
sampled_parameters:\n myparam:\n expand_by_age: True\n np.random: choice\n function_kwargs: {'a': [1]}\n myparam_0:\n np.random: choice\n function_kwargs: {'a': [2]}\n \"\"\"\n df_in = pd.DataFrame({'sample_num': [1, 2]})\n df_exp = df_in.assign(myparam_0=[2, 2], myparam_42=[1, 1], myparam_113=[1, 1])\n\n df_out = rs.add_parameters(\n df_in, \"sampled_parameters\", yaml_load(config), None, ['0', '42', '113'])\n\n pd.testing.assert_frame_equal(df_out, df_exp)\n\n\ndef test_add_sampled_parameters_expand_age_error():\n # We should get an error if the number of distributions doesn't match\n # the number of age bins.\n config = \"\"\"\n sampled_parameters:\n myparam:\n expand_by_age: True\n np.random: choice\n function_kwargs:\n - {'a': [1]}\n - {'a': [2]}\n \"\"\"\n df_in = pd.DataFrame({'sample_num': [1, 2]})\n with pytest.raises(ValueError, match=\"function_kwargs for myparam have 2 entries\"):\n rs.add_parameters(df_in, \"sampled_parameters\", yaml_load(config), None, ['0', '42', '113'])\n","repo_name":"numalariamodeling/covid-chicago","sub_path":"tests/test_runScenarios.py","file_name":"test_runScenarios.py","file_ext":"py","file_size_in_byte":5969,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"75"} +{"seq_id":"36828025375","text":"# !/usr/bin/env python3\n# -*- encoding: utf-8 -*-\n\nimport re,os,re\nfrom urllib.parse import urlparse\nfrom lib.common import readConfig\nfrom lib.common.utils import Utils\nfrom lib.Database import DatabaseType\nfrom lib.common.cmdline import CommandLines\n\n\nclass BeautyJs():\n\n def __init__(self,projectTag):\n self.projectTag = projectTag\n\n def beauty_js(self,filePath):\n lines = open(filePath, encoding=\"utf-8\",errors=\"ignore\").read().split(\";\")\n indent = 0\n formatted = []\n for line in lines:\n newline = []\n for char in line:\n newline.append(char)\n if char == '{':\n indent += 1\n newline.append(\"\\n\")\n newline.append(\"\\t\" * indent)\n if char == \"}\":\n indent -= 1\n newline.append(\"\\n\")\n newline.append(\"\\t\" * indent)\n formatted.append(\"\\t\" * indent + \"\".join(newline))\n open(filePath, \"w\", encoding=\"utf-8\",errors=\"ignore\").writelines(\";\\n\".join(formatted))\n\n def rewrite_js(self):\n projectPath = DatabaseType(self.projectTag).getPathfromDB()\n for parent, dirnames, filenames in os.walk(projectPath, followlinks=True):\n for filename in filenames:\n if filename != self.projectTag + \".db\":\n filePath = os.path.join(parent, filename)\n BeautyJs(self.projectTag).beauty_js(filePath)\n","repo_name":"rtcatc/Packer-Fuzzer","sub_path":"lib/common/beautyJS.py","file_name":"beautyJS.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","stars":2442,"dataset":"github-code","pt":"75"} +{"seq_id":"3765240601","text":"\"\"\"wideresnet model in tf.keras for 32*32 inputs\"\"\"\nfrom tensorflow.keras.layers import BatchNormalization, Conv2D, AveragePooling2D, Dense, Activation, Flatten, Dropout, \\\n Add\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.initializers import VarianceScaling\nfrom .commons import se_block\n\n\ndef build_wrn(inputs, depth, regularizer, num_classes=10, drop_rate=0.0, se_factor=0, channels_dict=None, width=None):\n \"\"\"builds a wideresnet model given a channels_dict\n if channels_dict is unspecified, width is used instead\"\"\"\n assert ((depth - 4) % 6 == 0) # 4 = the initial conv layer + the 3 conv1*1 when we change the width\n n = int((depth - 4) / 6)\n\n first_layer_name = \"Conv_0\"\n\n 
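# no channels_dict given: derive one from a uniform 'width', using the same\n # width for every conv and skip in a stage and doubling it after each stage\n 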
if channels_dict is None:\n assert (width is not None)\n channels_dict = {\"Conv_0\": 16}\n for i in range(1, 4):\n channels_dict[f\"Skip_{i}\"] = width\n for j in range(n):\n channels_dict[f\"Conv_{i}_{j}_1\"] = width\n channels_dict[f\"Conv_{i}_{j}_2\"] = width\n width *= 2\n\n # 1st conv before any network block\n x = Conv2D(channels_dict[first_layer_name], kernel_size=3, padding=\"same\", use_bias=False,\n name=first_layer_name, kernel_initializer=VarianceScaling(scale=2.0, mode='fan_out'),\n kernel_regularizer=regularizer)(inputs)\n\n # 1st block\n x = wrn_sub_network(x, 1, n, 1, drop_rate, channels_dict, regularizer=regularizer, se_factor=se_factor)\n # 2nd block\n x = wrn_sub_network(x, 2, n, 2, drop_rate, channels_dict, regularizer=regularizer, se_factor=se_factor)\n # 3rd block\n x = wrn_sub_network(x, 3, n, 2, drop_rate, channels_dict, regularizer=regularizer, se_factor=se_factor)\n # global average pooling and classifier\n x = BatchNormalization(beta_regularizer=regularizer, gamma_regularizer=regularizer)(x)\n x = Activation('relu')(x)\n x = AveragePooling2D(pool_size=8)(x)\n x = Flatten()(x)\n outputs = Dense(units=num_classes, activation=None,\n kernel_initializer=VarianceScaling(scale=1/3, mode='fan_in', distribution=\"uniform\"),\n kernel_regularizer=regularizer, bias_regularizer=regularizer)(x)\n\n return Model(inputs=inputs, outputs=outputs)\n\n\ndef wrn_sub_network(x, subnet_id, nb_layers, stride, drop_rate, channels_dict, regularizer, se_factor):\n \"\"\"creates a w.r.n subnetwork with the given parameters, x denotes the input, the output is returned\"\"\"\n for i in range(nb_layers):\n conv1_name = f\"Conv_{subnet_id}_{i}_1\"\n if conv1_name not in channels_dict or channels_dict[conv1_name] == 0:\n if i == 0: # in the case the first layer was skipped, we have to use the skip 1*1 convolution as\n # input for the second layer\n skip_name = f\"Skip_{subnet_id}\"\n x = BatchNormalization(beta_regularizer=regularizer, gamma_regularizer=regularizer)(x)\n x = Activation('relu')(x)\n x = Conv2D(channels_dict[skip_name], kernel_size=1, padding=\"same\", use_bias=False, strides=stride,\n name=skip_name, kernel_initializer=VarianceScaling(scale=2.0, mode='fan_out'),\n kernel_regularizer=regularizer)(x)\n\n # otherwise there is nothing to add to the WRN\n else:\n x = wrn_block(x, subnet_id, i, stride if i == 0 else 1, drop_rate, channels_dict, regularizer=regularizer,\n se_factor=se_factor)\n\n return x\n\n\ndef wrn_block(x, subnet_id, block_offset, stride, drop_rate, channels_dict, regularizer, se_factor):\n \"\"\"creates a w.r.n block with the given parameters, the output is returned\"\"\"\n conv1_name = f\"Conv_{subnet_id}_{block_offset}_1\"\n conv2_name = f\"Conv_{subnet_id}_{block_offset}_2\"\n\n x_bn_a_1 = BatchNormalization(beta_regularizer=regularizer, gamma_regularizer=regularizer)(x)\n x_bn_a_1 = Activation('relu')(x_bn_a_1)\n out = Conv2D(channels_dict[conv1_name], kernel_size=3, padding=\"same\", use_bias=False, strides=stride,\n name=conv1_name, kernel_initializer=VarianceScaling(scale=2.0, mode='fan_out'),\n kernel_regularizer=regularizer)(x_bn_a_1)\n\n if drop_rate > 0:\n out = Dropout(rate=drop_rate)(out)\n\n out = BatchNormalization(beta_regularizer=regularizer, gamma_regularizer=regularizer)(out)\n out = Activation('relu')(out)\n out = Conv2D(channels_dict[conv2_name], kernel_size=3, padding=\"same\", use_bias=False, strides=1,\n name=conv2_name, kernel_initializer=VarianceScaling(scale=2.0, mode='fan_out'),\n kernel_regularizer=regularizer)(out)\n\n if 
block_offset == 0: # skip_layer\n skip_name = f\"Skip_{subnet_id}\"\n x = Conv2D(channels_dict[skip_name], kernel_size=1, padding=\"same\", use_bias=False, strides=stride,\n name=skip_name, kernel_initializer=VarianceScaling(scale=2.0, mode='fan_out'),\n kernel_regularizer=regularizer)(x_bn_a_1)\n\n if se_factor != 0 and stride == 1:\n x = se_block(x, channels_dict[conv2_name], se_factor, regularizer)\n\n return Add()([out, x])\n","repo_name":"NatGr/Master_Thesis","sub_path":"training_from_scratch/models/wideresnet.py","file_name":"wideresnet.py","file_ext":"py","file_size_in_byte":5150,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"75"} +{"seq_id":"28876344873","text":"class Solution:\n def validPath(self, n: int, edges: List[List[int]], source: int, destination: int) -> bool:\n \n graph = {}\n \n for i in edges:\n if i[0] not in graph.keys():\n graph[i[0]]=[]\n if i[1] not in graph.keys():\n graph[i[1]]=[]\n graph[i[0]].append(i[1])\n graph[i[1]].append(i[0])\n if len(edges)==0:\n if source==destination:\n return True\n else:\n return False\n \n visited=set()\n \n #print(visited)\n #rint(graph)\n result=False\n @lru_cache(maxsize=1000)\n def helper(curr_node):\n nonlocal result\n nonlocal destination\n nonlocal visited\n nonlocal graph\n #rint(curr_node)\n visited.add(curr_node)\n \n if result==True:\n return\n elif curr_node==destination:\n result = True\n return\n else:\n for i in graph[curr_node]:\n if i not in visited:\n helper(i)\n return\n \n helper(source)\n \n return result","repo_name":"ddangwal1909/Leetcode_Solutions","sub_path":"1971-find-if-path-exists-in-graph/1971-find-if-path-exists-in-graph.py","file_name":"1971-find-if-path-exists-in-graph.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"43276019782","text":"from Models.Client import Client\nfrom Models.Card import Card\nfrom Requests.ClienteRequest import ClienteRequest\nfrom Requests.ClienteRequest import ClientUpdateRequest\nfrom fastapi import APIRouter, HTTPException\nfrom datetime import date\nfrom fastapi.responses import JSONResponse\nfrom responseHelper import *\n\n\ncliente_routes = APIRouter(tags=[\"Client\"])\n\n\n@cliente_routes.post(\"/affiliate\", responses={\n 200: set_custom_response(\"OK\", {\"message\": \"Client registered successfully\", \"numTarjeta\": 1, \"numCliente\": 1}),\n 401: set_401_response(),\n 409: set_409_response()\n})\nasync def client_create(c: ClienteRequest):\n client = Client.select().where((Client.phoneNumber == c.telefono) | (Client.email == c.correo)).first()\n if client:\n raise HTTPException(detail={\"message\": \"There's already an existing record\"}, status_code=409)\n new_client = Client.create(\n name=c.nombre,\n lastname=c.apellidos,\n phoneNumber=c.telefono,\n email=c.correo,\n address=c.direccion\n )\n num_cliente = new_client.id\n card = Card.create(\n client_id=num_cliente,\n register_date=date.today()\n )\n response = JSONResponse({\n \"message\": \"Client registered successfully\",\n \"numTarjeta\": card.id,\n \"numCliente\": num_cliente\n })\n return response\n\n\n@cliente_routes.get(\"/{client_id}\", responses={\n 200: set_custom_response(\"OK\", {\n \"name\": \"farid\",\n \"lastname\": \"castillo\",\n \"phoneNumber\": \"123-456-78-90\",\n \"address\": \"address\",\n \"email\": \"example@gmail.com\",\n \"card_id\": 1\n }),\n 401: set_401_response(),\n 404: set_404_response()\n})\nasync def get_client(client_id: 
int):\n client = Client.select().where(Client.id == client_id).first()\n if client:\n card = Card.select().where(Card.client_id == client.id).first()\n r = JSONResponse({**client.todict(), \"card_id\": card.id})\n return r\n else:\n raise HTTPException(detail={\"message\": \"Client not found\"}, status_code=404)\n\n\n@cliente_routes.put(\"/update/{client_id}\", responses={\n 200: set_custom_response(\"OK\", {\"message\": \"Update successfully\"}),\n 400: set_custom_response(\"Bad request\", {\"detail\": {\"message\": \"The request is empty\"}}),\n 401: set_401_response(),\n 404: set_404_response(),\n 409: set_409_response()\n})\nasync def update_client(client_id, c: ClientUpdateRequest):\n client = Client.select().where(Client.id == client_id).first()\n if client is None:\n raise HTTPException(404, detail={\"detail\": {\"message\": \"Client not found\"}})\n existing_data = Client.select().where((Client.email == c.correo) | (Client.phoneNumber == c.telefono)).first()\n if existing_data:\n raise HTTPException(409, detail={\"detail\": {\"message\": \"There's already an existing record\"}})\n if c.correo is not None:\n client.email = c.correo\n if c.nombre is not None:\n client.name = c.nombre\n if c.apellidos is not None:\n client.lastname = c.apellidos\n if c.telefono is not None:\n client.phoneNumber = c.telefono\n if c.direccion is not None:\n client.address = c.direccion\n if c.correo is None and c.telefono is None and c.nombre is None and c.apellidos is None and c.direccion is None:\n raise HTTPException(detail={\"message\": \"The request is empty\"}, status_code=400)\n client.save()\n return JSONResponse({\"message\": \"Update successfully\"})\n","repo_name":"FaridCG343/FarmaciauwuAPI","sub_path":"Controllers/ClientController.py","file_name":"ClientController.py","file_ext":"py","file_size_in_byte":3433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"1283938869","text":"# -*- coding: utf8 -*-\nimport os\nimport sys\n\nfrom flask import Flask, session\nfrom flask_admin import Admin, BaseView, expose\nfrom flask_admin.contrib.sqla import ModelView\nfrom flask_babelex import Babel\nfrom flask_sqlalchemy import SQLAlchemy\n\nimport src.common.util as util\nfrom src import __version__\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\napp = Flask(__name__)\nbabel = Babel(app)\napp.config['BABEL_DEFAULT_LOCALE'] = 'zh_CN'\n\n\n@babel.localeselector\ndef get_locale():\n override = 'zh_CN'\n if override:\n session['lang'] = override\n return session.get('lang', 'en')\n\n\n# set optional bootswatch theme\napp.config['FLASK_ADMIN_SWATCH'] = 'cerulean'\napp.config['SECRET_KEY'] = '123456'\nadmin = Admin(app, name='zyw-shadow', template_mode='bootstrap3')\n\n\n# Flask-SQLAlchemy initialization here\n# configuration object for the database connection\nclass Config(object):\n \"\"\"Configuration parameters\"\"\"\n # SQLAlchemy configuration parameters\n SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db')\n\n # have SQLAlchemy track database changes automatically (manual table updates are synced back to the objects)\n SQLALCHEMY_TRACK_MODIFICATIONS = True\n\n # show the raw SQL statements when querying\n SQLALCHEMY_ECHO = True\n\n\n# load the database configuration object\napp.config.from_object(Config)\n\n# create the SQLAlchemy database helper object\ndb = SQLAlchemy(app)\n\n\nclass User(db.Model):\n __tablename__ = \"user\" # name of the table to create\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(80), unique=True, nullable=False)\n email = db.Column(db.String(120), unique=True, nullable=False)\n extend_one = db.Column(db.String(81), unique=False)\n extend_two = db.Column(db.String(80), unique=False)\n note = 
db.Column(db.String(255), unique=False)\n\n def __repr__(self):\n return '<%s %s>' % (self.name, self.email)\n\n\nclass UserStock(db.Model):\n __tablename__ = \"user_stock\" # name of the table to create\n id = db.Column(db.Integer, primary_key=True)\n stock_code = db.Column(db.String(80), db.ForeignKey('stock.code'), nullable=False)\n user_name = db.Column(db.String(80), db.ForeignKey('user.name'), nullable=False)\n # user = db.relationship('User', backref=db.backref('stocks', lazy='dynamic'))\n user = db.relationship('User')\n stock = db.relationship('Stock')\n extend_one = db.Column(db.String(80), unique=False)\n extend_two = db.Column(db.String(80), unique=False)\n note = db.Column(db.String(255), unique=False)\n\n\nclass Stock(db.Model):\n __tablename__ = \"stock\" # name of the table to create\n id = db.Column(db.Integer, primary_key=True)\n code = db.Column(db.String(80), unique=True, nullable=False)\n name = db.Column(db.String(80), unique=False, nullable=False)\n listing_date = db.Column(db.String(80), unique=False) # listing date\n hit_new_date = db.Column(db.String(80), unique=False) # IPO subscription date\n second_bord = db.Column(db.String(80), unique=False)\n extend_one = db.Column(db.String(80), unique=False)\n extend_two = db.Column(db.String(80), unique=False)\n note = db.Column(db.String(255), unique=False)\n\n def __repr__(self):\n return '<%s %s listing date %s>' % (self.code, self.name, self.listing_date)\n\n\nadmin.add_view(ModelView(UserStock, db.session))\nadmin.add_view(ModelView(User, db.session))\nadmin.add_view(ModelView(Stock, db.session))\n\n\n# Add administrative views here\nclass MyView(BaseView):\n @expose('/')\n def index(self):\n return self.render('index.html')\n\n\nadmin.add_view(MyView(name='Hello'))\n\n\n@app.route('/')\ndef index():\n return 'hello'\n\n\ndef run():\n args = util.arg_conf()\n print('version:' + __version__)\n length = len(sys.argv)\n app.run(\n host=args.bind,\n port=args.port,\n debug=args.debug\n )\n\n\nif __name__ == '__main__':\n run()\n","repo_name":"zyw19871007/FlaskStarter","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"19401113231","text":"# def print_r(i):\n# if i <=-100:\n# return 0\n# print(i)\n# print_r(i-1)\n\n# print_r(10)\n\n\n\ndef merge_sort(ls):\n if len(ls) >= 2:\n mid=len(ls)//2\n l=ls[:mid]\n r=ls[mid:]\n merge_sort(l)\n merge_sort(r)\n # merge the two sorted halves back into ls\n i = j = k = 0\n while k < len(ls):\n if j >= len(r) or (i < len(l) and l[i] <= r[j]):\n ls[k] = l[i]; i += 1\n else:\n ls[k] = r[j]; j += 1\n k += 1\n ","repo_name":"lavanya2710/python-all-programs","sub_path":"test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"42296819426","text":"from math import sqrt\nimport time\n\nfrom xpra.log import Logger\nlog = Logger()\n\nfrom xpra.deque import maxdeque\nfrom xpra.server.stats.maths import logp, calculate_time_weighted_average, calculate_for_target, queue_inspect\nfrom xpra.simple_stats import add_list_stats\n\nNRECS = 500\ndebug = log.debug\n\n\nclass GlobalPerformanceStatistics(object):\n \"\"\"\n Statistics which are shared by all WindowSources\n \"\"\"\n def __init__(self):\n self.reset()\n\n #assume 100ms until we get some data to compute the real values\n DEFAULT_LATENCY = 0.1\n\n def reset(self):\n # mmap state:\n self.mmap_size = 0\n self.mmap_bytes_sent = 0\n self.mmap_free_size = 0 #how much of the mmap space is left (may be negative if we failed to write the last chunk)\n # queue statistics:\n self.damage_data_qsizes = maxdeque(NRECS) #size of the damage_data_queue 
before we add a new record to it\n #(event_time, size)\n self.damage_packet_qsizes = maxdeque(NRECS) #size of the damage_packet_queue before we add a new packet to it\n #(event_time, size)\n self.damage_packet_qpixels = maxdeque(NRECS) #number of pixels waiting in the damage_packet_queue for a specific window,\n #before we add a new packet to it\n #(event_time, wid, size)\n self.damage_last_events = maxdeque(NRECS) #records the x11 damage requests as they are received:\n #(wid, event time, no of pixels)\n self.client_decode_time = maxdeque(NRECS) #records how long it took the client to decode frames:\n #(wid, event_time, no of pixels, decoding_time*1000*1000)\n self.client_latency = maxdeque(NRECS) #how long it took for a packet to get to the client and get the echo back.\n #(wid, event_time, no of pixels, client_latency)\n self.client_ping_latency = maxdeque(NRECS) #time it took to get a ping_echo back from the client:\n #(event_time, elapsed_time_in_seconds)\n self.server_ping_latency = maxdeque(NRECS) #time it took for the client to get a ping_echo back from us:\n #(event_time, elapsed_time_in_seconds)\n self.client_load = None\n self.damage_events_count = 0\n self.packet_count = 0\n #these values are calculated from the values above (see update_averages)\n self.min_client_latency = self.DEFAULT_LATENCY\n self.avg_client_latency = self.DEFAULT_LATENCY\n self.recent_client_latency = self.DEFAULT_LATENCY\n self.min_client_ping_latency = self.DEFAULT_LATENCY\n self.avg_client_ping_latency = self.DEFAULT_LATENCY\n self.recent_client_ping_latency = self.DEFAULT_LATENCY\n self.min_server_ping_latency = self.DEFAULT_LATENCY\n self.avg_server_ping_latency = self.DEFAULT_LATENCY\n self.recent_server_ping_latency = self.DEFAULT_LATENCY\n\n def record_latency(self, wid, decode_time, start_send_at, end_send_at, pixels, bytecount):\n now = time.time()\n send_diff = now-start_send_at\n echo_diff = now-end_send_at\n send_latency = max(0, send_diff-decode_time/1000.0/1000.0)\n echo_latency = max(0, echo_diff-decode_time/1000.0/1000.0)\n debug(\"record_latency: took %.1f ms round trip (%.1f just for echo), %.1f for decoding of %s pixels, %s bytes sent over the network in %.1f ms (%.1f ms for echo)\",\n send_diff*1000, echo_diff*1000, decode_time/1000, pixels, bytecount, send_latency*1000, echo_latency*1000)\n if self.min_client_latency is None or self.min_client_latency>send_latency:\n self.min_client_latency = send_latency\n self.client_latency.append((wid, time.time(), pixels, send_latency))\n\n def get_damage_pixels(self, wid):\n \"\"\" returns the list of (event_time, pixelcount) for the given window id \"\"\"\n return [(event_time, value) for event_time, dwid, value in list(self.damage_packet_qpixels) if dwid==wid]\n\n def update_averages(self):\n if len(self.client_latency)>0:\n data = [(when, latency) for _, when, _, latency in list(self.client_latency)]\n self.min_client_latency = min([x for _,x in data])\n self.avg_client_latency, self.recent_client_latency = calculate_time_weighted_average(data)\n #client ping latency: from ping packets\n if len(self.client_ping_latency)>0:\n data = list(self.client_ping_latency)\n self.min_client_ping_latency = min([x for _,x in data])\n self.avg_client_ping_latency, self.recent_client_ping_latency = calculate_time_weighted_average(data)\n #server ping latency: from ping packets\n if len(self.server_ping_latency)>0:\n data = list(self.server_ping_latency)\n self.min_server_ping_latency = min([x for _,x in data])\n self.avg_server_ping_latency, 
self.recent_server_ping_latency = calculate_time_weighted_average(data)\n\n def get_factors(self, target_latency, pixel_count):\n factors = []\n if len(self.client_latency)>0:\n #client latency: (we want to keep client latency as low as can be)\n metric = \"client-latency\"\n l = 0.005 + self.min_client_latency\n wm = logp(l / 0.020)\n factors.append(calculate_for_target(metric, l, self.avg_client_latency, self.recent_client_latency, aim=0.8, slope=0.005, smoothing=sqrt, weight_multiplier=wm))\n if len(self.client_ping_latency)>0:\n metric = \"client-ping-latency\"\n l = 0.005 + self.min_client_ping_latency\n wm = logp(l / 0.050)\n factors.append(calculate_for_target(metric, l, self.avg_client_ping_latency, self.recent_client_ping_latency, aim=0.95, slope=0.005, smoothing=sqrt, weight_multiplier=wm))\n if len(self.server_ping_latency)>0:\n metric = \"server-ping-latency\"\n l = 0.005 + self.min_server_ping_latency\n wm = logp(l / 0.050)\n factors.append(calculate_for_target(metric, l, self.avg_server_ping_latency, self.recent_server_ping_latency, aim=0.95, slope=0.005, smoothing=sqrt, weight_multiplier=wm))\n #damage packet queue size: (includes packets from all windows)\n factors.append(queue_inspect(\"damage-packet-queue-size\", self.damage_packet_qsizes, smoothing=sqrt))\n #damage packet queue pixels (global):\n qpix_time_values = [(event_time, value) for event_time, _, value in list(self.damage_packet_qpixels)]\n factors.append(queue_inspect(\"damage-packet-queue-pixels\", qpix_time_values, div=pixel_count, smoothing=sqrt))\n #damage data queue: (This is an important metric since each item will consume a fair amount of memory and each will later on go through the other queues.)\n factors.append(queue_inspect(\"damage-data-queue\", self.damage_data_qsizes))\n if self.mmap_size>0:\n #full: effective range is 0.0 to ~1.2\n full = 1.0-float(self.mmap_free_size)/self.mmap_size\n #aim for ~33%\n factors.append((\"mmap-area\", \"%s%% full\" % int(100*full), logp(3*full), (3*full)**2))\n return factors\n\n def add_stats(self, info, suffix=\"\"):\n info[\"damage.events%s\" % suffix] = self.damage_events_count\n info[\"damage.packets_sent%s\" % suffix] = self.packet_count\n info[\"client.connection.mmap_bytecount%s\" % suffix] = self.mmap_bytes_sent\n if self.min_client_latency is not None:\n info[\"client.latency%s.absmin\" % suffix] = int(self.min_client_latency*1000)\n qsizes = [x for _,x in list(self.damage_data_qsizes)]\n add_list_stats(info, \"damage.data_queue.size%s\" % suffix, qsizes)\n qsizes = [x for _,x in list(self.damage_packet_qsizes)]\n add_list_stats(info, \"damage.packet_queue.size%s\" % suffix, qsizes)\n latencies = [x*1000 for (_, _, _, x) in list(self.client_latency)]\n add_list_stats(info, \"client.latency%s\" % suffix, latencies)\n\n add_list_stats(info, \"server.ping_latency%s\" % suffix, [1000.0*x for _, x in list(self.server_ping_latency)])\n add_list_stats(info, \"client.ping_latency%s\" % suffix, [1000.0*x for _, x in list(self.client_ping_latency)])\n\n #client pixels per second:\n now = time.time()\n time_limit = now-30 #ignore old records (30s)\n #pixels per second: decode time and overall\n total_pixels = 0 #total number of pixels processed\n total_time = 0 #total decoding time\n start_time = None #when we start counting from (oldest record)\n region_sizes = []\n for _, event_time, pixels, decode_time in list(self.client_decode_time):\n #time filter and ignore failed decoding (decode_time==0)\n if event_time<time_limit or decode_time<=0:\n continue\n if start_time is None or start_time>event_time:\n start_time = event_time\n total_pixels += 
pixels\n total_time += decode_time\n region_sizes.append(pixels)\n debug(\"total_time=%s, total_pixels=%s\", total_time, total_pixels)\n if total_time>0:\n pixels_decoded_per_second = int(total_pixels *1000*1000 / total_time)\n info[\"encoding.pixels_decoded_per_second%s\" % suffix] = pixels_decoded_per_second\n if start_time:\n elapsed = now-start_time\n pixels_per_second = int(total_pixels/elapsed)\n info[\"encoding.pixels_per_second%s\" % suffix] = pixels_per_second\n info[\"encoding.regions_per_second%s\" % suffix] = int(len(region_sizes)/elapsed)\n info[\"encoding.average_region_size%s\" % suffix] = int(total_pixels/len(region_sizes))\n","repo_name":"dscho/Xpra","sub_path":"src/xpra/server/source_stats.py","file_name":"source_stats.py","file_ext":"py","file_size_in_byte":10157,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"75"} +{"seq_id":"40470600104","text":"import requests as rq\nimport re\nimport os\nimport os.path\nimport time\n\nindexURL = 'http://www.meizitu.com/a/'\nheader = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.87 Safari/537.36'}\n\ndef run(url,page=1):\n url = url + str(page) + '.html'\n html = rq.get(url,headers=header)\n document = html.text\n results = re.findall(r'scrollLoading.+?src=\\\"(.+?)\\\"',document)\n for i in range(len(results)):\n html = rq.get(results[i], headers=header)\n content = html.content\n path = 'meizi/'+str(page)+'/'\n if not os.path.exists(path):\n os.makedirs(path)\n with open(path+str(i)+\".jpg\",'wb') as f:\n f.write(content)\n f.flush()\n print('page=%4i, jpg=%2i has down'%(page,i))\n time.sleep(2)\n\ndef manager(start=1,end=50):\n start = max(1,start)\n end = min(1000, end)\n for i in range(start,end):\n run(indexURL,i)\n\n\nif __name__ == '__main__':\n manager()\n","repo_name":"darkalex2014/jiandan","sub_path":"py3-meizitu/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"9765253472","text":"import random\nfrom flask import render_template, Blueprint, request, redirect, url_for\nfrom flask.ext.login import current_user\nfrom sqlalchemy.sql.expression import func\n\nfrom .forms import ClassifyForm\nfrom project import db\nfrom project.models import VisitedPhoto, Classification, Feature, Photo\n\n# define blueprints\nclassify_blueprint = Blueprint(\n 'classify', __name__,\n template_folder='templates'\n)\n\ndef mark_photo_visited(random_img):\n visited_photo = VisitedPhoto(\n user_id = current_user.id,\n photo_id = random_img.id\n )\n db.session.add(visited_photo)\n db.session.commit()\n\n# delete all items from visited photo for the current user\ndef clear_visited_photos():\n sql = 'delete from visited_photos where user_id =' + str(current_user.id)\n db.engine.execute(sql)\n\n# count the number of photos not visited\ndef available_photos_count():\n return Photo.query.count() - VisitedPhoto.query.count()\n\ndef random_available_image():\n count = available_photos_count()\n # if there are 2 or more unvisited photos, show a random unvisited photo\n if count > 1:\n offset = random.randrange(1, count)\n # if there is one 1 unvisited photo, show the photo\n elif count == 1:\n offset = 0\n # else delete all visited photos for the current user and show first image\n else:\n if current_user.is_authenticated():\n clear_visited_photos()\n offset = 0\n # select one photo that has not been visited\n sql = 
'select id from photos where id not in (select photo_id from visited_photos) limit 1 offset ' + str(offset)\n row = db.engine.execute(sql).first()\n return Photo.query.filter_by(id=row.id).first()\n\ndef classify_photo(request):\n # fields: csrf_token, selected features, hidden photo_id.\n # loop all the fields.\n for field in request.form:\n # ignore csrf_token and photo_id fields\n if field != 'csrf_token' and field != 'photo_id':\n # find feature in the database\n feature = Feature.query.filter_by(slug=field).first()\n\n # create new classification object\n classification = Classification(\n user_id = current_user.id,\n feature_id = feature.id,\n photo_id = request.form['photo_id']\n )\n\n # save to database\n db.session.add(classification)\n db.session.commit()\n # redirect after saving data\n return redirect(url_for('classify.index'))\n\n@classify_blueprint.route('/classify', methods = ['POST', 'GET'])\ndef index():\n # set up form\n error = None\n form = ClassifyForm(request.form)\n # grab random image that has not been visited\n random_img = random_available_image()\n\n if current_user.is_authenticated():\n template = 'classify.html'\n # mark current photo visited so the image won't be shown again\n if available_photos_count() > 0:\n mark_photo_visited(random_img)\n else:\n template = 'classify_static.html'\n\n if request.method == 'POST' and form.validate():\n # request.form contains a dictionary of all the fields in the form\n classify_photo(request)\n return redirect(url_for('classify.index'))\n\n # render the template\n return render_template(template,\n current_user = current_user,\n form = form,\n error = error,\n photo = random_img\n )\n\n","repo_name":"LearnTeachCode/marsrocks","sub_path":"project/classify/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3405,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"21016183856","text":"# === Imports ===\nimport time\nimport numpy as np\n\nimport pipes\nimport Live_Plot_Data_Point as livePlotter\nfrom utilities import DataLoggerUtility as dlu\n\n\n\n# === Main ===\ndef run(parameters, smu_systems, arduino_systems, share=None):\n\t# This script uses the default Arduino, which is the first one in the list of SMU systems\n\tarduino_names = list(arduino_systems.keys())\n\tarduino_reference = arduino_systems[arduino_names[0]]\n\n\t# Get shorthand name to easily refer to configuration parameters\n\ts_parameters = parameters['runConfigs']['Sensor']\n\n\t# Print the starting message\n\tprint('Measuring sensor, with parameters: ' + str(s_parameters))\n\n\t# === START ===\t\n\n\tprint('Starting prothrombin time measurements.')\n\tresults = runSensor( arduino_reference,\n\t\t\t\t\t\t\ttotal_duration=s_parameters['totalDuration'],\n\t\t\t\t\t\t\tdelay_between_measurements=s_parameters['delayBetweenMeasurements'],\n\t\t\t\t\t\t\tshare=share)\n\t\n\t# === COMPLETE ===\n\n\t# Add important metrics from the run to the parameters for easy access later in ParametersHistory\n\tparameters['Computed'] = results['Computed']\n\n\t# Print the metrics\n\tfor metric, value in results['Computed'].items():\n\t\tprint(f\"'{metric}': {value}\")\n\n\t# Copy parameters and add in the test results\n\tjsonData = dict(parameters)\n\tjsonData['Results'] = results['Raw']\n\n\t# Save results as a JSON object\n\tprint('Saving JSON: ' + str(dlu.getDeviceDirectory(parameters)))\n\tdlu.saveJSON(dlu.getDeviceDirectory(parameters), s_parameters['saveFileName'], jsonData, 
subDirectory=parameters['experimentSubFolder']+str(parameters['startIndexes']['experimentNumber']))\n\n\treturn jsonData\n\n# === Data Collection ===\ndef runSensor(arduino_reference, total_duration, delay_between_measurements, share=None):\n\tdata = {}\n\ttimestamps = []\n\tstart_time = time.time()\n\t\n\twhile(time.time() < start_time + total_duration):\n\t\t# Send a progress message\n\t\tindex = len(timestamps)\n\t\tpipes.progressUpdate(share, 'Sensor Data', start=0, current=index, end=int(index * total_duration / (time.time() - start_time)), enableAbort=True)\n\n\t\tif(delay_between_measurements > 0):\n\t\t\ttime.sleep(delay_between_measurements)\n\n\t\t# Take Measurement and timestamp it\n\t\tmeasurement = arduino_reference.takeMeasurement()\n\t\ttimestamp = time.time()\n\t\t\n\t\tfor key, value in measurement.items():\n\t\t\tif(key not in data): \n\t\t\t\tdata[key] = []\n\t\t\tdata[key].append(value)\n\t\ttimestamps.append(timestamp)\n\n\t\t# Send a data message\n\t\tfor key, value in measurement.items():\n\t\t\tpoints = [value] if(not isinstance(value, list)) else value\n\t\t\t\n\t\t\tpipes.livePlotUpdate(share, plots=\n\t\t\t[livePlotter.createLiveDataPoint(plotID=key, \n\t\t\t\t\t\t\t\t\t\t\tlabels=[f\"{key}{i+1 if(len(points) > 1) else ''}\" for i in range(len(points))],\n\t\t\t\t\t\t\t\t\t\t\txValues=[timestamp]*len(points), \n\t\t\t\t\t\t\t\t\t\t\tyValues=points, \n\t\t\t\t\t\t\t\t\t\t\txAxisTitle='Time (s)', \n\t\t\t\t\t\t\t\t\t\t\tyAxisTitle=f\"Sensor ({key})\", \n\t\t\t\t\t\t\t\t\t\t\tyscale='linear', \n\t\t\t\t\t\t\t\t\t\t\tplotMode='lines',\n\t\t\t\t\t\t\t\t\t\t\tenumerateLegend=False,\n\t\t\t\t\t\t\t\t\t\t\ttimeseries=True),\n\t\t\t])\n\t\t\t\n\treturn {\n\t\t'Raw': {'timestamps':timestamps, **data},\n\t\t'Computed':{}\n\t}\n\n\n\t","repo_name":"stevennoyce/AutexysHost","sub_path":"source/procedures/Sensor.py","file_name":"Sensor.py","file_ext":"py","file_size_in_byte":3037,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"40976333397","text":"# -*- coding: utf-8 -*-\n\nif __name__ == '__main__':\n # Constants\n notas = [2, 5, 10, 20, 50, 100]\n # Input\n entrada = input()\n # Stop condition\n while(entrada != '0 0'):\n valor_compra = int(entrada.split(' ')[0])\n valor_pago = int(entrada.split(' ')[1])\n # Change amount\n troco = valor_pago - valor_compra\n # Number of notes used to give the change\n qt_nota = 0\n # Walk the list of notes (in reverse order), always taking\n # the largest note that still fits in the remaining change\n while(troco > 0):\n for i in range(len(notas)-1, -1, -1):\n if(troco >= notas[i]):\n troco -= notas[i]\n qt_nota += 1\n break\n # If no note can make up the remaining change\n else:\n break\n\n # Check the required solution (number of notes and total change given)\n if(qt_nota == 2 and troco == 0):\n print('possible')\n else:\n print('impossible')\n # Next input\n entrada = input()","repo_name":"CleitonSilvaT/URI_Python","sub_path":"1-Iniciante/2140.py","file_name":"2140.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"6466110962","text":"import os\nimport flask\nimport cloudinary\nimport sys\nfrom loguru import logger\nfrom pathlib import Path\n\nfrom server.consts import DEBUG_MODE, LOG_RETENTION\nfrom server.app_config import Config\nfrom server.api.blueprints import login, user, teacher, student, appointments, topics\nfrom server.extensions import 
login_manager\nfrom server.api.database import database\nfrom server import error_handling\nfrom server.api import push_notifications, babel\n\n\ndef register_extensions_and_blueprints(flask_app):\n \"\"\"Register Flask extensions and blueprints (each has init_app method).\"\"\"\n for module in (\n database,\n login_manager,\n error_handling,\n login,\n appointments,\n topics,\n user,\n teacher,\n student,\n push_notifications,\n babel,\n ):\n module.init_app(flask_app)\n\n\ndef create_app(**test_config):\n \"\"\"An application factory, as explained here: http://flask.pocoo.org/docs/patterns/appfactories/.\n :param config: The configuration object to use.\n \"\"\"\n flask_app = flask.Flask(__name__)\n path_to_logs = Path(__file__).resolve().parents[1]\n log_file = str(path_to_logs / \"logs\" / \"dryvo_log.log\")\n logger.add(log_file, retention=LOG_RETENTION)\n logger.debug(\"Starting Flask app\")\n config = Config()\n config.update(test_config)\n flask_app.config.from_object(config)\n register_extensions_and_blueprints(flask_app)\n add_endpoints(flask_app)\n return flask_app\n\n\n# app = create_app(Config)\n\n\ndef add_endpoints(app):\n @app.route(\"/\")\n def home():\n return \"Debug mode enabled!\" if DEBUG_MODE else \"Production mode enabled!\"\n","repo_name":"AdamGold/Dryvo","sub_path":"server/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","stars":598,"dataset":"github-code","pt":"75"} +{"seq_id":"892005868","text":"import pandas as pd\nimport urllib.request, json\nimport requests\n\n\n\n# reading url\njson_file = dict()\nwith urllib.request.urlopen(\"https://iss.moex.com/iss/engines/stock/markets/bonds/securities.json\") as url:\n json_file = json.loads(url.read().decode())\n\n# Get data from json file\ndata = json_file[\"securities\"]['data']\n# Get columns' name from json file\ncols = list((json_file[\"securities\"][\"columns\"]))\nprint(cols)\n# Create DataFrame from the data and the columns\ndf = pd.DataFrame(data, columns=cols, dtype=None, copy=False)\ngood_column = ['SECID', 'SHORTNAME', 'PREVWAPRICE', 'YIELDATPREVWAPRICE', 'COUPONVALUE', 'NEXTCOUPON', 'FACEVALUE',\n 'ISIN', 'COUPONPERIOD', 'FACEUNIT', 'BUYBACKPRICE', 'LOTVALUE']\n\nfor column in cols:\n if not(column in good_column):\n df = df.drop(column, 1)\n\ndf = df.dropna()\n\nunique_names = list(set(df['SHORTNAME'].tolist()))[0:10]\nmask = df['SHORTNAME'].isin(unique_names)\ndf = df[mask]\ndf.to_csv('dataset.csv', encoding='utf-8')\n","repo_name":"abromoon/obli-bot","sub_path":"get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"73194713202","text":"from django.urls import path\n\nfrom . 
import views\n\n# Create namespace for the application\napp_name = \"search\"\n\nurlpatterns = [\n # Path: /search/\n path('', views.index, name='index'),\n # Path: /search/compare/\n # Referring to view \"product_compare\" in /search/views.py\n # Be careful to the order of the paths to avoid wrong referencing due to the more generic pattern\n # Here product_compare won't work if below product detail\n path('compare/', views.product_compare, name='product_compare'),\n # Path: /search/<product_id>/\n # Referring to view \"product_detail\" in /search/views.py\n path('<product_id>/', views.product_detail, name='product_detail'),\n\n]","repo_name":"Aurelien-GZL/Django_NutriFood","sub_path":"NutriFood/search/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"70959551921","text":"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\r\nfrom torch.autograd import Variable\r\nimport random\r\nimport numpy as np\r\n\r\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\r\n\r\n\r\nclass GRU(nn.Module):\r\n def __init__(self, input_dim, hidden_dim, bias=True):\r\n super(GRU, self).__init__()\r\n\r\n # Hidden dimensions\r\n self.hidden_dim = hidden_dim\r\n\r\n self.gru_cell = GRUCell(input_dim, hidden_dim)\r\n\r\n def forward(self, x):\r\n # x: b * l * d\r\n \r\n # b * d\r\n h0 = torch.zeros((x.size(0), self.hidden_dim), device=device)\r\n \r\n # b * d\r\n hn = h0\r\n for seq in range(x.size(1)):\r\n hn = self.gru_cell(x[:,seq,:], hn) \r\n out = hn \r\n\r\n return out\r\n\r\n\r\nclass GRUCell(nn.Module):\r\n def __init__(self, input_dim, hidden_dim, bias=True):\r\n super(GRUCell, self).__init__()\r\n self.input_dim = input_dim\r\n self.hidden_dim = hidden_dim\r\n \r\n self.bias = bias\r\n self.Wi = nn.Linear(input_dim, 3 * hidden_dim, bias=bias)\r\n self.Wh = nn.Linear(hidden_dim, 3 * hidden_dim, bias=bias)\r\n \r\n def forward(self, x, hk_1):\r\n # x: b * d\r\n # hk_1: b * d\r\n\r\n batch_size = x.shape[0]\r\n x = x.reshape(-1, x.shape[-1])\r\n \r\n # b * (3*d)\r\n W_i = self.Wi(x) \r\n W_h = self.Wh(hk_1)\r\n \r\n # b * d\r\n W_ir_i, W_iz_i, W_in_i = W_i.chunk(3, dim=-1) \r\n W_hr_h, W_hz_h, W_hn_h = W_h.chunk(3, dim=-1)\r\n\r\n # b * d\r\n r = torch.sigmoid(W_ir_i + W_hr_h)\r\n z = torch.sigmoid(W_iz_i + W_hz_h)\r\n n = torch.tanh(W_in_i + (r * W_hn_h))\r\n\r\n hy = (z * n + (1.0 - z) * hk_1) \r\n \r\n return hy\r\n\r\n\r\n\r\nclass dynamicGRU(nn.Module):\r\n def __init__(self, num_beh, input_dim, num_classes, capsule_length, bias=True):\r\n super(dynamicGRU, self).__init__()\r\n # Hidden dimensions\r\n self.num_beh = num_beh\r\n self.hidden_dim = num_classes * capsule_length\r\n self.num_classes = num_classes\r\n self.capsule_length = capsule_length\r\n\r\n self.gru_cell = dynamicGRUCell(num_beh, input_dim, num_classes, capsule_length)\r\n \r\n \r\n def forward(self, x, c, detach=False):\r\n # x: b * t * l * d\r\n # c: b * t * l * c * d2\r\n\r\n length = x.shape[2]\r\n H0 = torch.zeros((x.size(0), self.num_beh, self.num_classes, self.capsule_length), device=device)\r\n \r\n Ns = []\r\n\r\n # b * t * c * d2\r\n Hk = H0\r\n for k in range(length):\r\n # b * t * c * d2, b * t * c * d2\r\n Hk, N = self.gru_cell(x[:,:,k,:], Hk, c[:,:,k,:,:], detach=detach) \r\n Ns.append(N)\r\n\r\n # b * t * c * d2\r\n out = Hk\r\n\r\n # b * t * l * c * d2\r\n Ns = torch.stack(Ns, dim=2) \r\n \r\n 
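# return the final capsule state plus every step's stacked candidate state N\r\n 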
# b * t * c * d2, b * t * l * c * d2\r\n return out, Ns\r\n\r\n\r\nclass dynamicGRUCell(nn.Module):\r\n def __init__(self, num_beh, input_dim, num_classes, capsule_length, bias=True):\r\n super(dynamicGRUCell, self).__init__()\r\n self.num_beh = num_beh\r\n self.input_dim = input_dim\r\n self.num_classes = num_classes\r\n self.capsule_length = capsule_length\r\n self.bias = bias\r\n \r\n x_std = np.sqrt(1.0 / input_dim)\r\n h_std = np.sqrt(1.0 / capsule_length)\r\n\r\n\r\n # t * d * (c*3*d2)\r\n self.Wi = nn.Parameter(torch.rand((num_beh, input_dim, num_classes, 3 * capsule_length)) * 2*x_std - x_std)\r\n # t * d2 * (3*d2)\r\n self.Wh = nn.Parameter(torch.rand((num_beh, num_classes, capsule_length, 3 * capsule_length)) * 2*h_std - h_std)\r\n self.Wc = nn.Parameter(torch.rand((num_beh, num_classes, capsule_length, 3 * capsule_length)) * 2*h_std - h_std)\r\n\r\n self.Bx = nn.Parameter(torch.rand((num_beh, num_classes, 3 * capsule_length)) * 2 * x_std - x_std)\r\n self.Bh = nn.Parameter(torch.rand((num_beh, num_classes, 3 * capsule_length)) * 2 * h_std - h_std)\r\n self.Bc = nn.Parameter(torch.rand((num_beh, num_classes, 3 * capsule_length)) * 2 * h_std - h_std)\r\n\r\n\r\n def forward(self, x, Hk_1, C, detach=False):\r\n # x: b * t * d\r\n # Hk_1: b * t * c * d2\r\n # C: b * t * c * d2\r\n\r\n if detach: \r\n # b * t * c * (3*d2)\r\n W_i = torch.einsum('bti,tick->btck', x.detach(), self.Wi.detach()) + self.Bx.detach().unsqueeze(0)\r\n W_h = torch.einsum('btcd,tcdl->btcl', Hk_1.detach(), self.Wh.detach()) + self.Bh.detach().unsqueeze(0)\r\n W_c = torch.einsum('btcd,tcdl->btcl', C.detach(), self.Wc.detach()) + self.Bc.detach().unsqueeze(0)\r\n \r\n else:\r\n # b * t * c * (3*d2)\r\n W_i = torch.einsum('bti,tick->btck', x, self.Wi) + self.Bx.unsqueeze(0)\r\n W_h = torch.einsum('btcd,tcdl->btcl', Hk_1, self.Wh) + self.Bh.unsqueeze(0)\r\n W_c = torch.einsum('btcd,tcdl->btcl', C, self.Wc) + self.Bc.unsqueeze(0)\r\n \r\n # b * t * c * d2\r\n W_ir_i, W_iz_i, W_in_i = W_i.chunk(3, dim=-1) \r\n W_hr_H, W_hz_H, W_hn_H = W_h.chunk(3, dim=-1)\r\n W_cr_C, W_cz_C, W_cn_C = W_c.chunk(3, dim=-1)\r\n\r\n W_ir_i = W_ir_i * W_cr_C\r\n W_iz_i = W_iz_i * W_cz_C\r\n W_in_i = W_in_i * W_cn_C\r\n\r\n # b * t * c * d2\r\n R = torch.sigmoid(W_ir_i + W_hr_H)\r\n Z = torch.sigmoid(W_iz_i + W_hz_H)\r\n N = torch.tanh(W_in_i + (R * W_hn_H) )\r\n\r\n H = (Z * N + (1.0 - Z) * Hk_1) \r\n \r\n return H, N","repo_name":"Junsu-Cho/DyMuS","sub_path":"model_util.py","file_name":"model_util.py","file_ext":"py","file_size_in_byte":5672,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"75"} +{"seq_id":"31862990150","text":"import os\nimport json\nimport csv\nimport shutil\nfrom helper import remove_hidden_folder\nfrom variables import *\n\ndef convert_yolo_bbox(img_size, box, category):\n dw = 1./img_size[0]\n dh = 1./img_size[1]\n x = (int(box[0]) + int(box[2]))/2.0\n y = (int(box[1]) + int(box[3]))/2.0\n w = abs(int(box[2]) - int(box[0]))\n h = abs(int(box[3]) - int(box[1]))\n x = x*dw\n w = w*dw\n y = y*dh\n h = h*dh\n\n return [category, x, y, w, h]\n\ndef handle_annotations(item_list):\n for ingredient in item_list:\n annotation_list = os.listdir(f\"{images_bbox_directory}/{ingredient}/{ann}\")\n annotation_list = remove_hidden_folder(annotation_list)\n \n category = int(annotation_list[0][:annotation_list[0].index(\"-\")])\n print(\"Category\", category)\n\n for file_name in annotation_list:\n print(\"Input file name\", file_name)\n target_file_name = 
file_name[:file_name.index(\".\")] + \".txt\"\n print(\"Target file name\", target_file_name)\n\n bbox_details = json.load(\n open(f\"{images_bbox_directory}/{ingredient}/{ann}/{file_name}\", \"r\"))\n\n image_height = bbox_details[size][height]\n image_width = bbox_details[size][width]\n\n all_bboxes = bbox_details[objects]\n\n yolo_bbox_list = []\n\n for bbox in all_bboxes:\n bbox_coordinates = bbox[points][exterior]\n top_left = bbox_coordinates[0]\n bottom_right = bbox_coordinates[1]\n\n print(\"Original_points\", image_height,\n image_width, top_left, bottom_right)\n yolo_bbox = convert_yolo_bbox((image_width, image_height),\n (top_left[0], top_left[1], bottom_right[0], bottom_right[1]), category)\n print(\"Yolo_points\", yolo_bbox)\n\n yolo_bbox_list.append(yolo_bbox)\n\n # write yolo_bbox_list\n csv_writer_object = csv.writer(\n open(f\"{all_image_details_directory}/{target_file_name}\", \"w+\"), delimiter=\" \")\n csv_writer_object.writerows(yolo_bbox_list)\n\n print(\"=\"*50)\n\n\ndef handle_images(item_list):\n for ingredient in item_list:\n image_list = os.listdir(f\"{images_bbox_directory}/{ingredient}/{img}\")\n image_list = remove_hidden_folder(image_list)\n\n for image in image_list:\n print(f\"Copying {images_bbox_directory}/{ingredient}/{img}/{image}\")\n shutil.copy(f\"{images_bbox_directory}/{ingredient}/{img}/{image}\", all_image_details_directory)\n\n\n# variables defined in file variables.py\nif not os.path.isdir(all_image_details_directory):\n os.mkdir(all_image_details_directory)\n\nitem_list = [d for d in os.listdir(images_bbox_directory) if os.path.isdir(os.path.join(images_bbox_directory, d))]\nitem_list = sorted(remove_hidden_folder(item_list))\n\nitem_list = remove_hidden_folder(item_list)\n\nprint(\"Handling Annotations\")\nhandle_annotations(item_list)\nprint(\"Handling Images\")\nhandle_images(item_list)","repo_name":"pratul29/whats-for-lunch","sub_path":"2_make_yolo_compatilble_bbox.py","file_name":"2_make_yolo_compatilble_bbox.py","file_ext":"py","file_size_in_byte":3058,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"26600481284","text":"from converter import to_rgba\n\n\nclass Object:\n def __init__(self):\n self.voxels = [[[]]]\n self.name = \"\"\n self.author = \"\"\n self.description = \"\"\n self.time = 0\n self.size = [0, 0, 0]\n\n def load(self, file):\n text = file.read()\n lines = text.split(\"\\n\")\n palette: dict = {}\n block = \"\"\n x = y = z = -1\n nline = False # guard against use before the first blank line\n\n\n for line in lines:\n line = line.split('#')[0].strip()\n seq = line.split(' ')\n\n if not line:\n nline = True\n continue\n if line[-1] == ':':\n block = line[:-1]\n continue\n if block == \"meta\":\n # Store the metadata in the object's attributes\n if seq[0] == 'n':\n self.name = line[2:]\n elif seq[0] == 'a':\n self.author = line[2:]\n elif seq[0] == 'd':\n self.description = line[2:]\n elif seq[0] == 't':\n self.time = float(line[2:])\n elif seq[0] == 's':\n self.size = list(map(int, (seq[1], seq[2], seq[3])))\n\n elif block == \"pal\":\n # Build the colour palette, mapping characters to colours\n palette[seq[0]] = to_rgba(map(float, (seq[1], seq[2], seq[3], seq[4])))\n\n elif block == \"pic\":\n # Fill the voxels list, resolving each character to its RGBA colour\n if nline:\n z += 1\n y = -1\n self.voxels.append([])\n self.voxels[z].append([])\n y += 1\n for x in line:\n self.voxels[z][y].append(palette.get(x, 0))\n\n nline = False\n\n\n\n\ndef main():\n from datetime import datetime\n obj = Object()\n with open(\"test_pics/super_cube.tvp\", 'r') as f:\n 
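# parse the metadata, palette and picture blocks from the file into obj\n 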
obj.load(f)\n\n print(\"==\", obj.name, \"==\")\n print(\"by\", obj.author)\n print(\"Created\", datetime.utcfromtimestamp(obj.time).strftime('%Y.%m.%d, at %H:%M:%S'))\n print()\n print(obj.description)\n print()\n print()\n for z in obj.voxels:\n for y in z:\n print(' \\t'.join(map(str, y)))\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"USBashka/UVEiUVE","sub_path":"object.py","file_name":"object.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"72186017201","text":"from math import log2\n\nfrom deprecation import deprecated\n\nfrom ._tversky import Tversky\nfrom .. import __version__\n\n__all__ = ['Jaccard', 'dist_jaccard', 'sim_jaccard', 'tanimoto']\n\n\nclass Jaccard(Tversky):\n r\"\"\"Jaccard similarity.\n\n For two sets X and Y, the Jaccard similarity coefficient\n :cite:`Jaccard:1901,Ruzicka:1958` is\n\n .. math::\n\n sim_{Jaccard}(X, Y) =\n \\frac{|X \\cap Y|}{|X \\cup Y|}\n\n This is identical to the Tanimoto similarity coefficient\n :cite:`Tanimoto:1958`\n and the Tversky index :cite:`Tversky:1977` for\n :math:`\\alpha = \\beta = 1`.\n\n In :ref:`2x2 confusion table terms `, where a+b+c+d=n,\n this is\n\n .. math::\n\n sim_{Jaccard} =\n \\frac{a}{a+b+c}\n\n Notes\n -----\n The multiset variant is termed Ellenberg similarity :cite:`Ellenberg:1956`.\n\n .. versionadded:: 0.3.6\n\n \"\"\"\n\n def __init__(self, tokenizer=None, intersection_type='crisp', **kwargs):\n \"\"\"Initialize Jaccard instance.\n\n Parameters\n ----------\n tokenizer : _Tokenizer\n A tokenizer instance from the :py:mod:`abydos.tokenizer` package\n intersection_type : str\n Specifies the intersection type, and set type as a result:\n See :ref:`intersection_type ` description in\n :py:class:`_TokenDistance` for details.\n **kwargs\n Arbitrary keyword arguments\n\n Other Parameters\n ----------------\n qval : int\n The length of each q-gram. Using this parameter and tokenizer=None\n will cause the instance to use the QGram tokenizer with this\n q value.\n metric : _Distance\n A string distance measure class for use in the ``soft`` and\n ``fuzzy`` variants.\n threshold : float\n A threshold value, similarities above which are counted as\n members of the intersection for the ``fuzzy`` variant.\n\n\n .. versionadded:: 0.4.0\n\n \"\"\"\n super(Jaccard, self).__init__(\n alpha=1,\n beta=1,\n bias=None,\n tokenizer=tokenizer,\n intersection_type=intersection_type,\n **kwargs\n )\n\n def sim(self, src, tar):\n r\"\"\"Return the Jaccard similarity of two strings.\n\n Parameters\n ----------\n src : str\n Source string (or QGrams/Counter objects) for comparison\n tar : str\n Target string (or QGrams/Counter objects) for comparison\n\n Returns\n -------\n float\n Jaccard similarity\n\n Examples\n --------\n >>> cmp = Jaccard()\n >>> cmp.sim('cat', 'hat')\n 0.3333333333333333\n >>> cmp.sim('Niall', 'Neil')\n 0.2222222222222222\n >>> cmp.sim('aluminum', 'Catalan')\n 0.0625\n >>> cmp.sim('ATCG', 'TAGC')\n 0.0\n\n\n .. versionadded:: 0.1.0\n .. 
versionchanged:: 0.3.6\n Encapsulated in class\n\n \"\"\"\n return super(Jaccard, self).sim(src, tar)\n\n def tanimoto_coeff(self, src, tar):\n \"\"\"Return the Tanimoto distance between two strings.\n\n Tanimoto distance :cite:`Tanimoto:1958` is\n :math:`-log_{2} sim_{Tanimoto}(X, Y)`.\n\n Parameters\n ----------\n src : str\n Source string (or QGrams/Counter objects) for comparison\n tar : str\n Target string (or QGrams/Counter objects) for comparison\n\n Returns\n -------\n float\n Tanimoto distance\n\n Examples\n --------\n >>> cmp = Jaccard()\n >>> cmp.tanimoto_coeff('cat', 'hat')\n -1.5849625007211563\n >>> cmp.tanimoto_coeff('Niall', 'Neil')\n -2.1699250014423126\n >>> cmp.tanimoto_coeff('aluminum', 'Catalan')\n -4.0\n >>> cmp.tanimoto_coeff('ATCG', 'TAGC')\n -inf\n\n\n .. versionadded:: 0.1.0\n .. versionchanged:: 0.3.6\n Encapsulated in class\n\n \"\"\"\n coeff = self.sim(src, tar)\n if coeff != 0:\n return log2(coeff)\n\n return float('-inf')\n\n\n@deprecated(\n deprecated_in='0.4.0',\n removed_in='0.6.0',\n current_version=__version__,\n details='Use the Jaccard.sim method instead.',\n)\ndef sim_jaccard(src, tar, qval=2):\n \"\"\"Return the Jaccard similarity of two strings.\n\n This is a wrapper for :py:meth:`Jaccard.sim`.\n\n Parameters\n ----------\n src : str\n Source string (or QGrams/Counter objects) for comparison\n tar : str\n Target string (or QGrams/Counter objects) for comparison\n qval : int\n The length of each q-gram\n\n Returns\n -------\n float\n Jaccard similarity\n\n Examples\n --------\n >>> sim_jaccard('cat', 'hat')\n 0.3333333333333333\n >>> sim_jaccard('Niall', 'Neil')\n 0.2222222222222222\n >>> sim_jaccard('aluminum', 'Catalan')\n 0.0625\n >>> sim_jaccard('ATCG', 'TAGC')\n 0.0\n\n\n .. versionadded:: 0.1.0\n\n \"\"\"\n return Jaccard(qval=qval).sim(src, tar)\n\n\n@deprecated(\n deprecated_in='0.4.0',\n removed_in='0.6.0',\n current_version=__version__,\n details='Use the Jaccard.dist method instead.',\n)\ndef dist_jaccard(src, tar, qval=2):\n \"\"\"Return the Jaccard distance between two strings.\n\n This is a wrapper for :py:meth:`Jaccard.dist`.\n\n Parameters\n ----------\n src : str\n Source string (or QGrams/Counter objects) for comparison\n tar : str\n Target string (or QGrams/Counter objects) for comparison\n qval : int\n The length of each q-gram\n\n Returns\n -------\n float\n Jaccard distance\n\n Examples\n --------\n >>> dist_jaccard('cat', 'hat')\n 0.6666666666666667\n >>> dist_jaccard('Niall', 'Neil')\n 0.7777777777777778\n >>> dist_jaccard('aluminum', 'Catalan')\n 0.9375\n >>> dist_jaccard('ATCG', 'TAGC')\n 1.0\n\n\n .. versionadded:: 0.1.0\n\n \"\"\"\n return Jaccard(qval=qval).dist(src, tar)\n\n\n@deprecated(\n deprecated_in='0.4.0',\n removed_in='0.6.0',\n current_version=__version__,\n details='Use the Jaccard.tanimoto_coeff method instead.',\n)\ndef tanimoto(src, tar, qval=2):\n \"\"\"Return the Tanimoto coefficient of two strings.\n\n This is a wrapper for :py:meth:`Jaccard.tanimoto_coeff`.\n\n Parameters\n ----------\n src : str\n Source string (or QGrams/Counter objects) for comparison\n tar : str\n Target string (or QGrams/Counter objects) for comparison\n qval : int\n The length of each q-gram\n\n Returns\n -------\n float\n Tanimoto distance\n\n Examples\n --------\n >>> tanimoto('cat', 'hat')\n -1.5849625007211563\n >>> tanimoto('Niall', 'Neil')\n -2.1699250014423126\n >>> tanimoto('aluminum', 'Catalan')\n -4.0\n >>> tanimoto('ATCG', 'TAGC')\n -inf\n\n\n .. 
versionadded:: 0.1.0\n\n \"\"\"\n return Jaccard(qval=qval).tanimoto_coeff(src, tar)\n\n\nif __name__ == '__main__':\n import doctest\n\n doctest.testmod()\n","repo_name":"Anjali-Jha22/Final-Year-Project","sub_path":"env/Lib/site-packages/abydos/distance/_jaccard.py","file_name":"_jaccard.py","file_ext":"py","file_size_in_byte":7016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"20417935775","text":"\"\"\"\nSupport for monitoring the Deluge BitTorrent client API.\n\nFor more details about this platform, please refer to the documentation at\nhttps://home-assistant.io/components/sensor.deluge/\n\"\"\"\nimport logging\n\nimport voluptuous as vol\n\nimport homeassistant.helpers.config_validation as cv\nfrom homeassistant.components.sensor import PLATFORM_SCHEMA\nfrom homeassistant.const import (\n CONF_HOST, CONF_PASSWORD, CONF_USERNAME, CONF_NAME, CONF_PORT,\n CONF_MONITORED_VARIABLES, STATE_IDLE)\nfrom homeassistant.helpers.entity import Entity\nfrom homeassistant.exceptions import PlatformNotReady\n\nREQUIREMENTS = ['deluge-client==1.4.0']\n\n_LOGGER = logging.getLogger(__name__)\n_THROTTLED_REFRESH = None\n\nDEFAULT_NAME = 'Deluge'\nDEFAULT_PORT = 58846\nDHT_UPLOAD = 1000\nDHT_DOWNLOAD = 1000\nSENSOR_TYPES = {\n 'current_status': ['Status', None],\n 'download_speed': ['Down Speed', 'kB/s'],\n 'upload_speed': ['Up Speed', 'kB/s'],\n}\n\nPLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({\n vol.Required(CONF_HOST): cv.string,\n vol.Required(CONF_PASSWORD): cv.string,\n vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,\n vol.Required(CONF_USERNAME): cv.string,\n vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,\n vol.Optional(CONF_MONITORED_VARIABLES, default=[]): vol.All(\n cv.ensure_list, [vol.In(SENSOR_TYPES)]),\n})\n\n\n# pylint: disable=unused-argument\ndef setup_platform(hass, config, add_devices, discovery_info=None):\n \"\"\"Set up the Deluge sensors.\"\"\"\n from deluge_client import DelugeRPCClient\n\n name = config.get(CONF_NAME)\n host = config.get(CONF_HOST)\n username = config.get(CONF_USERNAME)\n password = config.get(CONF_PASSWORD)\n port = config.get(CONF_PORT)\n\n deluge_api = DelugeRPCClient(host, port, username, password)\n try:\n deluge_api.connect()\n except ConnectionRefusedError:\n _LOGGER.error(\"Connection to Deluge Daemon failed\")\n raise PlatformNotReady\n dev = []\n for variable in config[CONF_MONITORED_VARIABLES]:\n dev.append(DelugeSensor(variable, deluge_api, name))\n\n add_devices(dev)\n\n\nclass DelugeSensor(Entity):\n \"\"\"Representation of a Deluge sensor.\"\"\"\n\n def __init__(self, sensor_type, deluge_client, client_name):\n \"\"\"Initialize the sensor.\"\"\"\n self._name = SENSOR_TYPES[sensor_type][0]\n self.client = deluge_client\n self.type = sensor_type\n self.client_name = client_name\n self._state = None\n self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]\n self.data = None\n self._available = False\n\n @property\n def name(self):\n \"\"\"Return the name of the sensor.\"\"\"\n return '{} {}'.format(self.client_name, self._name)\n\n @property\n def state(self):\n \"\"\"Return the state of the sensor.\"\"\"\n return self._state\n\n @property\n def available(self):\n \"\"\"Return true if device is available.\"\"\"\n return self._available\n\n @property\n def unit_of_measurement(self):\n \"\"\"Return the unit of measurement of this entity, if any.\"\"\"\n return self._unit_of_measurement\n\n def update(self):\n \"\"\"Get the latest data from Deluge and updates the 
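# A worked check of the Jaccard doctest values in the abydos record above (a
# minimal sketch, not abydos code; it assumes the documented default q=2
# tokenizer, which pads strings with '$' and '#' before taking bigrams, and
# uses plain sets where abydos uses multisets -- for these inputs both agree):
# 'cat' -> {$c, ca, at, t#}, 'hat' -> {$h, ha, at, t#}, so 2 shared bigrams of
# 6 total gives 2/6 = 0.3333..., and the Tanimoto distance is its log2.
from math import log2

def bigrams(s):
    padded = '$' + s + '#'
    return {padded[i:i + 2] for i in range(len(padded) - 1)}

def jaccard_sim(src, tar):
    a, b = bigrams(src), bigrams(tar)
    return len(a & b) / len(a | b)

print(jaccard_sim('cat', 'hat'))        # 0.3333333333333333, as in the doctest
print(log2(jaccard_sim('cat', 'hat')))  # -1.5849..., the Tanimoto distance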
state.\"\"\"\n from deluge_client import FailedToReconnectException\n try:\n self.data = self.client.call('core.get_session_status',\n ['upload_rate', 'download_rate',\n 'dht_upload_rate',\n 'dht_download_rate'])\n self._available = True\n except FailedToReconnectException:\n _LOGGER.error(\"Connection to Deluge Daemon Lost\")\n self._available = False\n return\n\n upload = self.data[b'upload_rate'] - self.data[b'dht_upload_rate']\n download = self.data[b'download_rate'] - self.data[\n b'dht_download_rate']\n\n if self.type == 'current_status':\n if self.data:\n if upload > 0 and download > 0:\n self._state = 'Up/Down'\n elif upload > 0 and download == 0:\n self._state = 'Seeding'\n elif upload == 0 and download > 0:\n self._state = 'Downloading'\n else:\n self._state = STATE_IDLE\n else:\n self._state = None\n\n if self.data:\n if self.type == 'download_speed':\n kb_spd = float(download)\n kb_spd = kb_spd / 1024\n self._state = round(kb_spd, 2 if kb_spd < 0.1 else 1)\n elif self.type == 'upload_speed':\n kb_spd = float(upload)\n kb_spd = kb_spd / 1024\n self._state = round(kb_spd, 2 if kb_spd < 0.1 else 1)\n","repo_name":"jest-community/jest-pytest","sub_path":"src/__tests__/integration/home-assistant/homeassistant/components/sensor/deluge.py","file_name":"deluge.py","file_ext":"py","file_size_in_byte":4790,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"75"} +{"seq_id":"20416658025","text":"\"\"\"Google Assistant OAuth View.\"\"\"\n\nimport logging\n\n# Typing imports\n# pylint: disable=using-constant-test,unused-import,ungrouped-imports\n# if False:\nfrom aiohttp.web import Request, Response # NOQA\nfrom typing import Dict, Any # NOQA\n\nfrom homeassistant.core import HomeAssistant # NOQA\nfrom homeassistant.components.http import HomeAssistantView\nfrom homeassistant.const import (\n HTTP_BAD_REQUEST,\n HTTP_UNAUTHORIZED,\n HTTP_MOVED_PERMANENTLY,\n)\n\nfrom .const import (\n GOOGLE_ASSISTANT_API_ENDPOINT,\n CONF_PROJECT_ID, CONF_CLIENT_ID, CONF_ACCESS_TOKEN\n)\n\nBASE_OAUTH_URL = 'https://oauth-redirect.googleusercontent.com'\nREDIRECT_TEMPLATE_URL = \\\n '{}/r/{}#access_token={}&token_type=bearer&state={}'\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass GoogleAssistantAuthView(HomeAssistantView):\n \"\"\"Handle Google Actions auth requests.\"\"\"\n\n url = GOOGLE_ASSISTANT_API_ENDPOINT + '/auth'\n name = 'api:google_assistant:auth'\n requires_auth = False\n\n def __init__(self, hass: HomeAssistant, cfg: Dict[str, Any]) -> None:\n \"\"\"Initialize instance of the view.\"\"\"\n super().__init__()\n\n self.project_id = cfg.get(CONF_PROJECT_ID)\n self.client_id = cfg.get(CONF_CLIENT_ID)\n self.access_token = cfg.get(CONF_ACCESS_TOKEN)\n\n async def get(self, request: Request) -> Response:\n \"\"\"Handle oauth token request.\"\"\"\n query = request.query\n redirect_uri = query.get('redirect_uri')\n if not redirect_uri:\n msg = 'missing redirect_uri field'\n _LOGGER.warning(msg)\n return self.json_message(msg, status_code=HTTP_BAD_REQUEST)\n\n if self.project_id not in redirect_uri:\n msg = 'missing project_id in redirect_uri'\n _LOGGER.warning(msg)\n return self.json_message(msg, status_code=HTTP_BAD_REQUEST)\n\n state = query.get('state')\n if not state:\n msg = 'oauth request missing state'\n _LOGGER.warning(msg)\n return self.json_message(msg, status_code=HTTP_BAD_REQUEST)\n\n client_id = query.get('client_id')\n if self.client_id != client_id:\n msg = 'invalid client id'\n _LOGGER.warning(msg)\n return self.json_message(msg, 
status_code=HTTP_UNAUTHORIZED)\n\n generated_url = redirect_url(self.project_id, self.access_token, state)\n\n _LOGGER.info('user logging in from Google Assistant')\n return self.json_message(\n 'redirect success',\n status_code=HTTP_MOVED_PERMANENTLY,\n headers={'Location': generated_url})\n\n\ndef redirect_url(project_id: str, access_token: str, state: str) -> str:\n \"\"\"Generate the redirect format for the oauth request.\"\"\"\n return REDIRECT_TEMPLATE_URL.format(BASE_OAUTH_URL, project_id,\n access_token, state)\n","repo_name":"jest-community/jest-pytest","sub_path":"src/__tests__/integration/home-assistant/homeassistant/components/google_assistant/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":2859,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"75"} +{"seq_id":"24478703351","text":"from turtle import *\n\n'''\nPossible changes:\n- Smoother function drawing (see the sketch after this record)\n- An input to select a function, or maybe input a function\n'''\n\n\ndef main():\n # Setting the Canvas Size\n setworldcoordinates(-100, -100, 100, 100)\n hideturtle()\n tracer(0)\n\n # Drawing Lines X and Y\n color(\"red\")\n for i in range(4):\n fd(95)\n bk(95)\n rt(90)\n\n # Drawing Grid of dots (this draws a spiral)\n speed(1)\n dot(8)\n color('black')\n penup()\n for i in range(1, 87, 2):\n for do in range(i//4):\n pendown()\n dot(6)\n penup()\n fd(10)\n rt(90)\n '''\n # Drawing Grid of dots (simplest way to do it, by using one 'for' for each axis)\n \n color('gray')\n for x in range(-100, 101):\n for y in range(-100, 101):\n if x%5 == 0 and y % 5 == 0: # Best way for scaling the dots into the grid\n pu(); goto(x, y); pd(); dot(4)\n '''\n\n # Creating Functions\n # Here we can add other functions based on mathematical equations; easy to add and comprehend\n\n def function1(x):\n y = x**4 / 4 - x**3 / 3 - 3 * x * x\n return y\n\n def function2(x):\n y = 1 - 2*x - x**2\n return y\n\n def function3(x):\n y = x**3 - 6 * x**2 + 4 * x + 12\n return y\n\n def function4(x):\n y = x**2\n return y\n\n # Tracing the line\n point = Turtle()\n point.hideturtle()\n point.penup()\n point.color('black')\n point.width(2)\n point.speed(0)\n\n for x in range(-25, 25):\n y = function4(x) # Select which function will be used. 
An input can be used in the future\n point.goto(x*1, y*1) # The current coordinates; they can be scaled by multiplication for a better-looking graph\n point.pendown()\n print(point.pos()) # This gives us feedback to improve the for loop and the function drawing\n\nif __name__ == '__main__':\n main()\nexitonclick()\n","repo_name":"Clerijr/Turtle-Study","sub_path":"Cartesian Graph.py","file_name":"Cartesian Graph.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"42916998932","text":"import os\nimport sys\nimport argparse\n\nfrom mpi4py import MPI\n\nimport dolfin as dlf\nimport fenicsmechanics as fm\nfrom fenicsmechanics.dolfincompat import MPI_COMM_WORLD\n\n# For use with emacs python shell.\ntry:\n sys.argv.remove(\"--simple-prompt\")\nexcept ValueError:\n pass\n\nparser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument(\"--pressure\",\n default=0.004, type=float,\n help=\"Pressure to be applied at z = 0.\")\n\nmesh_file = fm.get_mesh_file_names(\"beam\", refinements=[\"1000\"], ext=\"h5\")\nparser.add_argument(\"--mesh-file\",\n type=str, default=mesh_file,\n help=\"Name of mesh file to use for mesh and facet function.\")\nparser.add_argument(\"--generate-mesh\",\n action=\"store_true\", help=\"Generates mesh using mshr.\")\nparser.add_argument(\"--resolution\",\n default=70, type=int,\n help=\"Resolution used to generate mesh with mshr.\")\nparser.add_argument(\"--incompressible\",\n action=\"store_true\", help=\"Model as incompressible material.\")\nparser.add_argument(\"--bulk-modulus\",\n type=float, default=1e3, dest=\"kappa\",\n help=\"Bulk modulus of the material.\")\nparser.add_argument(\"--loading-steps\", \"-ls\",\n type=int, default=10,\n help=\"Number of loading steps to use.\")\nparser.add_argument(\"--polynomial-degree\", \"-pd\",\n type=int, default=2, dest=\"pd\", choices=[1, 2, 3],\n help=\"Polynomial degree to be used for displacement.\")\nargs = parser.parse_args()\n\n# Region IDs\nHNBC = 0 # homogeneous Neumann BC\nHDBC = 10 # homogeneous Dirichlet BC\nINBC = 20 # inhomogeneous Neumann BC\n\nKAPPA = 1e100 if args.incompressible else args.kappa\n\nif args.generate_mesh:\n import mshr\n domain = mshr.Box(dlf.Point(), dlf.Point(10, 1, 1))\n mesh_file = mshr.generate_mesh(domain, args.resolution)\n\n boundaries = dlf.MeshFunction(\"size_t\", mesh_file, 2)\n boundaries.set_all(HNBC)\n\n hdbc = dlf.CompiledSubDomain(\"near(x[0], 0.0) && on_boundary\")\n hdbc.mark(boundaries, HDBC)\n\n inbc = dlf.CompiledSubDomain(\"near(x[2], 0.0) && on_boundary\")\n inbc.mark(boundaries, INBC)\nelse:\n mesh_file = boundaries = args.mesh_file\n\nmesh = {\n 'mesh_file': mesh_file,\n 'boundaries': boundaries\n}\n\ncf = dlf.Constant([1., 0., 0.])\ncs = dlf.Constant([0., 1., 0.])\nmaterial = {\n 'type': 'elastic',\n 'const_eqn': 'guccione',\n 'incompressible': args.incompressible,\n 'density': 0.0,\n 'C': 2.0,\n 'bf': 8.0,\n 'bt': 2.0,\n 'bfs': 4.0,\n 'kappa': KAPPA,\n 'fibers': {\n 'fiber_files': [cf, cs],\n 'fiber_names': ['e1', 'e2'],\n }\n}\n\ninterval = [0., 1.]\ndt = (interval[1] - interval[0])/args.loading_steps\nformulation = {\n 'time': {\n 'dt': dt,\n 'interval': interval\n },\n 'domain': 'lagrangian',\n 'bcs': {\n 'dirichlet': {\n 'displacement': [[0., 0., 0.]],\n 'regions': [HDBC]\n },\n 'neumann': {\n 'regions': [INBC],\n 'values': [\"%f*t\" % args.pressure],\n 'types': ['pressure']\n }\n }\n}\n\nif args.incompressible:\n 
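# The sketch promised in the turtle record above: one way to get smoother
# curves is to sample fractional x values instead of stepping by whole
# integers (my suggestion, reusing that file's function4 name; not part of
# the original record):
from turtle import Turtle

def function4(x):
    return x**2

point = Turtle()
point.hideturtle()
point.penup()
steps = 200  # more samples -> smoother curve than the integer range(-25, 25)
for i in range(steps + 1):
    x = -25 + 50 * i / steps
    point.goto(x, function4(x))
    point.pendown()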
formulation['element'] = 'p%i-p%i' % (args.pd, args.pd - 1)\nelse:\n formulation['element'] = 'p%i' % args.pd\n\nconfig = {'mesh': mesh, 'material': material, 'formulation': formulation}\n\nif args.incompressible:\n fname_disp = \"results/beam-displacement-incompressible.xml.gz\"\n fname_pressure = \"results/beam-pressure.xml.gz\"\n fname_hdf5 = \"results/beam-incompressible.h5\"\n fname_xdmf = \"results/beam-incompressible-viz.xdmf\"\nelse:\n fname_disp = \"results/beam-displacement.xml.gz\"\n fname_pressure = None\n fname_hdf5 = \"results/beam.h5\"\n fname_xdmf = \"results/beam-viz.xdmf\"\nproblem = fm.SolidMechanicsProblem(config)\nsolver = fm.SolidMechanicsSolver(problem, fname_disp=fname_disp,\n fname_pressure=fname_pressure,\n fname_hdf5=fname_hdf5,\n fname_xdmf=fname_xdmf)\nsolver.full_solve()\n\nrank = dlf.MPI.rank(MPI_COMM_WORLD)\ndisp_dof = problem.displacement.function_space().dim()\nif rank == 0:\n print(\"DOF(u) = \", disp_dof)\n\nimport numpy as np\nvals = np.zeros(3)\nx = np.array([10., 0.5, 1.])\ntry:\n problem.displacement.eval(vals, x)\n write_to_file = True\nexcept RuntimeError:\n write_to_file = False\n\nif write_to_file:\n print(\"(rank %i) vals = \" % rank, vals)\n print(\"(rank %i) vals + x = \" % rank, vals + x)\n\n final_position = x + vals\n data = np.hstack((disp_dof, x+vals)).reshape([1, -1])\n with open(\"beam-final_location.dat\", \"ab\") as f:\n np.savetxt(f, data, fmt=(\"%i\", \"%f\", \"%f\", \"%f\"))\n","repo_name":"ElsevierSoftwareX/SOFTX_2018_73","sub_path":"test/manual/land_benchmark/problem1.py","file_name":"problem1.py","file_ext":"py","file_size_in_byte":4801,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"75"} +{"seq_id":"26360406853","text":"\"\"\"\nModule that contains the main script that runs the models.\n\"\"\"\nimport settings\nimport argparse\nimport torch\nimport utils.utils\nimport numpy as np\nfrom models.model_simple import MDANet\nfrom models.model_temporal_common import MDANTemporalCommon\nfrom models.model_temporal_double import MDANTemporalDouble\nfrom models.model_temporal_single import MDANTemporalSingle\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport pickle\nfrom loaders.load_webcamt import CameraData, CameraTimeData, FrameData, VehicleData\nimport loaders.load_webcamt\nfrom loaders.load_ucspeds import VideoDataUCS, FrameDataUCS\nimport loaders.load_ucspeds\nimport joblib\nimport gc\nimport plotter\nimport copy\nimport os\nfrom models.model_original import FCN_rLSTM\n\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--name\", help=\"Name used to save the log file.\", type = str, default=\"webcamT\")\n\nparser.add_argument(\"--seed\", help=\"Random seed.\", type=int, default=42)\n\nparser.add_argument(\"--mu\", help=\"Hyperparameter of the coefficient for the domain adversarial loss\",\n type=float, default=1e-2)\nparser.add_argument('--lambda', default=1e-2, type=float, metavar='', help='trade-off between density estimation and vehicle count losses (see eq. 
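# Sanity check of the load stepping in the fenicsmechanics record above: with
# interval [0, 1] and the default 10 loading steps, dt = 0.1, so the Neumann
# value "%f*t" ramps the pressure linearly from 0 to its full value (default
# values shown):
pressure, steps = 0.004, 10
dt = (1.0 - 0.0) / steps                                   # 0.1
applied = [round(pressure * k * dt, 6) for k in range(steps + 1)]
print(applied)  # 0.0, 0.0004, ..., 0.004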
7 in the paper)')\nparser.add_argument(\"--epochs\", help=\"Number of training epochs\", type=int, default=2)\nparser.add_argument(\"--batch_size\", help=\"Batch size during training\", type=int, default=10)\nparser.add_argument(\"--mode\", help=\"Mode of combination rule for MDANet: [maxmin|dynamic|average]\", type=str, default=\"average\")\nparser.add_argument('--use_visdom', default=False, type=int, metavar='', help='use Visdom to visualize plots')\nparser.add_argument('--visdom_env', default='MDAN', type=str, metavar='', help='Visdom environment name')\nparser.add_argument('--visdom_port', default=8444, type=int, metavar='', help='Visdom port')\nparser.add_argument('--results_file', default='None', type=str, metavar='', help = 'Name for results file')\nparser.add_argument('--cuda', default='0', type=str, metavar='', help = 'CUDA GPU')\nparser.add_argument('--lr', default=1e-4, type=float, metavar='', help='Learning Rate')\nparser.add_argument('--model', default='simple', type=str, metavar='', help='Model [simple|common|double|single|original|original_temporal]')\nparser.add_argument('--prefix_data', default='first', type=str, metavar='', help='Data Prefix')\nparser.add_argument('--prefix_densities', default='first', type=str, metavar='', help='Densities Prefix')\nparser.add_argument('--dataset', default='webcamt', type=str, metavar='', help='Dataset [webcamt|ucspeds]')\nparser.add_argument('--use_mask', default=True, type=int, metavar='', help='Use mask')\nparser.add_argument('--use_transformations', default=True, type=int, metavar='', help='Use Data Augmentation')\nparser.add_argument('--max_frames_per_domain', default=2000, type=int, metavar='', help='Max. number of frames per domain')\nparser.add_argument('--sequence_size', default=10, type=int, metavar='', help='Sequence Size for temporal models')\nparser.add_argument('--results_file_param', default='None', type=str, metavar='', help='Parameter to add in results file')\nparser.add_argument('--gamma', default=10, type=float, metavar='', help='Gamma parameter for multisource domain adaptation')\n# Compile and configure all the model parameters.\nargs = parser.parse_args()\ndevice = torch.device(\"cuda:\"+args.cuda if torch.cuda.is_available() else \"cpu\")\n\nlogger = utils.utils.get_logger(args.name)\nlogger.info(\"Using device: \"+str(device))\n# Set random number seed.\nnp.random.seed(args.seed)\ntorch.manual_seed(args.seed)\ntorch.cuda.manual_seed(args.seed)\n# Loading the randomly partition the amazon data set.\nlogger.info('Started loading data')\n\nsettings.DATASET = args.dataset\n\nif args.model == 'simple' or args.model == 'original':\n settings.TEMPORAL = False\nelse:\n settings.TEMPORAL = True\n\nif args.model == 'original' or args.model == 'original_temporal':\n ORIGINAL = True\nelse:\n ORIGINAL = False\n\nsettings.PREFIX_DENSITIES = args.prefix_densities\nsettings.PREFIX_DATA = args.prefix_data\nsettings.SEQUENCE_SIZE = args.sequence_size\n\nsettings.USE_MASK = args.use_mask\n\nsettings.LOAD_DATA_AUGMENTATION = args.use_transformations # This was changed on 14/06-15:19. 
Before it meant to use transformations on the run\n\nif args.dataset == 'webcamt':\n data, data_insts = loaders.load_webcamt.load_insts(settings.PREFIX_DATA, args.max_frames_per_domain)\nelif args.dataset == 'ucspeds':\n data, data_insts = loaders.load_ucspeds.load_insts(settings.PREFIX_DATA, args.max_frames_per_domain)\n\nif settings.TEMPORAL:\n data_insts= utils.utils.group_sequences(data_insts, settings.SEQUENCE_SIZE)\n\ndata_insts_no_aug = utils.utils.remove_augmentations(data_insts)\n\n\n##############################\n################################\nnum_epochs = args.epochs\nbatch_size = args.batch_size\nnum_domains = len(data_insts) - 1\nlr = args.lr\nmu = args.mu\ngamma = args.gamma\nmode = args.mode\nargs_dict = vars(args)\nlambda_ = args_dict[\"lambda\"]\n\nif args.results_file == \"None\":\n if args.results_file_param=='None':\n extra_param = '_'\n else:\n extra_param = '_'+args.results_file_param+'_'\n if ORIGINAL:\n results_file = args.model+extra_param+settings.PREFIX_DENSITIES+'_'+'noapply'+'_'+str(args.lr)+'_mask'+str(args.use_mask)+'_sequence_size'+str(args.sequence_size)+'_max_frames_per_domain'+str(args.max_frames_per_domain)+'_noapply'\n else:\n results_file = args.model+extra_param+settings.PREFIX_DENSITIES+'_'+args.mode+'_'+str(args.lr)+'_mask'+str(args.use_mask)+'_sequence_size'+str(args.sequence_size)+'_max_frames_per_domain'+str(args.max_frames_per_domain)+'_'+str(args.mu)\nelse:\n results_file = args.results_file\n\nif args_dict['use_visdom']:\n loss_plt = plotter.VisdomLossPlotter(env_name=args.dataset+'_'+results_file,\n port=args_dict['visdom_port'])\n\nlogger.info(\"Training with domain adaptation using PyTorch madnNet: \")\nerror_dicts = {}\nresults = {}\nresults['total count (mse)'] = {}\nresults['total density (mse)'] = {}\nresults['total count (mae)'] = {}\nresults['best val count (mse)'] = {}\nresults['best val density (mse)'] = {}\nresults['best val count (mae)'] = {}\nresults['test count (mse)'] = {}\nresults['test density (mse)'] = {}\nresults['test count (mae)'] = {}\n\ncounts_register = []\nfor i in range(len(data_insts)):\n counts_register.append([])\n\nfor i in range(len(data_insts)):\n if args.dataset == 'webcamt':\n domain_id = settings.WEBCAMT_DOMAINS[i]\n else:\n domain_id = settings.UCSPEDS_DOMAINS[i]\n\n results['best val density (mse)'][domain_id] = np.inf\n results['best val count (mse)'][domain_id] = np.inf\n results['best val count (mae)'][domain_id] = np.inf\n \n # Train DannNet.\n if args.model == 'simple':\n mdan = MDANet(num_domains).to(device)\n best_mdan = MDANet(num_domains).to(device)\n elif args.model == 'common':\n mdan = MDANTemporalCommon(num_domains, settings.get_new_shape()).to(device)\n best_mdan = MDANTemporalCommon(num_domains, settings.get_new_shape()).to(device)\n elif args.model == 'double':\n mdan = MDANTemporalDouble(num_domains, settings.get_new_shape()).to(device)\n best_mdan = MDANTemporalDouble(num_domains, settings.get_new_shape()).to(device)\n elif args.model == 'single':\n mdan = MDANTemporalSingle(num_domains, settings.get_new_shape()).to(device)\n best_mdan = MDANTemporalSingle(num_domains, settings.get_new_shape()).to(device)\n elif ORIGINAL:\n mdan = FCN_rLSTM(settings.TEMPORAL, settings.get_new_shape()).to(device)\n best_mdan = FCN_rLSTM(settings.TEMPORAL, settings.get_new_shape()).to(device)\n\n optimizer = torch.optim.Adam(mdan.parameters(), lr=lr, weight_decay=0)\n\n val_insts, test_insts = utils.utils.split_test_validation(data_insts_no_aug[i]) \n\n \n if ORIGINAL:\n domain_insts = 
utils.utils.concatenate_data_insts(data_insts, i)\n else:\n domain_insts = data_insts\n\n # Training phase.\n\n logger.info(\"Start training Domain: {}...\".format(str(domain_id)))\n for t in range(num_epochs):\n mdan.train()\n running_loss = 0.0\n running_count_loss = 0.0\n running_density_loss = 0.0\n no_batches = 0\n running_domain_losses = np.zeros(num_domains)\n train_loader = utils.utils.multi_data_loader(domain_insts, batch_size, settings.PREFIX_DATA, settings.PREFIX_DENSITIES, data)\n \n for batch_insts, batch_densities, batch_counts, batch_masks in train_loader:\n #logger.info(\"Starting batch\")\n # Build source instances.\n source_insts = []\n source_counts = []\n source_densities = []\n if settings.USE_MASK:\n source_masks = []\n else:\n source_masks = None\n for j in range(len(domain_insts)):\n if j != i or ORIGINAL:\n source_insts.append(torch.from_numpy(batch_insts[j] / 255.0).float().to(device)) \n source_densities.append(torch.from_numpy(batch_densities[j]).float().to(device))\n if settings.USE_MASK:\n source_masks.append(torch.from_numpy(batch_masks[j]).float().to(device))\n \n if settings.USE_MASK:\n if settings.TEMPORAL:\n dim = (2,3,4)\n else:\n dim = (1,2,3)\n source_counts.append(torch.sum(source_densities[-1]*source_masks[-1], dim=dim))\n else:\n source_counts.append(torch.from_numpy(batch_counts[j]).float().to(device))\n \n if ORIGINAL:\n source_insts = source_insts[0]\n source_counts = source_counts[0]\n source_densities = source_densities[0]\n if settings.USE_MASK:\n source_masks = source_masks[0]\n else:\n tinputs = torch.from_numpy(batch_insts[i] / 255.0).float().to(device) \n if settings.USE_MASK:\n tmask = torch.from_numpy(batch_masks[i]).float().to(device) \n else:\n tmask = None\n slabels = []\n tlabels = []\n for k in range(num_domains):\n slabels.append(torch.ones(len(source_insts[k]), requires_grad=False).type(torch.LongTensor).to(device))\n tlabels.append(torch.zeros(len(source_insts[k]), requires_grad=False).type(torch.LongTensor).to(device))\n \n if t==0:\n counts_register[i].append(source_counts)\n \n optimizer.zero_grad()\n\n \n if ORIGINAL:\n model_densities, model_counts = mdan(source_insts, source_masks)\n else:\n model_densities, model_counts, sdomains, tdomains = mdan(source_insts, tinputs, source_masks, tmask)\n \n if settings.TEMPORAL: \n if ORIGINAL:\n N, T, C, H, W = model_densities.shape\n else:\n N, T, C, H, W = model_densities[0].shape\n size = N*T\n else:\n if ORIGINAL:\n N, C, H, W = model_densities.shape\n else:\n N, C, H, W = model_densities[0].shape\n size = N\n\n if ORIGINAL:\n # Compute prediction accuracy on multiple training sources.\n density_loss = torch.sum((model_densities - source_densities)**2)/size\n count_loss = torch.sum((model_counts - source_counts)**2)/size\n loss = density_loss + lambda_*count_loss\n no_batches += 1\n running_loss += loss.item()\n running_count_loss += count_loss.item()\n running_density_loss += density_loss.item()\n else:\n \n # Compute prediction accuracy on multiple training sources.\n density_losses = torch.stack([(torch.sum((model_densities[j] - source_densities[j])**2)/size) for j in range(num_domains)])\n count_losses = torch.stack([(torch.sum((model_counts[j] - source_counts[j])**2)/size) for j in range(num_domains)])\n losses = density_losses + lambda_*count_losses\n domain_losses = torch.stack([F.nll_loss(sdomains[j], slabels[j]) +\n F.nll_loss(tdomains[j], tlabels[j]) for j in range(num_domains)])\n # Different final loss function depending on different training modes.\n if mode == 
\"maxmin\":\n loss = torch.max(losses) + mu * torch.min(domain_losses)\n elif mode == \"dynamic\":\n loss = torch.log(torch.sum(torch.exp(gamma * (losses + mu * domain_losses)))) / gamma\n elif mode == 'average':\n loss = torch.mean(losses+mu*domain_losses)\n else:\n raise ValueError(\"No support for the training mode on madnNet: {}.\".format(mode))\n no_batches += 1\n running_loss += loss.item()\n running_count_loss += count_losses.mean().item()\n running_density_loss += density_losses.mean().item()\n running_domain_losses += domain_losses.detach().cpu().numpy()\n \n loss.backward()\n optimizer.step()\n \n logger.info(\"Iteration {}, loss = {}, mean count loss = {}, mean density loss = {}\".format(t, running_loss, running_count_loss / no_batches, running_density_loss / no_batches))\n logger.info(\"Mean domain losses = \" + str(running_domain_losses / no_batches))\n if args_dict['use_visdom']:\n # plot the losses\n loss_plt.plot('global loss ('+str(domain_id)+')', 'train', 'MSE', t, running_loss)\n loss_plt.plot('density loss ('+str(domain_id)+')', 'train', 'MSE', t, running_density_loss / no_batches)\n loss_plt.plot('count loss ('+str(domain_id)+')', 'train', 'MSE', t, running_count_loss / no_batches)\n loss_plt.plot('domain loss ('+str(domain_id)+')', 'train', '', t, np.mean(running_domain_losses) / no_batches)\n\n \n \n mse_density, mse_count, mae_count = utils.utils.eval_mdan(mdan, val_insts, batch_size, device, settings.PREFIX_DATA, settings.PREFIX_DENSITIES, data)\n\n logger.info(\"Validation, Count MSE: {}, Density MSE: {}, Count MAE: {}\".\n format(mse_count, mse_density, mae_count))\n \n if args_dict['use_visdom']:\n # plot the losses\n loss_plt.plot('count error ('+str(domain_id)+')', 'valid', 'MAE', t, mae_count)\n loss_plt.plot('density loss ('+str(domain_id)+')', 'valid', 'MSE', t, mse_density)\n loss_plt.plot('count loss ('+str(domain_id)+')', 'valid', 'MSE', t, mse_count)\n\n if mse_density < results['best val density (mse)'][domain_id]:\n results['best val density (mse)'][domain_id] = mse_density\n\n if mse_count < results['best val count (mse)'][domain_id]:\n results['best val count (mse)'][domain_id] = mse_count\n best_mdan.load_state_dict(mdan.state_dict())\n\n if mae_count < results['best val count (mae)'][domain_id]:\n results['best val count (mae)'][domain_id] = mae_count\n \n total_mse_density, total_mse_count, total_mae_count = utils.utils.eval_mdan(mdan, data_insts_no_aug[i], batch_size, device, settings.PREFIX_DATA, settings.PREFIX_DENSITIES, data)\n\n results['total density (mse)'][domain_id] = total_mse_density\n results['total count (mse)'][domain_id] = total_mse_count\n results['total count (mae)'][domain_id] = total_mae_count\n\n logger.info(\"All Target Data, Count MSE: {}, Density MSE: {}, Count MAE: {}\".\n format(total_mse_count, total_mse_density, total_mae_count))\n\n final_mse_density, final_mse_count, final_mae_count = utils.utils.eval_mdan(best_mdan, test_insts, batch_size, device, settings.PREFIX_DATA, settings.PREFIX_DENSITIES, data)\n\n results['test density (mse)'][domain_id] = final_mse_density\n results['test count (mse)'][domain_id] = final_mse_count\n results['test count (mae)'][domain_id] = final_mae_count\n \n logger.info(\"Testing, Count MSE: {}, Density MSE: {}, Count MAE: {}\".\n format(final_mse_count, final_mse_density, final_mae_count))\n\n\n\nlogger.info(\"Prediction accuracy with multiple source domain adaptation using madnNet: \")\nif settings.DATASET == 'webcamt':\n args_dict['webcamt_domains'] = 
settings.WEBCAMT_DOMAINS\nresults['args'] = args_dict\nlogger.info(results)\n\nif args.dataset == 'webcamt':\n directory = settings.WEBCAMT_PREPROCESSED_DIRECTORY.lower()\nelif args.dataset == 'ucspeds':\n directory = settings.UCSPEDS_PREPROCESSED_DIRECTORY.lower()\n\nresults_directory = os.path.join(settings.DATASET_DIRECTORY, '../Results/'+directory)\nif not os.path.exists(results_directory):\n os.makedirs(results_directory)\n\npickle.dump(results, open(os.path.join(results_directory, results_file+'.npy'), 'wb+'))","repo_name":"francis-andrade/domain-adaptation-lstm","sub_path":"src/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":17907,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"24805866537","text":"\r\nimport pygame, sys \r\nfrom pygame.locals import * \r\nimport random, time \r\n \r\n#Initialzing \r\npygame.init() \r\n \r\n#Setting up FPS \r\nFPS = 60 \r\nFramePerSec = pygame.time.Clock() \r\n \r\n#Creating colors \r\nBLACK = (0, 0, 0) \r\nWHITE = (255, 255, 255) \r\nRED = (255, 0, 0) \r\n \r\n#Other Variables for use in the program \r\nSCREEN_WIDTH = 400 \r\nSCREEN_HEIGHT = 600 \r\nSPEED = 5 \r\nSCORE = 0 \r\nSCORE_COINS = 0 \r\nN = 10 \r\n#Setting up Fonts \r\nfont = pygame.font.SysFont(\"Verdana\", 60) \r\nfont_small = pygame.font.SysFont(\"Verdana\", 20) \r\ngame_over = font.render(\"Game Over\", True, BLACK) \r\n \r\nbackground = pygame.image.load(\"racer_images/AnimatedStreet.png\") \r\n \r\n#Create a white screen \r\nDISPLAYSURF = pygame.display.set_mode((400,600)) \r\nDISPLAYSURF.fill(WHITE) \r\npygame.display.set_caption(\"Game\") \r\n \r\n \r\nclass Enemy(pygame.sprite.Sprite): \r\n def __init__(self): \r\n super().__init__() \r\n self.image = pygame.image.load(\"racer_images/Enemy.png\") \r\n self.rect = self.image.get_rect() \r\n self.rect.center = (random.randint(40,SCREEN_WIDTH-40), 0) \r\n \r\n def move(self): \r\n global SCORE \r\n self.rect.move_ip(0,SPEED) \r\n if (self.rect.bottom > 600): \r\n SCORE += 1 \r\n self.rect.top = 0 \r\n self.rect.center = (random.randint(40, SCREEN_WIDTH - 40), 0) \r\n \r\n \r\nclass Player(pygame.sprite.Sprite): \r\n def __init__(self): \r\n super().__init__() \r\n self.image = pygame.image.load(\"racer_images/Player.png\") \r\n self.rect = self.image.get_rect() \r\n self.rect.center = (160, 520) \r\n \r\n def move(self): \r\n pressed_keys = pygame.key.get_pressed() \r\n \r\n if self.rect.left > 0: \r\n if pressed_keys[K_LEFT]: \r\n self.rect.move_ip(-5, 0) \r\n if self.rect.right < SCREEN_WIDTH: \r\n if pressed_keys[K_RIGHT]: \r\n self.rect.move_ip(5, 0) \r\n if pressed_keys[K_UP]: \r\n if self.rect.bottom > 0: \r\n self.rect.move_ip(0, -5) \r\n if pressed_keys[K_DOWN]: \r\n if self.rect.top < SCREEN_HEIGHT: \r\n self.rect.move_ip(0, 5) \r\n \r\nclass Coin(pygame.sprite.Sprite): \r\n def __init__(self): \r\n super().__init__() \r\n self.image = pygame.transform.scale(pygame.image.load('racer_images/coin.png'), (25,24)) \r\n self.rect = self.image.get_rect() \r\n self.rect.center = (random.randint(40,SCREEN_WIDTH-40), random.randint(90,SCREEN_HEIGHT-90)) \r\n \r\n def update(self): \r\n self.rect.center = (random.randint(40,SCREEN_WIDTH-40), random.randint(90,SCREEN_HEIGHT-90)) \r\n \r\nclass Coin2(pygame.sprite.Sprite): \r\n def __init__(self): \r\n super().__init__() \r\n self.image = pygame.transform.scale(pygame.image.load('racer_images/PINK.png'), (25,24)) \r\n self.rect = self.image.get_rect() \r\n self.rect.center = 
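# Note on the "dynamic" mode in the run.py record above: the expression
# log(sum(exp(gamma * L_k))) / gamma is a soft maximum of the per-domain
# losses L_k, approaching max(L_k) as gamma grows. A quick numeric check
# (the loss values below are made up for illustration):
import numpy as np

losses = np.array([0.5, 1.0, 2.0])
for gamma in (1.0, 10.0, 100.0):
    print(gamma, np.log(np.sum(np.exp(gamma * losses))) / gamma)
# gamma=1 gives ~2.46; gamma=100 gives ~2.0, i.e. the hard maximum that the
# "maxmin" mode takes directly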
(random.randint(40,SCREEN_WIDTH-40), random.randint(90,SCREEN_HEIGHT-90)) \r\n \r\n def update(self): \r\n self.rect.center = (random.randint(40,SCREEN_WIDTH-40), random.randint(90,SCREEN_HEIGHT-90)) \r\n \r\n \r\n#Setting up Sprites \r\nP1 = Player() \r\nE1 = Enemy() \r\nC1 = Coin() \r\nC2 = Coin2() \r\n \r\n#Creating Sprites Groups \r\nenemies = pygame.sprite.Group() \r\nenemies.add(E1) \r\n \r\ncoins = pygame.sprite.Group() \r\ncoins.add(C1) \r\n \r\ncoins2 = pygame.sprite.Group() \r\ncoins2.add(C2) \r\n \r\nall_sprites = pygame.sprite.Group() \r\nall_sprites.add(P1) \r\nall_sprites.add(E1) \r\nall_sprites.add(C1) \r\nall_sprites.add(C2) \r\n \r\n#Adding a new User event \r\nINC_SPEED = pygame.USEREVENT + 1 \r\npygame.time.set_timer(INC_SPEED, 1000) \r\n \r\n#Game Loop \r\nwhile True: \r\n \r\n #Cycles through all events occuring \r\n for event in pygame.event.get(): \r\n if event.type == INC_SPEED: \r\n SPEED += 0.5 \r\n if event.type == QUIT: \r\n pygame.quit() \r\n sys.exit() \r\n \r\n \r\n DISPLAYSURF.blit(background, (0,0)) \r\n \r\n #Displaying points on the screen for each passage of the enemy past the player \r\n scores = font_small.render(str(SCORE), True, BLACK) \r\n DISPLAYSURF.blit(scores, (10,10)) \r\n \r\n #Withdrawal of points for coins \r\n scores2 = font_small.render(str(SCORE_COINS), True, BLACK) \r\n DISPLAYSURF.blit(scores2, (360,10)) \r\n \r\n P1.move() \r\n E1.move() \r\n \r\n #Moves and Re-draws all Sprites \r\n for entity in all_sprites: \r\n DISPLAYSURF.blit(entity.image, entity.rect) \r\n \r\n \r\n #To be run if collision occurs between Player and Enemy \r\n if pygame.sprite.spritecollideany(P1, enemies): \r\n pygame.mixer.Sound('crash.wav').play() \r\n time.sleep(1) \r\n \r\n DISPLAYSURF.fill(RED) \r\n DISPLAYSURF.blit(game_over, (30,250)) \r\n \r\n pygame.display.update() \r\n for entity in all_sprites: \r\n entity.kill() \r\n time.sleep(2) \r\n pygame.quit() \r\n sys.exit() \r\n \r\n #To be run if collision occurs between Player and Coin \r\n if pygame.sprite.spritecollideany(P1, coins): \r\n SCORE_COINS += 1 \r\n for entity in coins: \r\n entity.update() \r\n if pygame.sprite.spritecollideany(P1, coins2): \r\n SCORE_COINS += 10 \r\n for entity in coins2: \r\n entity.update() \r\n \r\n #When the number of coins exceeds N the enemy's speed will increase \r\n if SCORE_COINS >= 50: \r\n SPEED = 15 \r\n \r\n pygame.display.update() \r\n FramePerSec.tick(FPS)","repo_name":"zhonniks/PP2","sub_path":"lab9/racer9.py","file_name":"racer9.py","file_ext":"py","file_size_in_byte":5581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"26919316149","text":"from __future__ import print_function\nfrom imutils.video.pivideostream import PiVideoStream\nfrom imutils.video import FPS\nfrom picamera.array import PiRGBArray\nfrom picamera import PiCamera\nimport argparse\nimport imutils\nimport cv2\nimport numpy as np\nimport time\nimport serial\nimport io\n\nstop_detect = cv2.CascadeClassifier('./stopsign_classifier.xml')\nyield_detect = cv2.CascadeClassifier('./yieldsign12Stages.xml')\nspeedlimits_detect = cv2.CascadeClassifier('./Speedlimit_24_15Stages.xml')\nfont = cv2.FONT_HERSHEY_SIMPLEX\nframes = 0\n\nvs = PiVideoStream().start()\ntime.sleep(2.0)\nfps = FPS().start()\n\nser = serial.Serial(\nport = '/dev/serial0', \\\nbaudrate = 115200, \\\nbytesize = serial.EIGHTBITS, \\\ntimeout = 0)\n\nprint(\"Sign Recognition Started\")\nprev = time.time()\nstart = time.time()\nwhile (time.time() - start) < 10:\n img = 
vs.read()\n #print(len(img), len(img[0]))\n #img = imutils.resize(img, width=150)\n #print(len(img), len(img[0]))\n #print(len(img[0]), len(img))\n img = img[:, (len(img[0])//2):]\n #print(len(img[0]), len(img))\n #ret, img =cam.read()\n frames = frames + 1\n gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n yields = yield_detect.detectMultiScale(gray,1.2,4)\n stops = stop_detect.detectMultiScale(gray,1.2,5)\n signs = speedlimits_detect.detectMultiScale(gray,1.1,3)\n\n '''\n for (x,y,h,w) in signs:\n cv2.rectangle(img,(x,y),(x+w,y+h),(0,0,255),2)\n cv2.putText(img,\"speed limit\", (x,y+h),font, 1,(255,0,0),2)\n print(\"speed limit sign\")\n '''\n\n for (x,y,h,w) in stops:\n print(\"stop sign\")\n #cv2.rectangle(img,(x,y),(x+w,y+h),(0,0,255),2)\n #cv2.putText(img,\"stop\", (x,y+h),font, 1,(255,0,0),2)\n '''\n curr = time.time()\n if (curr - prevStop) >= 3:\n print(\"sending stop\")\n prevStop = curr\n ser.write(bytes(b'1'))\n '''\n\n '''\n for (x,y,h,w) in yields:\n cv2.rectangle(img,(x,y),(x+w,y+h),(0,0,255),2) \n cv2.putText(img,\"yield\", (x,y+h),font, 1,(255,0,0),2)\n print(\"yield sign\")\n '''\n\n curr = time.time()\n\n if len(stops) > 0:\n if (curr - prev) >= 3:\n print(\"sending stop\")\n prev = curr\n ser.write('stop\\n'.encode('utf-8'))\n '''\n elif len(signs) > 0:\n ser.write('speed_limit\\n'.encode('utf-8'))\n elif len(yields) > 0:\n ser.write('yield\\n'.encode('utf-8'))\n '''\n\n #time.sleep(0.1)\n\n #cv2.imshow('im',img)\n if (cv2.waitKey(1) & 0xFF == ord('q')):\n print(\"q pressed\")\n break\nend = time.time()\n\nprint(\"Time recorded\", end - start)\nprint(\"FPS\", frames/(end-start))\n#cam.release()\nser.close()\ncv2.destroyAllWindows()\nvs.stop()\n","repo_name":"gateszeng/ucsdrobocar07","sub_path":"sign_detector.py","file_name":"sign_detector.py","file_ext":"py","file_size_in_byte":2704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"19532931220","text":"from django.shortcuts import render\nfrom . 
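# For the sign-detector record above: the positional arguments passed to
# detectMultiScale are scaleFactor and minNeighbors, so the calls read as
# stop_detect.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5)
# A larger scaleFactor coarsens the image pyramid (faster, but may miss
# signs); a larger minNeighbors requires more overlapping detections before
# reporting a hit (fewer false positives).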
models import Dosen, Mahasiswa, Tendik\nfrom pascasarjana.forms import FormDosen\nfrom pascasarjana.forms import FormTendik\nfrom pascasarjana.forms import FormMahasiswa\n\n# Create your views here.\ndef prodi8(request):\n judulpascasarjana = [\"Doktor Pendidikan\", \"Doktor Ilmu Akuntansi\", \"Magister Ilmu Hukum\", \"Magister Ilmu Pertanian\", \"Magister Administrasi Publik\", \"Magister Akuntansi\", \"Magister Ilmu Komunikasi\", \"Magister Manajemen\", \"Magister Teknik Kimia\", \"Pendidikan Bahasa Indonesia\", \"Pendidikan Bahasa Inggris\", \"Pendidikan Matematika\", \"Teknologi Pendidikan\"]\n dosen = Dosen.objects.all()\n tendik = Tendik.objects.all()\n mahasiswa = Mahasiswa.objects.all()\n form_dosen = FormDosen()\n form_tendik = FormTendik()\n form_mahasiswa = FormMahasiswa()\n konteks = {\n 'titlepascasarjana' : judulpascasarjana,\n 'dataDosen': dosen,\n 'dataTendik': tendik,\n 'dataMahasiswa': mahasiswa,\n 'formDosen' : form_dosen,\n 'formTendik' : form_tendik,\n 'formMahasiswa' : form_mahasiswa,\n }\n return render(request, 'pascasarjana/index.html', konteks)\n\n","repo_name":"rizkyanugrahwati03/staticfiles27092022","sub_path":"pascasarjana/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"34868903597","text":"'''\n@Description:\n@Version: 1.0\n@Author: Demoon\n@Date: 1970-01-01 08:00:00\nLastEditors: Please set LastEditors\nLastEditTime: 2021-01-09 16:03:26\n'''\n# Base modules\nimport sys\nimport time\nimport os\nfrom DeeplTrans import DeeplTrans\n\nif __name__ == '__main__':\n # Defined as a global variable for convenient use by other modules\n global RUN_ENV\n try:\n RUN_ENV = sys.argv[1]\n except Exception:\n pass\n depts = DeeplTrans(True)\n org_path = './files/org/'\n out_path = './files/output/'\n for root, dirs, files in os.walk(org_path):\n for fname in files:\n org_file = os.path.join(root, fname)\n out_file = os.path.join(out_path, fname)\n with open(org_file, 'r', encoding='utf-8') as f:\n with open(out_file, 'a', encoding='utf-8') as nf:\n line = f.readline()\n while line:\n try:\n res = depts.runTranslate(line)\n except Exception:\n os.system('wideband_link_refresh.bat')\n time.sleep(3) # wait 3 seconds in case the link is not up yet\n res = depts.runTranslate(line)\n nf.writelines(res + \"\\n\")\n line = f.readline()\n os.remove(org_file)\n","repo_name":"UnDemoon/DeeplByPython","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"43336092930","text":"import asyncio\n\nasync def worker1(queue):\n print('waiting for an item from the queue')\n a = await queue.get()\n print(a)\n\nasync def worker2(queue):\n await asyncio.sleep(2)\n print('put to queue:')\n await queue.put(3)\n\nasync def main():\n queue = asyncio.Queue()\n task1 = asyncio.create_task(worker1(queue))\n task2 = asyncio.create_task(worker2(queue))\n await task1\n await task2\n\nasyncio.run(main())\n","repo_name":"Mr-Slow/python-asyncio","sub_path":"async_queue.py","file_name":"async_queue.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"3315222579","text":"from __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\nfrom future import standard_library\nstandard_library.install_aliases()\nimport requests\nimport json\nfrom pymongo import MongoClient\nfrom pprint import pformat\n\n\ndef get_mongo_client():\n \"\"\"Return 
MongoDB client.\"\"\"\n\n return MongoClient('mongodb://localhost/')\n\n\ndef get_db():\n \"\"\"Return figaro MongoDB database.\"\"\"\n\n return get_mongo_client()['figaro']\n\n\ndef get_execute_nodes():\n \"\"\"Return the names of all execute nodes.\"\"\"\n\n query = {\n \"size\": 0,\n \"facets\": {\n \"job.job_info.execute_node\": {\n \"terms\": {\n \"field\": \"job.job_info.execute_node\",\n \"all_terms\": True\n }\n }\n }\n }\n es_url = app.config['ES_URL']\n index = app.config['JOB_STATUS_INDEX']\n r = requests.post('%s/%s/_search' %\n (es_url, index), data=json.dumps(query))\n r.raise_for_status()\n result = r.json()\n # app.logger.debug(pformat(result))\n total = len(result['facets']['job.job_info.execute_node']['terms'])\n nodes = []\n for terms in result['facets']['job.job_info.execute_node']['terms']:\n nodes.append(terms['term'])\n nodes.sort()\n return nodes\n","repo_name":"hysds/figaro","sub_path":"figaro/lib/job_utils.py","file_name":"job_utils.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"26135407101","text":"#!/usr/bin/python\n\n'''Grades mechanics on LIS 351 HTML/CSS assignments:\n\t\t* checks existence of >=3 HTML files, one CSS file, >=1 image file\n\t\t* checks \n\t\t* validates HTML files\n\t\t\t** checks for